Dataset columns (type and observed value range):

repo_name   stringlengths   5 - 100
path        stringlengths   4 - 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 - 947k
score       float64         0 - 0.34
prefix      stringlengths   0 - 8.16k
middle      stringlengths   3 - 512
suffix      stringlengths   0 - 8.17k
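Each row below carries one source file split into prefix, middle, and suffix fields; in this dump the three fields were concatenated, and the split points are visible where a line of code breaks mid-token. A minimal sketch of how such a row could be reassembled, or arranged as a fill-in-the-middle training prompt, follows; the sentinel strings are hypothetical placeholders, not part of this dataset, since real FIM tokenizers define their own special tokens.

# Minimal sketch, assuming each row is a dict with the columns listed above.
def reassemble(row):
    """Recover the original file text from one dataset row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row, pre="<|fim_prefix|>", suf="<|fim_suffix|>", mid="<|fim_middle|>"):
    """Arrange a row in prefix-suffix-middle order, the usual FIM layout:
    the model sees prefix and suffix, then learns to produce the middle."""
    return pre + row["prefix"] + suf + row["suffix"] + mid + row["middle"]

example = {"prefix": "def add(a, b):\n    ", "middle": "return a + b", "suffix": "\n"}
assert reassemble(example) == "def add(a, b):\n    return a + b\n"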
pfalcon/ScratchABlock
tests_unit/test_liveness.py
Python
gpl-3.0
2,105
0.003325
import graph
import dot
from core import *
import dataflow


def make_inst(g, addr, dest, op, *args):
    def make_arg(a):
        if a is None:
            return None
        if isinstance(a, int):
            return VALUE(a)
        if isinstance(a, str):
            return REG(a)
        return a

    b = BBlock(addr)
    args = [make_arg(a) for a in args]
    b.add(Inst(make_arg(dest), op, args, addr))
    g.add_node(addr, val=b)


def test_nielson_2_1_4():
    g = graph.Graph()
    make_inst(g, 1, "x", "=", 2)
    make_inst(g, 2, "y", "=", 4)
    make_inst(g, 3, "x", "=", 1)
    make_inst(g, 4, None, "if", COND(EXPR(">", REG("x"), REG("y"))))
    make_inst(g, 5, "z", "=", REG("y"))
    make_inst(g, 6, "z", "*", REG("y"), REG("y"))
    make_inst(g, 7, "x", "=", REG("z"))
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(3, 4)
    g.add_edge(4, 5)
    g.add_edge(4, 6)
    g.add_edge(5, 7)
    g.add_edge(6, 7)
    #dot.dot(g)
    #ana = dataflow.LiveVarAnalysis(g)
    #ana.init()
    #g.print_nodes()
    #print("===")

    ana = dataflow.LiveVarAnalysis(g)
    ana.solve()
    #g.print_nodes()

    LV_entry = {
        1: set(),
        2: set(),
        3: {REG("y")},
        4: {REG("x"), REG("y")},
        5: {REG("y")},
        6: {REG("y")},
        7: {REG("z")},
    }
    LV_exit = {
        1: set(),
        2: {REG("y")},
        3: {REG("x"), REG("y")},
        4: {REG("y")},
        5: {REG("z")},
        6: {REG("z")},
        7: set(),
    }
    GEN_LV = {
        1: set(),
        2: set(),
        3: set(),
        4: {REG("x"), REG("y")},
        5: {REG("y")},
        6: {REG("y")},
        7: {REG("z")},
    }
    KILL_LV = {
        1: {REG("x")},
        2: {REG("y")},
        3: {REG("x")},
        4: set(),
        5: {REG("z")},
        6: {REG("z")},
        7: {REG("x")},
    }

    for i, info in g.iter_sorted_nodes():
        assert info["live_gen"] == GEN_LV[i]
        assert info["live_kill"] == KILL_LV[i]
        assert info["live_in"] == LV_entry[i], (info["live_in"], LV_entry[i])
        assert info["live_out"] == LV_exit[i]
raspearsy/bme590hrm
test_hr.py
Python
mit
2,111
0.000947
from ecgmeasure import ECGMeasure
import pandas as pd
import numpy as np

# need to test what happens when have too little data to create a chunk
# need to throw an exception if have too little data


def get_raw_data():
    """.. function:: get_raw_data()

    Creates dataframe with raw data.
    """
    times = [x*0.1 for x in range(0, 10*50)]
    voltages = []
    for x in range(0, 10):
        for ii in range(0, 25+1):
            voltages.append(ii)
        for jj in range(24, 0, -1):
            voltages.append(jj)
    raw_data = pd.DataFrame({'time': times, 'voltage': voltages})
    return raw_data


def test_thresholdhr_unchanging():
    """.. function:: test_thresholdhr_unchanging()

    Test that threshold is the same for all chunks of the raw data.
    """
    thr = []
    for x in range(0, 10):
        thr.append(0.9*25)
    thresholds = np.array(thr)
    chunk = 50
    num_chunks = 10
    biomeasure = ECGMeasure(file_bool=True, argument="test_hr.csv")
    # biomeasure.__hr_rawdata = get_raw_data()
    # print(biomeasure.__hr_rawdata)
    biomeasure.thresholdhr()
    [t, c, n] = biomeasure.data
    t_list = t.values.T.tolist()[0]
    assert (t_list == thresholds).all()
    assert c == chunk
    assert n == num_chunks


def get_test_hr1():
    """.. function:: get_test_hr1()

    Adds heartrate information to dataframe.
    """
    initial_messages = []
    hrs = []
    for ii in range(0, 10):
        hrs.append(1/5*60)
        initial_messages.append('Bradycardia Detected')
    test_hr1 = pd.DataFrame({'HeartRate': hrs, 'B/T': initial_messages,
                             'time': list(range(0, 50, 5))})
    return test_hr1


def test_hrdetector():
    """.. function:: test_hrdetector()

    Test that hrdetector() correctly detects brady/tachycardia.
    """
    biomeasure = ECGMeasure(file_bool=True, argument="test_hr.csv")
    # biomeasure.__raw_data = get_raw_data()
    test_hr1 = get_test_hr1()
    biomeasure.hrdetector()
    biomeasure.detect_rhythm()
    assert (biomeasure.data['HeartRate'] == test_hr1['HeartRate']).all()
    assert (biomeasure.data['B/T'] == test_hr1['B/T']).all()
respondcreate/django-versatileimagefield
tests/post_processor/models.py
Python
mit
559
0
from django.db import models

from versatileimagefield.fields import VersatileImageField
from versatileimagefield.placeholder import OnStoragePlaceholderImage


class VersatileImagePostProcessorTestModel(models.Model):
    """A model for testing VersatileImageFields."""
    image = VersatileImageField(
        upload_to='./',
        blank=True,
        placeholder_image=OnStoragePlaceholderImage(
            path='on-storage-placeholder/placeholder.png'
        )
    )

    class Meta:
        verbose_name = 'foo'
        verbose_name_plural = 'foos'
DolphinDream/sverchok
tests/ui_tests.py
Python
gpl-3.0
2,714
0.004422
from os import walk
from os.path import basename, splitext, dirname, join, exists
from glob import glob
import importlib
from inspect import getmembers, isclass

import sverchok
from sverchok.utils.testing import *
from sverchok.utils.logging import debug, info, error
from sverchok.node_tree import SverchCustomTreeNode


class UiTests(SverchokTestCase):

    def test_all_nodes_have_icons(self):
        def has_icon(node_class):
            has_sv_icon = hasattr(node_class, "sv_icon")
            has_bl_icon = hasattr(node_class, "bl_icon") and node_class.bl_icon and node_class.bl_icon != 'OUTLINER_OB_EMPTY'
            #debug("Icon: %s: BL %s, SV %s", node_class.__name__, getattr(node_class, 'bl_icon', None), getattr(node_class, 'sv_icon', None))
            return has_sv_icon or has_bl_icon

        ignore_list = [
            'SvIterationNode',
            'SvExMinimalScalarFieldNode',
            'SvExScalarFieldGraphNode',
            'SvMeshSurfaceFieldNode',
            'SvExMeshNormalFieldNode',
            'SvExMinimalVectorFieldNode',
            'SvSolidCenterOfMassNode',
            'SvIsSolidClosedNode',
            'SvRefineSolidNode',
            'SvSolidValidateNode'
        ]

        sv_init = sverchok.__file__
        nodes_dir = join(dirname(sv_init), "nodes")

        def check_category(directory):
            category = basename(directory)
            from sverchok.node_tree import SverchCustomTreeNode
            for py_path in glob(join(directory, "*.py")):
                py_file = basename(py_path)
                py_name, ext = splitext(py_file)
                module = importlib.import_module(f"sverchok.nodes.{category}.{py_name}")
                for node_class_name, node_class in getmembers(module, isclass):
                    if node_class.__module__ != module.__name__:
                        continue
                    if node_class_name in ignore_list:
                        continue
                    debug("Check: %s: %s: %s", node_class, node_class.__bases__, SverchCustomTreeNode in node_class.__bases__)
                    if SverchCustomTreeNode in node_class.mro():
                        with self.subTest(node = node_class_name):
                            if not has_icon(node_class):
                                self.fail(f"Node <{node_class_name}> does not have icon!")

        for directory, subdirs, fnames in walk(nodes_dir):
            dir_name = basename(directory)
            if dir_name == "nodes":
                continue
            with self.subTest(directory=dir_name):
                check_category(directory)
MasLinoma/test
kyzs/kyzs/pipelines.py
Python
gpl-2.0
258
0
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class KyzsPipeline(object):
    def process_item(self, item, spider):
        return item
ATT-JBO/RPICameraRemote
RPICamera/RPICamera/RPICameraRemote.py
Python
mit
7,821
0.009973
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#important: before running this demo, make certain that you import the library
#'paho.mqtt.client' into python (https://pypi.python.org/pypi/paho-mqtt)
#also make certain that ATT_IOT is in the same directory as this script.

import traceback        # for logging exceptions
import logging
logging.getLogger().setLevel(logging.INFO)  #before doing anything else, set the desired logging level, so all modules log correctly.
from ConfigParser import *

import RPi.GPIO as GPIO  #provides pin support
import ATT_IOT as IOT    #provide cloud support
from time import sleep   #pause the app
import picamera
import cameraStreamer
import sys
import datetime          # for generating a unique file name

ConfigName = 'rpicamera.config'

hasLISIPAROI = False
LISIPAROIPin = 4

streamer = None
camera = None

PreviewId = 1        # turn on/off preview on the stream server
RecordId = 2         # turn on/off recording on disk
StreamServerId = 3   # assign the destination to stream the video to.
ToggleLISIPAROIId = 4
PictureId = 5

_isPreview = False
_isRecording = False


def tryLoadConfig():
    'load the config from file'
    global hasLISIPAROI, LISIPAROIPin
    c = ConfigParser()
    if c.read(ConfigName):
        #set up the ATT internet of things platform
        IOT.DeviceId = c.get('cloud', 'deviceId')
        IOT.ClientId = c.get('cloud', 'clientId')
        IOT.ClientKey = c.get('cloud', 'clientKey')
        hasLISIPAROI = bool(c.get('camera', 'has LISIPAROI'))
        logging.info("has LISIPAROI:" + str(hasLISIPAROI))
        if hasLISIPAROI:
            LISIPAROIPin = int(c.get('camera', 'LISIPAROI pin'))
            logging.info("LISIPAROI pin:" + str(LISIPAROIPin))
        return True
    else:
        return False


def setupCamera():
    'create the camera responsible for recording video and streaming object responsible for sending it to the server.'
    global streamer, camera
    camera = picamera.PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 30
    streamer = cameraStreamer.CameraStreamer(camera)


def setBacklight(value):
    '''turn on/off the backlight
    value: string ('true' or 'false')
    returns: true when input was succesfully processed, otherwise false
    '''
    if value == "true":
        GPIO.output(LISIPAROIPin, GPIO.HIGH)
    elif value == "false":
        GPIO.output(LISIPAROIPin, GPIO.LOW)
    else:
        print("unknown value: " + value)
    IOT.send(value, ToggleLISIPAROIId)  #provide feedback to the cloud that the operation was succesful


def setPreview(value):
    global _isPreview  # module-level preview state, shared with takePicture()
    if _isRecording:
        print("recording not allowed during preview, shutting down recording.")
        setRecord(False)
    if value == "true":
        _isPreview = True
        streamer.start_preview()
    elif value == "false":
        _isPreview = False
        streamer.stop_preview()
    else:
        print("unknown value: " + value)
    IOT.send(value, PreviewId)  #provide feedback to the cloud that the operation was succesful


def setRecord(value):
    global _isRecording  # module-level recording state, shared with takePicture()
    if _isPreview:
        print("preview not allowed during recording, shutting down preview.")
        setPreview(False)
    if value == "true":
        camera.resolution = (1920, 1080)  #set to max resolution for record
        camera.start_recording('video' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.h264')
        _isRecording = True
    elif value == "false":
        camera.stop_recording()
        camera.resolution = (640, 480)  #reset resolution for preview
        _isRecording = False
    else:
        print("unknown value: " + value)
    IOT.send(value, RecordId)  #provide feedback to the cloud that the operation was succesful


def takePicture():
    'take a single picture, max resolution'
    prevWasPreview = _isPreview
    prevWasRecording = _isRecording
    if _isRecording:
        print("record not allowed while taking picture.")
        setRecord(False)
    if not _isPreview:
        print("preview required for taking picture.")
        setPreview(True)
        sleep(2)  # if preview was not running yet, give it some time to startup
    camera.capture('picture' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.jpg')
    if prevWasPreview:
        print("reactivating preview.")
        setPreview(True)
    elif prevWasRecording:
        print("reactivating record.")
        setRecord(True)


#callback: handles values sent from the cloudapp to the device
def on_message(id, value):
    if id.endswith(str(ToggleLISIPAROIId)) == True:
        value = value.lower()  #make certain that the value is in lower case, for 'True' vs 'true'
        setBacklight(value)
    elif id.endswith(str(PreviewId)) == True:
        value = value.lower()  #make certain that the value is in lower case, for 'True' vs 'true'
        setPreview(value)
    elif id.endswith(str(RecordId)) == True:
        value = value.lower()  #make certain that the value is in lower case, for 'True' vs 'true'
        setRecord(value)
    elif id.endswith(str(StreamServerId)) == True:
        streamer.streamServerIp = value
        IOT.send(value, StreamServerId)  #provide feedback to the cloud that the operation was succesful
    elif id.endswith(str(PictureId)) == True:
        if value.lower() == "true":
            takePicture()
    else:
        print("unknown actuator: " + id)


def setupCloud():
    IOT.on_message = on_message
    #make certain that the device & it's features are defined in the cloudapp
    IOT.connect()
    if hasLISIPAROI:
        IOT.addAsset(ToggleLISIPAROIId, "LISIPAROI", "Control the light on the camera", False, "boolean")
    IOT.addAsset(PreviewId, "Preview", "Show/close a preview on the monitor that is connected to the RPI", True, "boolean")
    IOT.addAsset(RecordId, "Record", "Start/stop recording the video stream on sd-card", True, "boolean")
    IOT.addAsset(PictureId, "Picture", "take a picture (max resolution) and store on sd-card", True, "boolean")
    IOT.addAsset(StreamServerId, "Stream server", "set the ip address of the server that manages the video", True, "string")
    # get any previously defined settings
    streamer.streamServerIp = IOT.getAssetState(StreamServerId)
    if streamer.streamServerIp:
        streamer.streamServerIp = streamer.streamServerIp['state']['value']
        logging.info("sending stream to: " + streamer.streamServerIp)
    else:
        logging.info("no stream endpoint defined")
    IOT.subscribe()  #starts the bi-directional communication
    # set current state of the device
    IOT.send("false", ToggleLISIPAROIId)
    IOT.send("false", PreviewId)
    IOT.send("false", RecordId)


tryLoadConfig()
setupCamera()  # needs to be done before setting up the cloud, cause we will get the settings from the cloud and assign them to the camera.
setupCloud()

if hasLISIPAROI:
    try:
        #setup GPIO using Board numbering
        #GPIO.setmode(GPIO.BCM)
        GPIO.setmode(GPIO.BOARD)
        #set up the pins
        GPIO.setup(LISIPAROIPin, GPIO.OUT)
    except:
        logging.error(traceback.format_exc())

#main loop: run as long as the device is turned on
while True:
    #main thread doesn't have to do much, all is handled on the thread calling the message handler (for the actuators)
    sleep(5)
mcornelio/synapse
synapse/__init__.py
Python
mit
29,118
0.033141
import sys
import os
import time
import logging
import socket
import string
import collections
import logging
import atexit

__version__ = "1.1.26"
__all__ = ['main', 'amqp']


class client_interface(object):

    def get_cell(self, key, value=None):
        """Returns the contents of the cell"""
        raise NotImplementedError("""get_cell(self, key, value=None)""")

    def set_cell(self, key, value=None):
        """Set the contents of the cell"""
        raise NotImplementedError("""set_cell(self, key, value=None)""")

    def get_prop(self, key, prop, value=None):
        """Returns the contents of the cell"""
        raise NotImplementedError("""get_prop(self, key, prop, value=None)""")

    def set_prop(self, key, prop, value=None):
        """Set the contents of the cell"""
        raise NotImplementedError("""set_prop(self, key, prop, value=None)""")


def emergency_exit(status=1, msg=None, ):
    """Force an exit"""
    if msg:
        print msg
    os._exit(status)


def trace_log_info(f, *args, **kw):
    """Trace function invocation"""
    logger.info("calling %s with args %s, %s" % (f.__name__, args, kw))
    return f(*args, **kw)


class base_dictionary(collections.MutableMapping):
    """A dictionary that applies an arbitrary key-altering function
    before accessing the keys"""

    def __init__(self, *args, **kwargs):
        self.store = dict()
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        return self.store[self.__keytransform__(key)]

    def __setitem__(self, key, value):
        self.store[self.__keytransform__(key)] = value

    def __delitem__(self, key):
        del self.store[self.__keytransform__(key)]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __keytransform__(self, key):
        return key.lower()


synapse = base_dictionary()
synapse_process_id = "%s-%d" % (socket.gethostname(), os.getpid())
synapse_title = "Synapse Console Interface v" + __version__
synapse_ps1 = 'sc> '
synapse_ps2 = '.... '
synapse_prompts = {'ps1': 'sc> ', 'ps2': '.... '}
synapse_exit_prompt = "Use exit() plus Return to exit."
synapse_dict_list = []
synapse_sheets = {}
synapse_current_cell_engine_context = None
synapse_current_cell_engine = None

get_logger_file = 'synapse.log'
get_logger_level = logging.WARNING


def initialize_logger(name, file=get_logger_file, level=get_logger_level):
    # create logger with 'name'
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    if file:
        fh = logging.FileHandler(file)
        fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(level)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if file:
        fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    if file:
        logger.addHandler(fh)
    logger.addHandler(ch)
    return logger


def get_logger():
    """Returns the current logger"""
    global logger
    return logger;


class cell_dictionary(base_dictionary, client_interface):
    """synapse Dictionary with Formulas and Guards"""

    __formulas__ = base_dictionary()
    __guards__ = base_dictionary()
    __props__ = base_dictionary()
    __engine__ = None

    def set_formula(self, name, formula):
        """
        Sets the formula function for a cell.
        :param name: the name of the cell as string
        :param formula: a function that takes (key,value) where key=cell, value an optional value
        :return: None
        """
        if formula == None:
            del self.__formulas__[name]
        else:
            self.__formulas__[name] = formula

    def set_guard(self, name, guard):
        """
        Sets a guard function for a cell.
        :param name: the name of the cell as string
        :param guard: a function that takes (key,value) where key=cell, and value=value for the cell
        :return: None
        """
        if guard == None:
            del self.__guards__[name]
        else:
            self.__guards__[name] = guard

    def set_cell(self, key, value):
        """
        Set the value of a cell
        :param key: the name of the cell
        :param value: the value for the cell
        :return: the current value cell
        """
        self.__setitem__(key, value)
        return self.__getitem__(key, value)

    def get_cell(self, key, value=None):
        """
        Returns the current value of a cell
        :param key: the name of the cell as a string
        :param value: an optional value that may be passed to the cell's formula
        :return: the current value of the cell
        """
        return self.__getitem__(key, value)

    def set_prop(self, key, prop, value):
        """
        Sets a cell's named property to a value
        :param key: the name of the cell as a string
        :param prop: the name of the property as a string
        :param value: the current value of the property
        :return:
        """
        key = self.__keytransform__(key)
        if not(key in self.__props__):
            self.__props__[key] = base_dictionary()
        props = self.__props__[key]
        props[prop] = value
        return props[prop]

    def get_prop(self, key, prop):
        """
        Returns the current value of a cell's property
        :param key: the name of the cell as a string
        :param prop: the name of the property as a string
        :return: the current value of the property
        """
        key = self.__keytransform__(key)
        if not(key in self.__props__):
            self.__props__[key] = base_dictionary()
        props = self.__props__[key]
        if (prop in props):
            return props[prop]
        else:
            return None

    def get_props(self, key):
        """
        Returns all the properties of a cell
        :param key: the name of the cell as string
        :param prop:
        :return: all the properties as a string
        """
        key = self.__keytransform__(key)
        if not(key in self.__props__):
            self.__props__[key] = base_dictionary()
        return self.__props__[key]

    def __getitem__(self, key, value=None):
        """
        Returns the value of a cell when referenced as an item
        :param key: the name of the cell as a string
        :param value: an optional value
        :return: the value of the cell
        """
        key = self.__keytransform__(key)
        if key in self.__formulas__:
            self.store[key] = self.__formulas__[key](key, value)
        if not(key in self.store):
            self.store[key] = None
        return self.store[key]

    def __setitem__(self, key, value):
        """
        Sets the value of a cell when referenced as an item
        :param key: the name of the cell as a string
        :param value: the new value for the cell
        :return: the value of the cell
        """
        if key in self.__guards__:
            self.store[key] = self.__guards__[key](key, value)
        else:
            self.store[self.__keytransform__(key)] = value

    def __delitem__(self, key):
        """
        Deletes a cell when referenced as an item
        :param key: the name of the cell as a string
        :return: None
        """
        key = self.__keytransform__(key)
        if key in self.__formulas__:
            del self.__formulas__[key]
        if key in self.__guards__:
            del self.__guards__[key]
        if not(key in self.store):
            return None
        del self.store[self.__keytransform__(key)]


def get_cell_engine(context='root'):
    """Create a new CellEngine"""
    global synapse
    lname = context.lower()
    synapse_current_cell_engine_context = lname
    if lname in synapse_sheets:
        return synapse_sheets[lname]
    synapse_current_cell_engine = synapse_sheets[lname] = cell_dictionary()
    return synapse_current_cell_engine


def wait_for_ctrlc(seconds=1):
    """
    Wait for ctrlc interrupt from the board
    :param seconds: sleep time per loop in seconds
    :return:
    """
    try:
        while True:
            time.sleep(seconds)
    except KeyboardInterrupt:
        pass


class cell_engine(object):
    """
    The Synapse Cell Engine class.
    """

    def __set(self, key, value):
        """
        Sets the value of a cell
        :param key: the name of the cell as a string
        :param value: the value for the cell
        :return: None
        """
        self.__dict__[key] = value

    def __get(self, key):
        """
        Returns the value of a cell
        :param key: the name of the cell as a string
        :return: the value of the cell
        """
        return self.__dict__[key]

    def __init__(self, cells=None):
        """
        Constructor for
brchiu/tensorflow
tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
Python
apache-2.0
20,093
0.004977
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

from absl.testing import parameterized
import numpy as np

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test


def _generate_unary_cwise_math_cases():
  # TODO(rachelim): Consolidate tests with pfor when APIs are somewhat shared.
  bitwise_cases = [("Invert", bitwise_ops.invert)]
  logical_cases = [("LogicalNot", math_ops.logical_not)]
  complex_cases = [
      ("Angle", math_ops.angle),
      ("ComplexAbs", math_ops.abs),
      ("Conj", math_ops.conj),
      ("Imag", math_ops.imag),
      ("Real", math_ops.real),
  ]
  real_cases = [
      ("Abs", math_ops.abs),
      ("Acos", math_ops.acos),
      ("Acosh", lambda x: math_ops.acosh(1 + math_ops.square(x))),
      ("Asin", math_ops.asin),
      ("Asinh", math_ops.asinh),
      ("Atan", math_ops.atan),
      ("Atanh", math_ops.atanh),
      ("BesselI0e", math_ops.bessel_i0e),
      ("BesselI1e", math_ops.bessel_i1e),
      ("Ceil", math_ops.ceil),
      ("Cos", math_ops.cos),
      ("Cosh", math_ops.cosh),
      ("Digamma", math_ops.digamma),
      ("Elu", nn.elu),
      ("Erf", math_ops.erf),
      ("Erfc", math_ops.erfc),
      ("Exp", math_ops.exp),
      ("Expm1", math_ops.expm1),
      ("Floor", math_ops.floor),
      ("Inv", math_ops.inv),
      ("IsFinite", math_ops.is_finite),
      ("IsInf", math_ops.is_inf),
      ("Lgamma", math_ops.lgamma),
      ("Log", math_ops.log),
      ("Log1p", math_ops.log1p),
      ("Neg", math_ops.negative),
      ("Reciprocal", math_ops.reciprocal),
      ("Relu", nn.relu),
      ("Relu6", nn.relu6),
      ("Rint", math_ops.rint),
      ("Round", math_ops.round),
      ("Rsqrt", math_ops.rsqrt),
      ("Selu", nn.selu),
      ("Sigmoid", math_ops.sigmoid),
      ("Sign", math_ops.sign),
      ("Sin", math_ops.sin),
      ("Sinh", math_ops.sinh),
      ("Softplus", nn.softplus),
      ("Softsign", nn.softsign),
      ("Sqrt", math_ops.sqrt),
      ("Square", math_ops.square),
      ("Tan", math_ops.tan),
      ("Tanh", math_ops.tanh),
  ]
  random_input = np.random.rand(3, 5)
  complex_component = np.random.rand(3, 5)
  random_int = np.random.randint(0, 10, (7, 3, 5))

  def bitwise_dataset_factory():
    return dataset_ops.Dataset.from_tensor_slices(random_int)

  def logical_dataset_factory():
    return dataset_ops.Dataset.from_tensor_slices(random_input > 0)

  def random_dataset_factory():
    return dataset_ops.Dataset.from_tensor_slices(random_input)

  def complex_dataset_factory():
    return dataset_ops.Dataset.from_tensor_slices(
        math_ops.complex(random_input, complex_component))

  case_factory_pairs = [
      (bitwise_cases, bitwise_dataset_factory),
      (logical_cases, logical_dataset_factory),
      (complex_cases, complex_dataset_factory),
      (real_cases, random_dataset_factory),
  ]
  return [(case[0], case[1], factory)
          for cases, factory in case_factory_pairs
          for case in cases]


def _generate_binary_cwise_math_cases():
  bitwise_cases = [("BitwiseAnd", bitwise_ops.bitwise_and),
                   ("BitwiseOr", bitwise_ops.bitwise_or),
                   ("BitwiseXor", bitwise_ops.bitwise_xor),
                   ("LeftShift", bitwise_ops.left_shift),
                   ("RightShift", bitwise_ops.right_shift)]

  logical_cases = [("LogicalAnd", math_ops.logical_and),
                   ("LogicalOr", math_ops.logical_or)]

  # Wrapper functions restricting the range of inputs of zeta and polygamma.
  def safe_polygamma(x, y):
    return math_ops.polygamma(
        math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)

  def safe_zeta(x, y):
    return math_ops.zeta(x * x + 1, y * y)

  real_cases = [
      ("Add", math_ops.add),
      ("AddV2", math_ops.add_v2),
      ("Atan2", math_ops.atan2),
      ("Complex", math_ops.complex),
      ("DivNoNan", math_ops.div_no_nan),
      ("Equal", math_ops.equal),
      ("FloorDiv", math_ops.floor_div),
      ("FloorMod", math_ops.floor_mod),
      ("Greater", math_ops.greater),
      ("GreaterEqual", math_ops.greater_equal),
      ("Igamma", math_ops.igamma),
      ("Igammac", math_ops.igammac),
      ("IgammaGradA", math_ops.igamma_grad_a),
      ("Less", math_ops.less),
      ("LessEqual", math_ops.less_equal),
      ("Maximum", math_ops.maximum),
      ("Minimum", math_ops.minimum),
      ("Mod", math_ops.mod),
      ("Mul", math_ops.multiply),
      ("NotEqual", math_ops.not_equal),
      ("Polygamma", safe_polygamma),
      ("Pow", math_ops.pow),
      ("RealDiv", math_ops.divide),
      ("SquareDifference", math_ops.squared_difference),
      ("Sub", math_ops.subtract),
      ("TruncateMod", math_ops.truncate_mod),
      ("Zeta", safe_zeta),
  ]

  # Exercises broadcasting capabilities
  x = np.random.rand(7, 3, 5)
  y = np.random.rand(3, 5)
  x_int = np.random.randint(0, 10, (7, 3, 5))
  y_int = np.random.randint(0, 10, (3, 5))

  def bitwise_dataset_factory():
    return dataset_ops.Dataset.from_tensors((x_int, y_int))

  def logical_dataset_factory():
    return dataset_ops.Dataset.from_tensors((x > 0, y > 0))

  def random_dataset_factory():
    return dataset_ops.Dataset.from_tensors((x, y))

  case_factory_pairs = [
      (bitwise_cases, bitwise_dataset_factory),
      (logical_cases, logical_dataset_factory),
      (real_cases, random_dataset_factory),
  ]
  return [(case[0], case[1], factory)
          for cases, factory in case_factory_pairs
          for case in cases]


def _generate_cwise_test_cases():
  return _generate_unary_cwise_math_cases() + _generate_binary_cwise_math_cases()


def _generate_csv_test_case():

  def csv_factory():
    return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
                                                   "2.4:5:c"]).repeat(5)

  def decode_csv_fn(x):
    return parsing_ops.decode_csv(
        x,
        record_defaults=[
            constant_op.constant([], dtypes.float32),
            constant_op.constant([], dtypes.int32),
            constant_op.constant([], dtypes.string)
        ],
        field_delim=":")

  return decode_csv_fn, csv_factory


def _generate_parse_single_example_test_case():

  def parse_example_factory():

    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))

    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.Bytes
ashishnitinpatil/dota2api_scripts
dota2api_scripts/historical_data.py
Python
bsd-2-clause
2,542
0.00236
""" Script to fetch historical data (since 2011) for matches (global, public). Gives results in a chronological order (ascending), as they happened. """ from __future__ import print_function from dota2py import api from time import sleep as wait_for_next_fetch def public_match_history(start_at_match_seq_num=None, matches_requested=500, fetch_delay=1, debug=True, **kwargs): """ Returns list of most recent public matches according to given kwargs Rate limits the API requests according to `fetch_delay` (in seconds) Output : last_response_status, last_response_detail, match_history """ # tracking variables matches_fetched = 0 last_match_seq_num = start_at_match_seq_num last_response_status = 1 match_history = [] last_response_detail = "Fetch successful" while last_response_status == 1 and matches_fetched < matches_requested: cur_response = api.get_match_history_by_sequence_num( start_at_match_seq_num=last_match_seq_num, **kwargs ) last_response_status = cur_response['result']['status'] if not last_response_status == 1: # unsuccessful query if not 'statusDetail' in cur_response['result']: last_response_detail = "Unknown error" else: last_response_detail = cur_response['result']['statusDetail'] break else
: # successful data fetch cur_matches = cur_response['result']['matches'] if len(cur_response['result']['matches']) >= 1: if not match_history: # very first fetch match_history.extend(cur_matches) matches_fetched += len(cur_matches) e
lse: # 2nd fetch onwards, ignore the first common match match_history.extend(cur_matches[1:]) matches_fetched += len(cur_matches) - 1 if len(cur_matches) == 1 and cur_matches[0]['match_id'] == last_match_id: break else: break last_match_seq_num = cur_matches[-1]['match_seq_num'] if debug: print("Matches fetched - #{}...".format(matches_fetched)) wait_for_next_fetch(fetch_delay) if debug: print("{0}: {1}".format(last_response_status, last_response_detail)) return {'status':last_response_status, 'statusDetail':last_response_detail, 'matches':match_history}
caio2k/RIDE
src/robotide/searchtests/__init__.py
Python
apache-2.0
608
0.001645
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ecolitan/fatics
venv/lib/python2.7/site-packages/twisted/mail/pop3client.py
Python
agpl-3.0
24,412
0.00168
# -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) 2001-2004 Divmod Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
POP3 client protocol implementation

Don't use this module directly.  Use twisted.mail.pop3 instead.

@author: Jp Calderone
"""

import re

from hashlib import md5

from twisted.python import log
from twisted.internet import defer
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import error
from twisted.internet import interfaces

OK = '+OK'
ERR = '-ERR'


class POP3ClientError(Exception):
    """Base class for all exceptions raised by POP3Client.
    """


class InsecureAuthenticationDisallowed(POP3ClientError):
    """Secure authentication was required but no mechanism could be found.
    """


class TLSError(POP3ClientError):
    """
    Secure authentication was required but either the transport does
    not support TLS or no TLS context factory was supplied.
    """


class TLSNotSupportedError(POP3ClientError):
    """
    Secure authentication was required but the server does not support
    TLS.
    """


class ServerErrorResponse(POP3ClientError):
    """The server returned an error response to a request.
    """
    def __init__(self, reason, consumer=None):
        POP3ClientError.__init__(self, reason)
        self.consumer = consumer


class LineTooLong(POP3ClientError):
    """The server sent an extremely long line.
    """


class _ListSetter:
    # Internal helper.  POP3 responses sometimes occur in the
    # form of a list of lines containing two pieces of data,
    # a message index and a value of some sort.  When a message
    # is deleted, it is omitted from these responses.  The
    # setitem method of this class is meant to be called with
    # these two values.  In the cases where indexes are skipped,
    # it takes care of padding out the missing values with None.
    def __init__(self, L):
        self.L = L

    def setitem(self, (item, value)):
        diff = item - len(self.L) + 1
        if diff > 0:
            self.L.extend([None] * diff)
        self.L[item] = value


def _statXform(line):
    # Parse a STAT response
    numMsgs, totalSize = line.split(None, 1)
    return int(numMsgs), int(totalSize)


def _listXform(line):
    # Parse a LIST response
    index, size = line.split(None, 1)
    return int(index) - 1, int(size)


def _uidXform(line):
    # Parse a UIDL response
    index, uid = line.split(None, 1)
    return int(index) - 1, uid


def _codeStatusSplit(line):
    # Parse an +OK or -ERR response
    parts = line.split(' ', 1)
    if len(parts) == 1:
        return parts[0], ''
    return parts


def _dotUnquoter(line):
    """
    C{'.'} characters which begin a line of a message are doubled to avoid
    confusing with the terminating C{'.\\r\\n'} sequence.  This function
    unquotes them.
    """
    if line.startswith('..'):
        return line[1:]
    return line


class POP3Client(basic.LineOnlyReceiver, policies.TimeoutMixin):
    """POP3 client protocol implementation class

    Instances of this class provide a convenient, efficient API for
    retrieving and deleting messages from a POP3 server.

    @type startedTLS: C{bool}
    @ivar startedTLS: Whether TLS has been negotiated successfully.

    @type allowInsecureLogin: C{bool}
    @ivar allowInsecureLogin: Indicate whether login() should be allowed if
    the server offers no authentication challenge and if our transport
    does not offer any protection via encryption.

    @type serverChallenge: C{str} or C{None}
    @ivar serverChallenge: Challenge received from the server

    @type timeout: C{int}
    @ivar timeout: Number of seconds to wait before timing out a connection.
    If the number is <= 0, no timeout checking will be performed.
    """

    startedTLS = False
    allowInsecureLogin = False
    timeout = 0
    serverChallenge = None

    # Capabilities are not allowed to change during the session
    # (except when TLS is negotiated), so cache the first response and
    # use that for all later lookups
    _capCache = None

    # Regular expression to search for in the challenge string in the server
    # greeting line.
    _challengeMagicRe = re.compile('(<[^>]+>)')

    # List of pending calls.
    # We are a pipelining API but don't actually
    # support pipelining on the network yet.
    _blockedQueue = None

    # The Deferred to which the very next result will go.
    _waiting = None

    # Whether we dropped the connection because of a timeout
    _timedOut = False

    # If the server sends an initial -ERR, this is the message it sent
    # with it.
    _greetingError = None

    def _blocked(self, f, *a):
        # Internal helper.  If commands are being blocked, append
        # the given command and arguments to a list and return a Deferred
        # that will be chained with the return value of the function
        # when it eventually runs.  Otherwise, set up for commands to be
        # blocked and return None.
        if self._blockedQueue is not None:
            d = defer.Deferred()
            self._blockedQueue.append((d, f, a))
            return d
        self._blockedQueue = []
        return None

    def _unblock(self):
        # Internal helper.  Indicate that a function has completed.
        # If there are blocked commands, run the next one.  If there
        # are not, set up for the next command to not be blocked.
        if self._blockedQueue == []:
            self._blockedQueue = None
        elif self._blockedQueue is not None:
            _blockedQueue = self._blockedQueue
            self._blockedQueue = None

            d, f, a = _blockedQueue.pop(0)
            d2 = f(*a)
            d2.chainDeferred(d)
            # f is a function which uses _blocked (otherwise it wouldn't
            # have gotten into the blocked queue), which means it will have
            # re-set _blockedQueue to an empty list, so we can put the rest
            # of the blocked queue back into it now.
            self._blockedQueue.extend(_blockedQueue)

    def sendShort(self, cmd, args):
        # Internal helper.  Send a command to which a short response
        # is expected.  Return a Deferred that fires when the response
        # is received.  Block all further commands from being sent until
        # the response is received.  Transition the state to SHORT.
        d = self._blocked(self.sendShort, cmd, args)
        if d is not None:
            return d

        if args:
            self.sendLine(cmd + ' ' + args)
        else:
            self.sendLine(cmd)
        self.state = 'SHORT'
        self._waiting = defer.Deferred()
        return self._waiting

    def sendLong(self, cmd, args, consumer, xform):
        # Internal helper.  Send a command to which a multiline
        # response is expected.  Return a Deferred that fires when
        # the entire response is received.  Block all further commands
        # from being sent until the entire response is received.
        # Transition the state to LONG_INITIAL.
        d = self._blocked(self.sendLong, cmd, args, consumer, xform)
        if d is not None:
            return d

        if args:
            self.sendLine(cmd + ' ' + args)
        else:
            self.sendLine(cmd)
        self.state = 'LONG_INITIAL'
        self._xform = xform
        self._consumer = consumer
        self._waiting = defer.Deferred()
        return self._waiting

    # Twisted protocol callback
    def connectionMade(self):
        if self.timeout > 0:
            self.setTimeout(self.timeout)

        self.state = 'WELCOME'
        self._blockedQueue = []

    def timeoutConnection(self):
        self._timedOut = True
        self.transport.loseConnection()

    def connectionLost(self, reason):
        if self.timeout > 0:
            self.setTimeout(None)

        if self._timedOut:
            reason = error.TimeoutError()
        elif self._greetingError:
            reason = ServerErrorResponse(self._greetingError)

        d = []
        if self._waiting is not None:
            d.append(self._waiting)
            self._wai
mineo/dnf-plugins-core
plugins/config_manager.py
Python
gpl-2.0
9,979
0.000701
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#

from __future__ import absolute_import
from __future__ import unicode_literals
from dnfpluginscore import _, logger

import dnf
import dnf.cli
import dnf.pycomp
import dnfpluginscore
import dnfpluginscore.lib
import os
import re
import shutil


class ConfigManager(dnf.Plugin):

    name = 'config-manager'

    def __init__(self, base, cli):
        super(ConfigManager, self).__init__(base, cli)
        self.base = base
        self.cli = cli
        if self.cli is not None:
            self.cli.register_command(ConfigManagerCommand)


class ConfigManagerCommand(dnf.cli.Command):

    aliases = ['config-manager']
    summary = _('manage dnf configuration options and repositories')
    usage = '[%s] [%s]' % (_('OPTIONS'), _('KEYWORDS'))

    def __init__(self, cli):
        super(ConfigManagerCommand, self).__init__(cli)
        self.opts = None
        self.parser = None

    def configure(self, args):
        # setup sack and populate it with enabled repos
        demands = self.cli.demands
        demands.available_repos = True

        self.parser = dnfpluginscore.ArgumentParser(self.aliases[0])
        self.parser.add_argument(
            'repo', nargs='*',
            help=_('repo to modify'))
        self.parser.add_argument(
            '--save', default=False, action='store_true',
            help=_('save the current options (useful with --setopt)'))
        self.parser.add_argument(
            '--set-enabled', default=False, action='store_true',
            help=_('enable the specified repos (automatically saves)'))
        self.parser.add_argument(
            '--set-disabled', default=False, action='store_true',
            help=_('disable the specified repos (automatically saves)'))
        self.parser.add_argument(
            '--add-repo', default=[], action='append', metavar='URL',
            help=_('add (and enable) the repo from the specified file or url'))
        self.parser.add_argument(
            '--dump', default=False, action='store_true',
            help=_('print current configuration values to stdout'))

        self.opts = self.parser.parse_args(args)

        if self.opts.help_cmd:
            print(self.parser.format_help())
            return

        if (self.opts.save or self.opts.set_enabled or
                self.opts.set_disabled or self.opts.add_repo):
            demands.root_user = True

    def run(self, _args):
        """Execute the util action here."""
        if self.opts.help_cmd:
            return

        if self.opts.set_enabled and self.opts.set_disabled:
            logger.error(
                _("Error: Trying to enable and disable repos at the same time."))
            self.opts.set_enabled = self.opts.set_disabled = False
        if self.opts.set_enabled and not self.opts.repo:
            logger.error(_("Error: Trying to enable already enabled repos."))
            self.opts.set_enabled = False

        if self.opts.add_repo:
            self.add_repo()
        else:
            self.modify_repo()

    def modify_repo(self):
        """ process --set-enabled, --set-disabled and --setopt options """
        sbc = self.base.conf
        modify = []
        if hasattr(self.cli, 'main_setopts') and self.cli.main_setopts:
            modify = self.cli.main_setopts.items
        if not self.opts.repo or 'main' in self.opts.repo:
            if self.opts.dump:
                print(self.base.output.fmtSection('main'))
                print(self.base.conf.dump())
            if self.opts.save and modify:
                # modify [main] in dnf.conf
                dnfpluginscore.lib.write_raw_configfile(
                    dnf.const.CONF_FILENAME, 'main', sbc.substitutions,
                    sbc.cfg.options, sbc.iteritems, sbc.optionobj, modify)

        if self.opts.set_enabled or self.opts.set_disabled:
            self.opts.save = True
            modify.append('enabled')

        if self.opts.repo:
            matched = []
            for name in self.opts.repo:
                matched.extend(self.base.repos.get_matching(name))
        else:
            matched = self.base.repos.iter_enabled()

        if not matched:
            raise dnf.exceptions.Error(_("No matching repo to modify: %s.")
                                       % ', '.join(self.opts.repo))
        for repo in sorted(matched):
            if self.opts.dump:
                print(self.base.output.fmtSection('repo: ' + repo.id))
            if self.opts.set_enabled and not repo.enabled:
                repo.enable()
            elif self.opts.set_disabled and repo.enabled:
                repo.disable()
            if self.opts.dump:
                print(repo.dump())
            repo_modify = modify[:]
            if (hasattr(self.cli, 'repo_setopts') and
                    repo.id in self.cli.repo_setopts):
                repo_modify.extend(self.cli.repo_setopts[repo.id].items)
            if self.opts.save and modify:
                dnfpluginscore.lib.write_raw_configfile(
                    repo.repofile, repo.id, sbc.substitutions,
                    repo.cfg.options, repo.iteritems, repo.optionobj,
                    repo_modify)

    def add_repo(self):
        """ process --add-repo option """
        # put repo file into first reposdir which exists or create it
        myrepodir = None
        for rdir in self.base.conf.reposdir:
            if os.path.exists(rdir):
                myrepodir = rdir
                break
        if not myrepodir:
            myrepodir = self.base.conf.reposdir[0]
            dnf.util.ensure_dir(myrepodir)
        for url in self.opts.add_repo:
            if dnf.pycomp.urlparse.urlparse(url).scheme == '':
                url = 'file://' + os.path.abspath(url)
            logger.info(_('Adding repo from: %s'), url)
            if url.endswith('.repo'):
                # .repo file - download, put into reposdir and enable it
                destname = os.path.basename(url)
                destname = os.path.join(myrepodir, destname)
                try:
                    f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
                    shutil.copy2(f.name, destname)
                    os.chmod(destname, 0o644)
                    f.close()
                except IOError as e:
                    logger.error(e)
                    continue
            else:
                # just url to repo, create .repo file on our own
                repoid = sanitize_url_to_fs(url)
                reponame = 'created by dnf config-manager from %s' % url
                destname = os.path.join(myrepodir, "%s.repo" % repoid)
                content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \
ThreeDRadio/playlists
backend/catalogue/migrations/0002_auto_20160628_1024.py
Python
mit
342
0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('catalogue', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Cd',
            new_name='Release',
        ),
    ]
jashandeep-sohi/aiohttp
aiohttp/web_urldispatcher.py
Python
apache-2.0
26,407
0
import abc
import asyncio

import keyword
import collections
import mimetypes
import re
import os
import sys
import inspect
import warnings

from collections.abc import Sized, Iterable, Container
from pathlib import Path
from urllib.parse import urlencode, unquote
from types import MappingProxyType

from . import hdrs
from .abc import AbstractRouter, AbstractMatchInfo, AbstractView
from .protocol import HttpVersion11
from .web_exceptions import (HTTPMethodNotAllowed, HTTPNotFound,
                             HTTPNotModified, HTTPExpectationFailed)
from .web_reqrep import StreamResponse
from .multidict import upstr

__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
           'AbstractResource', 'Resource', 'PlainResource',
           'DynamicResource', 'ResourceAdapter',
           'AbstractRoute', 'ResourceRoute',
           'Route', 'PlainRoute', 'DynamicRoute', 'StaticRoute', 'View')

PY_35 = sys.version_info >= (3, 5)


class AbstractResource(Sized, Iterable):

    def __init__(self, *, name=None):
        self._name = name

    @property
    def name(self):
        return self._name

    @abc.abstractmethod  # pragma: no branch
    def url(self, **kwargs):
        """Construct url for resource with additional params."""

    @asyncio.coroutine
    @abc.abstractmethod  # pragma: no branch
    def resolve(self, method, path):
        """Resolve resource

        Return (UrlMappingMatchInfo, allowed_methods) pair."""

    @abc.abstractmethod
    def get_info(self):
        """Return a dict with additional info useful for introspection"""

    @staticmethod
    def _append_query(url, query):
        if query is not None:
            return url + "?" + urlencode(query)
        else:
            return url


class AbstractRoute(abc.ABC):
    METHODS = hdrs.METH_ALL | {hdrs.METH_ANY}

    def __init__(self, method, handler, *,
                 expect_handler=None,
                 resource=None):

        if expect_handler is None:
            expect_handler = _defaultExpectHandler

        assert asyncio.iscoroutinefunction(expect_handler), \
            'Coroutine is expected, got {!r}'.format(expect_handler)

        method = upstr(method)
        if method not in self.METHODS:
            raise ValueError("{} is not allowed HTTP method".format(method))

        assert callable(handler), handler
        if asyncio.iscoroutinefunction(handler):
            pass
        elif inspect.isgeneratorfunction(handler):
            warnings.warn("Bare generators are deprecated, "
                          "use @coroutine wrapper", DeprecationWarning)
        elif (isinstance(handler, type) and
              issubclass(handler, AbstractView)):
            pass
        else:
            @asyncio.coroutine
            def handler_wrapper(*args, **kwargs):
                result = old_handler(*args, **kwargs)
                if asyncio.iscoroutine(result):
                    result = yield from result
                return result
            old_handler = handler
            handler = handler_wrapper

        self._method = method
        self._handler = handler
        self._expect_handler = expect_handler
        self._resource = resource

    @property
    def method(self):
        return self._method

    @property
    def handler(self):
        return self._handler

    @property
    @abc.abstractmethod
    def name(self):
        """Optional route's name, always equals to resource's name."""

    @property
    def resource(self):
        return self._resource

    @abc.abstractmethod
    def get_info(self):
        """Return a dict with additional info useful for introspection"""

    @abc.abstractmethod  # pragma: no branch
    def url(self, **kwargs):
        """Construct url for route with additional params."""

    @asyncio.coroutine
    def handle_expect_header(self, request):
        return (yield from self._expect_handler(request))


class UrlMappingMatchInfo(dict, AbstractMatchInfo):

    def __init__(self, match_dict, route):
        super().__init__(match_dict)
        self._route = route

    @property
    def handler(self):
        return self._route.handler

    @property
    def route(self):
        return self._route

    @property
    def expect_handler(self):
        return self._route.handle_expect_header

    @property
    def http_exception(self):
        return None

    def get_info(self):
        return self._route.get_info()

    def __repr__(self):
        return "<MatchInfo {}: {}>".format(super().__repr__(), self._route)


class MatchInfoError(UrlMappingMatchInfo):

    def __init__(self, http_exception):
        self._exception = http_exception
        super().__init__({}, SystemRoute(self._exception))

    @property
    def http_exception(self):
        return self._exception

    def __repr__(self):
        return "<MatchInfoError {}: {}>".format(self._exception.status,
                                                self._exception.reason)


@asyncio.coroutine
def _defaultExpectHandler(request):
    """Default handler for Except header.

    Just send "100 Continue" to client.
    raise HTTPExpectationFailed if value of header is not "100-continue"
    """
    expect = request.headers.get(hdrs.EXPECT)
    if request.version == HttpVersion11:
        if expect.lower() == "100-continue":
            request.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
        else:
            raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)


class ResourceAdapter(AbstractResource):

    def __init__(self, route):
        assert isinstance(route, Route), \
            'Instance of Route class is required, got {!r}'.format(route)
        super().__init__(name=route.name)
        self._route = route
        route._resource = self

    def url(self, **kwargs):
        return self._route.url(**kwargs)

    @asyncio.coroutine
    def resolve(self, method, path):
        route_method = self._route.method
        allowed_methods = set()
        match_dict = self._route.match(path)
        if match_dict is not None:
            allowed_methods.add(route_method)
            if route_method == hdrs.METH_ANY or route_method == method:
                return (UrlMappingMatchInfo(match_dict, self._route),
                        allowed_methods)
        return None, allowed_methods

    def get_info(self):
        return self._route.get_info()

    def __len__(self):
        return 1

    def __iter__(self):
        yield self._route


class Resource(AbstractResource):

    def __init__(self, *, name=None):
        super().__init__(name=name)
        self._routes = []

    def add_route(self, method, handler, *,
                  expect_handler=None):

        for route in self._routes:
            if route.method == method or route.method == hdrs.METH_ANY:
                raise RuntimeError("Added route will never be executed, "
                                   "method {route.method} is "
                                   "already registered".format(route=route))

        route = ResourceRoute(method, handler, self,
                              expect_handler=expect_handler)
        self.register_route(route)
        return route

    def register_route(self, route):
        assert isinstance(route, ResourceRoute), \
            'Instance of Route class is required, got {!r}'.format(route)
        self._routes.append(route)

    @asyncio.coroutine
    def resolve(self, method, path):
        allowed_methods = set()

        match_dict = self._match(path)
        if match_dict is None:
            return None, allowed_methods

        for route in self._routes:
            route_method = route.method
            allowed_methods.add(route_method)

            if route_method == method or route_method == hdrs.METH_ANY:
                return UrlMappingMatchInfo(match_dict, route), allowed_methods
        else:
            return None, allowed_methods

    def __len__(self):
        return len(self._routes)

    def __iter__(self):
        return iter(self._routes)


class PlainResource(Resource):

    def __init__(self, path, *, name=None):
        super().__init__(name=name)
        self._path = path
lkluft/python-toolbox
scripts/matrixoperationen.py
Python
gpl-3.0
599
0
# -*- coding: utf-8 -*-
"""Working with multidimensional arrays.

The following illustrates how to work with multidimensional arrays.
The examples use two-dimensional arrays (matrices), but the behaviour
carries over to arrays of higher dimensions.
"""
import numpy as np

# Define random matrices
A = np.random.random_integers(0, 10, (3, 3))
B = np.random.random_integers(0, 10, (3, 3))

# Arithmetic operations
A + B            # addition
A - B            # subtraction
A * B            # element-wise multiplication
A @ B            # matrix multiplication
np.cross(A, B)   # cross product

# Transpose of a matrix
A.T
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_default_unit_info.py
Python
mit
4,695
0
# coding: utf-8

"""
    Onshape REST API

    The Onshape REST API consumed by all clients.  # noqa: E501

    The version of the OpenAPI document: 1.113
    Contact: [email protected]
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import
import re  # noqa: F401
import sys  # noqa: F401

import six  # noqa: F401
import nulltype  # noqa: F401

from onshape_client.oas.model_utils import (  # noqa: F401
    ModelComposed,
    ModelNormal,
    ModelSimple,
    date,
    datetime,
    file_type,
    int,
    none_type,
    str,
    validate_get_composed_info,
)


class BTDefaultUnitInfo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {}

    validations = {}

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "key": (str,),  # noqa: E501
            "value": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        return None

    attribute_map = {
        "key": "key",  # noqa: E501
        "value": "value",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        return None

    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_default_unit_info.BTDefaultUnitInfo - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            key (str): [optional]  # noqa: E501
            value (str): [optional]  # noqa: E501
        """

        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
insomnia-lab/calibre
src/calibre/ebooks/docx/fields.py
Python
gpl-3.0
4,524
0.0042
#!/usr/bin/env python # vim:fileencoding=utf-8 from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import re from calibre.ebooks.docx.names import XPath, get class Field(object): def __init__(self, start): self.start = start self.end = None self.contents = [] self.instructions = [] def add_instr(self, elem): raw = elem.text if not raw: return name, rest = raw.strip().partition(' ')[0::2] self.instructions.append((name, rest.strip())) WORD, FLAG = 0, 1 scanner = re.Scanner([ (r'\\\S{1}', lambda s, t: (t, FLAG)), # A flag of the form \x (r'"[^"]*"', lambda s, t: (t[1:-1], WORD)), # Quoted word (r'[^\s\\"]\S*', lambda s, t: (t, WORD)), # A non-quoted word, must not start with a backslash or a space or a quote (r'\s+', None), ], flags=re.DOTALL) def parse_hyperlink(raw, log): ans = {} last_option = None raw = raw.replace('\\\\', '\x01').replace('\\"', '\x02') for token, token_type in scanner.scan(raw)[0]: token = token.replace('\x01', '\\').replace('\x02', '"') if token_type is FLAG: last_option = {'l':'anchor', 'm':'image-map', 'n':'target', 'o':'title', 't':'target'}.get(token[1], None) if last_option is not None: ans[last_option] = None elif token_type is WORD: if last_option is None: ans['url'] = token else: ans[last_option] = token last_option = None return ans class Fields(object): def __init__(self): self.fields = [] def __call__(self, doc, log): stack = [] for elem in XPath( '//*[name()="w:p" or name()="w:r" or name()="w:instrText" or (name()="w:fldChar" and (@w:fldCharType="begin" or @w:fldCharType="end"))]')(doc): if elem.tag.endswith('}fldChar'): typ = get(elem, 'w:fldCharType'
) if typ == 'begin': stack.append(Field(elem)) self.fields.append(stack[-1]) els
e: try: stack.pop().end = elem except IndexError: pass elif elem.tag.endswith('}instrText'): if stack: stack[-1].add_instr(elem) else: if stack: stack[-1].contents.append(elem) # Parse hyperlink fields self.hyperlink_fields = [] for field in self.fields: if len(field.instructions) == 1 and field.instructions[0][0] == 'HYPERLINK': hl = parse_hyperlink(field.instructions[0][1], log) if hl: if 'target' in hl and hl['target'] is None: hl['target'] = '_blank' all_runs = [] current_runs = [] # We only handle spans in a single paragraph # being wrapped in <a> for x in field.contents: if x.tag.endswith('}p'): if current_runs: all_runs.append(current_runs) current_runs = [] elif x.tag.endswith('}r'): current_runs.append(x) if current_runs: all_runs.append(current_runs) for runs in all_runs: self.hyperlink_fields.append((hl, runs)) def test_parse_hyperlink(): import unittest class TestParseHyperLink(unittest.TestCase): def test_parsing(self): self.assertEqual(parse_hyperlink( r'\l anchor1', None), {'anchor':'anchor1'}) self.assertEqual(parse_hyperlink( r'www.calibre-ebook.com', None), {'url':'www.calibre-ebook.com'}) self.assertEqual(parse_hyperlink( r'www.calibre-ebook.com \t target \o tt', None), {'url':'www.calibre-ebook.com', 'target':'target', 'title': 'tt'}) self.assertEqual(parse_hyperlink( r'"c:\\Some Folder"', None), {'url': 'c:\\Some Folder'}) suite = unittest.TestLoader().loadTestsFromTestCase(TestParseHyperLink) unittest.TextTestRunner(verbosity=4).run(suite)
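A detail worth noting in parse_hyperlink above: escaped backslashes and escaped quotes are swapped for the sentinel bytes \x01/\x02 before scanning and restored per token, so the quoted-word rule does not terminate early on an escaped quote. A minimal sketch of that case (the URL is a hypothetical example; assumes the module above is importable so parse_hyperlink is in scope):

print(parse_hyperlink(r'http://example.com \o "a \"big\" title"', None))
# {'url': 'http://example.com', 'title': 'a "big" title'}
# Without the \x01/\x02 substitution, the scanner's '"[^"]*"' rule
# would stop at the escaped quote and split the title in two.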
ychen820/microblog
y/google-cloud-sdk/platform/google_appengine/google/appengine/api/files/records.py
Python
bsd-3-clause
11,204
0.006426
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Files API. .. deprecated:: 1.8.1 Use Google Cloud Storage Client library instead. Lightweight record format. This format implements log file format from leveldb: http://leveldb.googlecode.com/svn/trunk/doc/log_format.txt Full specification of format follows in case leveldb decides to change it. The log file contents are a sequence of 32KB blocks. The only exception is that the tail of the file may contain a partial block. Each block consists of a sequence of records: block := record* trailer? record := checksum: uint32 // masked crc32c of type and data[] length: uint16 type: uint8 // One of FULL, FIRST, MIDDLE, LAST data: uint8[length] A record never starts within the last six bytes of a block (since it won't fit). Any leftover bytes here form the trailer, which must consist entirely of zero bytes and must be skipped by readers. Aside: if exactly seven bytes are left in the current block, and a new non-zero length record is added, the writer must emit a FIRST record (which contains zero bytes of user data) to fill up the trailing seven bytes of the block and then emit all of the user data in subsequent blocks. More types may be added in the future. Some Readers may skip record types they do not understand, others may report that some data was skipped. FULL == 1 FIRST == 2 MIDDLE == 3 LAST == 4 The FULL record contains the contents of an entire user record. FIRST, MIDDLE, LAST are types used for user records that have been split into multiple fragments (typically because of block boundaries). FIRST is the type of the first fragment of a user record, LAST is the type of the last fragment of a user record, and MID is the type of all interior fragments of a user record. Example: consider a sequence of user records: A: length 1000 B: length 97270 C: length 8000 A will be stored as a FULL record in the first block. B will be split into three fragments: first fragment occupies the rest of the first block, second fragment occupies the entirety of the second block, and the third fragment occupies a prefix of the third block. This will leave six bytes free in the third block, which will be left empty as the trailer. C will be stored as a FULL record in the fourth block. """ import logging import struct import google from google.appengine.api.files import crc32c BLOCK_SIZE = 32 * 1024 HEADER_FORMAT = '<IHB' HEADER_LENGTH = struct.calcsize(HEADER_FORMAT) RECORD_TYPE_NONE = 0 RECORD_TYPE_FULL = 1 RECORD_TYPE_FIRST = 2 RECORD_TYPE_MIDDLE = 3 RECORD_TYPE_LAST = 4 class Error(Exception): """Base class for exceptions in this module.""" class InvalidRecordError(Error): """Raised when invalid record encountered.""" class FileWriter(object): """Interface specification for writers to be used with records module.""" def write(self, data): """Write data to the file. Args: data: byte array, string or iterable over bytes. 
""" raise NotImplementedError() class FileReader(object): """Interface specification for writers to be used with recordrecords module. FileReader defines a reader with position and efficient seek/position determining. All reads occur at current position. """ def read(self, size): """Read data from file. Reads data from current position and advances position past the read data block. Args: size: number of bytes to read. Returns: iterable over bytes. If number of bytes read is less then 'size' argument, it is assumed that end of file was reached. """ raise NotImplementedError() def tell(self): """Get current file position. Returns: current position as a byte offset in the file as integer. """ raise NotImplementedError() _CRC_MASK_DELTA = 0xa282ead8 def _mask_crc(crc): """Mask crc. Args: crc: integer crc. Returns: masked integer crc. """ return (((crc >> 15) | (crc << 17)) + _CRC_MASK_DELTA) & 0xFFFFFFFFL def _unmask_crc(masked_crc): """Unmask crc. Args: masked_crc: masked integer crc. Retruns: orignal crc. """ rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFFL return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFFL class RecordsWriter(object): """A writer for records format. This writer should be used only inside with statement: with records.RecordsWriter(file) as writer: writer.write("record") RecordsWriter will pad last block with 0 when exiting with statement scope. """ def __init__(self, writer, _pad_last_block=True): """Constructor. Args: writer: a writer to use. Should conform to FileWriter interface. """ self.__writer = writer self.__position = 0 self.__entered = False self.__pad_last_block = _pad_last_block def __write_record(self, record_type, data): """Write single physical record.""" length = len(data) crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) crc = crc32c.crc_update(crc, data) crc = crc32c.crc_finalize(crc) self.__writer.write( struct.pack(HEADER_FORMAT, _mask_crc(crc), length, record_type)) self.__writer.write(data) self.__position += HEADER_LENGTH + length def write(self, data): """Write single record. Args: data: record data to write as string, byte array or byte sequence. """ if not self.__entered: raise Exception("RecordWriter should be used only with 'with' statement.") block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE if block_remaining < HEADER_LENGTH: self.__writer.write('\x00' * block_remaining) self.__position += block_remaining block_remaining = BLOCK_SIZE if block_remaining < len(data) + HEADER_LENGTH: first_chunk = data[:block_remaining - HEADER_LENGTH] self.__write_record(RECORD_TYPE_FIRST, first_chunk) data = data[len(first_chunk):] while True: block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE if block_remaining >= len(data) + HEADER_LENGTH: self.__write_record(RECORD_TYPE_LAST, data) break else: chunk = data[:block_remaining - HEADER_LENGTH] self.__write_record(RECORD_TYPE_MIDDLE, chunk) data = data[len(chunk):] else: self.__write_record(RECORD_TYPE_FULL, data) def __enter__(self): self.__entered = True return self def __exit__(self, atype, value, traceback): self.close() def close(self): if self.__pad_last_block: pad_length = BLOCK_SIZE - self.__position % BLOCK_SIZE if pad_length and pad_length != BLOCK_SIZE: self.__writer.write('\x00' * pad_length) class RecordsReader(object): """A reader for records format.""" def __init__(self, reader): self.__reader = reader def __try_read_record(self): """Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. 
InvalidRecordError: when valid record could not be read. """ block_remain
ing = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE if block_remaining < HEADER_LENGTH: return ('', RECORD_TYPE_NONE) header = self.__reader.read(HEADER_LENGTH) if len(header) != HEADER_LENGT
H: raise EOFError('Read %s bytes instead of %s' % (len(header), HEADER_LENGTH)) (masked_crc, length, record_type) = struct.unpack(HEADER_FORMAT, header) crc = _unmask_crc(masked_crc) if length + HEADER_LENGTH > block_remaining: raise
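The mask/unmask pair above implements leveldb's CRC masking (rotate the 32-bit CRC right by 15 bits, then add a constant) so that CRCs stored inside CRC-protected data do not feed back into themselves. The two helpers are exact inverses over 32-bit values; a standalone round-trip check, using only the formula shown above (0xFFFFFFFF written without the Python 2 L suffix so the sketch also runs on Python 3):

# Round-trip check of the CRC masking documented above.
_CRC_MASK_DELTA = 0xa282ead8

def mask_crc(crc):
    # rotate right by 15 (as a 32-bit value), then add the delta
    return (((crc >> 15) | (crc << 17)) + _CRC_MASK_DELTA) & 0xFFFFFFFF

def unmask_crc(masked_crc):
    # subtract the delta, then rotate right by the remaining 17 bits
    rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFF
    return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFF

for crc in (0, 1, 0xDEADBEEF, 0xFFFFFFFF):
    assert unmask_crc(mask_crc(crc)) == crc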
philgyford/django-spectator
spectator/events/views.py
Python
mit
8,037
0.000249
from django.db.models import Min from django.http import Http404 from django.utils.encoding import force_str from django.utils.translation import gettext as _ from django.views.generic import DetailView, YearArchiveView from django.views.generic.detail import SingleObjectMixin from spectator.core import app_settings from spectator.core.views import PaginatedListView from .models import Event, Venue, Work class EventListView(PaginatedListView): """ Includes context of counts of all different Event types, plus the kind of event this page is for, plus adding `event_list` (synonym for `object_list`). Expects a `kind_slug` like 'movies', 'gigs', 'concerts', etc. """ model = Event ordering = [ "-date", ] def get(self, request, *args, **kwargs): slug = self.kwargs.get("kind_slug", None) if slug is not None and slug not in Event.get_valid_kind_slugs(): raise Http404("Invalid kind_slug: '%s'" % slug) return super().get(request, *args, **kwargs) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update(self.get_event_counts()) # e.g. 'concert' kind = self.get_event_kind() context["event_kind"] = kind if kind: # e.g. 'Concert': context["event_kind_name"] = Event.get_kind_name(kind) # e.g. 'Concerts': context["event_kind_name_plural"] = Event.get_kind_name_plural(kind) context["event_list"] = context["object_list"] return context def get_event_counts(self): """ Returns a dict like: {'counts': { 'all': 30, 'movie': 12, 'gig': 10, }} """ counts = { "all": Event.objects.count(), } for val in Event.Kind.values: # e.g. 'movie_count': counts[val] = Event.objects.filter(kind=val).count() return { "counts": counts, } def get_event_kind(self): """ Unless we're on the front page we'll have a kind_slug like 'movies'. We need to translate that into an event `kind` like 'movie'. """ slug = self.kwargs.get("kind_slug", None) if slug is None: return None # Front page; showing all Event kinds. else: slugs_to_kinds = {v: k for k, v in Event.Kind.slugs().items()} return slugs_to_kinds.get(slug, None) def get_queryset(self): "Restrict to a single kind of event, if any, and include Venue data." qs = super().get_queryset() kind = self.get_event_kind() if kind is not None: qs = qs.filter(kind=kind) qs = qs.select_related("venue") return qs class EventDetailView(DetailView): model = Event class EventYearArchiveView(YearArchiveView): allow_empty = True date_field = "date" make_object_list = True model = Event ordering = "date" def get_queryset(self): "Reduce the number of queries and speed things up." qs = super().get_queryset() qs = qs.select_related("venue") return qs def get_dated_items(self): items, qs, info = super().get_dated_items() if "year" in info and info["year"]: # Get the earliest date we have an Event for: date_min = Event.objects.aggregate(Min("date"))["date__min"] # Make it a 'yyyy-01-01' date: min_year_date = date_min.replace(month=1, day=1) if info["year"] < min_year_date: # The year we're viewing is before our minimum date, so 404. raise Http404( _("No %(verbose_name_plural)s available") % { "verbose_name_plural": force_str( qs.model._meta.verbose_name_plural ) } ) elif info["year"] == min_year_date: # This is the earliest year we have events for, so # there is no previous year. 
info["previous_year"] = None return items, qs, info # WORKS class WorkMixin: kind_slug = None def get(self, request, *args, **kwargs): slug = self.kwargs.get("kind_slug", None) if slug is not None and slug not in Work.get_valid_kind_slugs(): raise Http404("Invalid kind_slug: '%s'" % slug) else: self.kind_slug = slug return super().get(request, *args, **kwargs) def get_work_kind(self): """ We'll have a kind_slug like 'movies'. We need to translate that into a work `kind` like 'movie'. """ slugs_to_kinds = {v: k for k, v in Work.Kind.slugs().items()} return slugs_to_kinds.get(self.kind_slug, None) class WorkListView(WorkMixin, PaginatedListView): model = Work def get_queryset(self): kind = self.get_work_kind() qs = super().get_queryset() qs = qs.filter(kind=kind) qs = qs.prefetch_related("roles__creator") return qs def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) #
'movie', 'Movie', and 'Movies' respectively: kind = self.get_work_kind() kind_name = Work.get_kind_name(kind) ki
nd_name_plural = Work.get_kind_name_plural(kind) context["page_title"] = kind_name_plural context["breadcrumb_list_title"] = kind_name_plural context["work_kind"] = kind context["work_kind_name"] = kind_name context["work_kind_name_plural"] = kind_name_plural context["breadcrumb_list_url"] = self.model().get_list_url( kind_slug=self.kind_slug ) return context class WorkDetailView(WorkMixin, DetailView): model = Work def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) kind = self.get_work_kind() context["breadcrumb_list_title"] = Work.get_kind_name_plural(kind) context["breadcrumb_list_url"] = self.model().get_list_url( kind_slug=self.kind_slug ) return context # VENUES class VenueListView(PaginatedListView): model = Venue ordering = ["name_sort"] def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["country_list"] = self.get_countries() return context def get_countries(self): """ Returns a list of dicts, one per country that has at least one Venue in it. Each dict has 'code' and 'name' elements. The list is sorted by the country 'name's. """ qs = ( Venue.objects.values("country") .exclude(country="") .distinct() .order_by("country") ) countries = [] for c in qs: countries.append( {"code": c["country"], "name": Venue.get_country_name(c["country"])} ) return sorted(countries, key=lambda k: k["name"]) class VenueDetailView(SingleObjectMixin, PaginatedListView): template_name = "spectator_events/venue_detail.html" def get(self, request, *args, **kwargs): self.object = self.get_object(queryset=Venue.objects.all()) return super().get(request, *args, **kwargs) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["venue"] = self.object context["event_list"] = context["object_list"] if self.object.latitude is None or self.object.longitude is None: context["SPECTATOR_MAPS"] = {"enable": False} else: context["SPECTATOR_MAPS"] = app_settings.MAPS return context def get_queryset(self): return self.object.event_set.order_by("-date")
danrschlosser/eventum
eventum/forms/CreateProfileForm.py
Python
mit
1,023
0
""" .. module:: CreateProfileForm :synopsis: A form for completing a user's profile. .. moduleauthor:: Dan Schlosser <[email protected]> """ from flask.ext.wtf import Form from wtforms import StringField, Hid
denField from wtforms.validators import URL, Email, Required EMAIL_ERROR = '
Please provide a valid email address.' class CreateProfileForm(Form): """A form for completing a :class:`~app.models.User` profile after they login to Eventum for the first time. :ivar email: :class:`wtforms.fields.StringField` - The user's email address. :ivar name: :class:`wtforms.fields.StringField` - The user's name. :ivar next: :class:`wtforms.fields.HiddenField` - The URL that they should be redirected to after completing their profile. """ name = StringField('Full Name') email = StringField('Email Address', [Email(message=EMAIL_ERROR), Required(message=EMAIL_ERROR)]) next = HiddenField('hidden', [URL(require_tld=False)])
achapkowski/ArcREST
src/arcrest/common/geometry.py
Python
apache-2.0
20,189
0.009114
import os import json import arcpy import types import general from .._ab
stract import abstract ######################################################################## class SpatialReference(abstract.AbstractGeometry): """ creates a spatial reference instance """ _wkid = None #---------------------------------------------------------------------- def __init__(self, wkid)
: """Constructor""" self._wkid = wkid #---------------------------------------------------------------------- @property def wkid(self): """ get/set the wkid """ return self._wkid @wkid.setter def wkid(self, wkid): """ get/set the wkid """ self._wkid = wkid @property def asDictionary(self): """returns the wkid id for use in json calls""" return {"wkid": self._wkid} #---------------------------------------------------------------------- @property def value(self): """returns the wkid id for use in json calls""" return {"wkid": self._wkid} ######################################################################## class Point(abstract.AbstractGeometry): """ Point Geometry Inputs: coord - list of [X,Y] pair or arcpy.Point Object wkid - well know id of spatial references z - is the Z coordinate value m - m value """ _x = None _y = None _z = None _m = None _wkid = None _json = None _geom = None _dict = None #---------------------------------------------------------------------- def __init__(self, coord, wkid, z=None, m=None): """Constructor""" if isinstance(coord, list): self._x = float(coord[0]) self._y = float(coord[1]) elif isinstance(coord, arcpy.Geometry): self._x = coord.centroid.X self._y = coord.centroid.Y self._z = coord.centroid.Z self._m = coord.centroid.M self._geom = coord.centroid self._wkid = wkid if not z is None: self._z = float(z) if not m is None: self._m = m #---------------------------------------------------------------------- def __str__(self): """ returns the object as a string """ return json.dumps(self.asDictionary, default=general._date_handler) #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryPoint" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Point as an ESRI arcpy.Point object """ return arcpy.AsShape(self.asDictionary, True) #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the object as a python dictionary """ # template = {"x" : self._x, "y" : self._y, "spatialReference" : {"wkid" : self._wkid} } if not self._z is None: template['z'] = self._z if not self._m is None: template['z'] = self._m return template #---------------------------------------------------------------------- @property def asList(self): """ returns a Point value as a list of [x,y,<z>,<m>] """ base = [self._x, self._y] if not self._z is None: base.append(self._z) elif not self._m is None: base.append(self._m) return base #---------------------------------------------------------------------- @property def X(self): """ gets the X coordinate """ return self._x #---------------------------------------------------------------------- @X.setter def X(self, value): """sets the X coordinate""" if isinstance(value, (int, float, long, types.NoneType)): self._x = value #---------------------------------------------------------------------- @property def Y(self): """ gets the Y Coordinate """ return self._y 
#---------------------------------------------------------------------- @Y.setter def Y(self, value): """ sets the Y coordinate """ if isinstance(value, (int, float, long, types.NoneType)): self._y = value #---------------------------------------------------------------------- @property def Z(self): """ gets the Z Coordinate """ return self._z #---------------------------------------------------------------------- @Z.setter def Z(self, value): """ sets the Z coordinate """ if isinstance(value, (int, float, long, types.NoneType)): self._z = value #---------------------------------------------------------------------- @property def wkid(self): """ gets the wkid """ return self._wkid #---------------------------------------------------------------------- @wkid.setter def wkid(self, value): """ sets the wkid """ if isinstance(value, (int, long)): self._wkid = value ######################################################################## class MultiPoint(abstract.AbstractGeometry): """ Implements the ArcGIS JSON MultiPoint Geometry Object """ _geom = None _json = None _dict = None _wkid = None _points = None _hasZ = False _hasM = False #---------------------------------------------------------------------- def __init__(self, points, wkid, hasZ=False, hasM=False): """Constructor""" if isinstance(points, list): self._points = points elif isinstance(points, arcpy.Geometry): self._points = self.__geomToPointList(points) self._wkid = wkid self._hasZ = hasZ self._hasM = hasM #---------------------------------------------------------------------- def __geomToPointList(self, geom): """ converts a geometry object to a common.Geometry object """ if isinstance(geom, arcpy.Multipoint): feature_geom = [] fPart = [] for part in geom: fPart = [] for pnt in part: fPart.append(Point(coord=[pnt.X, pnt.Y], wkid=geom.spatialReference.factoryCode, z=pnt.Z, m=pnt.M)) feature_geom.append(fPart) return feature_geom #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryMultipoint" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #----------
stryder199/RyarkAssignments
Assignment2/ttt/archive/_old/KnR/KnR_1-7b.py
Python
mit
689
0.015965
game_type = 'input_output' parameter_list = [['$x1','int'], ['$y0','int'], ['$y1','int']] tuple_list = [ ['KnR_1-7b_',[-3,None,None]] ] global_code_template = '''\ d #include &lt;st
dio.h> x #include <stdio.h> dx dx /* power: raise base to n-th power; n >= 0 */ dx /* (old-style version) */ dx power(base, n) dx int base, n; dx { dx int i, p; dx dx p = 1; dx for (i = 1; i <= n; ++i) dx p = p * base; dx return p; dx } dx dx /* test power function */ ''' main_code_template = '''\ dx int i; dx dx for (i = 0; i < 3 ; ++i) dx printf("%d %d %d\\n", i, power(2,i), power($x1,i)); ''' argv_template = '' stdin_template = '' stdout_template = ''
'\ 0 1 1 1 2 $y0 2 4 $y1 '''
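To see what the placeholders in stdout_template resolve to: with the sample parameter $x1 = -3 from tuple_list, the generated program prints i, power(2,i) and power(-3,i) for i = 0..2, so $y0 = -3 and $y1 = 9. A throwaway Python re-implementation of the KnR power() loop confirms the expected stdout:

# Pure-Python equivalent of the KnR power() function in the template,
# used only to derive the expected output for $x1 = -3.
def power(base, n):
    p = 1
    for _ in range(n):
        p *= base
    return p

for i in range(3):
    print(i, power(2, i), power(-3, i))
# -> 0 1 1
#    1 2 -3   (so $y0 = -3)
#    2 4 9    (so $y1 = 9)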
ComputerNetworks-UFRGS/OpERA
python/experiment_design/transmission_config.py
Python
apache-2.0
3,622
0.004694
__author__ = 'jtsreinaldo' from radio_constants import * from validation_constants import * class TXConfigRadioGenerator(object): """ A class for the transmission configuration of a radio. """ def __init__(self): """ CTOR """ pass @staticmethod def tx_generator(radio): """ Receives a variable formatted in YAML file style, containing information about radio configurations, which will be used to generate a source file. @param radio """ # Checks if the transmitter is OFDM or GMSK and creates the correct instance. If it is neither of them, # raise an exception. if OFDM in radio[TX][TYPE]: tx_type = OFDM elif GMSK in radio[TX][TYPE]: tx_type = GMSK else: raise Exception("The type of the transmitter should be gmsk or ofdm!") if tx_type == OFDM: # The user may not have given all the parameters (all of them have default values), so we have to be # cautious and use the try/except statement. try: the_fft_length = radio[TX][FFT_LENGTH] except: # from DEFAULTS dict: the_fft_length = DEFAULTS[TX][OFDM][FFT_LENGTH] try: the_cp_length = radio[TX][CP_LENGTH] except: # from DEFAULTS dict: the_cp_length = DEFAULTS[TX][OFDM][CP_LENGTH] try: occ_tones = radio[TX][OCC_TONES] except: # from DEFAULTS dict: occ_tones = DEFAULTS[TX][OFDM][OCC_TONES] try: the_modulation = radio[TX][MODULATION] except: # from DEFAULTS dict: the_modulation = DEFAULTS[TX][OFDM][MODULATION] tx_arch = "PacketOFDMTx(modulation={modulation}, cp_length={cp_length}, fft_length={fft_length}, " \ "occupied_tones={occupied_tones})" # The modulation needs to be a string, so we have to format it. str_modulation = "\"{modulation}\"" str_modulation = str_modulation.format(modulation=the_modulation) the_modulation = str_modulation tx_arch = tx_arch.format(fft_length=the_fft_length, cp_length=the_cp_length, modulation=the_modulation, occupied_tones=occ_tones) elif tx_type == GMSK: try: samples_per_symbol = radio[TX][SAMPLES_PER_SYMBOL] except: samples_per_symbol = DEFAULTS[TX][GMSK][SAMPLES_PER_SYMBOL] try: bt = radio[TX][BT] except: bt = DEFAULTS[TX][GMSK][BT]
try: modulator = "digital.gmsk_mod(samples_per_symbol={samples_per_symbol},
bt={bt})" modulator = modulator.format(samples_per_symbol=samples_per_symbol, bt=bt) except: modulator = DEFAULTS[TX][GMSK][MODULATOR] tx_arch = "PacketGMSKTx(modulator={modulator})" tx_arch = tx_arch.format(modulator=modulator) ip = radio[USRP][IP] # Checks if the user passed the usrp ip address. if ip is None: uhd_sink = "UHDSink()" else: uhd_sink = "UHDSink(\"addr={ip}\")" uhd_sink = uhd_sink.format(ip=ip) return tx_type, tx_arch, uhd_sink
lujinda/iotop
iotop/ui.py
Python
gpl-2.0
24,215
0.000991
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either ve
rsion 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Bosto
n, MA 02110-1301 USA # # See the COPYING file for license information. # # Copyright (c) 2007 Guillaume Chazarain <[email protected]> # Allow printing with same syntax in Python 2/3 from __future__ import print_function import curses import errno import locale import math import optparse import os import select import signal import sys import time from iotop.data import find_uids, TaskStatsNetlink, ProcessList, Stats,find_pids from iotop.data import ThreadInfo from iotop.version import VERSION from iotop import ioprio from iotop.ioprio import IoprioSetError # # Utility functions for the UI # UNITS = ['B', 'K', 'M', 'G', 'T', 'P', 'E'] def human_size(size): if size > 0: sign = '' elif size < 0: sign = '-' size = -size else: return '0.00 B' expo = int(math.log(size / 2, 2) / 10) return '%s%.2f %s' % ( sign, (float(size) / (1 << (10 * expo))), UNITS[expo]) def format_size(options, bytes): if options.kilobytes: return '%.2f K' % (bytes / 1024.0) return human_size(bytes) def format_bandwidth(options, size, duration): return format_size(options, size and float(size) / duration) + '/s' def format_stats(options, process, duration): # Keep in sync with TaskStatsNetlink.members_offsets and # IOTopUI.get_data(self) def delay2percent(delay): # delay in ns, duration in s return '%.2f %%' % min(99.99, delay / (duration * 10000000.0)) if options.accumulated: stats = process.stats_accum display_format = lambda size, duration: format_size(options, size) duration = time.time() - process.stats_accum_timestamp else: stats = process.stats_delta display_format = lambda size, duration: format_bandwidth( options, size, duration) io_delay = delay2percent(stats.blkio_delay_total) swapin_delay = delay2percent(stats.swapin_delay_total) read_bytes = display_format(stats.read_bytes, duration) written_bytes = stats.write_bytes - stats.cancelled_write_bytes written_bytes = max(0, written_bytes) write_bytes = display_format(written_bytes, duration) return io_delay, swapin_delay, read_bytes, write_bytes def get_max_pid_width(): try: return len(open('/proc/sys/kernel/pid_max').read().strip()) except Exception as e: print(e) # Reasonable default in case something fails return 5 MAX_PID_WIDTH = get_max_pid_width() # # UI Exceptions # class CancelInput(Exception): pass class InvalidInt(Exception): pass class InvalidPid(Exception): pass class InvalidTid(Exception): pass class InvalidIoprioData(Exception): pass # # The UI # class IOTopUI(object): # key, reverse sorting_keys = [ (lambda p, s: p.pid, False), (lambda p, s: p.ioprio_sort_key(), False), (lambda p, s: p.get_user(), False), (lambda p, s: s.read_bytes, True), (lambda p, s: s.write_bytes - s.cancelled_write_bytes, True), (lambda p, s: s.swapin_delay_total, True), # The default sorting (by I/O % time) should show processes doing # only writes, without waiting on them (lambda p, s: s.blkio_delay_total or int(not(not(s.read_bytes or s.write_bytes))), True), (lambda p, s: p.get_cmdline(), False), ] def __init__(self, win, process_list, options): self.process_list = process_list self.options = options self.sorting_key = 6 self.sorting_reverse = IOTopUI.sorting_keys[self.sorting_key][1] if not self.options.batch: self.win = win self.resize() try: curses.use_default_colors() curses.start_color() curses.curs_set(0) except curses.error: # This call can fail with misconfigured terminals, for example # TERM=xterm-color. 
This is harmless pass def resize(self): self.height, self.width = self.win.getmaxyx() def run(self): iterations = 0 poll = select.poll() if not self.options.batch: poll.register(sys.stdin.fileno(), select.POLLIN | select.POLLPRI) while self.options.iterations is None or \ iterations < self.options.iterations: total, current = self.process_list.refresh_processes() self.refresh_display(iterations == 0, total, current, self.process_list.duration) if self.options.iterations is not None: iterations += 1 if iterations >= self.options.iterations: break elif iterations == 0: iterations = 1 try: events = poll.poll(self.options.delay_seconds * 1000.0) except select.error as e: if e.args and e.args[0] == errno.EINTR: events = [] else: raise for (fd, event) in events: if event & (select.POLLERR | select.POLLHUP): sys.exit(1) if not self.options.batch: self.resize() if events: key = self.win.getch() self.handle_key(key) def reverse_sorting(self): self.sorting_reverse = not self.sorting_reverse def adjust_sorting_key(self, delta): orig_sorting_key = self.sorting_key self.sorting_key += delta self.sorting_key = max(0, self.sorting_key) self.sorting_key = min(len(IOTopUI.sorting_keys) - 1, self.sorting_key) if orig_sorting_key != self.sorting_key: self.sorting_reverse = IOTopUI.sorting_keys[self.sorting_key][1] # I wonder if switching to urwid for the display would be better here def prompt_str(self, prompt, default=None, empty_is_cancel=True): self.win.hline(1, 0, ord(' ') | curses.A_NORMAL, self.width) self.win.addstr(1, 0, prompt, curses.A_BOLD) self.win.refresh() curses.echo() curses.curs_set(1) inp = self.win.getstr(1, len(prompt)) curses.curs_set(0) curses.noecho() if inp not in (None, ''): return inp if empty_is_cancel: raise CancelInput() return default def prompt_int(self, prompt, default=None, empty_is_cancel=True): inp = self.prompt_str(prompt, default, empty_is_cancel) try: return int(inp) except ValueError: raise InvalidInt() def prompt_pid(self): try: return self.prompt_int('PID to ionice: ') except InvalidInt: raise InvalidPid() except CancelInput: raise def prompt_tid(self): try: return self.prompt_int('TID to ionice: ') except InvalidInt: raise InvalidTid() except CancelInput: raise def prompt_data(self, ioprio_data): try: if ioprio_data is not None: inp = self.prompt_int('I/O priority data (0-7, currently %s): ' % ioprio_data, ioprio_data, False) else: inp = self.prompt_int('I/O priority data (0-7): ', None, False) except InvalidInt: raise InvalidIoprioData() if
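A quirk of human_size() above: the exponent is derived from size / 2, so a value only moves up to the next unit once it reaches twice that unit's size (2048 bytes is the first value printed in K). A minimal standalone sketch of just that helper, with the sign handling dropped for brevity:

import math

UNITS = ['B', 'K', 'M', 'G', 'T', 'P', 'E']

def human_size(size):  # as defined above, minus negative-value handling
    if size == 0:
        return '0.00 B'
    expo = int(math.log(size / 2, 2) / 10)
    return '%.2f %s' % (float(size) / (1 << (10 * expo)), UNITS[expo])

print(human_size(2047))           # '2047.00 B' -- still bytes
print(human_size(2048))           # '2.00 K'    -- switches at 2x the unit
print(human_size(3 * 1024 ** 2))  # '3.00 M'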
jferreir/mbed
workspace_tools/host_tests/host_registry.py
Python
apache-2.0
1,214
0.004942
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and lim
itations under the License. """ class HostRegistry: """ Class stores a registry of host tests and the objects representing them """ HOST_TESTS = {} # host_test_name -> host_test_object def register_host_test(self, ht_name, ht_object): if ht_name not in self.HOST_TESTS: self.HOST_TESTS[ht_name] = ht_object
def unregister_host_test(self, ht_name): if ht_name in self.HOST_TESTS: del self.HOST_TESTS[ht_name] def get_host_test(self, ht_name): return self.HOST_TESTS[ht_name] if ht_name in self.HOST_TESTS else None def is_host_test(self, ht_name): return ht_name in self.HOST_TESTS
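Registering and looking up host tests is a plain dict round-trip; a minimal sketch with a hypothetical test object (real host tests are plugin instances the mbed test framework calls into):

class EchoTest(object):  # hypothetical stand-in for a real host test
    name = "echo"

registry = HostRegistry()
registry.register_host_test("echo", EchoTest())
assert registry.is_host_test("echo")
print(registry.get_host_test("echo"))      # the EchoTest instance
registry.unregister_host_test("echo")
assert not registry.is_host_test("echo")
print(registry.get_host_test("missing"))   # None for unknown names

Note that HOST_TESTS is a class attribute, so every HostRegistry instance shares the same underlying registry.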
matthewrmshin/isodatetime
metomi/isodatetime/tests/test_datetimeoper.py
Python
lgpl-3.0
15,759
0
# -*- coding: utf-8 -*- # ---------------------------------------------------------------------------- # Copyright (C) 2013-2019 British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ---------------------------------------------------------------------------- """Test isodatetime.datetimeoper functionalities.""" import os import unittest from unittest.mock import patch from metomi.isodatetime.data import ( get_timepoint_from_seconds_since_unix_epoch as seconds2point) import metomi.isodatetime.datetimeoper as idt_dtoper class TestDateTimeOperator(unittest.TestCase): """Test isodatetime.datetimeoper.TestDateTimeOperator functionalities.""" @patch('metomi.isodatetime.datetimeoper.now2point') def test_process_time_point_str_now_0(self, mock_now_func): """DateTimeOperator.process_time_point_str()""" # 2009-02-13T23:31:30Z mock_now = seconds2point(1234567890) mock_now_func.return_value = mock_now datetimeoper = idt_dtoper.DateTimeOperator() self.assertEqual(str(mock_now), datetimeoper.process_time_point_str()) self.assertEqual( str(mock_now), datetimeoper.process_time_point_str(datetimeoper.STR_NOW)) @patch('metomi.isodatetime.datetimeoper.now2point') def test_process_time_point_str_ref_0(self, mock_now_func): """DateTimeOperator.process_time_point_str('ref') But without explicit reference time, so default to now. """ # 2009-02-13T23:31:30Z mock_now = seconds2point(1234567890) mock_now_func.return_value = mock_now datetimeoper = idt_dtoper.DateTimeOperator() # Ensure that the ISODATETIMEREF environment variable is not set # Or the test may not work. environ = os.environ.copy() if datetimeoper.ENV_REF in environ: del environ[datetimeoper.ENV_REF] with patch.dict(os.environ, environ, clear=True): self.assertEqual( str(mock_now), datetimeoper.process_time_point_str(datetimeoper.STR_REF)) def test_process_time_point_str_ref_1(self): """DateTimeOperator.process_time_point_str('ref') With explicit reference time. """ # 2009-02-13T23:31:30Z ref_point_str = str(seconds2point(1234567890)) datetimeoper = idt_dtoper.DateTimeOperator( ref_point_str=ref_point_str) self.assertEqual( ref_point_str, datetimeoper.process_time_point_str(datetimeoper.STR_REF)) def test_process_time_point_str_ref_2(self): """DateTimeOperator.process_time_point_str('ref') With explicit reference time as ISODATETIMEREF environment variable. """ # 2009-02-13T23:31:30Z ref_point_str = str(seconds2point(1234567890)) # Set ISODATETIMEREF. # Or the test may not work. environ = os.environ.copy() environ[idt_dtoper.DateTimeOperator.ENV_REF] = ( ref_point_str) with patch.dict(os.environ, environ): datetimeoper = idt_dtoper.DateTimeOperator() self.assertEqual( ref_point_str, datetimeoper.process_time_point_str(datetimeoper.STR_REF)) def test_process_time_point_str_x(self): """DateTimeOperator.process_time_point_str(...) Basic parse and dump of a time point string. 
""" # 2009-02-13T23:31:30Z point_str = str(seconds2point(1234567890)) datetimeoper = idt_dtoper.DateTimeOperator() # Unix time self.assertEqual( '2019-01-11T10:40:15Z', datetimeoper.process_time_point_str( 'Fri 11 Jan 10:40:15 UTC 2019', print_format=datetimeoper.CURRENT_TIME_DUMP_FORMAT_Z)) # Basic self.assertEqual( point_str, datetimeoper.process_time_point_str(point_str)) # +ve offset point_str_1 = str(seconds2point(1234567890 + 3600)) self.assertEqual( point_str_1, datetimeoper.process_time_point_str(point_str, ['PT1H'])) # +ve offset, time point like duration point_str_1 = str(seconds2point(1234567890 + 3600)) self.assertEqual( point_str_1, datetimeoper.process_time_point_str(point_str, ['P0000-00-00T01'])) # -ve offset point_str_2 = str(seconds2point(1234567890 - 86400)) self.assertEqual( point_str_2,
datetimeoper.process_time_point_s
tr(point_str, ['-P1D'])) # offsets that cancel out self.assertEqual( point_str, datetimeoper.process_time_point_str(point_str, ['PT1H', '-PT60M'])) # Multiple offsets in 1 string point_str_3 = str(seconds2point(1234567890 - 86400 - 3600)) self.assertEqual( point_str_3, datetimeoper.process_time_point_str(point_str, ['-P1DT1H'])) # Multiple offsets self.assertEqual( point_str_3, datetimeoper.process_time_point_str(point_str, ['-P1D', '-PT1H'])) # Bad time point string self.assertRaises( ValueError, datetimeoper.process_time_point_str, 'teatime') # Bad offset string with self.assertRaises( idt_dtoper.OffsetValueError, ) as ctxmgr: datetimeoper.process_time_point_str(point_str, ['ages']) self.assertEqual('ages: bad offset value', str(ctxmgr.exception)) # Bad offset string, unsupported time point like duration with self.assertRaises( idt_dtoper.OffsetValueError, ) as ctxmgr: datetimeoper.process_time_point_str(point_str, ['P0000-W01-1']) self.assertEqual( 'P0000-W01-1: bad offset value', str(ctxmgr.exception)) def test_process_time_point_str_calendar(self): """DateTimeOperator.process_time_point_str(...) Alternate calendars. """ self.assertEqual( 'gregorian', idt_dtoper.DateTimeOperator.get_calendar_mode()) self.assertRaises( KeyError, idt_dtoper.DateTimeOperator.set_calendar_mode, 'milkywaygalactic') for cal, str_in, offsets, str_out in [ # 360day ('360day', '20130301', ['-P1D'], '20130230'), ('360day', '20130230', ['P1D'], '20130301'), # 360_day ('360_day', '20130301', ['-P1D'], '20130230'), ('360_day', '20130230', ['P1D'], '20130301'), # 365day ('365day', '20130301', ['-P1D'], '20130228'), ('365day', '20130228', ['P1D'], '20130301'), # 365_day ('365_day', '20130301', ['-P1D'], '20130228'), ('365_day', '20130228', ['P1D'], '20130301'), # 366day ('366day', '20130301', ['-P1D'], '20130229'), ('366day', '20130229', ['P1D'], '20130301'), # 366_day ('366_day', '20130301', ['-P1D'], '20130229'), ('366_day', '20130229', ['P1D'], '20130301'), ]: # Calendar mode, is unfortunately, a global variable, # so needs to reset value on return. calendar_mode = ( idt_dtoper.DateTimeOperator.get_calendar_mode()) # Calendar mode by constructor. try: datetimeoper = idt_dtoper.DateTimeOperator( calendar_mode=cal)
artefactual/archivematica-storage-service
storage_service/locations/south_migrations/0006_duracloud.py
Python
agpl-3.0
25,555
0.001722
# -*- coding: utf-8 -*- from __future__ import absolute_import from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Duracloud' db.create_table( u"locations_duracloud", ( (u"id", self.gf("django.db.models.fields.AutoField")(primary_key=True)), ( "space", self.gf("django.db.models.fields.related.OneToOneField")( to=orm["locations.Space"], to_field="uuid", unique=True ), ), ("host", self.gf("django.db.models.fields.CharField")(max_length=256)), ("user", self.gf("django.db.models.fields.CharField")(max_length=64)), ( "password", self.gf("django.db.models.fields.CharField")(max_length=64), ), ( "duraspace", self.gf("django.db.models.fields.CharField")(max_length=64), ), ), ) db.send_create_signal("locations", ["Duracloud"]) def backwards(self, orm): # Deleting model 'Duracloud' db.delete_table(u"locations_duracloud") models = { u"auth.group": { "Meta": {"object_name": "Group"}, u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "name": ( "django.db.models.fields.CharField", [], {"unique": "True", "max_length": "80"}, ), "permissions": ( "django.db.models.fields.related.ManyToManyField", [], { "to": u"orm['auth.Permission']", "symmetrical": "False", "blank": "True", }, ), }, u"auth.permission": { "Meta": { "ordering": "(u'content_type__app_label', u'content_type__model', u'codename')", "unique_together": "((u'content_type', u'codename'),)", "object_name": "Permission", }, "codename": ( "django.db.models.fields.CharField", [], {"max_length": "100"}, ), "content_type": ( "django.db.models.fields.related.ForeignKey", [], {"to": u"orm['contenttypes.ContentType']"}, ), u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "name": ("django.db.models.fields.CharField", [], {"max_length": "50"}), }, u"auth.user": { "Meta": {"object_name": "User"}, "date_joined": ( "django.db.models.fields.DateTimeField", [], {"default": "datetime.datetime.now"}, ), "email": ( "django.db.models.fields.EmailField", [], {"max_length": "75", "blank": "True"}, ), "first_name": ( "django.db.models.fields.CharField", [], {"max_length": "30", "blank": "True"}, ), "groups": ( "django.db.models.fields.related.ManyToManyField", [], {"to": u"orm['auth.G
roup']", "symmetrical": "False", "blank": "True"}, ), u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "is_active": ( "django.db.models.fields.BooleanField", [], {"default": "True"}, ), "is_staff": ( "
django.db.models.fields.BooleanField", [], {"default": "False"}, ), "is_superuser": ( "django.db.models.fields.BooleanField", [], {"default": "False"}, ), "last_login": ( "django.db.models.fields.DateTimeField", [], {"default": "datetime.datetime.now"}, ), "last_name": ( "django.db.models.fields.CharField", [], {"max_length": "30", "blank": "True"}, ), "password": ( "django.db.models.fields.CharField", [], {"max_length": "128"}, ), "user_permissions": ( "django.db.models.fields.related.ManyToManyField", [], { "to": u"orm['auth.Permission']", "symmetrical": "False", "blank": "True", }, ), "username": ( "django.db.models.fields.CharField", [], {"unique": "True", "max_length": "30"}, ), }, u"contenttypes.contenttype": { "Meta": { "ordering": "('name',)", "unique_together": "(('app_label', 'model'),)", "object_name": "ContentType", "db_table": "'django_content_type'", }, "app_label": ( "django.db.models.fields.CharField", [], {"max_length": "100"}, ), u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "model": ("django.db.models.fields.CharField", [], {"max_length": "100"}), "name": ("django.db.models.fields.CharField", [], {"max_length": "100"}), }, "locations.callback": { "Meta": {"object_name": "Callback"}, "enabled": ( "django.db.models.fields.BooleanField", [], {"default": "True"}, ), "event": ("django.db.models.fields.CharField", [], {"max_length": "15"}), "expected_status": ( "django.db.models.fields.IntegerField", [], {"default": "200"}, ), u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "method": ("django.db.models.fields.CharField", [], {"max_length": "10"}), "uri": ("django.db.models.fields.CharField", [], {"max_length": "1024"}), "uuid": ( "django.db.models.fields.CharField", [], {"max_length": "36", "blank": "True"}, ), }, "locations.duracloud": { "Meta": {"object_name": "Duracloud"}, "duraspace": ( "django.db.models.fields.CharField", [], {"max_length": "64"}, ), "host": ("django.db.models.fields.CharField", [], {"max_length": "256"}), u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}), "password": ("django.db.models.fields.CharField", [], {"max_length": "64"}), "space": ( "django.db.models.fields.related.OneToOneField", [], { "to": "orm['locations.Space']", "to_field": "'uuid'", "unique": "True", }, ), "user": ("django.db.models.fields.CharField", [], {"max_length": "64"}), }, "locations.event": { "Meta": {"object_name": "Event"}, "admin_id": ( "django.db.models.fields.related.ForeignKey", [], {"to": u"orm['auth.User']", "null": "True", "blank": "True"}, ), "event_reason": ("django.db.models.fields.TextField", [], {}), "event_type": ( "django.db.models.fields.CharField", [], {"max_length": "8"}, ), u"id": ("django.db.models.fields.AutoField", [], {"pr
macosforge/ccs-calendarserver
contrib/performance/benchmarks/event_move.py
Python
apache-2.0
2,708
0
## # Copyright (c) 2010-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from itertools import count, cycle from urllib2 import HTTPDigestAuthHandler from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks, returnValue from twisted.web.client import Agent from twisted.web.http_headers import Headers from twisted.web.http import CREATED from contrib.performance.httpauth import AuthHandlerAgent from contrib.p
erformance.httpclient import StringProducer from contrib.performance.benchlib import initialize, sample from contrib.performance.benchmarks.event import makeEvent @inlineCallbacks def measure(host, port, dtrace, attendeeCount, samples): organizerSequence = 1 user = password = "
user%02d" % (organizerSequence,) root = "/" principal = "/" # Two calendars between which to move the event. fooCalendar = "event-move-foo-benchmark" barCalendar = "event-move-bar-benchmark" authinfo = HTTPDigestAuthHandler() authinfo.add_password( realm="Test Realm", uri="http://%s:%d/" % (host, port), user=user, passwd=password) agent = AuthHandlerAgent(Agent(reactor), authinfo) # Set up the calendars first for calendar in [fooCalendar, barCalendar]: yield initialize( agent, host, port, user, password, root, principal, calendar) fooURI = 'http://%s:%d/calendars/__uids__/%s/%s/some-event.ics' % ( host, port, user, fooCalendar) barURI = 'http://%s:%d/calendars/__uids__/%s/%s/some-event.ics' % ( host, port, user, barCalendar) # Create the event that will move around headers = Headers({"content-type": ["text/calendar"]}) yield agent.request( 'PUT', fooURI, headers, StringProducer(makeEvent(1, organizerSequence, attendeeCount))) # Move it around sooo much source = cycle([fooURI, barURI]) dest = cycle([barURI, fooURI]) params = ( ('MOVE', source.next(), Headers({"destination": [dest.next()], "overwrite": ["F"]})) for i in count(1)) samples = yield sample(dtrace, samples, agent, params.next, CREATED) returnValue(samples)
sjsucohort6/openstack
python/venv/lib/python2.7/site-packages/cliff/formatters/shell.py
Python
mit
1,337
0
"""Output formatters using shell syntax. """ from .base import SingleFormatter import argparse import six class ShellFormatter(SingleFormatter): def add_argument_group(self, parser): group = parser.add_argument_group( title='shell formatter', description='a format a UNIX shell can parse (variable="value")', ) group.add_argument( '--variable', action='append', default=[], dest
='variables', metavar='VARIABLE', help=argparse.SUPPRESS, ) group.add_argument( '--prefix', action='store', default='', dest='prefix', help='add a prefix to all variable names', ) def emit_one(self, column_names, data, stdout, parsed_args): variabl
e_names = [c.lower().replace(' ', '_') for c in column_names ] desired_columns = parsed_args.variables for name, value in zip(variable_names, data): if name in desired_columns or not desired_columns: if isinstance(value, six.string_types): value = value.replace('"', '\\"') stdout.write('%s%s="%s"\n' % (parsed_args.prefix, name, value)) return
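Driving emit_one directly shows the output contract: column names are lowercased with spaces turned into underscores, values have their double quotes escaped, and the optional prefix is prepended to each variable. A Python 3 sketch (argparse.Namespace stands in for real parsed CLI options; assumes cliff is installed so the class above imports):

import argparse
import io

out = io.StringIO()
opts = argparse.Namespace(variables=[], prefix='demo_')
ShellFormatter().emit_one(('Server Name', 'Status'),
                          ('db 1', 'up "ok"'), out, opts)
print(out.getvalue())
# demo_server_name="db 1"
# demo_status="up \"ok\""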
myint/language-check
test.py
Python
lgpl-3.0
5,483
0
#!/usr/bin/env python # -*- coding: utf-8 -*- """Test suite for language_check.""" from __future__ import unicode_literals import unittest import warnings from collections import namedtuple import language_check class TestLanguageTool(unittest.TestCase): CheckTest = namedtuple('CheckTest', ('text', 'matches')) Match = namedtuple('Match', ('fromy', 'fromx', 'ruleId')) check_tests = { 'en': [ CheckTest( ('Paste your own text here... or check this text too see ' 'a few of the problems that that LanguageTool can detect. ' 'Did you notice that their is no spelcheckin included?'), [ Match(0, 47, 'TOO_TO'), Match(0, 132, 'THEIR_IS'), ] ), ], 'fr': [ CheckTest( ('Se texte est un exemple pour pour vous montrer ' 'le fonctionnement de
LanguageTool. ' 'notez que LanguageTool ne comporte pas ' 'de correcteur orthographique.'), [ Match(0, 0, 'SE_CE'), Match(0, 3, 'TE_NV'), M
atch(0, 24, 'FRENCH_WORD_REPEAT_RULE'), Match(0, 82, 'UPPERCASE_SENTENCE_START'), ] ), CheckTest( 'je me rappelle de tout sans aucun soucis!', [ Match(0, 0, 'UPPERCASE_SENTENCE_START'), Match(0, 6, 'RAPPELER_DE'), Match(0, 28, 'ACCORD_NOMBRE'), Match(0, 34, 'FRENCH_WHITESPACE'), ] ), ], } correct_tests = { 'en-US': { 'that would of been to impressive.': 'That would have been too impressive.', }, 'fr': { 'il monte en haut si il veut.': 'Il monte s’il veut.', }, } def test_check(self): lang_check = language_check.LanguageTool() for language, tests in self.check_tests.items(): try: lang_check.language = language except ValueError: version = language_check.get_version() warnings.warn( 'LanguageTool {} doesn’t support language {!r}' .format(version, language) ) for text, expected_matches in tests: matches = lang_check.check(text) for expected_match in expected_matches: for match in matches: if ( (match.fromy, match.fromx, match.ruleId) == (expected_match.fromy, expected_match.fromx, expected_match.ruleId) ): break else: raise IndexError( 'can’t find {!r}'.format(expected_match)) def test_correct(self): lang_check = language_check.LanguageTool() for language, tests in self.correct_tests.items(): try: lang_check.language = language except ValueError: version = language_check.get_version() warnings.warn( 'LanguageTool {} doesn’t support language {!r}' .format(version, language) ) for text, result in tests.items(): self.assertEqual(lang_check.correct(text), result) def test_languages(self): self.assertIn('en', language_check.get_languages()) def test_version(self): self.assertTrue(language_check.get_version()) def test_get_build_date(self): self.assertTrue(language_check.get_build_date()) def test_get_directory(self): path = language_check.get_directory() language_check.set_directory(path) self.assertEqual(path, language_check.get_directory()) def test_disable_spellcheck(self): sentence_with_misspelling = 'This is baad.' lang_check = language_check.LanguageTool() self.assertTrue(lang_check.check(sentence_with_misspelling)) lang_check.disable_spellchecking() self.assertFalse(lang_check.check(sentence_with_misspelling)) lang_check.enable_spellchecking() self.assertTrue(lang_check.check(sentence_with_misspelling)) def test_README_with_unicode(self): tool = language_check.LanguageTool('en-US') text = ('A sentence with a error in the ' 'Hitchhiker’s Guide tot he Galaxy') matches = tool.check(text) self.assertEqual(len(matches), 2) self.assertEqual((matches[0].fromy, matches[0].fromx), (0, 16)) self.assertEqual((matches[0].ruleId, matches[0].replacements), ('EN_A_VS_AN', ['an'])) self.assertEqual((matches[1].fromy, matches[1].fromx), (0, 50)) self.assertEqual((matches[1].ruleId, matches[1].replacements), ('TOT_HE', ['to the'])) corrected = language_check.correct(text, matches) self.assertEqual(corrected, 'A sentence with an error in the ' 'Hitchhiker’s Guide to the Galaxy') if __name__ == '__main__': unittest.main()
yeleman/uninond
uninond/tools.py
Python
mit
6,367
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: ai ts=4 sts=4 et sw=4 nu from __future__ import (unicode_literals, absolute_import, division, print_function) import re import unicodedata import datetime import subprocess from py3compat import string_types, text_type from django.utils import timezone from django.conf import settings from uninond.models.SMSMessages import SMSMessage # default country prefix COUNTRY_PREFIX = getattr(settings, 'COUNTRY_PREFIX', 223) ALL_COUNTRY_CODES = [1242, 1246, 1264, 1268, 1284, 1340, 1345, 1441, 1473, 1599, 1649, 1664, 1670, 1671, 1684, 1758, 1767, 1784, 1809, 1868, 1869, 1876, 1, 20, 212, 213, 216, 218, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 27, 290, 291, 297, 298, 299, 30, 31, 32, 33, 34, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 36, 370, 371, 372, 373, 374, 375, 376, 377, 378, 380, 381, 382, 385, 386, 387, 389, 39, 40, 41, 420, 421, 423, 43, 44, 45, 46, 47, 48, 49, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 51, 52, 53, 54, 55, 56, 57, 58, 590, 591, 592, 593, 595, 597, 598, 599, 60, 61, 62, 63, 64, 65, 66, 670, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689, 690, 691, 692, 7, 81, 82, 84, 850, 852, 853, 855, 856, 86, 870, 880, 886, 90, 91, 92, 93, 94, 95, 960, 961, 962, 963, 964, 965, 966, 967, 968, 970, 971, 972, 973, 974, 975, 976, 977, 98, 992, 993, 994, 995, 996, 998] MONTHS = ['J', 'F', 'M', 'A', 'Y', 'U', 'L', 'G', 'S', 'O', 'N', 'D'] ALPHA = 'abcdefghijklmnopqrstuvwxyz' def phonenumber_isint(number): ''' whether number is in international format ''' if re.match(r'^[+|(]', number): return True if re.match(r'^\d{1,4}\.\d+$', number): return True return False def phonenumber_indicator(number): ''' extract indicator from number or "" ''' for indic in ALL_COUNTRY_CODES: if number.startswith("%{}".format(indic)) \ or number.startswith("+{}".format(indic)): return str(indic) return "" def phonenumber_cleaned(number): ''' return (indicator, number) cleaned of space and other ''' # clean up if not isinstance(number, string_types): number = number.__str__() # cleanup mar
kup clean_number = re.sub(r'[^\d\+]', '', number) if phonenumber_isint(clean_number): h, indicator, clean_number = \ clean_number.partition(phonenumber_indicator(clean_number)) return (indicator, clean_number) return (None, clean_number) def join_phonenumber(prefix, number, force_intl=True): if not number: return None if not prefix and force_intl: prefix = COUNTRY_PREFIX return "
+{prefix}{number}".format(prefix=prefix, number=number) def phonenumber_repr(number, skip_indicator=str(COUNTRY_PREFIX)): ''' properly formated for visualization: (xxx) xx xx xx xx ''' def format(number): if len(number) % 2 == 0: span = 2 else: span = 3 # use NBSP return " ".join(["".join(number[i:i + span]) for i in range(0, len(number), span)]) indicator, clean_number = phonenumber_cleaned(number) # string-only identity goes into indicator if indicator is None and not clean_number: return number.strip() if indicator and indicator != skip_indicator: return "(%(ind)s) %(num)s" \ % {'ind': indicator, 'num': format(clean_number)} return format(clean_number) def normalized_phonenumber(number_text): if number_text is None or not number_text.strip(): return None return join_phonenumber(*phonenumber_cleaned(number_text)) def operator_from_malinumber(number, default=settings.FOREIGN): ''' ORANGE or MALITEL based on the number prefix ''' indicator, clean_number = phonenumber_cleaned( normalized_phonenumber(number)) if indicator is not None and indicator != str(COUNTRY_PREFIX): return default for operator, opt in settings.OPERATORS.items(): for prefix in opt[1]: if clean_number.startswith(str(prefix)): return operator return default def send_sms(to, text): return SMSMessage.objects.create( direction=SMSMessage.OUTGOING, identity=to, event_on=timezone.now(), text=text) def fake_message(to, text): message = send_sms(to, text) message.handled = True message.save() return message def to_ascii(text): return unicodedata.normalize('NFKD', unicode(text)) \ .encode('ASCII', 'ignore').strip() def date_to_ident(adate): year, month, day = adate.timetuple()[0:3] hyear = text_type(year)[-1] if day > 16: hmonth = ALPHA[month * 2] hday = hex(day // 2)[2:] else: hmonth = ALPHA[month] hday = hex(day)[2:] return "{y}{m}{d}".format(m=hmonth, d=hday, y=hyear) def ident_to_date(ident): hyear, hmonth, hday = ident[0], ident[1], ident[2:] year = int('201{}'.format(hyear)) day = int(hday, 16) month = ALPHA.index(hmonth) if month > 12: month //= 2 day *= 2 return datetime.date(year, month, day) def dispatch_sms(text, roles, root): sent_messages = [] for identity in root.ancestors_contacts(roles, identies_only=True): sent_messages.append(send_sms(identity, text)) return sent_messages def datetime_repr(adatetime): return ("{date} à {time}" .format(date=adatetime.strftime("%A %-d"), time=adatetime.strftime("%Hh%M")).lower()) def exec_cmd(command): process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) process.wait() return process.returncode
abhirevan/Yelp-Rate-my-Review
src/review_stats.py
Python
mit
2,318
0.004745
import csv from numpy import histogram def review_stats(count_ratings, length): # print "in extract_rows" ip_csv = "data\input\yelp_academic_dataset_review_ext.csv" with open(ip_csv, "rb") as source: rdr = csv.reader(source) firstline = True for r in rdr: if firstline: # skip first line firstline = False continue count_ratings[int(r[2])] += 1 length.append(len(r[0])) def business_stats(categories, category_count): ip_csv = "data\input\yelp_academic_dataset_business_ext.csv" with open(ip_csv, "rb") as source: rdr = csv.reader(source) next(rdr) # c = 0 for r in rdr: cat = r[0] items = cat.split(',') for i in items: i = i.lstrip() if category_count.has_key(i): category_count[i] = category_count[i] + 1 else: category_count[i] = 1 categories.append(i) # print categories # print category_count if __name__ == '__main__': count_ratings = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0} length = [] review_stats(count_ratings, length) print "Review Stats" print ('-' * 100) print "total reviews", count_ratings[1] + count_ratings[2] + count_ratings[3] + count_ratings[4] + count_ratings[5] print "Review breakup per ratings" print "Review 1 star", count_ratings[1] print "Review 2 star", count_ratings[2] print "Review 3 star", count_ratings[3] print "Review 4 star", count_ratings[4] print "Review 5
star", count_ratings[5] length.sort() sum = 0.0 for i in length: sum += i print "Min length: ", min(length), "Max length: ", max(length) print "Avg length: ", sum / len(length), "Median: ", length[len(length) / 2] hist,bin_edges = histogram(a=length,bins=20) print hist print bin_edges ''' print "Business Stats" print ('-' * 100) categories = [] category_count = {} business_stats(categories, category_count) print "Number of
categories", len(categories) print "Reviews per category:" for c in categories: print c + "?" + str(category_count[c]) '''
kubeflow/code-intelligence
py/notifications/notifications.py
Python
mit
6,630
0.008748
from code_intelligence import graphql
import fire
import github3
import json
import logging
import os
import numpy as np
import pprint
import retrying

TOKEN_NAME_PREFERENCE = ["INPUT_GITHUB_PERSONAL_ACCESS_TOKEN", "GITHUB_PERSONAL_ACCESS_TOKEN",
                         "GITHUB_TOKEN"]

# Default to None so the assert below fires with a clear message when no
# token variable is set (instead of raising a NameError).
TOKEN_NAME = None
for token in TOKEN_NAME_PREFERENCE:
  if os.getenv(token):
    TOKEN_NAME = token
    break

assert TOKEN_NAME, f"You must supply one of the following environment variables: {', '.join(TOKEN_NAME_PREFERENCE)}"

PULL_REQUEST_TYPE = "PullRequest"

# TODO(jlewi): Rewrite this code to use:
# i) graphql.unpack_and_split_nodes
# ii) graphql.shard_writer
def process_notification(n):
  # Mark as read anything that isn't an explicit mention.
  # For PRs there doesn't seem like a simple way to detect if the notice
  # is because the state changed
  #
  # We exclude mentions on PR because that gets overwhelmed by "/assign"
  # statements. We should potentially be more discerning and not mark the
  # notification as read for PR
s which aren't assigned to the user. if n.reason == "mention": if n.subject.get("type") != "PullRequest": return title = n.subject.get("title") logging.info("Marking as read: type: %s reason: %s title: %s", n.subject.get("type"), n.reason, title) n.mark() def process_issue_results(data): """Process the data returned by the issues GraphQL request. Args: data: The data returned Returns: issues: A lis
t of dicts; each dict is the data for some of the results """ edges = data.get("data").get("repository").get("issues").get("edges") issues = [] for e in edges: issues.append(e["node"]) return issues class NotificationManager(object): def mark_read(self, user): token = os.getenv(TOKEN_NAME) if not token: raise ValueError(("Environment variable {0} needs to be set to a GitHub " "token.").format(token)) client = github3.GitHub(username=user, token=token) notifications = client.notifications() # https://developer.github.com/v3/activity/notifications/ # # How do we identify closed pull requests? for n in notifications: process_notification(n) def write_notifications(self, user, output): """Write all notifications to a file. Args: user: Name of the user to get notifications for output: The file to write notifications to. Fetches all notifications, including ones marked read, and writes them to the supplied file. """ token = os.getenv(TOKEN_NAME) if not token: raise ValueError(("Environment variable {0} needs to be set to a GitHub " "token.").format(token)) client = github3.GitHub(username=user, token=token) notifications = client.notifications(all=True) # https://developer.github.com/v3/activity/notifications/ # # How do we identify closed pull requests? i = 0 with open(output, mode="w") as hf: for n in notifications: i += 1 hf.write(n.as_json()) hf.write("\n") logging.info("Wrote %s notifications to %s", i, output) def fetch_issues(self, org, repo, output): """Fetch issues for a repository Args: org: The org that owns the repository repo: The directory for the repository output: The directory to write the results Writes the issues along with the first comments to a file in output directory. """ client = graphql.GraphQLClient() num_issues_per_page = 100 query_template = """{{ repository(owner: "{org}", name: "{repo}") {{ issues(first:{num_issues_per_page} {issues_cursor}) {{ totalCount pageInfo {{ endCursor hasNextPage }} edges{{ node {{ author {{ __typename ... on User {{ login }} ... on Bot{{ login }} }} title body comments(first:20, ){{ totalCount edges {{ node {{ author {{ __typename ... on User {{ login }} ... 
on Bot{{ login }} }} body createdAt }} }} }} }} }} }} }} }} """ shard = 0 num_pages = None if not os.path.exists(output): os.makedirs(output) total_issues = None has_next_issues_page = True # TODO(jlewi): We should persist the cursors to disk so we can resume # after errors issues_cursor = None while has_next_issues_page: issues_cursor_text = "" if issues_cursor: issues_cursor_text = "after:\"{0}\"".format(issues_cursor) query = query_template.format(org=org, repo=repo, num_issues_per_page=num_issues_per_page, issues_cursor=issues_cursor_text) results = client.run_query(query) if results.get("errors"): logging.error("There was a problem issuing the query; errors:\n%s", "\n".join(results.get("errors"))) return if not total_issues: total_issues = results["data"]["repository"]["issues"]["totalCount"] num_pages = int(np.ceil(total_issues/float(num_issues_per_page))) logging.info("%s/%s has a total of %s issues", org, repo, total_issues) shard_file = os.path.join( output, "issues-{0}-{1}-{2:03d}-of-{3:03d}.json".format(org, repo, shard, num_pages)) issues = process_issue_results(results) with open(shard_file, "w") as hf: for i in issues: json.dump(i, hf) hf.write("\n") logging.info("Wrote shard %s to %s", shard, shard_file) shard += 1 page_info = results["data"]["repository"]["issues"]["pageInfo"] issues_cursor = page_info["endCursor"] has_next_issues_page = page_info["hasNextPage"] def _create_client(self, user): token = os.getenv(TOKEN_NAME) if not token: raise ValueError(("Environment variable {0} needs to be set to a GitHub " "token.").format(token)) client = github3.GitHub(username=user, token=token) return client if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format=('%(levelname)s|%(asctime)s' '|%(message)s|%(pathname)s|%(lineno)d|'), datefmt='%Y-%m-%dT%H:%M:%S', ) fire.Fire(NotificationManager)
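The fetch_issues method above is built around GitHub's GraphQL cursor pagination (pageInfo.endCursor / hasNextPage). A stripped-down sketch of the same loop using plain requests instead of the project's GraphQLClient; the endpoint and field names are GitHub's public API, the rest is illustrative:

import requests

GITHUB_GRAPHQL = "https://api.github.com/graphql"

QUERY = """
query($org: String!, $repo: String!, $cursor: String) {
  repository(owner: $org, name: $repo) {
    issues(first: 100, after: $cursor) {
      pageInfo { endCursor hasNextPage }
      nodes { number title }
    }
  }
}
"""

def iter_issues(org, repo, token):
    """Yield issue nodes one page at a time, following endCursor."""
    cursor = None
    while True:
        resp = requests.post(
            GITHUB_GRAPHQL,
            json={"query": QUERY,
                  "variables": {"org": org, "repo": repo, "cursor": cursor}},
            headers={"Authorization": "bearer " + token})
        resp.raise_for_status()
        issues = resp.json()["data"]["repository"]["issues"]
        for node in issues["nodes"]:
            yield node
        if not issues["pageInfo"]["hasNextPage"]:
            break
        cursor = issues["pageInfo"]["endCursor"]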
jacobajit/ion
intranet/apps/notifications/urls.py
Python
gpl-2.0
483
0.008282
# -*- coding:
utf-8 -*- from django.conf.urls import url from . import views urlpatterns = [ url(r"^/android/setup$", views.android_setup_view, name="notif_android_setup"), url(r"
^/chrome/setup$", views.chrome_setup_view, name="notif_chrome_setup"), url(r"^/chrome/getdata$", views.chrome_getdata_view, name="notif_chrome_getdata"), url(r"^/gcm/post$", views.gcm_post_view, name="notif_gcm_post"), url(r"^/gcm/list$", views.gcm_list_view, name="notif_gcm_list") ]
erasche/galactic-radio-telescope
api/migrations/0001_initial.py
Python
agpl-3.0
3,931
0.003561
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-08-04 09:25 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='GalaxyInstance', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('url', models.URLField(help_text='Instance URL', null=True)), ('title', models.CharField(help_text='The name / title of the instance. E.g. GalaxyP', max_length=256, null=True)), ('description', models.TextField(help_text='Any extra description you wish to add.', null=True)), ('users_recent', models.IntegerField(default=0)), ('users_total', models.IntegerField(default=0)), ('jobs_run', models.IntegerField(default=0)), ('api_key', models.UUIDField(default=uuid.uuid4, editable=False)), ('last_import', models.FloatField(default=-1)), ('owners', models.ManyToManyField(to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('external_job_id', models.IntegerField(default=-1)), ('tool_id', models.CharField(max_length=255)), ('tool_version', models.TextField()), ('state', models.CharField(max_length=16)), ('create_time', models.DateTimeField(blank=True, null=True)), ('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')), ], ), migrations.CreateModel( name='JobParam', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('external_job_id', models.IntegerField(default=-1)), ('name', m
odels.CharField(max_length=256)), ('value', models.TextField()), ('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')), ], ), migrations.CreateModel( name='MetricNumeric', fields=[ ('id', mode
ls.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('external_job_id', models.IntegerField(default=-1)), ('plugin', models.CharField(max_length=256)), ('name', models.CharField(max_length=256)), ('value', models.DecimalField(decimal_places=7, max_digits=22)), ('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')), ], ), migrations.CreateModel( name='MetricText', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('external_job_id', models.IntegerField(default=-1)), ('plugin', models.CharField(max_length=256)), ('name', models.CharField(max_length=256)), ('value', models.CharField(max_length=256)), ('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')), ], ), migrations.AlterUniqueTogether( name='job', unique_together=set([('instance', 'external_job_id')]), ), ]
kadhikari/navitia
source/jormungandr/tests/stif_tests.py
Python
agpl-3.0
7,989
0.005007
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved. # # This file is part of Navitia, # the software to build cool stuff with public transport. # # Hope you'll enjoy and contribute to this project, # powered by Canal TP (www.canaltp.fr). # Help us simplify mobility and open public transport: # a non ending quest to the responsive locomotion way of traveling! # # LICENCE: This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Stay tuned using # twitter @navitia # IRC #navitia on freenode # https://groups.google.com/d/forum/navitia # www.navitia.io from __future__ import absolute_import from .tests_mechanism import AbstractTestFixture, dataset from .check_utils import * @dataset({"main_stif_test": {}}) class TestStif(AbstractTestFixture): """ Test the stif scenario responses Possible journeys from A to B: 1/ 8h00 ====(line A)====> 10h00 2/ 9h00 ==(line B + C)==> 11h00 3/ 10h00 ====(line A)====> 12h00 """ def test_stif_simple(self): """ Test of simple request : * we want to make at least 2 journey calls (not only the best journey, but also try next) * we don't want 2 journeys using the same line and changing at same points So here we want journeys 1 and 2 """ query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \ "&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=true&_max_successive_physical_mode=3"\ .format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500") response = self.query_region(query) assert len(response['journeys']) == 2 assert response['journeys'][0]['arrival_date_time'] == '20140614T100000' assert response['journeys'][1]['arrival_date_time'] == '20140614T110000' def test_stif_override_min_journeys_calls(self): """ Test of simple request : * we only want 1 journey calls (no next call) So here we only want journeys 1 """ query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_
override_scenario=new_default" \ "&min_nb_journeys=1&_min_journeys_calls=1&_final_line_filter=true&_max_successive_physical_mode=3"\ .format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500") response = self.query_region(query) assert len(resp
onse['journeys']) == 1 assert response['journeys'][0]['arrival_date_time'] == '20140614T100000' def test_stif_override_final_line_filter(self): """ Test of simple request : * we want to make at least 2 journey calls (not only the best journey, but also try next) * we deactivate the filter on journeys using the same line and changing at same points So here we want journeys 1, 2 and 3 """ query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \ "&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=false&_max_successive_physical_mode=3"\ .format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500") response = self.query_region(query) assert len(response['journeys']) == 3 assert response['journeys'][0]['arrival_date_time'] == '20140614T100000' assert response['journeys'][1]['arrival_date_time'] == '20140614T110000' assert response['journeys'][2]['arrival_date_time'] == '20140614T120000' def test_stif_max_successive_buses(self): """ BUS Bus Bus Bus stopP ----> stopQ ----> stopR ----> stopS ----> stopT 15:00 16:00 17:00 18:00 19:00 Bus stopP ----------------------------------------> stopT 15:00 20:00 Test of request with parameter "_max_successive_physical_mode": * we want to make at least 2 journey calls (not only the best journey, but also try next) * we don't want the journey using more than 3 Buses So here we want journey1 """ query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \ "&_max_successive_physical_mode=3&_max_additional_connections=10"\ .format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500") response = self.query_region(query) assert len(response['journeys']) == 1 #As we modify the value of _max_successive_physical_mode to 5 we want two journeys query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \ "&_max_successive_physical_mode=5&_max_additional_connections=10"\ .format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500") response = self.query_region(query) assert len(response['journeys']) == 2 def test_stif_max_successive_buses_with_tram_in_between(self): """ BUS Bus Bus Bus Tram Bus Bus stopP ----> stopQ ----> stopR ----> stopS ----> stopT ----> stopU ----> stopV ----> stopW 15:00 16:00 17:00 18:00 19:00 19:30 20:00 20:30 Bus stopP ----------------------------------------------------------------------------> stopW 15:00 21:00 Test of request with parameter "_max_successive_physical_mode": * we want to make at least 2 journey calls (not only the best journey, but also try next) * we don't want the journey using more than 3 Buses successive * we have "Bus" and "Tram" as means of transport """ #As there are 4 buses successive to be used from stopP to stopW and _max_successive_physical_mode = 3 # we have 1 journey query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default"\ "&_max_successive_physical_mode=3&_max_additional_connections=10"\ .format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500") response = self.query_region(query) assert len(response['journeys']) == 1 #As we modify the value of _max_successive_physical_mode to 5 we want two journeys query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \ "&_max_successive_physical_mode=5&_max_additional_connections=10"\ .format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500") response = self.query_region(query) assert len(response['journeys']) == 2 # As we modify the value of 
_max_additional_connections to 2 we delete the second journey because
        # it contains more than nb_connections + 2
        query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
                "&_max_successive_physical_mode=5&_max_additional_connections=2"\
            .format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")

        response = self.query_region(query)
        assert len(response['journeys']) == 1
toha10/python-cudet
cudet/flock.py
Python
gpl-2.0
3,884
0
# The MIT License (MIT) # Copyright (c) 2009 Max Polk # Permission i
s hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense
, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import errno
import fcntl
import os


class FLock:
    '''
    Ensures application is running only once, by using a lock file.

    Ensure call to lock works. Then call unlock at program exit.

    You cannot read or write to the lock file, but for some reason you can
    remove it. Once removed, it is still in a locked state somehow. Another
    application attempting to lock against the file will fail, even though
    the directory listing does not show the file. Mysterious, but we are glad
    the lock integrity is upheld in such a case.

    Instance variables:
        lockfile -- Full path to lock file
        lockfd -- File descriptor of lock file exclusively locked
    '''

    def __init__(self, lockfile):
        self.lockfile = lockfile
        self.lockfd = None

    def lock(self):
        '''
        Creates and holds on to the lock file with exclusive access.
        Returns True if lock successful, False if it is not, and raises
        an exception upon operating system errors encountered creating the
        lock file.
        '''
        try:
            #
            # Create or else open and truncate lock file, in read-write mode.
            #
            # A crashed app might not delete the lock file, so the
            # os.O_CREAT | os.O_EXCL combination that guarantees
            # atomic create isn't useful here. That is, we don't want to
            # fail locking just because the file exists.
            #
            # Could use os.O_EXLOCK, but that doesn't exist yet in my Python
            #
            self.lockfd = os.open(self.lockfile,
                                  os.O_TRUNC | os.O_CREAT | os.O_RDWR)

            # Acquire exclusive lock on the file,
            # but don't block waiting for it
            fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)

            # Writing to file is pointless, nobody can see it
            os.write(self.lockfd, "lockfile")

            return True
        except (OSError, IOError), e:
            # Lock cannot be acquired is okay,
            # everything else reraise exception
            if e.errno in (errno.EACCES, errno.EAGAIN):
                return False
            else:
                raise

    def unlock(self):
        try:
            # FIRST unlink file, then close it. This way, we avoid file
            # existence in an unlocked state
            os.unlink(self.lockfile)
            # Just in case, let's not leak file descriptors
            os.close(self.lockfd)
        except (OSError, IOError):
            # Ignore error destroying lock file. See class doc about how
            # lockfile can be erased and everything still works normally.
            pass
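A minimal usage sketch for the FLock class above; the lock-file path and the exit message are illustrative:

import sys

lock = FLock('/tmp/myapp.lock')
if not lock.lock():
    sys.exit("another instance is already running")
try:
    pass  # ... do the application's work here ...
finally:
    lock.unlock()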
liuxu0703/lx_bash_script
android_script/keyword_manager.py
Python
mit
5,470
0.006764
#!/usr/bin/python # AUTHOR : [email protected] # used to extract keyword sets from xml # used by aplog_helper.sh and adblogcat.sh import os import sys import getopt from xml.dom.minidom import parse, parseString #======================================= class KeywordSet: def __init__(self, xml_node): self.name = self.getText(xml_node.getElementsByTagName('name')[0]) self.type = self.getText(xml_node.getElementsByTagName('type')[0]) active = self.getText(xml_node.getElementsByTagName('active')[0]) if active == 'true': self.active = True else: self.active = False try: self.project = self.getText(xml_node.getElementsByTagName('project')[0]) except: self.project = 'None' self.keywords = [] sel
f.readKeywo
rds(xml_node.getElementsByTagName('keywords')[0])

    def getText(self, text_node):
        '''get text from xml node
        $text_node should be a node with type NODE_TEXT
        return str of the text
        '''
        ret = ''
        for n in text_node.childNodes:
            ret = ret + n.nodeValue
        return ret

    def readKeywords(self, keywords_node):
        '''read keywords and store them in self.keywords
        $keywords_node should be xml node with name of <keywords>
        return none
        '''
        for n in keywords_node.getElementsByTagName('k'):
            self.keywords.append(self.getText(n))

    def printKeywords(self):
        '''print all keywords in self.keywords
        return none
        '''
        for k in self.keywords:
            print k

    def printAllInfo(self):
        print 'name: ' + self.name
        print 'type: ' + self.type
        print 'proj: ' + self.project
        print 'acti: ' + str(self.active)
        word_str = ''
        for k in self.keywords:
            word_str = word_str + k + '; '
        print 'keywords:'
        print word_str
        print ' '


#=======================================

class KeywordManager:

    def __init__(self, path):
        if not os.path.isfile(path):
            print '*. cannot find keywordset.xml file !'
            return
        self.path = path
        self.xml_doc = parse(self.path)
        self.xml_ksm = self.xml_doc.getElementsByTagName('KeywordSetManager')[0]
        self.xml_ks_list = self.xml_ksm.getElementsByTagName('keywordset')
        self.keywordset_list = []
        self.print_inactive = False
        for node in self.xml_ks_list:
            #print self.getText(node.getElementsByTagName('name')[0])
            self.readKeywordSet(node)
        self.keywordset_list.sort(lambda x,y: self.compare(x, y))

    def compare(self, a, b):
        '''compare between two KeywordSet instance
        $a and $b should be instance of KeywordSet
        return -1, 0, 1
        '''
        if a.type != b.type:
            if a.type == 'include':
                return -1
            if a.type == 'exclude':
                return 1
        if a.project != b.project:
            if a.project == 'None':
                return -1
            if b.project == 'None':
                return 1
            cmp_result = cmp(a.project, b.project)
            if cmp_result != 0:
                return cmp_result
        return cmp(a.name, b.name)

    def getText(self, text_node):
        '''get text from xml node
        $text_node should be a node with type NODE_TEXT
        return str of the text
        '''
        r = ''
        for n in text_node.childNodes:
            r = r + n.nodeValue
        return r

    #param $node should be a 'keywordset' node in xml file
    def readKeywordSet(self, node):
        '''read keywords and store them in self.keywordset_list
        $node should be xml node with name of <keywordset>
        return none
        '''
        ks = KeywordSet(node)
        self.keywordset_list.append(ks)

    #param should be true or false
    def setPrintInactiveEnabled(self, inactive):
        '''set self.print_inactive
        '''
        self.print_inactive = inactive

    def listSets(self):
        '''print all keywordsets
        '''
        for ks in self.keywordset_list:
            if ks.active or self.print_inactive:
                print ks.name

    #param $set_type should be either include or exclude
    def listSetsByType(self, set_type):
        '''list keywordsets by include/exclude type
        '''
        for ks in self.keywordset_list:
            if ks.type == set_type:
                if ks.active or self.print_inactive:
                    print ks.name

    #param $set_name should be name of a keywordset
    def printKeywordsBySetName(self, set_name):
        '''list keywords in a keywordset by name
        if more than one keywordsets are with the same name, print them all
        '''
        for ks in self.keywordset_list:
            if ks.name == set_name:
                if ks.active or self.print_inactive:
                    ks.printKeywords()


if __name__ == '__main__':
    opts, args = getopt.getopt(sys.argv[2:], 't:n:d')
    xml = sys.argv[1]
    km = KeywordManager(xml)
    for op, value in opts:
        if op == '-t':
            km.listSetsByType(value)
        elif op == '-n':
            km.printKeywordsBySetName(value)
        elif op == '-d':
            for ks in km.keywordset_list:
                ks.printAllInfo()
gdsfactory/gdsfactory
gdsfactory/samples/20_components.py
Python
mit
482
0
"""# Components. You can adapt
some component functions from the `gdsfactory.components` module.
Each function there returns a Component object.

Here are two equivalent functions:
"""

import gdsfactory as gf


def straight_wide1(width=10, **kwargs) -> gf.Component:
    return gf.components.straight(width=width, **kwargs)


straight_wide2 = gf.partial(gf.components.straight, width=10)


if __name__ == "__main__":
    # c = straight_wi
de1() c = straight_wide2() c.show()
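For readers unfamiliar with gf.partial: it behaves like functools.partial (gdsfactory's version also fixes up the function name for its cache, which is worth verifying against your gdsfactory release), so a third equivalent spelling is:

from functools import partial

import gdsfactory as gf

straight_wide3 = partial(gf.components.straight, width=10)
c = straight_wide3()  # same geometry as straight_wide1() / straight_wide2()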
yipenggao/moose
python/peacock/Input/BlockHighlighterPlugin.py
Python
lgpl-2.1
3,928
0.001527
from PyQt5 import QtCore, QtWidgets
import chigger
import peacock
from peacock.ExodusViewer.plugins.ExodusPlugin import ExodusPlugin
from MeshBlockSelectorWidget import MeshBlockSelectorWidget


class BlockHighlighterPlugin(peacock.base.PeacockCollapsibleWidget, ExodusPlugin):
    """
    Widget for controlling the visible blocks/nodesets/sidesets of the mesh.

    Mirrored off of peacock.Exodus.plugins.BlockPlugin
    """

    #: pyqtSignal: Emitted when window needs to change
    windowRequiresUpdate = QtCore.pyqtSignal()

    highlight = QtCore.pyqtSignal(object, object, object)

    def __init__(self, collapsible_layout=QtWidgets.QHBoxLayout, **kwargs):
        peacock.base.PeacockCollapsibleWidget.__init__(self, collapsible_layout=collapsible_layout)
        ExodusPlugin.__init__(self, **kwargs)

        self.setTitle('Highlight')
        self.setEnabled(False)

        self.MainLayout = self.collapsibleLayout()

        # Block, nodeset, and sideset selector widgets
        self.BlockSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.BLOCK, 'Blocks:')
        self.SidesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.SIDESET, 'Boundaries:')
        self.NodesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.NODESET, 'Nodesets:')

        self.MainLayout.addWidget(self.BlockSelector)
        self.MainLayout.addWidget(self.SidesetSelector)
        self.MainLayout.addWidget(self.NodesetSelector)

        self.BlockSelector.selectionChanged.connect(self.setBlock)
        self.SidesetSelector.selectionChanged.connect(self.setSideset)
        self.NodesetSelector.selectionChanged.connect(self.setNodeset)

        self.setup()

    def onWindowCreated(self, *args):
        """
        Initializes the selector widgets for the supplied reader/results.
        """
        super(BlockHighlighterPlugin, self).onWindowCreated(*args)
        self.BlockSelector.updateBlocks(self._reader, True)
        self.SidesetSelector.updateBlocks(self._reader, True)
        self.NodesetSelector.updateBlocks(self._reader, True)
        self.__updateVariableState()

    def onWindowUpdated(self):
        """
        Update boundary/nodeset visibility when window is updated.
        """
        if self._reader:
            self.blockSignals(True)
            self.BlockSelector.updateBlocks(self._reader)
            self.SidesetSelector.updateBlocks(self._reader)
            self.NodesetSelector.updateBlocks(self._reader)
            self.blockSignals(False)
            self.__updateVariableState()

    def setBlock(self):
        """
        Highlights a block and resets nodesets/sidesets
        """
        block = self.BlockSelector.getBlocks()
        self.SidesetSelector.reset()
        self.NodesetSelector.reset()
        self.highlight.emit(block, None, None)

    def setSideset(self):
        """
        Highlights a sideset and resets nodesets/blocks
        """
        sideset = self.SidesetSelector.getBlocks()
        self.BlockSelector.reset()
        self.NodesetSelector.reset()
        self.highlight.emit(None, sideset, None)

    def setNodeset(self):
        """
        Highlights a nodeset and resets sidesets/blocks
        """
        nodeset = self.NodesetSelector.getBlocks()
        self.BlockSelector.reset()
        self.SidesetSelector.reset()
        self.highlight.emit(None, None, nodeset)

    def __updateVariableState(self):
        """
        Enable/disable the nodeset/sideset selection based on variable type.
        """
        var
info = self._result[0].getCurrentVariableInformation() if varinfo: if varinfo.object_type == chigger.exodus.ExodusReader.ELEMENTAL: self.SidesetSelector.setEnabled(False) self.NodesetSelector.setEnabled(False) else: self.SidesetSelector.setEnabled(True) self.NodesetSelecto
r.setEnabled(True)
CNR-Engineering/ModelerTools
common/qt_log_in_textbrowser.py
Python
gpl-3.0
788
0.002538
"
"" Handle logging in a Message Box? """ from PyQt4 import QtGui, QtCore import logging import sys class MyQWidget(QtGui.QWidget): def center(self):
frameGm = self.frameGeometry() screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos()) centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) class ConsoleWindowLogHandler(logging.Handler): def __init__(self, sigEmitter): super(ConsoleWindowLogHandler, self).__init__() self.sigEmitter = sigEmitter def emit(self, logRecord): message = str(logRecord.getMessage()) self.sigEmitter.emit(QtCore.SIGNAL("logMsg(QString)"), message)
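The handler above only emits an old-style PyQt4 signal; this record is truncated before any wiring code. A sketch of connecting it to a logger and a QTextBrowser (the widget and variable names are illustrative):

import logging

from PyQt4 import QtCore, QtGui

app = QtGui.QApplication([])
browser = QtGui.QTextBrowser()

# route the handler's old-style signal into the text browser
emitter = QtCore.QObject()
QtCore.QObject.connect(emitter, QtCore.SIGNAL("logMsg(QString)"),
                       browser.append)

logging.getLogger().addHandler(ConsoleWindowLogHandler(emitter))
logging.warning("this message lands in the QTextBrowser")

browser.show()
app.exec_()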
formencode/formencode
src/formencode/htmlfill.py
Python
mit
23,202
0.000431
""" Parser for HTML forms, that fills in defaults and errors. See ``render``. """ from __future__ import absolute_import import re from formencode.rewritingparser import RewritingParser, html_quote import six __all__ = ['render', 'htmlliteral', 'default_formatter', 'none_formatter', 'escape_formatter', 'FillingParser'] def render(form, defaults=None, errors=None, use_all_keys=False, error_formatters=None, add_attributes=None, auto_insert_errors=True, auto_error_formatter=None, text_as_default=False, checkbox_checked_if_present=False, listener=None, encoding=None, error_class='error', prefix_error=True, force_defaults=True, skip_passwords=False, data_formencode_form=None, data_formencode_ignore=None, ): """ Render the ``form`` (which should be a string) given the ``defaults`` and ``errors``. Defaults are the values that go in the input fields (overwriting any values that are there) and errors are displayed inline in the form (and also effect input classes). Returns the rendered string. If ``auto_insert_errors`` is true (the default) then any errors for which ``<form:error>`` tags can't be found will be put just above the associated input field, or at the top of the form if no field can be found. If ``use_all_keys`` is true, if there are any extra fields from defaults or errors that couldn't be used in the form it will be an error. ``error_formatters`` is a dictionary of formatter names to one-argument functions that format an error into HTML. Some default formatters are provided if you don't provide this. ``error_class`` is the class added to input fields when there is an error for that field. ``add_attributes`` is a dictionary of field names to a dictionary of attribute name/values. If the name starts with ``+`` then the value will be appended to any existing attribute (e.g., ``{'+class': ' important'}``). ``auto_error_formatter`` is used to create the HTML that goes above the fields. By default it wraps the error message in a span and adds a ``<br>``. If ``text_as_default`` is true (default false) then ``<input type="unknown">`` will be treated as text inputs. If ``checkbox_checked_if_present`` is true (default false) then ``<input type="checkbox">`` will be set to ``checked`` if any corresponding key is found in the ``defaults`` dictionary, even a value that evaluates to False (like an empty string). This can be used to support pre-filling of checkboxes that do not have a ``value`` attribute, since browsers typically will only send the name of the checkbox in the form submission if the checkbox is checked, so simply the presence of the key would mean the box should be checked. ``listener`` can be an object that watches fields pass; the only one currently is in ``htmlfill_schemabuilder.SchemaBuilder`` ``encoding`` specifies an encoding to assume when mixing str and unicode text in the template. ``prefix_error`` specifies if the HTML created by auto_error_formatter is put before the input control (default) or after the control. ``force_defaults`` specifies if a field default is not given in the ``defaults`` dictionary then the control associated with the field should be set as an unsuccessful control. So checkboxes will be cleared, radio and select controls will have no value selected, and textareas will be emptied. This defaults to ``True``, which is appropriate the defaults are the result of a form submission. ``skip_passwords`` specifies if password fields should be skipped when rendering form-content. 
If disabled the password fields will not be filled with anything, which is useful when you don't want to return a user's password in plain-text source. ``data_formencode_form`` if a string is passed in (default `None`) only fields with the html attribute `data-formencode-form` that matches this string will be processed. For example: if a HTML fragment has two forms they can be differentiated to Formencode by decorating the input elements with attributes such as `data-formencode-form="a"` or `data-formencode-form="b"`, then instructing `render()` to only process the "a" or "b" fields. ``data_formencode_ignore`` if True (default `None`) fields with the html attribute `data-formencode-ignore` will not be processed. This attribute need only be present in the tag: `data-formencode-ignore="1"`, `data-formencode-ignore=""` and `data-formencode-ignore` without a value are all valid signifiers. """ if defaults is None: defaults = {} if auto_insert_errors and auto_error_formatter is None: auto_error_formatter = default_formatter p = FillingParser( defaults=defaults, errors=errors, use_all_keys=use_all_keys, error_formatters=error_formatters, add_attributes=add_attributes, auto_error_formatter=auto_error_formatter, text_as_default=text_as_default, checkbox_checked_if_present=checkbox_checked_if_present, listener=listener, encoding=encoding, prefix_error=prefix_error, error_class=error_class, force_defaults=force_defaults, skip_passwords=skip_passwords, data_formencode_form=data_formencode_form, data_formencode_ignore=data_formencode_ignore, ) p.feed(form) p.close() return p.text() class htmlliteral(object): def __init__(self, html, text=None): if text is None: text = re.sub(r'<.*?>', '', html) text = html.replace('&gt;', '>') text = html.replace('&lt;', '<') text = html.replace('&quot;', '"') # @@: Not very complete self.html = html self.text = text def __str__(self): return self.text def __repr__(self): return '<%s html=%r text=%r>' % ( self.__class__.__name__, self.html, self.text) def __html__(self): return self.html def default_formatter(error): """ Formatter that escapes the error, wraps the error in a span with class ``error-message``, and adds a ``<br>`` """ return '<span class="error-message">%s</span><br />\n' % html_quote(error) def none_formatter(error): """ Formatter that does nothing, no escaping HTML, nothin' """ return error def escape_formatter(error): """ Formatter that escapes HTML, no more. """ return html_quote(error) def escapenl_formatter(error): """ Formatter that escapes HTML, and translates newlines to ``<br>`` """ error = html_quote(error) error = error.replace('\n', '<br>\n') return error def ignore_formatter(error): """ Formatter that emits nothing, regardless of the error. """ return '' class FillingParser(RewritingParser): r""" Fills HTML with default values, as in a form. Examples:: >>> defaults = dict(name='Bob Jones', ... occupation='Crazy Cultist', ... address='14 W. Canal\nNew Guinea', ...
living='no', ... nice_guy=0) >>> parser = FillingParser(defaults) >>> parser.feed('''<input type="text" name="name" value="fill"> ... <select name="occupation"> <option value="">Default</o
ption> ... <option value="Crazy Cultist">Crazy cultist</option> </select> ... <textarea cols="20" style="width: 100%" name="address"> ... An address</textarea> ... <input type="radio" name="living" value="yes"> ... <input type="radio" name="living" value="no"> ... <input type="checkbox" name="nice_guy" checked="checked">''') >>> parser.close() >>> print (parser.text()) # doctest: +NORMALIZE_WHITESPACE <input type="text" name="name" value="Bob Jones"> <select name="occupation"> <option value="">Default</option> <option value="Cra
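The record's suffix is truncated above, mid-doctest. For orientation, a minimal standalone use of the render function documented above (the form markup and messages are illustrative):

from formencode import htmlfill

form = '''
<form action="/save" method="post">
  <input type="text" name="name">
</form>
'''

html = htmlfill.render(form,
                       defaults={'name': 'Bob'},
                       errors={'name': 'Please enter a name'})
# the input gains value="Bob", an inline error span, and the error class
print(html)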
disappearedgod/stokeScrapy
stokeScrapy/pipelines.py
Python
gpl-2.0
1,353
0.031042
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html # import codecs # import json # class stokeScrapyPipeline(object): # def __init__(self): # self.file=codecs.open("stokeScrapy.json",mode="wb",encoding='utf-8') # self.file.write('{"hah"'+':[') import pymongo from
scrapy.conf import settings f
rom scrapy.exceptions import DropItem
from scrapy import log


# MongoDBPipeline
class MongoDBPipeline(object):

    def __init__(self):
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        valid = True
        for data in item:
            if not data:
                valid = False
                raise DropItem("Missing {0}!".format(data))
        if valid:
            self.collection.insert(dict(item))
            log.msg("Stoke added to MongoDB database!",
                    level=log.DEBUG, spider=spider)
        return item

# def process_item(self, item, spider):
#     line = json.dumps(dict(item))+","
#     self.file.write(line.decode("unicode_escape"))
#     return item
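The pipeline above pulls its connection details from Scrapy settings; a matching settings.py fragment might look like this (the setting names are the ones read in the code, while the values and the pipeline path are illustrative):

ITEM_PIPELINES = {
    'stokeScrapy.pipelines.MongoDBPipeline': 300,
}

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'stoke'
MONGODB_COLLECTION = 'stocks'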
BrainPad/FindYourCandy
robot-arm/calibration/adjust.py
Python
apache-2.0
4,494
0.00267
# Copyright 2017 BrainPad Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import, division, print_function, unicode_literals import os import json import numpy class AdjustForPictureToRobot(object): def __init__(self): in_data_json = os.path.join(os.path.dirname(__file__), 'adjust_data.json') if not os.path.isfile(in_data_json): self.make_adjust_data() try: in_data = open(in_data_json).read() except Exception as e: raise e self.adjust_data = json.loads(in_data) def make_adjust_data(self): try: in_meas_json = os.path.join(os.path.dirname(__file__), 'adjust_measurement.json') in_data = open(in_meas_json).read() except Exception as e: raise e measurement = json.loads(in_data) from_point = numpy.array([[-0.3, 1.5, 1], [-0.3, -1.5, 1], [0.3, 0, 1]]) to_point = numpy.array([[measurement['-0.3,1.5']['x'], measurement['-0.3,1.5']['y'], 1], [measurement['-0.3,-1.5']['x'], measurement['-0.3,-1.5']['y'], 1], [measurement['0.3,0']['x'], measurement['0.3,0']['y'], 1]]) inv_to_point = numpy.linalg.inv(to_point.T) trans = numpy.dot(from_point.T, inv_to_point) out_data = {} for key, value in sorted(measurement.items()): x_in, y_in = key.split(',') x_picture = float(x_in) y_picture = float(y_in) new_key = '%s,%s' % (round(x_picture, 1), round(y_picture, 1)) if value: x_robot = value['x'] y_robot = value['y'] temp_point = numpy.dot(numpy.array([x_robot, y_robot, 1]), trans.T) x_picture_conv = float(temp_point[0]) y_picture_conv = float(temp_point[1]) x_picture_diff = float(x_picture - x_picture_conv) y_picture_diff = float(y_picture - y_picture_conv) out_data.update({new_key: {'x_picture': x_picture, 'x_picture_conv': x_picture_conv, 'x_picture_diff': x_picture_diff, 'x_robot': x_robot, 'y_picture': y_picture, 'y_picture_conv': y_picture_conv, 'y_picture_diff': y_picture_diff, 'y_robot': y_robot}}) el
se: out_data.update({new_key: None}) try: out_data_json = os.path.join(os.path.dirname(__file__), 'adjust_data.json') f = open(out_data_json, 'w') f.write(json.dumps(out_data, sort_keys=True, indent=4)) f.close() except Exception as e: raise e def adjust(self, x, y): if -1
<= x <= 1 and -1.5 <= y <= 1.5:
            pass
        else:
            message = "Error: x=%s y=%s coordinate is out of range in sheet." % (x, y)
            raise Exception(message)

        x_round = round(x, 1)
        y_round = round(y, 1)
        if x_round == -0.0:
            x_round = 0.0
        if y_round == -0.0:
            y_round = 0.0
        key = '%s,%s' % (x_round, y_round)
        try:
            self.adjust_data[key]
        except KeyError:
            message = "Error: x=%s y=%s coordinate is out of range in robot arm." % (x_round, y_round)
            raise Exception(message)
        x_diff = self.adjust_data[key]['x_picture_diff']
        y_diff = self.adjust_data[key]['y_picture_diff']
        x_adjust = x - x_diff
        y_adjust = y - y_diff
        return x_adjust, y_adjust
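The calibration in make_adjust_data above solves a 2-D affine map from three point pairs written in homogeneous coordinates. A self-contained sketch of the same linear algebra, with made-up robot measurements:

import numpy

# three calibration points in picture coordinates (x, y, 1) ...
picture = numpy.array([[-0.3, 1.5, 1],
                       [-0.3, -1.5, 1],
                       [0.3, 0.0, 1]])
# ... and where the robot actually measured them (made-up values)
robot = numpy.array([[10.0, 25.0, 1],
                     [10.0, 5.0, 1],
                     [14.0, 15.0, 1]])

# picture.T = trans . robot.T  =>  trans = picture.T . inv(robot.T)
trans = numpy.dot(picture.T, numpy.linalg.inv(robot.T))

# map a robot reading back into picture space, as make_adjust_data does
point = numpy.dot(numpy.array([12.0, 15.0, 1]), trans.T)
# point[:2] comes out as (0.0, 0.0) for these sample numbers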
jacobajit/ion
intranet/apps/eighth/forms/admin/blocks.py
Python
gpl-2.0
1,644
0.003041
# -*- coding: utf-8 -*-

from django import forms
from django.core.validators import RegexValidator

from ...models import EighthBlock

block_letter_validator = RegexValidator(r"^[a-z A-Z0-9_-]{1,10}$",
                                        "A block letter must be at most 10 characters long, and may include only alphanumeric characters, spaces, hyphens, and underscores.")


class BlockDisplayField(forms.ModelChoiceField):

    def label_from_instance(self,
obj): return "{}: {}".format(obj.id, str(obj)) class BlockSelectionForm(forms.Form): def __init__(self, label="Block", exclude_before_date=None, only_locked=False, *args, **kwargs): super(BlockSelectionForm, self).__init__(*args, **kwargs) filter_params = {} if exclude_before_date is not None: filter_params["date__gte"] = exclude_before_date if only_locked: filter_params["locked"] = True queryset = EighthBlock.objects
.filter(**filter_params) self.fields["block"] = BlockDisplayField(queryset=queryset, label=label, empty_label="Select a block") class QuickBlockForm(forms.ModelForm): block_letter = forms.CharField(max_length=10, validators=[block_letter_validator]) class Meta: model = EighthBlock fields = ["date", "block_letter"] class BlockForm(forms.ModelForm): block_letter = forms.CharField(max_length=10, validators=[block_letter_validator]) class Meta: model = EighthBlock fields = [ "date", "block_letter", "locked", # "override_blocks", "signup_time", "comments" ]
wangybgit/Chameleon
hostapd-OpenWrt/tests/hwsim/test_bgscan.py
Python
apache-2.0
6,284
0.003024
# bgscan tests # Copyright (c) 2014, Jouni Malinen <[email protected]> # # This software may be distributed under the terms of the BSD license. # See README for more details. import time import logging logger = logging.getLogger() import os import hostapd def test_bgscan_simple(dev, apdev): """bgscan_simple""" hostapd.add_ap(apdev[0]['ifname'], { "ssid": "bgscan" }) hostapd.add_ap(apdev[1]['ifname'], { "ssid": "bgscan" }) dev[0].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="simple:1:-20:2") dev[1].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="simple:1:-45:2") dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="simple:1:-45") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_
freq="2412", bgscan="simple:0:0") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="simple") dev[2].request("REMOVE
_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="simple:1") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() ev = dev[0].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10) if ev is None: raise Exception("dev0 did not indicate signal change event") if "above=0" not in ev: raise Exception("Unexpected signal change event contents from dev0: " + ev) ev = dev[1].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10) if ev is None: raise Exception("dev1 did not indicate signal change event") if "above=1" not in ev: raise Exception("Unexpected signal change event contents from dev1: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3) if ev is None: raise Exception("dev0 did not start a scan") ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3) if ev is None: raise Exception("dev1 did not start a scan") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5) if ev is None: raise Exception("dev0 did not complete a scan") ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5) if ev is None: raise Exception("dev1 did not complete a scan") def test_bgscan_learn(dev, apdev): """bgscan_learn""" hostapd.add_ap(apdev[0]['ifname'], { "ssid": "bgscan" }) hostapd.add_ap(apdev[1]['ifname'], { "ssid": "bgscan" }) try: os.remove("/tmp/test_bgscan_learn.bgscan") except: pass try: dev[0].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn:1:-20:2") id = dev[1].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn:1:-45:2:/tmp/test_bgscan_learn.bgscan") dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn:1:-45") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn:0:0") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412", bgscan="learn:1") dev[2].request("REMOVE_NETWORK all") dev[2].wait_disconnected() ev = dev[0].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10) if ev is None: raise Exception("dev0 did not indicate signal change event") if "above=0" not in ev: raise Exception("Unexpected signal change event contents from dev0: " + ev) ev = dev[1].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10) if ev is None: raise Exception("dev1 did not indicate signal change event") if "above=1" not in ev: raise Exception("Unexpected signal change event contents from dev1: " + ev) ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3) if ev is None: raise Exception("dev0 did not start a scan") ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3) if ev is None: raise Exception("dev1 did not start a scan") ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5) if ev is None: raise Exception("dev0 did not complete a scan") ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5) if ev is None: raise Exception("dev1 did not complete a scan") dev[0].request("DISCONNECT") dev[1].request("DISCONNECT") dev[0].request("REMOVE_NETWORK all") with open("/tmp/test_bgscan_learn.bgscan", "r") as f: lines = f.read().splitlines() if lines[0] != "wpa_supplicant-bgscan-learn": raise Exception("Unexpected bgscan header line") if 'BSS 02:00:00:00:03:00 2412' not in lines: raise Exception("Missing BSS1") if 'BSS 02:00:00:00:04:00 2412' not in lines: raise Exception("Missing BSS2") if 
'NEIGHBOR 02:00:00:00:03:00 02:00:00:00:04:00' not in lines: raise Exception("Missing BSS1->BSS2 neighbor entry") if 'NEIGHBOR 02:00:00:00:04:00 02:00:00:00:03:00' not in lines: raise Exception("Missing BSS2->BSS1 neighbor entry") dev[1].set_network(id, "scan_freq", "") dev[1].connect_network(id) ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=10) if ev is None: raise Exception("dev1 did not start a scan") ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 10) if ev is None: raise Exception("dev1 did not complete a scan") dev[1].request("REMOVE_NETWORK all") finally: try: os.remove("/tmp/test_bgscan_learn.bgscan") except: pass
Ecotrust/COMPASS
mp/wsgi.py
Python
apache-2.0
378
0
""" WSGI confi
g for lot project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "s
ettings") application = get_wsgi_application()
iliawnek/SystematicReview
systematic_review/settings.py
Python
mit
2,999
0
""" Django settings for systematic_review project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates') TEMPLATE_DIRS = ( TEMPLATE_PATH, ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', ) AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin 'django.contrib.auth.backends.ModelBackend', ) SITE_ID = 1 EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = '[email protected]' EMAIL_HOST_PASSWORD = 'wed46-sysrev' EMAIL_PORT = 587 EMAIL_USE_TLS = True # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_+ntbtxcjxj5u)av$wt4q!lsad58v-7x_%zb1lc9f*$_#=p^f%' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True # TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['iliawnek.pyt
honanywhere.com', '127.0.0.1', 'localhost'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contr
ib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'django.contrib.formtools', 'sysrev', 'registration', 'bootstrapform' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'systematic_review.urls' WSGI_APPLICATION = 'systematic_review.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True # USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_PATH = os.path.join(BASE_DIR, 'static') STATIC_ROOT = os.path.join(BASE_DIR, 'static-root') STATIC_URL = '/static/' # You may find this is already defined as such. STATICFILES_DIRS = ( STATIC_PATH, ) # Registration REGISTRATION_OPEN = True LOGIN_REDIRECT_URL = '/' LOGIN_URL = '/accounts/login/'
skosukhin/spack
var/spack/repos/builtin/packages/r-speedglm/package.py
Python
lgpl-2.1
1,759
0.000569
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RSpeedglm(RPackage): """Fitting linear mode
ls and generalized linear models to large data sets by updating algorithms.""" homepage = "https://cran.r
-project.org/package=speedglm" url = "https://cran.rstudio.com/src/contrib/speedglm_0.3-2.tar.gz" list_url = "https://cran.rstudio.com/src/contrib/Archive/speedglm" version('0.3-2', 'c4874d4c2a677d657a335186ebb63131') depends_on('r-mass', type=('build', 'run')) depends_on('r-matrix', type=('build', 'run'))
gramps-project/addons-source
SetAttributeTool/SetAttributeTool.gpr.py
Python
gpl-2.0
1,411
0.02197
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2009 Douglas S. Blank <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # $Id$ register(TOOL, id = 'SetAttribute', name = _("Set Attribute"), description = _("Set an attribute to a given value."), version = '0.0.32', gramps_target_version = "5.1", status = STABLE, # not yet tested with python 3 fname = 'SetAttributeTool.py', authors = ["
Douglas S. Blank"], authors
_email = ["[email protected]"], category = TOOL_DBPROC, toolclass = 'SetAttributeWindow', optionclass = 'SetAttributeOptions', tool_modes = [TOOL_MODE_GUI], )
HiSPARC/station-software
user/python/Lib/unittest/signals.py
Python
gpl-3.0
2,411
0.002074
import signal import weakref from functools import wraps __unittest = True class _InterruptHandler(object): def __init__(self, default_handler): self.called = False self.original_handler = default_handler if isinstance(default_handler, (int, long)): if default_handler == signal.SIG_DFL: # Pretend it's signal.default_int_handler instead. default_handler = signal.default_int_handler elif default_handler == signal.SIG_IGN: # Not quite the same thing as SIG_IGN, but the closest we # can make it: do nothing. def default_handler(unused_signum, unused_frame): pass else: raise TypeError("expected SIGINT signal handler to be " "signal.SIG_IGN, signal.SIG_DFL, or a " "callable object") self.default_handler = default_handler def __call__(self, signum, frame): installed_handler = signal.getsignal(signal.SIGINT) if installed_handler is not self:
# if we aren't the installe
d handler, then delegate immediately # to the default handler self.default_handler(signum, frame) if self.called: self.default_handler(signum, frame) self.called = True for result in _results.keys(): result.stop() _results = weakref.WeakKeyDictionary() def registerResult(result): _results[result] = 1 def removeResult(result): return bool(_results.pop(result, None)) _interrupt_handler = None def installHandler(): global _interrupt_handler if _interrupt_handler is None: default_handler = signal.getsignal(signal.SIGINT) _interrupt_handler = _InterruptHandler(default_handler) signal.signal(signal.SIGINT, _interrupt_handler) def removeHandler(method=None): if method is not None: @wraps(method) def inner(*args, **kwargs): initial = signal.getsignal(signal.SIGINT) removeHandler() try: return method(*args, **kwargs) finally: signal.signal(signal.SIGINT, initial) return inner global _interrupt_handler if _interrupt_handler is not None: signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
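A sketch of how a runner uses the hooks above (this mirrors what unittest's own -c/--catch support does; the test name is illustrative):

import unittest
from unittest import signals

result = unittest.TestResult()
signals.installHandler()        # swap in the _InterruptHandler for SIGINT
signals.registerResult(result)  # first Ctrl-C now calls result.stop()
try:
    suite = unittest.defaultTestLoader.loadTestsFromName("package.tests")
    suite.run(result)
finally:
    signals.removeResult(result)
    signals.removeHandler()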
numenta/nupic.research
projects/imagenet/experiments/sparse_r1.py
Python
agpl-3.0
5,411
0.00037
# Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2020, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # import copy import inspect import numpy as np import ray.tune as tune import torch from .base import DEFAULT """ An initial set of good parameters for sparse networks, i.e. Release 1. The sparse 100 configuration here gets between 80.5 and 82% in top-1 accuracy after 60 epochs and has about 77.5% average weight sparsity. The sparse 1000 configuration, with the same weight sparsities, gets about 72.23% in top-1 accuracy after 120 epochs. """ def my_auto_sparse_conv_params(in_channels, out_channels, kernel_size): """ Custom weight params. :return: a dict to pass to `SparseWeights2d` """ weights_per_channel = kernel_size * kernel_size * in_channels if weights_per_channel < 100: weights_density = 0.7 elif weights_per_channel < 200: weights_density = 0.5 elif weights_per_channel < 500: weights_density = 0.4 elif weights_per_channel < 1000: weights_density = 0.3 elif weights_per_channel < 2000: weights_density = 0.2 elif weights_per_channel < 4000: weights_density = 0.2 else: weights_density = 0.15 return dict( weight_sparsity=weights_density, ) def my_
auto_sparse_
activation_params(in_channels, out_channels, kernel_size): """ A custom auto sparse params function. :return: a dict to pass to `KWinners2d` as params. """ percent_on = 1.0 if kernel_size != 1: if out_channels >= 128: percent_on = 0.3 if percent_on >= 1.0: return None else: return dict( percent_on=percent_on, boost_strength=1.0, boost_strength_factor=0.9, local=True, k_inference_factor=1.0, ) def my_auto_sparse_linear_params(input_size, output_size): """ Custom weight params. :return: a dict to pass to `SparseWeights` """ return dict( weight_sparsity=0.25, ) # This configuration gets between 80.5 and 82% after 60 epochs SPARSE100_R1 = copy.deepcopy(DEFAULT) SPARSE100_R1.update(dict( # No weight decay from batch norm modules batch_norm_weight_decay=False, init_batch_norm=True, epochs=60, checkpoint_freq=1, keep_checkpoints_num=2, checkpoint_score_attr="training_iteration", checkpoint_at_end=True, seed=tune.sample_from(lambda spec: np.random.randint(2, 10000)), num_classes=100, model_args=dict(config=dict( num_classes=100, defaults_sparse=True, activation_params_func=my_auto_sparse_activation_params, conv_params_func=my_auto_sparse_conv_params, linear_params_func=my_auto_sparse_linear_params )), # Use a higher learning rate and no momentum for sparse superconvergence lr_scheduler_class=torch.optim.lr_scheduler.OneCycleLR, lr_scheduler_args=dict( max_lr=6.0, div_factor=6, # initial_lr = 1.0 final_div_factor=4000, # min_lr = 0.00025 pct_start=5.0 / 60.0, epochs=60, anneal_strategy="linear", max_momentum=0.01, cycle_momentum=False, ), optimizer_args=dict( lr=0.1, weight_decay=0.0001, momentum=0.0, nesterov=False, ), weight_params=inspect.getsource(my_auto_sparse_conv_params), activation_params=inspect.getsource(my_auto_sparse_activation_params), linear_params=inspect.getsource(my_auto_sparse_linear_params), )) # Try a much longer number of epochs than the 100 category version and a lower # weight decay. (see https://arxiv.org/abs/1711.04291) # With 120 epochs this gets 72.23% top-1 accuracy. Earlier version got # about 73% with 200 epochs. SPARSE1000_R1 = copy.deepcopy(SPARSE100_R1) SPARSE1000_R1.update(dict( num_classes=1000, epochs=120, model_args=dict(config=dict( num_classes=1000, defaults_sparse=True, activation_params_func=my_auto_sparse_activation_params, conv_params_func=my_auto_sparse_conv_params, linear_params_func=my_auto_sparse_linear_params )), lr_scheduler_args=dict( max_lr=6.0, div_factor=6, # initial_lr = 1.0 final_div_factor=4000, # min_lr = 0.00025 pct_start=5.0 / 120.0, epochs=120, anneal_strategy="linear", max_momentum=0.01, cycle_momentum=False, ), optimizer_args=dict( lr=0.1, weight_decay=0.00005, momentum=0.0, nesterov=False, ), )) CONFIGS = dict( sparse_100_r1=SPARSE100_R1, sparse_1000_r1=SPARSE1000_R1, )
radical-software/mongrey
mongrey/web/worker.py
Python
bsd-3-clause
1,033
0.003872
#!/
usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import multiprocessing import gunicorn.app.base from gunicorn.six import iteritems def number_of_workers(): return (multiprocessing.cpu_count() * 2) + 1 class StandaloneApplication(gunicorn.app.base.BaseApplication
): def __init__(self, app, options=None): self.options = options or {} self.application = app super(StandaloneApplication, self).__init__() def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) if key in self.cfg.settings and value is not None]) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): return self.application if __name__ == '__main__': from .wsgi import create_app options = { 'bind': '%s:%s' % ('127.0.0.1', '8081'), 'workers': number_of_workers(), } StandaloneApplication(create_app, options).run()
matthiask/feincms3
feincms3/plugins/__init__.py
Python
bsd-3-clause
269
0
# flake8: noqa from . import html, ric
htext, snippet try: import requests except ImportError: # pragma: no cover pass else: from . import external try: import
imagefield except ImportError: # pragma: no cover pass else: from . import image
PYPIT/arclines
arclines/scripts/match.py
Python
bsd-3-clause
3,106
0.007727
#!/usr/bin/env python """ Match input spectrum to ID lines """ from __future__ import (print_function, absolute_import, division, unicode_literals) import pdb try: # Python 3 ustr = unicode except NameError: ustr = str def parser(options=None): import argparse # Parse parser = argparse.ArgumentParser( description='Match input spectrum to arclines line lists') parser.add_argument("spectrum", type=str, help="Spectrum file (.ascii, .fits, .json)") parser.add_argument("wvcen", type=float, help="Guess at central wavelength (within 1000A)") parser.add_argument("disp", type=float, help="Accurate dispersion (Ang/pix)") parser.add_argument("lines", type=str, help="Comma separated list of lamps") parser.add_argument("--outroot", type=str, help="Root filename for plot, IDs") parser.add_argument("--min_ampl", default=100., type=float, help="Minimum amplitude for line analysis [default: 100.]") parser.add_argument("--debug", default=False, action='store_true', help="Debug") parser.add_argument("--fit", default=False, action='store_true', help="Fit the lines?") parser.add_argument("--brute", default=False, action='store_true', help="Use semi_brute?") parser.add_argument("--show_spec", default=False, action='store_true', help="Show the input spectrum?") if options is None: args = parser.parse_args() else: args = parser.parse_args(options) return args def main(pargs=None): """ Run Parameters ---------- args Returns ------- """ import numpy as np from matplotlib import pyplot as plt from linetools import utils as ltu from arclines import io as arcl_io from arclines.holy import utils as arch_utils from arclines.holy.grail import general, semi_brute from arclines.holy import patterns as arch_patt from arclines.holy import fitting as arch_fit if pargs.outroot is None: pargs.outroot = 'tmp_matches' # Defaults # Load spectrum spec = arcl_io.load_spectrum(pargs.spectrum) if pargs.show_spec: plt.clf() ax = plt.gca() ax.plot(spec) plt.show() # Arc lines lines = pargs.lines.split(',') # Call brute if pargs.brute: best_dict, final_fit = semi_brute(spec, lines, pargs.wvcen, pargs.disp, min_ampl=pargs.min_ampl, debug=pargs.debug, outroot=pargs.outroot, do_fit=pargs.fit, verbose=True) #best_dict, final_fit = grail.semi_brute(spec, lines, wv_cen, disp, siglev=siglev, # min_ampl=min_ampl, min_nmatch=min_match, outroot=outroot) else: best_dict, final_fit = general(spec, lines, do_fit=pargs.fit, verbose=True, debug=pargs.debug, min_ampl=pargs.min_ampl, outroot=pargs.outroot) if
pargs.debug: pdb.set_trace() if pargs.fit:
ltu.savejson(pargs.outroot+'_fit.json', ltu.jsonify(final_fit), easy_to_read=True, overwrite=True)
cocofree/azkaban_assistant
schedule/webapp/handler/job_update.py
Python
apache-2.0
2,583
0.021624
#!/usr/bin/python #encoding:utf-8 ''' Author: wangxu Email: [email protected] Task update ''' import sys reload(sys) sys.setdefaultencoding("utf-8") import logging import tornado.web import json import os import time CURRENTPATH = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(CURRENTPATH, '../../')) from job_define import Job #metric handling class class JobUpdateHandler(tornado.web.RequestHandler): #all calls are routed to the post method def get(self): self.post() #action is the operation type def post(self): #redirect to the list page after the update title = '调度任务列表' #requires logging in to azkaban first session_id = self.get_argument('session_id','') login_user = self.get_argument('login_user','') if session_id=='' or login_user=='': self.render('to_login.html') return #parameters query_name = self.get_argument('query_name','') query_project_name = self.get_argument('query_project_name','') query_server_host = self.get_argument('query_server_host','') query_user = self.get_argument('query_user','') #build the job attr_list = Job.get_attr_list() dependencies_box = self.get_argument('dependencies_box','') logging.info('>>>>>>>>>>>'+str(typ
e(dependencies_box))) logging.info('>>>>>>>>>>>'+str(dependencies_box)) job = Job() #dynamically load fields, all strings by default for attr in attr_list: value = str(self.get_argument(attr,'')).strip() if value!='': setattr(job,attr,value) logging.info(attr+':'+value) #default settings job.name = job.name.replace('.','-') job.updater = login_user job.upda
te_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) if job.creator == '': job.creator = job.updater job.create_time = job.update_time #update job.update_job() #list jobs = Job.get_alljobs(query_name,query_project_name,query_server_host,query_user,login_user) query_dict = { 'query_name':query_name, 'query_project_name':query_project_name, 'query_server_host':query_server_host, 'query_user':query_user, 'session_id':session_id, 'login_user':login_user } logging.info('[%s] update job [%s]' % (login_user,job.name)) self.render('list.html',title=title,jobs=jobs,query_dict=query_dict)
paulocheque/epub-meta
setup.py
Python
agpl-3.0
1,554
0.001931
#from distutils.core import
setup from setuptools import setup, find_packages # http://guide.python-distribute.org/quickstart.html # python setup.py sdist # python setup.py register # python setup.py sdist upload # pip install epub_meta # pip install epub_meta --upgrade --no-deps # Manual upload to PypI # http://pypi.python.org/pypi/epub_meta # Go to 'edit' link # Update version and save # Go to 'files' link and upload the file VERSION = '0.0.7' tests_require = [ ] install_requires = [ ] # from pip.req import parse_r
equirements # install_requires = parse_requirements('requirements.txt') # tests_require = parse_requirements('requirements-dev.txt') setup(name='epub_meta', url='https://github.com/paulocheque/epub-meta', author='Pluralsight', keywords='python epub metadata', description='', license='MIT', classifiers=[ 'Operating System :: OS Independent', 'Topic :: Software Development', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: PyPy', ], version=VERSION, install_requires=install_requires, test_suite='tests', tests_require=tests_require, extras_require={'test': tests_require}, packages=find_packages(), )
jcfr/mystic
examples2/g09.py
Python
bsd-3-clause
1,961
0.009179
#!/usr/bin/env python # # Problem definition: # A-R Hedar and M Fukushima, "Derivative-Free Filter Simulated Annealing # Method for Constrained Continuous Global Optimization", Journal of # Global Optimization, 35(4), 521-549 (2006). # # Original Matlab code written by A. Hedar (Nov. 23, 2005) # http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/go.htm # and ported to Python by Mike McKerns (December 2014) # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2015 California Institute of Technology. # License: 3-clause BSD. The full license text is available at: # - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE def objective(x): x0,x1,x2,x3,x4,x5,x6 = x return (x0-10)**2 + 5*(x1-12)**2 + x2**4 + 3*(x3-11)**2 + \ 10*x4**6 + 7*x5**2
+ x6**4 - 4*x5*x6 - 10*x5 - 8*x6 bounds = [(-10.,10.)]*7 # with penalty='penalty' applied, solution is: xs = [2.330499, 1.951372, -0.4775414, 4.365726, -0.6244870, 1.038131, 1.594227] ys = 680.6300573 from mystic.symbolic import generate_constrain
t, generate_solvers, solve from mystic.symbolic import generate_penalty, generate_conditions equations = """ 2*x0**2 + 3*x1**4 + x2 + 4*x3**2 + 5*x4 - 127.0 <= 0.0 7*x0 + 3*x1 + 10*x2**2 + x3 - x4 - 282.0 <= 0.0 23*x0 + x1**2 + 6*x5**2 - 8*x6 - 196.0 <= 0.0 4*x0**2 + x1**2 - 3*x0*x1 + 2*x2**2 + 5*x5 - 11*x6 <= 0.0 """ #cf = generate_constraint(generate_solvers(solve(equations))) #XXX: inequalities pf = generate_penalty(generate_conditions(equations), k=1e12) from mystic.constraints import as_constraint cf = as_constraint(pf) if __name__ == '__main__': from mystic.solvers import diffev2 from mystic.math import almostEqual result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40, gtol=200, disp=False, full_output=True) assert almostEqual(result[0], xs, rel=1e-2) assert almostEqual(result[1], ys, rel=1e-2) # EOF
leosartaj/pymarkdown
setup.py
Python
bsd-3-clause
599
0.001669
#!/usr/bin/env python import os from setuptools import setup setup(name='pymarkdown', version='0.1.4', description='Evaluate code in markdown', url='http://github.com/mrocklin/pymarkdown', aut
hor='Matthew Rocklin', author_email='[email protected]', license='BSD', keywords='markdown documentation', packages=['pymarkdown'], install_requires=['toolz'], long_description=(open('README.rst').read() if os.path.exists('README.rst') else ''), zip_safe=False, scripts=[os.path.j
oin('bin', 'pymarkdown')])
mtill/MailWebsiteChanges
mwcfeedserver.py
Python
gpl-2.0
1,039
0.002887
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright: (2013-2017) Michael Till Beck <[email protected]> # License: GPL-2.0+ import http.server import socketserver import importlib import sys import getopt bind = 'localhost' port = 8000 configMod = 'config' try: opts, args = getopt.getopt(sys.argv[1:], 'hc:b:p:', ['help', 'config=', 'bind=', 'port=']) except ge
topt.GetoptError: print('Usage: FeedServer.py --config=config --port=8000 --bind=localhost') sys.exit(1) for opt, arg in opts: if opt == '-h': print('Usage:
FeedServer.py --config=config --bind=localhost --port=8000') exit() elif opt in ('-c', '--config'): configMod = arg elif opt in ('-b', '--bind'): bind = arg elif opt in ('-p', '--port'): port = int(arg) config = importlib.import_module(configMod) handler = http.server.SimpleHTTPRequestHandler httpd = socketserver.TCPServer((bind, port), handler) print('Bound to ' + bind + ', listening on port ' + str(port)) httpd.serve_forever()
infinigrove/TerminalRoastDB
cmds/Roaster_Set_Time.py
Python
gpl-3.0
324
0.003086
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # TerminalRoastDB, released under GPLv3 # Roaster Set Time import Pyro
4 import sys new_roaster_time = sys.argv[1] roast_control = Pyro4.Proxy("PYRONAME:roaster.sr700") if int(new_roaster_time) > 0 and int(
new_roaster_time) < 1200: roast_control.set_time(new_roaster_time)
coopsource/taiga-back
taiga/searches/migrations/0001_initial.py
Python
agpl-3.0
1,689
0.002368
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('wiki', '00
01_initial'), ('userstories', '0009_remove_userstory_is_archived'), ('issues', '0005_auto_20150623_1923'), ('tasks', '0006_auto_20150623_1923'), ] operations = [ migrations.RunSQL( """
CREATE INDEX "userstories_full_text_idx" ON userstories_userstory USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, ''))); """, reverse_sql="""DROP INDEX IF EXISTS "userstories_full_text_idx";""" ), migrations.RunSQL( """ CREATE INDEX "tasks_full_text_idx" ON tasks_task USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, ''))); """, reverse_sql="""DROP INDEX IF EXISTS "tasks_full_text_idx";""" ), migrations.RunSQL( """ CREATE INDEX "issues_full_text_idx" ON issues_issue USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, ''))); """, reverse_sql="""DROP INDEX IF EXISTS "issues_full_text_idx";""" ), migrations.RunSQL( """ CREATE INDEX "wikipages_full_text_idx" ON wiki_wikipage USING gin(to_tsvector('simple', coalesce(slug, '') || ' ' || coalesce(content, ''))); """, reverse_sql="""DROP INDEX IF EXISTS "wikipages_full_text_idx";""" ), ]
michaelnetbiz/mistt-solution
config.py
Python
mit
1,994
0.005015
import os import json from urlparse import urlparse from pymongo import uri_parser def get_private_key(): with open('mistt-solution-d728e8f21f47.json') as f: return json.loads(f.read()).items() # Flask CSRF_SESSION_KEY = os.getenv('FLASK_SESSION_KEY', 'notsecret') SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'notsecret') CSRF_ENABLED = True BASE_DIR = os.path.abspath(os.path.dirname(__file__)) DEBUG = True ADMIN
_EMAIL = os.getenv('ADMIN_EMAIL', 'notsecret') ADMIN_USERNAME = os.getenv('ADMIN_USERNAME', 'notsecret') ADMIN_PASSWORD = os.getenv('ADMIN_PASSWORD', 'notsecret') SECURITY_PASSWORD_SALT = os.getenv('SECURITY_PASSWORD_SALT', 'notsecret') # postgres SQLALCHEMY_TRACK_MODIFICATIONS = True SQLALCHEMY_MIGRATE_REPO = os.path.join(BASE_DIR, 'db') SQLALCHEMY_DATAB
ASE_URI = os.getenv('DATABASE_URL', 'postgres://localhost:5432/mistt') # dumps PAGE_DUMP_PATH = os.getenv('PAGE_DUMP_PATH', 'page_dump_110116') PATH_TO_CASE_EXPORTS = os.getenv('PATH_TO_CASE_EXPORTS') # Drupal DRUPAL_URL = os.getenv('DRUPAL_URL') DRUPAL_LOGIN = os.getenv('DRUPAL_LOGIN') DRUPAL_PASSWORD = os.getenv('DRUPAL_PASSWORD') DRUPAL_USERNAME = os.getenv('DRUPAL_USERNAME') # flask-mail settings MAIL_SERVER = os.getenv('MAIL_SERVER') MAIL_PORT=465 MAIL_USE_TLS=False MAIL_USE_SSL=True MAIL_USERNAME = os.getenv('MAIL_USERNAME') MAIL_PASSWORD = os.getenv('MAIL_PASSWORD') MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER') # cases MONGODB_URI = os.getenv('MONGODB_URI') uri = uri_parser.parse_uri(MONGODB_URI) MONGODB_DATABASE = uri['database'] MONGODB_HOST, MONGODB_PORT = uri['nodelist'][0] MONGODB_USERNAME = uri['username'] MONGODB_PASSWORD = uri['password'] # # GOOGLE_SERVICE_ACCOUNT_EMAIL=os.getenv('GOOGLE_SERVICE_ACCOUNT_EMAIL') # GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY=get_private_key()[0][1] GOOGLE_ANALYTICS_CLIENT_SECRET=os.getenv('GOOGLE_ANALYTICS_CLIENT_SECRET') GOOGLE_ANALYTICS_CLIENT_ID=os.getenv('GOOGLE_ANALYTICS_CLIENT_ID') UPLOAD_FOLDER=os.path.join(os.getcwd(),'uploads')
srinath-chakravarthy/ovito
tests/scripts/test_suite/python_data_plot_overlay.py
Python
gpl-3.0
1,873
0.034704
from ovito import * from ovito.io import * from ovito.modifiers import * from ovito.vis import * import matplotlib # Activate 'agg' backend for off-screen plotting. matplotlib.use('Agg') import matplotlib.pyplot as plt import PyQt5.QtGui node = import_file("../../files/CFG/fcc_coherent_twin.0.cfg") node.modifiers.append(CoordinationNumberModifier()) node.modifiers.append(HistogramModifier()) node.add_to_scene() vp = dataset.viewports.active_vp def render(painter, **args): # Find the existing HistogramModifier in the pipeline # and get its histogram data. for mod in ovito.dataset.selected_node.modifiers: if isinstance(mod, HistogramModifier): x = mod.histogram[:,0] y = mod.histogram[:,1] break if 'x' not in locals(): raise RuntimeError('Histogram modifier not found.') # Get size of rendered viewport image in pixels. viewport_width = painter.window().width() viewport_height = painter.window().height() # Compute plot size in inches (DPI determines label size) dpi = 80 plot_width = 0.5 * viewport_width / dpi plot_height = 0.5 * viewport_height / dpi # Create figure fig, ax = plt.subplots(figsize=(plot_width,plot_height), dpi=dpi) fig.patch.set_alpha(0.5) plt.title('Coordination') # Plot histogram data ax.bar(x, y) plt.tight_layout() # Render figure to an in-memory buffer. buf = fig.canvas.print_to_buffer() # Create a QImage from the m
emory buffer res_x, res_y = buf[1] img = PyQt5.QtGui.QImage(buf[0], res_x, res
_y, PyQt5.QtGui.QImage.Format_RGBA8888) # Paint QImage onto rendered viewport painter.drawImage(0,0,img) print("Overlay function was executed") overlay = PythonViewportOverlay() overlay.function = render vp.overlays.append(overlay) if ovito.headless_mode: ovito.dataset.render_settings.renderer = TachyonRenderer(ambient_occlusion = False, antialiasing = False) vp.render()
juliarizza/web2courses
languages/ro.py
Python
mit
29,001
0.024039
# -*- coding: utf-8 -*- { '\n\nThank you!': '\n\nThank you!', '\n\nWe will wait and let you know when your payment is confirmed.': '\n\nWe will wait and let you know when your payment is confirmed.', '\n- %s from %s to %s': '\n- %s from %s to %s', '\nAmount: R$%.2f': '\nAmount: R$%.2f', "\nSomething happened and we couldn't verify your payment.\n": "\nSomething happened and we couldn't verify your payment.\n", '\nThank you for your purchase!': '\nThank you for your purchase!', '\nThank you!': '\nThank you!', '\nThank you.': '\nThank you.', '\nThe total amount was R$%.2f.': '\nThe total amount was R$%.2f.', '\nWe will wait and let you know when your payment is confirmed.\n': '\nWe will wait and let you know when your payment is confirmed.\n', '\nYou can check your payment history after login in to your profile.': '\nYou can check your payment history after login in to your profile.', '!=': '!=', '!langcode!': 'ro', '!langname!': 'Română', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" (actualizează) este o expresie opțională precum "câmp1=\'valoare_nouă\'". Nu puteți actualiza sau șterge rezultatele unui JOIN', '%(month)s %(day)sth': '%(month)s %(day)sth', '%(nrows)s records found': '%(nrows)s înregistrări găsite', '%02d/%02d': '%02d/%02d', '%B %d, %Y': '%B %d, %Y', '%d days ago': '%d days ago', '%d weeks ago': '%d weeks ago', '%d%% OFF': '%d%% OFF', '%d/%d': '%d/%d', '%m-%d-%Y': '%m-%d-%Y', '%s %%{row} deleted': '%s linii șterse', '%s %%{row} updated': '%s linii actualizate', '%s %dth': '%s %dth', '%s Certificate': '%s Certificate', '%s of %s': '%s of %s', '%s selected': '%s selectat(e)', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', '(something like "it-it")': '(ceva ce seamănă cu "it-it")', '- %s from %s to %s': '- %s from %s to %s', '- %s from %s to %s\n': '- %s from %s to %s\n', '1 day ago': '1 day ago', '1 week ago': '1 week ago', '<': '<', '<=': '<=', '=': '=', '>': '>', '>=': '>=', '?': '?', 'A new version of web2py is available': 'O nouă versiune de web2py este disponibilă', 'A new version of web2py is available: %s': 'O nouă versiune de web2py este disponibilă: %s', 'About': 'Despre', 'about': 'despre', 'About application': 'Despre aplicație', 'Access': 'Access', 'Access Control': 'Control acces', 'Access the /appadmin to make at least one teacher user:': 'Access the /appadmin to make at least one teacher user:', 'Actions': 'Actions', 'Add': 'Adaugă', 'Add more': 'Add more', 'additional code for your application': 'cod suplimentar pentru aplicația dvs.', 'admin disabled because no admin password': 'administrare dezactivată deoarece parola de administrator nu a fost furnizată', 'admin disabled because not supported on google app engine': 'administrare dezactivată deoarece funcționalitatea nu e suportat pe Google App Engine', 'admin disabled because unable to access password file': 'administrare dezactivată deoarece nu există acces la fișierul cu parole', 'Admin is disabled because insecure channel': 'Adminstrarea este dezactivată deoarece conexiunea nu este sigură', 'Admin is disabled because unsecure channel': 'Administrarea este dezactivată deoarece conexiunea nu este securizată', 'Administration': 'Administrare', 'Administrative Interface': 'Interfață administrare', 'Administrator Password:': 'Parolă administrator:', 'Ajax Recipes': 'Rețete Ajax', 'All certificates sent!': 'All certificates sent!', 'All Classes': 'All Classes', 'Alternative A': 'Alternative A', 'Alternative B': 
'Alternative B', 'Alternative C': 'Alternative C', 'Alternative D': 'Alternative D', 'Amount': 'Amount', 'Amount: R$%.2f': 'Amount: R$%.2f', 'Amount: R$%.2f\n': 'Amount: R$%.2f\n', 'And': 'Și', 'and enroll!': 'and enroll!', 'and go to': 'and go to', 'and rename it (required):': 'și renumiți (obligatoriu):', 'and rename it:': ' și renumiți:', 'Announcements': 'Announcements', 'appadmin': 'appadmin', 'appadmin is disabled because insecure channel': 'appadmin dezactivat deoarece conexiunea nu e sigură', 'application "%s" uninstalled': 'aplicația "%s" a fost dezinstalată', 'application compiled': 'aplicația a fost compilată', 'application is compiled and cannot be designed': 'aplicația este compilată și nu poate fi editată', 'Are you sure you want to delete file "%s"?': 'Sigur ștergeți fișierul "%s"?', 'Are you sure you want to delete this object?': 'Sigur ștergeți acest obiect?', 'Are you sure you want to uninstall application "%s"': 'Sigur dezinstalați aplicația "%s"', 'Are you sure you want to uninstall application "%s"?': 'Sigur dezinstalați aplicația "%s"?', 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENȚIE: Nu vă puteți conecta decât utilizând o conexiune securizată (HTTPS) sau rulând aplicația pe computerul local.', 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENȚIE: Nu puteți efectua mai multe teste o dată deoarece lansarea în execuție a mai multor subpocese nu este sigură.', 'ATTENTION: you cannot edit the running application!': 'ATENȚIE: nu puteți edita o aplicație în curs de execuție!', 'Authentication': 'Autentificare', 'Available Databases and Tables': 'Baze de date și tabele disponibile', 'Available Until': 'Available Until', 'Back': 'Înapoi', 'Banner': 'Banner', 'Body': 'Body', 'Buy Now': 'Buy Now', 'Buy this book': 'Cumpără această carte', 'Cache': 'Cache', 'cache': 'cache', 'Cache Cleared': 'Cache Cleared', 'Cache Keys': 'Chei cache', 'cache, errors and sessions cleaned': 'cache, erori și sesiuni golite', 'Calendar': 'Calendar', 'Cannot be empty': 'Nu poate fi vid', 'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Compilare imposibilă: aplicația conține erori. 
Debogați aplicația și încercați din nou.', 'cannot create file': 'fișier imposibil de creat', 'cannot upload file "%(filename)s"': 'imposibil de încărcat fișierul "%(filename)s"', 'Certificates': 'Certificates', 'Change password': 'Schimbare parolă', 'Change Password': 'Schimbare parolă', 'change password': 'schimbare parolă', 'check all': 'coșați tot', 'Check to delete': 'Coșați pentru a șterge', 'Class %s': 'Class %s', 'Class Id': 'Class Id', 'Classes': 'Classes', 'clean': 'golire', 'Clear': 'Golește', 'Clear CACHE?': 'Clear CACHE?', 'Clear DISK': 'Clear DISK', 'Clear RAM': 'Clear RAM', 'click to check for upgrades': 'Clic pentru a verifica dacă există upgrade-uri', 'Client IP': 'IP client', 'Closed': 'Closed', 'Community': 'Comunitate', 'compile': 'compilare', 'compiled application removed': 'aplicația compilată a fost ștearsă', 'Components and Plugins': 'Componente și plugin-uri', 'Confirmation Time': 'Confirmation Time', 'Confirmed': 'Confirmed', 'Contact': 'Contact', 'contains': 'conține', 'Continue Shopping': 'Continue Shopping', 'Controller': 'Controlor', 'Controllers': 'Controlori', 'controllers': 'controlori', 'Copyright': 'Drepturi de autor', 'Correct Alternative': 'Correct Alternative', 'Course': 'Course', 'Course Announcements': 'Course Announcements', 'Course Id': 'Course Id', "Course's end": "Course's end", "Course's start": "Course's start", 'Courses': 'Courses', 'create file with filename:'
: 'crează fișier cu numele:', 'Create new app
lication': 'Creați aplicație nouă', 'create new application:': 'crează aplicație nouă:', 'crontab': 'crontab', 'Current request': 'Cerere curentă', 'Current response': 'Răspuns curent', 'Current session': 'Sesiune curentă', 'currently saved or': 'în prezent salvat sau', 'customize me!': 'Personalizează-mă!', 'DASHBOARD': 'DASHBOARD', 'Dashboard': 'Dashboard', 'data uploaded': 'date încărcate', 'Database': 'bază de date', 'Database %s select': 'selectare bază de date %s', 'database administration': 'administrare bază de date', 'Database Administration (appadmin)': 'Database Administration (appadmin)', 'Date': 'Date', 'Date and Time': 'Data și ora', 'db': 'db', 'DB Model': 'Model bază de date', 'defines tables': 'definire tabele', 'Delete': 'Șterge', 'delete': 'șterge', 'delete all checked': 'șterge tot ce e coșat', 'Delete:': 'Șterge:', 'Demo': 'Demo', 'Denied': 'Denied', 'Deploy on Google App Eng
samabhi/pstHealth
venv/lib/python2.7/site-packages/requests/packages/urllib3/_collections.py
Python
mit
4,119
0.000243
# urllib3/_collections.py # Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from collections import deque from threading import RLock __all__ = ['RecentlyUsedContainer'] class AccessEntry(object): __slots__ = ('key', 'is_valid') def __init__(self, key, is_valid=True): self.key = key self.is_valid = is_valid class RecentlyUsedContainer(dict): """ Provides a dict-like that maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. """ # If len(self.access_log) exceeds self._maxsize * CLEANUP_FACTOR, then we # will attempt to cleanup the invalidated entries in the access_log # datastructure during the next 'get' operation. CLEANUP_FACTOR = 10 def __init__(self, maxsize=10): self._maxsize = maxsize self._container = {} # We use a deque to store our keys ordered by the last access. self.access_log = deque() self.access_log_lock = RLock() # We look up the access log entry by the key to invalidate it so we can # insert a new authoritative entry at the head without having to dig and # find the old entry for removal immediately. self.access_lookup = {} # Trigger a heap cleanup when we get past this size self.access_log_limit = maxsize * self.CLEANUP_FACTOR def _invalidate_entry(self, key): "If exists: Invalidate old entry and return it." old_entry = self.access_lookup.get(key) if old_entry: old_entry.is_valid = False return old_entry def _push_entry(self, key): "Push entry onto our access log, invalidate the old entry if exists." self._invalidate_entry(key) new_entry = AccessEntry(key) self.access_lookup[key] = new_entry self.access_log_lock.acquire() self.access_log.appendleft(new_entry) self.access_log_lock.release() def _prune_entries(self, num): "Pop entries from our access log until we popped ``num`` valid ones." while num > 0: self.access_log_lock.acquire() p = self.access_log.pop() self.access_log_lock.release() if not p.is_valid: continue # Invalidated entry, skip dict.pop(self, p.key, None) self.access_lookup.pop(p.key, None) num -= 1 def _prune_invalidated_entries(self): "Rebuild our access_log without the invalidated entries." self.access_log_lock.acquire() self.access_log = deque(e for e in self.access_log if e.is_valid) self.access_log_lock.release() def _get_ordered_access_keys(self): "Return ordered access keys for inspection. Used for testing." self.access_log_lock.acquire() r = [e.key for e in self.access_log if e.is
_valid] self.access_log_lock.release() return r def __getitem__(self
, key): item = dict.get(self, key) if not item: raise KeyError(key) # Insert new entry with new high priority, also implicitly invalidates # the old entry. self._push_entry(key) if len(self.access_log) > self.access_log_limit: # Heap is getting too big, try to clean up any tailing invalidated # entries. self._prune_invalidated_entries() return item def __setitem__(self, key, item): # Add item to our container and access log dict.__setitem__(self, key, item) self._push_entry(key) # Discard invalid and excess entries self._prune_entries(len(self) - self._maxsize) def __delitem__(self, key): self._invalidate_entry(key) self.access_lookup.pop(key, None) dict.__delitem__(self, key) def get(self, key, default=None): try: return self[key] except KeyError: return default
keithhendry/treadmill
tests/runtime_test.py
Python
apache-2.0
4,097
0
"""Unit test for treadmill.runtime. """ import errno import socket import unittest import mock import treadmill import treadmill.rulefile import treadmill.runtime from treadmill import exc class RuntimeTest(unittest.TestCase): """Tests for treadmill.runtime.""" @mock.patch('socket.socket.bind', mock.Mock()) def test__allocate_sockets(self): """Test allocating sockets. """ # access protected module _allocate_sockets # pylint: disable=w0212 socket.socket.bind.side_effect = [ socket.error(errno.EADDRINUSE, 'In use'), mock.DEFAULT, mock.DEFAULT, mock.DEFAULT ] sockets = treadmill.runtime._allocate_sockets( 'prod', '0.0.0.0', socket.SOCK_STREAM, 3 ) self.assertEqual(3, len(sockets)) @mock.patch('socket.socket.bind', mock.Mock()) def test__allocate_sockets_fail(self): """Test allocating sockets when all are taken. """ # access protected module _allocate_sockets # pylint: disable=w0212 socket.socket.bind.side_effect
= socket.error(errno.EADDRINUSE, 'In use') with self.assertRaises(exc.ContainerSetupError): treadmill.runtime._allocate_sockets( 'prod', '0.0.0.0', socket.SOCK_STREAM, 3
) @mock.patch('socket.socket', mock.Mock(autospec=True)) @mock.patch('treadmill.runtime._allocate_sockets', mock.Mock()) def test_allocate_network_ports(self): """Test network port allocation. """ # access protected module _allocate_network_ports # pylint: disable=w0212 treadmill.runtime._allocate_sockets.side_effect = \ lambda _x, _y, _z, count: [socket.socket()] * count mock_socket = socket.socket.return_value mock_socket.getsockname.side_effect = [ ('unused', 50001), ('unused', 60001), ('unused', 10000), ('unused', 10001), ('unused', 10002), ('unused', 12345), ('unused', 54321), ] manifest = { 'type': 'native', 'environment': 'dev', 'endpoints': [ { 'name': 'http', 'port': 8000, 'proto': 'tcp', }, { 'name': 'ssh', 'port': 0, 'proto': 'tcp', }, { 'name': 'dns', 'port': 5353, 'proto': 'udp', }, { 'name': 'port0', 'port': 0, 'proto': 'udp', } ], 'ephemeral_ports': {'tcp': 3, 'udp': 0}, } treadmill.runtime.allocate_network_ports( '1.2.3.4', manifest ) # in the updated manifest, make sure that real_port is specified from # the ephemeral range as returned by getsockname. self.assertEqual( 8000, manifest['endpoints'][0]['port'] ) self.assertEqual( 50001, manifest['endpoints'][0]['real_port'] ) self.assertEqual( 60001, manifest['endpoints'][1]['port'] ) self.assertEqual( 60001, manifest['endpoints'][1]['real_port'] ) self.assertEqual( 5353, manifest['endpoints'][2]['port'] ) self.assertEqual( 12345, manifest['endpoints'][2]['real_port'] ) self.assertEqual( 54321, manifest['endpoints'][3]['port'] ) self.assertEqual( 54321, manifest['endpoints'][3]['real_port'] ) self.assertEqual( [10000, 10001, 10002], manifest['ephemeral_ports']['tcp'] ) if __name__ == '__main__': unittest.main()
maneeshd/Algorithms-and-Data-Structures
algorithms/QuickSort.py
Python
mit
1,581
0
""" Author: Maneesh Divana <[email protected]> Interpreter: Python 3.6.8 Quick Sort Worst Case: O(n^2) Average Case: O(nlog n) Best Case: O(nlog n) """ from random import shuffle def partition(arr: list, left: int, right: int) -> int: """Partitions the given array based on a pivot element, then sorts the sub-arrays and returns the partition index""" # Take the right most element as pivot pivot = arr[right] # i tracks the smallest element, currently invalid i = left - 1 for j in range(left, right): # Check if the current element is smaller than pivot element if arr[j] <= pivot: i += 1 # If so, swap the smallest element and the current element arr[i], arr[j] = arr[j], arr[i] # One final swap to put pivot element at its correct position arr[i + 1], arr[right] = arr[right], arr[i + 1] # Return the partition index return i + 1 def qsort(arr: list, left: int, right: int) -> None: """Recursively partitions the given array and sorts based on QuickSort algorithm.""" if left < right: # Partition the array and get the partitio
n index p_idx = partition(arr, left, right) # Recursively partition and sort the sub-arrays qsort(arr, left, p_idx - 1) qsort(arr, p_idx + 1, right) if __name__ == "__main__": ARR = list(range(0, 10)) shuffle(ARR) LEFT = 0 RIGHT = len(ARR) - 1 print("\nQuickSort\n") print("Input array:", ARR) qso
rt(ARR, LEFT, RIGHT) print("\nSorted array:", ARR, "\n")
anhstudios/swganh
data/scripts/templates/object/tangible/wearables/base/shared_base_sleeve_both.py
Python
mit
465
0.045161
##
## NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/wearables/base/shared_base_sleeve_both.iff" result.attribute_template_i
d = 11 result.stfName("wearables_name","default_sleeves") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
cohadar/learn-python-the-hard-way
ex51.py
Python
mit
13
0.076923
# see ex50.
py
fasihahmad/django-rest-framework-related-views
rest_framework_related/py2_3.py
Python
gpl-3.0
239
0
# Python 2 and 3: try: # Python 3 only:
from urllib.parse import urlencode, urlsplit, parse_qs, unquote except ImportError: # Python 2 only: from urlparse import pa
rse_qs, urlsplit from urllib import urlencode, unquote
nikesh-mahalka/cinder
cinder/tests/unit/fake_hp_client_exceptions.py
Python
apache-2.0
3,077
0
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fake HP client exceptions to use when mocking HP clients.""" class UnsupportedVersion(Exception): """Unsupported version of the client.""" pass class ClientException(Exception): """The base exception class for these fake exceptions.""" _error_code = None _error_desc = None _error_ref = None _debug1 = None _debug2 = None def __init__(self, error=None): if error: if 'code' in error: self._error_code = error['code'] if 'desc' in error: self._error_desc = error['desc'] if 'ref' in error: self._error_ref = error['ref'] if 'debug1' in error: self._debug1 = error['debug1'] if 'debug2' in error: self._debug2 = error['debug2'] def get_code(self): return self._error_code def get_description(self): return self._error_desc def get_ref(self): return self._error_ref def __str__(self): formatted_string = self.message if self.http_status: formatted_string += " (HTTP %s)" % self.http_status if self._error_code: formatted_string += " %s" % self._error_code if self._error_desc: formatted_string += " - %s" % self._error_desc if self._error_ref: formatted_string += " - %s" % self._error_ref if self._debug1: formatted_string += " (1: '%s')" % self._debug1 if self._debug2: formatted_str
ing += " (2: '%s')" % self._debug2 return formatted_string class HTTPConflict(Exceptio
n): http_status = 409 message = "Conflict" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc class HTTPNotFound(Exception): http_status = 404 message = "Not found" class HTTPForbidden(ClientException): http_status = 403 message = "Forbidden" class HTTPBadRequest(Exception): http_status = 400 message = "Bad request" class HTTPServerError(Exception): http_status = 500 message = "Error" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc
ErwanAliasr1/syslinux
com32/cmenu/menugen.py
Python
gpl-2.0
10,693
0.033947
#!/usr/bin/env python import sys, re, getopt class Menusystem: types = {"run" : "OPT_RUN", "inactive" : "OPT_INACTIVE", "checkbox" : "OPT_CHECKBOX", "radiomenu": "OPT_RADIOMENU", "sep" : "OPT_SEP", "invisible": "OPT_INVISIBLE", "radioitem": "OPT_RADIOITEM", "exitmenu" : "OPT_EXITMENU", "login" : "login", # special type "submenu" : "OPT_SUBMENU"} entry_init = { "item" : "", "info" : "", "data" : "", "ipappend" : 0, # flag to send in case of PXELINUX "helpid" : 65535, # 0xFFFF "shortcut":"-1", "state" : 0, # initial state of checkboxes "argsmenu": "", # name of menu containing arguments "perms" : "", # permission required to execute this entry "_updated" : None, # has this dictionary been updated "type" : "run" } menu_init = { "title" : "", "row" : "0xFF", # let system decide position "col" : "0xFF", "_updated" : None, "name" : "" } system_init ={ "videomode" : "0xFF", "title" : "Menu System", "top" : "1", "left" : "1" , "bot" : "21", "right":"79", "helpdir" : "/isolinux/help", "pwdfile" : "", "pwdrow" : "23", "editrow" : "23", "skipcondn" : "0", "skipcmd" : ".exit", "startfile": "", "onerrorcmd":".repeat", "exitcmd" : ".exit", "exitcmdroot" : "", "timeout" : "600", "timeoutcmd":".beep", "totaltimeout" : "0", "totaltimeoutcmd" : ".wait" } shift_flags = { "alt" : "ALT_PRESSED", "ctrl" : "CTRL_PRESSED", "shift": "SHIFT_PRESSED", "caps" : "CAPSLOCK_ON", "num" : "NUMLOCK_ON", "ins" : "INSERT_ON" } reqd_templates = ["item","login","menu","system"] def __init__(self,template): self.state = "system" self.code_template_filename = template self.menus = [] self.init_entry() self.init_menu() self.init_system() self.vtypes = " OR ".join(list(self.types.keys())) self.vattrs = " OR ".join([x for x in list(self.entry.keys()) if x[0] != "_"]) self.mattrs = " OR ".join([x for x in list(self.menu.keys()) if x[0] != "_"]) def init_entry(self): self.entry = self.entry_init.copy() def init_menu(self): self.menu = self.menu_init.copy() def init_system(self): self.system = self.system_init.copy() def add_menu(self,name): self.add_item() self.init_menu() self.menu["name"] = name self.menu["_updated"] = 1 self.menus.append( (self.menu,[]) ) def add_item(self): if self.menu["_updated"]: # menu details have changed self.menus[-1][0].update(self.menu) self.init_menu() if self.entry["_updated"]: if not self.entry["info"]: self.entry["info"] = self.entry["data"] if not self.menus: print("Error before line %d" % self.lineno) print("REASON: menu must be declared before a menu item is declared") sys.exit(1) self.menus[-1][1].append(self.entry) self.init_entry() def set_i
tem(self,name,value): if name not in self.entry: msg = ["Unknown attribute %s in line %d" % (name,self.lineno)] msg.append("
REASON: Attribute must be one of %s" % self.vattrs) return "\n".join(msg) if name=="type" and value not in self.types: msg = [ "Unrecognized type %s in line %d" % (value,self.lineno)] msg.append("REASON: Valid types are %s" % self.vtypes) return "\n".join(msg) if name=="shortcut": if (value != "-1") and not re.match("^[A-Za-z0-9]$",value): msg = [ "Invalid shortcut char '%s' in line %d" % (value,self.lineno) ] msg.append("REASON: Valid values are [A-Za-z0-9]") return "\n".join(msg) elif value != "-1": value = "'%s'" % value elif name in ["state","helpid","ipappend"]: try: value = int(value) except: return "Value of %s in line %d must be an integer" % (name,self.lineno) self.entry[name] = value self.entry["_updated"] = 1 return "" def set_menu(self,name,value): if name not in self.menu: return "Error: Unknown keyword %s" % name self.menu[name] = value self.menu["_updated"] = 1 return "" def set_system(self,name,value): if name not in self.system: return "Error: Unknown keyword %s" % name if name == "skipcondn": try: # is skipcondn a number? a = int(value) except: # it is a "-" delimited sequence value = value.lower() parts = [ self.shift_flags.get(x.strip(),None) for x in value.split("-") ] self.system["skipcondn"] = " | ".join([_f for _f in parts if _f]) else: self.system[name] = value def set(self,name,value): # remove quotes if given if (value[0] == value[-1]) and (value[0] in ['"',"'"]): # remove quotes value = value[1:-1] if self.state == "system": err = self.set_system(name,value) if not err: return if self.state == "menu": err = self.set_menu(name,value) # change state to entry if menu returns error if err: err = None self.state = "item" if self.state == "item": err = self.set_item(name,value) if not err: return # all errors so return item's error message print(err) sys.exit(1) def print_entry(self,entry,fd): entry["type"] = self.types[entry["type"]] if entry["type"] == "login": #special type fd.write(self.templates["login"] % entry) else: fd.write(self.templates["item"] % entry) def print_menu(self,menu,fd): if menu["name"] == "main": self.foundmain = 1 fd.write(self.templates["menu"] % menu) if (menu["row"] != "0xFF") or (menu["col"] != "0xFF"): fd.write(' set_menu_pos(%(row)s,%(col)s);\n' % menu) def output(self,filename): curr_template = None contents = [] self.templates = {} regbeg = re.compile(r"^--(?P<name>[a-z]+) BEGINS?--\n$") regend = re.compile(r"^--[a-z]+ ENDS?--\n$") ifd = open(self.code_template_filename,"r") for line in ifd.readlines(): b = regbeg.match(line) e = regend.match(line) if e: # end of template if curr_template: self.templates[curr_template] = "".join(contents) curr_template = None continue if b: curr_template = b.group("name") contents = [] continue if not curr_template: continue # lines between templates are ignored contents.append(line) ifd.close() missing = None for x in self.reqd_templates: if x not in self.templates: missing = x if missing: print("Template %s required but not defined in %s" % (missing,self.code_template_filename)) if filename == "-": fd = sys.stdout else: fd = open(filename,"w") self.foundmain = None fd.write(self.templates["header"]) fd.write(self.templates["system"] % self.system) for (menu,items) in self.menus: self.print_menu(menu,fd) for entry in items: self.print_entry(entry,fd) fd.write(self.templates["footer"]) fd.clo
mava-ar/sgk
src/turnos/migrations/0005_auto_20160816_2140.py
Python
apache-2.0
415
0
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-08-17 00:40 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('turnos', '0004_auto_20160519_0134'), ] operations = [ migrations.RenameField('Turno', 'asistio', 'no_a
sistio'
), migrations.RenameField('Turno', 'aviso', 'no_aviso') ]
mrnamingo/vix4-34-enigma2-bcm
lib/python/Screens/TimerEntry.py
Python
gpl-2.0
25,002
0.027718
# -*- coding: utf-8 -*- from time import localtime, mktime, time, strftime from datetime import datetime from enigma import eEPGCa
che from Screens.Screen import Screen import
ChannelSelection from ServiceReference import ServiceReference from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry from Components.ActionMap import NumberActionMap, ActionMap from Components.ConfigList import ConfigListScreen from Components.MenuList import MenuList from Components.Button import Button from Components.Label import Label from Components.Pixmap import Pixmap from Components.SystemInfo import SystemInfo from Components.UsageConfig import defaultMoviePath from Components.Sources.Boolean import Boolean from Screens.MovieSelection import getPreferredTagEditor from Screens.LocationBox import MovieLocationBox from Screens.ChoiceBox import ChoiceBox from Screens.MessageBox import MessageBox from Screens.VirtualKeyBoard import VirtualKeyBoard from Screens.Setup import SetupSummary from RecordTimer import AFTEREVENT class TimerEntry(Screen, ConfigListScreen): def __init__(self, session, timer): Screen.__init__(self, session) self.setup_title = _("Timer entry") self.timer = timer self.entryDate = None self.entryService = None self["HelpWindow"] = Pixmap() self["HelpWindow"].hide() self["VKeyIcon"] = Boolean(False) self["description"] = Label("") self["oktext"] = Label(_("OK")) self["canceltext"] = Label(_("Cancel")) self["ok"] = Pixmap() self["cancel"] = Pixmap() self.createConfig() self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions", "ColorActions"], { "ok": self.keySelect, "save": self.keyGo, "cancel": self.keyCancel, "volumeUp": self.incrementStart, "volumeDown": self.decrementStart, "size+": self.incrementEnd, "size-": self.decrementEnd, }, -2) self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"], { "showVirtualKeyboard": self.KeyText, }, -2) self["VirtualKB"].setEnabled(False) self.onChangedEntry = [ ] self.list = [] ConfigListScreen.__init__(self, self.list, session = session) self.createSetup("config") self.onLayoutFinish.append(self.layoutFinished) if not self.selectionChanged in self["config"].onSelectionChanged: self["config"].onSelectionChanged.append(self.selectionChanged) self.selectionChanged() def createConfig(self): justplay = self.timer.justplay always_zap = self.timer.always_zap rename_repeat = self.timer.rename_repeat afterevent = { AFTEREVENT.NONE: "nothing", AFTEREVENT.DEEPSTANDBY: "deepstandby", AFTEREVENT.STANDBY: "standby", AFTEREVENT.AUTO: "auto" }[self.timer.afterEvent] if self.timer.record_ecm and self.timer.descramble: recordingtype = "descrambled+ecm" elif self.timer.record_ecm: recordingtype = "scrambled+ecm" elif self.timer.descramble: recordingtype = "normal" weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun") # calculate default values day = [] weekday = 0 for x in (0, 1, 2, 3, 4, 5, 6): day.append(0) if self.timer.repeated: # repeated type = "repeated" if self.timer.repeated == 31: # Mon-Fri repeated = "weekdays" elif self.timer.repeated == 127: # daily repeated = "daily" else: flags = self.timer.repeated repeated = "user" count = 0 for x in (0, 1, 2, 3, 4, 5, 6): if flags == 1: # weekly # print "Set to weekday " + str(x) weekday = x if flags & 1 == 1: # set user defined flags day[x] = 1 count += 1 else: day[x] = 0 flags >>= 1 if count == 1: repeated = "weekly" else: # once type = "once" repeated = None weekday = int(strftime("%u", localtime(self.timer.begin))) - 1 day[weekday] = 1 self.timerentry_justplay = ConfigSelection(choices = [ ("zap", _("zap")), ("record", _("record")), ("zap+record", _("zap and 
record"))], default = {0: "record", 1: "zap", 2: "zap+record"}[justplay + 2*always_zap]) if SystemInfo["DeepstandbySupport"]: shutdownString = _("go to deep standby") else: shutdownString = _("shut down") self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("auto", _("auto"))], default = afterevent) self.timerentry_recordingtype = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = recordingtype) self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type) self.timerentry_name = ConfigText(default = self.timer.name.replace('\xc2\x86', '').replace('\xc2\x87', '').encode("utf-8"), visible_width = 50, fixed_size = False) self.timerentry_description = ConfigText(default = self.timer.description, visible_width = 50, fixed_size = False) self.timerentry_tags = self.timer.tags[:] # if no tags found, make name of event default tag set. if not self.timerentry_tags: tagname = self.timer.name.strip() if tagname: tagname = tagname[0].upper() + tagname[1:].replace(" ", "_") self.timerentry_tags.append(tagname) self.timerentry_tagsset = ConfigSelection(choices = [not self.timerentry_tags and "None" or " ".join(self.timerentry_tags)]) self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("weekly", _("weekly")), ("daily", _("daily")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))]) self.timerentry_renamerepeat = ConfigYesNo(default = rename_repeat) self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d %B %Y"), increment = 86400) self.timerentry_starttime = ConfigClock(default = self.timer.begin) self.timerentry_endtime = ConfigClock(default = self.timer.end) self.timerentry_showendtime = ConfigSelection(default = False, choices = [(True, _("yes")), (False, _("no"))]) default = self.timer.dirname or defaultMoviePath() tmp = config.movielist.videodirs.value if default not in tmp: tmp.append(default) self.timerentry_dirname = ConfigSelection(default = default, choices = tmp) self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400) self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))]) self.timerentry_day = ConfigSubList() for x in (0, 1, 2, 3, 4, 5, 6): self.timerentry_day.append(ConfigYesNo(default = day[x])) # FIXME some service-chooser needed here servicename = "N/A" try: # no current service available? 
servicename = str(self.timer.service_ref.getServiceName()) except: pass self.timerentry_service_ref = self.timer.service_ref self.timerentry_service = ConfigSelection([servicename]) def createSetup(self, widget): self.list = [] self.entryName = getConfigListEntry(_("Name"), self.timerentry_name, _("Set the name the recording will get.")) self.list.append(self.entryName) self.entryDescription = getConfigListEntry(_("Description"), self.timerentry_description, _("Set the description of the recording.")) self.list.append(self.entryDescription) self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay, _("Chose between record and ZAP.")) self.list.append(self.timerJustplayEntry) self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type, _("A repeating timer or just once?")) self.list.append(self.timerTypeEntry) if self.timerentry_type.value == "once": self.frequencyEntry = None else: # repeated self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated, _("Choose between Daily, Weekly, Weekdays or user defined.")) self.list.append(self.frequencyEntry) self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"),
usc-isi/extra-specs
nova/tests/rpc/test_dispatcher.py
Python
apache-2.0
3,730
0.001072
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for rpc.dispatcher """ from nova import context from nova.rpc import dispatcher from nova.rpc import common as rpc_common from nova import test class RpcDispatcherTestCase(test.TestCase): class API1(object): RPC_API_VERSION = '1.0' def __init__(self): self.test_method_ctxt = None self.test_method_arg1 = None def test_method(self, ctxt, arg1): self.test_method_ctxt = ctxt self.test_method_arg1 = arg1 class API2(object): RPC_API_VERSION = '2.1' def __init__(self): self.test_method_ctxt = None self.test_method_arg1 = None def test_method(self, ctxt, arg1): self.test_method_ctxt = ctxt self.test_method_arg1 = arg1 class API3(object): RPC_API_VERSION = '3.5' def __init__(self): self.test_method_ctxt = None self.test_method_arg1 = None def test_method(self, ctxt, arg1): self.test_method_ctxt = ctxt
self.test_method_arg1 = arg1 def setUp(self): self.
ctxt = context.RequestContext('fake_user', 'fake_project') super(RpcDispatcherTestCase, self).setUp() def tearDown(self): super(RpcDispatcherTestCase, self).tearDown() def _test_dispatch(self, version, expectations): v2 = self.API2() v3 = self.API3() disp = dispatcher.RpcDispatcher([v2, v3]) disp.dispatch(self.ctxt, version, 'test_method', arg1=1) self.assertEqual(v2.test_method_ctxt, expectations[0]) self.assertEqual(v2.test_method_arg1, expectations[1]) self.assertEqual(v3.test_method_ctxt, expectations[2]) self.assertEqual(v3.test_method_arg1, expectations[3]) def test_dispatch(self): self._test_dispatch('2.1', (self.ctxt, 1, None, None)) self._test_dispatch('3.5', (None, None, self.ctxt, 1)) def test_dispatch_lower_minor_version(self): self._test_dispatch('2.0', (self.ctxt, 1, None, None)) self._test_dispatch('3.1', (None, None, self.ctxt, 1)) def test_dispatch_higher_minor_version(self): self.assertRaises(rpc_common.UnsupportedRpcVersion, self._test_dispatch, '2.6', (None, None, None, None)) self.assertRaises(rpc_common.UnsupportedRpcVersion, self._test_dispatch, '3.6', (None, None, None, None)) def test_dispatch_lower_major_version(self): self.assertRaises(rpc_common.UnsupportedRpcVersion, self._test_dispatch, '1.0', (None, None, None, None)) def test_dispatch_higher_major_version(self): self.assertRaises(rpc_common.UnsupportedRpcVersion, self._test_dispatch, '4.0', (None, None, None, None)) def test_dispatch_no_version_uses_v1(self): v1 = self.API1() disp = dispatcher.RpcDispatcher([v1]) disp.dispatch(self.ctxt, None, 'test_method', arg1=1) self.assertEqual(v1.test_method_ctxt, self.ctxt) self.assertEqual(v1.test_method_arg1, 1)
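The dispatcher tests above pin down a simple compatibility contract: the requested major version must equal an API object's RPC_API_VERSION major exactly, while the requested minor version may be lower than or equal to the supported one. A minimal sketch of that check, assuming the same '<major>.<minor>' string format (the helper name below is illustrative, not nova's actual internal API):

def _is_compatible(requested, supported):
    # Both arguments are '<major>.<minor>' strings, e.g. '2.1'.
    req_major, req_minor = (int(x) for x in requested.split('.'))
    sup_major, sup_minor = (int(x) for x in supported.split('.'))
    # Major versions must match exactly; the requested minor version
    # may not exceed what the API object declares it supports.
    return req_major == sup_major and req_minor <= sup_minor

assert _is_compatible('2.0', '2.1')       # lower minor: accepted
assert not _is_compatible('2.6', '2.1')   # higher minor: rejected
assert not _is_compatible('1.0', '2.1')   # different major: rejected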
lucasdavid/grapher
tests/parsers/query_test.py
Python
mit
1,148
0.000871
from unittest import TestCase from unittest.mock import Mock from grapher import errors from grapher.parsers import QueryParser from grapher.parsers import query from nose_parameterized import parameterized class QueryParserTest(TestCase): def setUp(self): r = Mock() r.args = Mock() r.args.get = Mock() query.request = r @parameterized.expand([
({}, {'query': {}, 'skip': 0, 'limit': None}), ({'skip': '2'}, {'query': {}, 'skip': 2, 'limit': None}), ({ 'query': '{"test":"test 1"}', 'skip': '0', 'limit': '10' }, {'query': {'test': 'test 1'}, 'skip': 0, 'limit': 10}), ]) def
test_parse(self, request_query, expected): query.request.args.get.side_effect = lambda e: request_query[e] if e in request_query else None actual = QueryParser.parse() self.assertEqual(actual, expected) def test_invalid_query(self): query.request.args.get.return_value = 'invalid$query:{{{}' with self.assertRaises(errors.BadRequestError): QueryParser.parse()
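The parametrized cases fix the parser's contract: 'query' is a JSON-encoded dict defaulting to {}, 'skip' an integer defaulting to 0, 'limit' an integer defaulting to None, and malformed JSON must raise BadRequestError. A minimal parse() satisfying those tests, assuming a Flask-style request.args object and that BadRequestError accepts a message argument (a sketch, not necessarily grapher's real implementation):

import json
from grapher import errors

def parse(args):
    # args is dict-like with .get(), e.g. flask.request.args.
    raw_query = args.get('query')
    try:
        query = json.loads(raw_query) if raw_query else {}
    except ValueError:
        raise errors.BadRequestError('QUERY_INVALID')
    skip = int(args.get('skip') or 0)
    raw_limit = args.get('limit')
    limit = int(raw_limit) if raw_limit else None
    return {'query': query, 'skip': skip, 'limit': limit}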
mathieugouin/tradesim
demo/demo_ystockquote.py
Python
gpl-3.0
998
0.001002
# To make print working for Python2/3 from __future__ import print_function import ystockquote as ysq def _main(): for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]: print("=============================================") print("s: {}".format(s)) print("get_name: {}".format(ysq.get_name(s))) print("get_price: {}".format(ysq.get_price(s))) print("get_volume: {}".format(ysq.get_volume(s))) print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s))) print("get_market_cap:
{}".format(ysq.get_market_cap(s))) print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s))
) print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s))) print("get_52_week_low: {}".format(ysq.get_52_week_low(s))) print("get_52_week_high: {}".format(ysq.get_52_week_high(s))) print("get_currency: {}".format(ysq.get_currency(s))) if __name__ == '__main__': _main()
dbrgn/django-simplepaginator
simple_paginator/__init__.py
Python
lgpl-3.0
4,672
0.001712
# coding=utf-8 from django.core.paginator import Paginator, InvalidPage, EmptyPage from urllib import urlencode try: from urlparse import parse_qs except ImportError: from cgi import parse_qs class SimplePaginator(object): """A simple wrapper around the Django paginator.""" def __init__(self, request, prefix, data, columns=None, per_page=20, orphans=1): """Initialize a Paginator and set some properties. Return a tuple containing items and ordering key. Keyword arguments:
request -- The request object prefix -- The prefix for the controls' css-class data -- Elements to paginate columns -- A tuple of tuples containing column name and key (default None) per_page -- How many elements to display per page (default 20) orphans -- Whether to move orphans to the previous page (default 1) """ self.request = request self.prefix = prefix self.data = data self.columns = co
lumns self.per_page = per_page self.orphans = orphans def get_base_url(self): '''Get query string from request, remove all necessary parts and return two variants - one for the page suffix, one for the order suffix. ''' # Get querystring and path, initialize variables qsd = parse_qs(self.request.META['QUERY_STRING']) path = self.request.META['PATH_INFO'] qs_pa = qs_or = '' qs = baseurl = {} # Remove arguments that might be overwritten if qsd: if self.prefix + '_pa' in qsd: qs_pa = qsd.pop(self.prefix + '_pa')[0] if self.prefix + '_or' in qsd: qs_or = qsd.pop(self.prefix + '_or')[0] # Get querystring for both suffix variants qs_base = [(k, qsd[k][0]) for k in qsd] if qs_or: qs['pa'] = urlencode(qs_base + [(self.prefix + '_or', qs_or)]) if qs_pa: qs['or'] = urlencode(qs_base + [(self.prefix + '_pa', qs_pa)]) # Build base url if 'pa' in qs: baseurl['pa'] = '%s?%s&' % (path, qs['pa']) if 'or' in qs: baseurl['or'] = '%s?%s&' % (path, qs['or']) if qsd: if not 'pa' in baseurl: baseurl['pa'] = '%s?%s&' % (path, urlencode(qs_base)) if not 'or' in baseurl: baseurl['or'] = '%s?%s&' % (path, urlencode(qs_base)) else: if not 'pa' in baseurl: baseurl['pa'] = path + '?' if not 'or' in baseurl: baseurl['or'] = path + '?' return baseurl def paginate(self): # Make sure page number is an int. If not, deliver first page. try: page = int(self.request.GET.get('%s_pa' % self.prefix, 1)) except ValueError: page = 1 # Get sorting key try: order = int(self.request.GET.get('%s_or' % self.prefix, 0)) except ValueError: order = 0 # Sort data # First, check if data supports order_by (e.g. a queryset) # TODO default ordering feature if hasattr(self.data, 'order_by') and self.columns and order: key = self.columns[abs(order) - 1][1] model_attr = None if hasattr(self.data, 'model') and hasattr(self.data.model, key): model_attr = getattr(self.data.model, key) if model_attr and callable(model_attr): self.data = sorted(self.data, key=model_attr, reverse=(order <= 0)) else: order_str = '%s' if order > 0 else '-%s' order_key = order_str % key self.data = self.data.order_by(order_key) # If data doesn't support order_by, sort by index elif order: sortfunc = lambda x: x[abs(order) - 1] * cmp(order, 0) self.data = sorted(self.data, key=sortfunc) # Initialize paginator self.paginator = Paginator(self.data, self.per_page, self.orphans) # Get pagination items for current page. If page request is out of # range, deliver last page of results. try: items = self.paginator.page(page) except (EmptyPage, InvalidPage): items = self.paginator.page(self.paginator.num_pages) # Get base url baseurl = self.get_base_url() return items, order, baseurl def paginate(*args, **kwargs): """Shortcut function to avoid having to instantiate the SimplePaginator Class.""" p = SimplePaginator(*args, **kwargs) return p.paginate()
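In a view, the paginate() shortcut returns the current page of items, the active ordering key, and the base URLs for building page and order links. A hedged usage sketch (the Article model, template name, and column labels are illustrative, not part of the library):

from django.shortcuts import render

def article_list(request):
    articles = Article.objects.all()  # assumed model
    items, order, baseurl = paginate(
        request, 'articles', articles,
        columns=(('Title', 'title'), ('Date', 'pub_date')),
        per_page=10)
    return render(request, 'article_list.html',
                  {'items': items, 'order': order, 'baseurl': baseurl})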
Lucretiel/genetics
genetics/simulation/__init__.py
Python
lgpl-2.1
41
0
from .dis
crete
import DiscreteSimulation
lgrahl/scripthookvpy3k
python/scripts/metadata.py
Python
mit
857
0.002334
import asyncio import gta.utils # The following metadata will not be processed but is recommended # Author name and E-Mail __author__ = 'Full Name <[email protected]>' # Status of the script: Use one of 'Prototype', 'Development', 'Production' __status__ = 'Development' # The following metadata will be parsed and should always be provided # Version number: This should always be a string and formatted in the x.x.x notation __version__ = '0.0.1' # A list of dependencies in the requirement specifiers format # See: https://pip.pypa.io/en/latest/reference/pip_install.html#require
ment-specifiers __dependencies__ = ('aiohttp>=0.15.3',) @asyncio.coroutine def main(): """ Does absolute
ly nothing but show you how to provide metadata. """ logger = gta.utils.get_logger('gta.metadata') logger.debug('Hello from the metadata example')
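A script loader consuming this module would typically import it and read the dunder attributes before scheduling main(); a rough sketch under that assumption (gta's actual loader is not shown in this excerpt):

import importlib

def load_script(module_name):
    mod = importlib.import_module(module_name)
    # Metadata that the loader parses, per the comments above.
    version = getattr(mod, '__version__', '0.0.0')
    dependencies = getattr(mod, '__dependencies__', ())
    return mod.main, version, dependencies

# Example: coro, version, deps = load_script('scripts.metadata')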
jessicalucci/TaskManagement
taskflow/task.py
Python
apache-2.0
9,682
0
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved. # Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from taskflow.utils import misc from taskflow.utils import reflection def _save_as_to_mapping(save_as): """Convert save_as to mapping name => index Result should follow storage convention for mappings. """ # TODO(harlowja): we should probably document this behavior & convention # outside of code so that its more easily understandable, since what a task # returns is pretty crucial for other later operations. if save_as is None: return {} if isinstance(save_as, six.string_types): # NOTE(harlowja): this means that your task will only return one item # instead of a dictionary-like object or a indexable object (like a # list or tuple). return {save_as: None} elif isinstance(save_as, (tuple, list)): # NOTE(harlowja): this means that your task will return a indexable # object, like a list or tuple and the results can be mapped by index # to that tuple/list that is returned for others to use. return dict((key, num) for num, key in enumerate(save_as)) elif isinstance(save_as, set): # NOT
E(harlowja): in the case where a set is given we will not be # able to determine the numeric ordering in a reliable way (since it is # an unordered set) so the only way for us to easily ma
p the result of # the task will be via the key itself. return dict((key, key) for key in save_as) raise TypeError('Task provides parameter ' 'should be str, set or tuple/list, not %r' % save_as) def _build_rebind_dict(args, rebind_args): if rebind_args is None: return {} elif isinstance(rebind_args, (list, tuple)): rebind = dict(zip(args, rebind_args)) if len(args) < len(rebind_args): rebind.update((a, a) for a in rebind_args[len(args):]) return rebind elif isinstance(rebind_args, dict): return rebind_args else: raise TypeError('Invalid rebind value: %s' % rebind_args) def _check_args_mapping(task_name, rebind, args, accepts_kwargs): args = set(args) rebind = set(rebind.keys()) extra_args = rebind - args missing_args = args - rebind if not accepts_kwargs and extra_args: raise ValueError('Extra arguments given to task %s: %s' % (task_name, sorted(extra_args))) if missing_args: raise ValueError('Missing arguments for task %s: %s' % (task_name, sorted(missing_args))) def _build_arg_mapping(task_name, reqs, rebind_args, function, do_infer): task_args = reflection.get_required_callable_args(function) accepts_kwargs = reflection.accepts_kwargs(function) result = {} if reqs: result.update((a, a) for a in reqs) if do_infer: result.update((a, a) for a in task_args) result.update(_build_rebind_dict(task_args, rebind_args)) _check_args_mapping(task_name, result, task_args, accepts_kwargs) return result class BaseTask(object): """An abstraction that defines a potential piece of work that can be applied and can be reverted to undo the work as a single unit. """ __metaclass__ = abc.ABCMeta TASK_EVENTS = ('update_progress', ) def __init__(self, name, provides=None): self._name = name # An *immutable* input 'resource' name mapping this task depends # on existing before this task can be applied. # # Format is input_name:arg_name self.rebind = {} # An *immutable* output 'resource' name dict this task # produces that other tasks may depend on this task providing. # # Format is output index:arg_name self.save_as = _save_as_to_mapping(provides) # This identifies the version of the task to be ran which # can be useful in resuming older versions of tasks. Standard # major, minor version semantics apply. self.version = (1, 0) # List of callback functions to invoke when progress updated. self._on_update_progress_notify = [] self._events_listeners = {} @property def name(self): return self._name def __str__(self): return "%s==%s" % (self.name, misc.get_task_version(self)) @abc.abstractmethod def execute(self, *args, **kwargs): """Activate a given task which will perform some operation and return. This method can be used to apply some given context and given set of args and kwargs to accomplish some goal. Note that the result that is returned needs to be serializable so that it can be passed back into this task if reverting is triggered. """ def revert(self, *args, **kwargs): """Revert this task using the given context, result that the apply provided as well as any information which may have caused said reversion. """ @property def provides(self): return set(self.save_as) @property def requires(self): return set(self.rebind.values()) def update_progress(self, progress, **kwargs): """Update task progress and notify all registered listeners. 
:param progress: task progress float value between 0 and 1 :param kwargs: task specific progress information """ self._trigger('update_progress', progress, **kwargs) def _trigger(self, event, *args, **kwargs): """Execute all handlers for the given event type.""" if event in self._events_listeners: for handler in self._events_listeners[event]: event_data = self._events_listeners[event][handler] handler(self, event_data, *args, **kwargs) def bind(self, event, handler, **kwargs): """Attach a handler to an event for the task. :param event: event type :param handler: function to execute each time event is triggered :param kwargs: optional named parameters that will be passed to the event handler :raises ValueError: if invalid event type passed """ if event not in self.TASK_EVENTS: raise ValueError("Unknown task event %s" % event) if event not in self._events_listeners: self._events_listeners[event] = {} self._events_listeners[event][handler] = kwargs def unbind(self, event, handler=None): """Remove a previously-attached event handler from the task. If handler function not passed, then unbind all event handlers. :param event: event type :param handler: previously attached to event function """ if event in self._events_listeners: if not handler: self._events_listeners[event] = {} else: if handler in self._events_listeners[event]: self._events_listeners[event].pop(handler) class Task(BaseTask): """Base class for user-defined tasks Adds following features to Task: - auto-generates name from type of self - adds all execute argument names to task requirements - items provided by the task may be specified via 'default_provides' class attribute or property """ default_provides = None def __init__(self, name=None, provides=None, requires=None,
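Following the provides conventions documented in _save_as_to_mapping, a concrete user task usually just declares what it returns and implements execute() (and optionally revert()); a minimal illustrative sketch, not taken from taskflow itself:

class AddTask(Task):
    # A plain string means the task returns a single item that the
    # storage layer saves under this name (see _save_as_to_mapping).
    default_provides = 'total'

    def execute(self, x, y):
        return x + y

    def revert(self, x, y, **kwargs):
        # Pure computation: nothing to undo on flow failure.
        pass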
flavour/eden
controllers/disease.py
Python
mit
8,481
0.008843
# -*- coding: utf-8 -*- """ Disease Case Tracking and Contact Tracing """ if not settings.has_module(c): raise HTTP(404, body="Module disabled: %s" % c) # ----------------------------------------------------------------------------- def index(): "Module's Home Page" module_name = settings.modules[c].get("name_nice") response.title = module_name return {"module_name": module_name, } # ----------------------------------------------------------------------------- def disease(): """ Disease Information Controller """ return s3_rest_controller(rheader = s3db.disease_rheader) # ----------------------------------------------------------------------------- def case(): """ Case Tracking Controller """ def prep(r): if settings.get_disease_case_id(): ptable = s3db.pr_person ptable.pe_label.label = T("ID") if r.record: # Do not allow changing the person ID person_id = r.table.person_id person_id.writable = False person_id.comment = None else: dtable = s3db.disease_disease diseases = db(dtable.deleted == False).select(dtable.id, limitby=(0, 2) ) if len(diseases) == 1: # Default to only disease field = r.table.disease_id field.default = diseases.first().id field.writable = False component_name = r.component_name if component_name in ("contact", "exposure"): field = r.component.table.tracing_id field.readable = field.writable = False if component_name == "contact": # Adapt CRUD strings to perspective s3.crud_strings["disease_exposure"] = Storage( label_create = T("Add Close Contact"), title_display = T("Close Contact Details"), title_list = T("Close Contacts"), title_update = T("Edit Contact"), label_list_button = T("List Close Contacts"), label_delete_button = T("Delete Contact"), msg_record_created = T("Contact added"), msg_record_modified = T("Contact updated"), msg_record_deleted = T("Contact deleted"), msg_list_empty = T("No Close Contacts currently registered")) return True s3.prep = prep def postp(r, output): if isinstance(output, dict) and "buttons" in output: buttons = output["buttons"] if "list_btn" in buttons and "summary_btn" in buttons: buttons["list_btn"] = buttons[
"summary_btn"] return output s3.postp = postp return s3_rest_controller(rheader = s3db.disease_rheader) # ----------------------------------------------------------------------------- def person(): """ Delegated person-controller for case tab """ def prep(r): resource = r.resource table = resource.table table.pe_label.label = T("ID") get_vars = r.get_vars if "viewing" in get_vars: try:
vtablename, record_id = get_vars["viewing"].split(".") except ValueError: return False if vtablename == "disease_case": # Get the person_id from the case ctable = s3db[vtablename] query = (ctable.id == record_id) row = db(query).select(ctable.person_id, limitby = (0, 1), ).first() if not row: r.error(404, current.ERROR.BAD_RECORD) # Update the request request = s3base.S3Request("pr", "person", args = [str(row.person_id)], vars = {}, ) r.resource = resource = request.resource r.record = request.record r.id = request.id # Name fields in name-format order NAMES = ("first_name", "middle_name", "last_name") keys = s3base.StringTemplateParser.keys(settings.get_pr_name_format()) name_fields = [fn for fn in keys if fn in NAMES] # Fields in form from s3 import S3SQLInlineComponent crud_fields = name_fields + \ ["gender", "date_of_birth", S3SQLInlineComponent( "contact", fields = [("", "value")], filterby = {"field": "contact_method", "options": "SMS", }, label = settings.get_ui_label_mobile_phone(), multiple = False, name = "phone", ), S3SQLInlineComponent( "contact", fields = [("", "value")], filterby = {"field": "contact_method", "options": "EMAIL", }, label = T("Email"), multiple = False, name = "email", ), ] resource.configure(crud_form = s3base.S3SQLCustomForm(*crud_fields), deletable = False, ) return True else: return False s3.prep = prep def postp(r, output): # Remove list- and summary-buttons if r.record and isinstance(output, dict): buttons = output.get("buttons") if buttons: buttons.pop("list_btn", None) buttons.pop("summary_btn", None) return output s3.postp = postp return s3_rest_controller("pr", "person", rheader = s3db.disease_rheader, ) # ----------------------------------------------------------------------------- def tracing(): """ Contact Tracing Controller """ def prep(r): if r.id and r.component_name == "exposure": ctable = r.component.table case_id = ctable.case_id case_id.default = r.id case_id.readable = case_id.writable = False crud_strings = s3.crud_strings[r.component.tablename] crud_strings["label_create"] = T("Add Contact Person") crud_strings["label_delete_button"] = T("Delete Contact Person") return True s3.prep = prep return s3_rest_controller(rheader = s3db.disease_rheader) # ----------------------------------------------------------------------------- def testing_report(): """ Testing Site Daily Summary Report: RESTful CRUD Controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def testing_device(): """ Testing Device Registry: RESTful CRUD Controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def case_diagnostics(): """ Diagnostic Tests: RESTful CRUD Controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def statistic(): """ RESTful CRUD Controller """ return s3_rest_controller() # ----------------------------------------------------------------------------- def stats_data(): """ RESTful CRUD C
jleclanche/fireplace
tests/full_game.py
Python
agpl-3.0
577
0.025997
#!/usr/bin/env python import sys from fireplace import cards from fireplace.exceptions import GameOver from fireplace.utils import play_full_game
sys.path.append("..") def test_full_game(): try: play_full_game() except GameOver: print("Game completed normally."
) def main(): cards.db.initialize() if len(sys.argv) > 1: numgames = sys.argv[1] if not numgames.isdigit(): sys.stderr.write("Usage: %s [NUMGAMES]\n" % (sys.argv[0])) exit(1) for i in range(int(numgames)): test_full_game() else: test_full_game() if __name__ == "__main__": main()
IfcOpenShell/IfcOpenShell
src/blenderbim/test/bim/bootstrap.py
Python
lgpl-3.0
15,500
0.003032
# BlenderBIM Add-on - OpenBIM Blender Add-on # Copyright (C) 2021 Dion Moult <[email protected]> # # This file is part of BlenderBIM Add-on. # # BlenderBIM Add-on is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # BlenderBIM Add-on is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>. import os import re import bpy import pytest import webbrowser import blenderbim import ifcopenshell import ifcopenshell.util.representation from blenderbim.bim.ifc import IfcStore from mathutils import Vector # Monkey-patch webbrowser opening since we want to test headlessly webbrowser.open = lambda x: True variables = {"cwd": os.getcwd(), "ifc": "IfcStore.get_file()"} class NewFile: @pytest.fixture(autouse=True) def setup(self): IfcStore.purge() bpy.ops.wm.read_homefile(app_template="") if bpy.data.objects: bpy.data.batch_remove(bpy.data.objects) bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True) blenderbim.bim.handler.setDefaultProperties(None) class NewIfc: @pytest.fixture(autouse=True) def setup(self): IfcStore.purge() bpy.ops.wm.read_homefile(app_template="") bpy.data.batch_remove(bpy.data.objects) bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True) blenderbim.bim.handler.setDefaultProperties(None) bpy.ops.bim.create_project() def scenario(function): def subfunction(self): run(function(self)) return subfunction def scenario_debug(function): def subfunction(self): run_debug(function(self)) return subfunction def an_empty_ifc_project(): bpy.ops.bim.create_project() def i_add_a_cube(): bpy.ops.mesh.primitive_cube_add() def i_add_a_cube_of_size_size_at_location(size, location): bpy.ops.mesh.primitive_cube_add(size=float(size), location=[float(co) for co in location.split(",")]) def the_object_name_is_selected(name): i_deselect_all_objects() additionally_the_object_name_is_selected(name) def additionally_the_object_name_is_selected(name): obj = bpy.context.scene.objects.get(name) if not obj: assert False, f'The object "{name}" could not be selected' bpy.context.view_layer.objects.active = obj obj.select_set(True) def i_deselect_all_objects(): bpy.context.view_layer.objects.active = None bpy.ops.object.select_all(action="DESELECT") def i_am_on_frame_number(number): bpy.context.scene.frame_set(int(number)) def i_set_prop_to_value(prop, value): try: eval(f"bpy.context.{prop}") except: assert False, "Property does not exist" try: exec(f'bpy.context.{prop} = "{value}"') except: exec(f"bpy.context.{prop} = {value}") def prop_is_value(prop, value): is_value = False try: exec(f'assert bpy.context.{prop} == "{value}"') is_value = True except: try: exec(f"assert bpy.context.{prop} == {value}") is_value = True except: try: exec(f"assert list(bpy.context.{prop}) == {value}") is_value = True except: pass if not is_value: actual_value = eval(f"bpy.context.{prop}") assert False, f"Value is {actual_value}" def i_enable_prop(prop): exec(f"bpy.context.{prop} = True") def i_press_operator(operator): if "(" in operator: exec(f"bpy.ops.{operator}") else:
exec(f"bpy.ops.{operator}()") def i_rename_the_object_name1_to_name2(name1, name2): the_object_name_exists(name1).name = name2 def the_object_name_exists(name): obj = bpy.data.objects.get(name) if not obj: assert False, f'The object "{name}" does not exist' return obj def an_ifc_file_exists(): ifc = IfcStore.get_file() if not ifc: assert False, "No IFC file is available" return ifc def an_ifc_file_does_not_exist(): ifc = IfcStore.get_file() if ifc: assert False, "An IFC is available" def the_object_name_does_not_exist(name): assert bpy.data.objects.get(name) is None, "Object exists" def the_object_name_is_an_ifc_class(name, ifc_class): ifc = an_ifc_file_exists() element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id) assert element.is_a(ifc_class), f'Object "{name}" is an {element.is_a()}' def the_object_name_is_not_an_ifc_element(name): id = the_object_name_exists(name).BIMObjectProperties.ifc_definition_id assert id == 0, f"The ID is {id}" def the_object_name_is_in_the_collection_collection(name, collection): assert collection in [c.name for c in the_object_name_exists(name).users_collection] def the_object_name_is_not_in_the_collection_collection(name, collection): assert collection not in [c.name for c in the_object_name_exists(name).users_collection] def the_object_name_has_a_body_of_value(name, value): assert the_object_name_exists(name).data.body == value def the_collection_name1_is_in_the_collection_name2(name1, name2): assert bpy.data.collections.get(name2).children.get(name1) def the_collection_name1_is_not_in_the_collection_name2(name1, name2): assert not bpy.data.collections.get(name2).children.get(name1) def the_object_name_is_placed_in_the_collection_collection(name, collection): obj = the_object_name_exists(name) [c.objects.unlink(obj) for c in obj.users_collection] bpy.data.collections.get(collection).objects.link(obj) def the_object_name_has_a_type_representation_of_context(name, type, context): ifc = an_ifc_file_exists() element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id) context, subcontext, target_view = context.split("/") assert ifcopenshell.util.representation.get_representation( element, context, subcontext or None, target_view or None ) def the_object_name_is_contained_in_container_name(name, container_name): ifc = an_ifc_file_exists() element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id) container = ifcopenshell.util.element.get_container(element) if not container: assert False, f'Object "{name}" is not in any container' assert container.Name == container_name, f'Object "{name}" is in {container}' def i_duplicate_the_selected_objects(): bpy.ops.object.duplicate_move() blenderbim.bim.handler.active_object_callback() def i_delete_the_selected_objects(): bpy.ops.object.delete() blenderbim.bim.handler.active_object_callback() def the_object_name1_and_name2_are_different_elements(name1, name2): ifc = an_ifc_file_exists() element1 = ifc.by_id(the_object_name_exists(name1).BIMObjectProperties.ifc_definition_id) element2 = ifc.by_id(the_object_n
ame_exists(name2).BIMObjectProperties.ifc_definition_id) assert element1 != element2, f"Objects {name1} and {name2} have same elements {element1} and {element2}" def the_file_name_should_contain_value(name, value): with open(name, "r") as f: assert value in f.read() def the_object_name1_has_
a_boolean_difference_by_name2(name1, name2): obj = the_object_name_exists(name1) for modifier in obj.modifiers: if modifier.type == "BOOLEAN" and modifier.object and modifier.object.name == name2: return True assert False, "No boolean found" def the_object_name1_has_no_boolean_difference_by_name2(name1, name2): obj = the_object_name_exists(name1) for modifier in obj.modifiers: if modifier.type == "BOOLEAN" and modifier.object an
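These snake_case step functions are evidently mapped from Gherkin-style prose by the run() helper that the @scenario decorator calls (run() itself is not shown in this excerpt), so a test is presumably written as a string of steps; a hedged sketch of that usage pattern, with an illustrative object name:

class TestCreateProject(NewFile):
    @scenario
    def test_create(self):
        return """
        Given an empty IFC project
        Then the object IfcProject/My Project exists
        """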
chrismattmann/girder
tests/cases/api_describe_test.py
Python
apache-2.0
4,148
0
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### from .. import base from girder.api import access, describe from girder.api.rest import Resource OrderedRoutes = [ ('GET', (), ''), ('GET', (':id',), '/{id}'), ('UNKNOWN', (':id',), '/{id}'), ('GET', (':id', 'action'), '/{id}/action'), ('GET', ('action',), '/action'), ('PUT', ('action',), '/action'), ('POST', ('action',), '/action'), ('PATCH', ('action',), '/action'), ('DELETE', ('action',), '/action'), ('NEWMETHOD', ('action',), '/action'), ('UNKNOWN', ('action',), '/action'), ('GET', ('action', ':id'), '/action/{id}'), ('GET', ('noaction',), '/noaction') ] class DummyResource(Resource): def __init__(self): self.resourceName = 'foo' for method, pathElements, testPath in OrderedRoutes: self.route(method, pathElements, self.handler) @access.public def handler(self, **kwargs): return kwargs handler.description = describe.Description('Does nothing') def setUpModule(): server = base.startServer() server.root.api.v1.accesstest = DummyResource() def tearDownModule(): base.stopServer() class ApiDescribeTestCase(base.TestCase): """ Makes sure our swagger auto API docs are working. """ def testInvalidResource(self): methods = ['DELETE', 'GET', 'PATCH', 'POST', 'PUT'] for m in methods: resp = self.request(path='/not_valid', method=m, isJson=False) self.assertStatus(resp, 404) methods.remove('GET') for m in methods: resp = self.request(path='', method=m, isJson=False) self.assertStatus(resp, 405) def testApiDescribe(self): # Get coverage for serving the static swagger page resp = self.request(path='', method='GET', isJson=False) self.assertStatusOk(resp) # Test top level describe endpoint resp = self.request(path='/describe', method='GET') self.assertStatusOk(resp) self.assertEqual(resp.json['swaggerVersion'], describe.SWAGGER_VERSION) self.assertEqual(resp.json['apiVersion'], describe.API_VERSION) self.assertTrue({'path': '/group'} in resp.json['apis']) # Request a specific resource's description, sanity check resp = self.request(path='/describe/user', method='GET') self.assertS
tatusOk(resp) for routeDoc in resp.json['apis']: self.assertHasKeys(('path', 'operations'), routeDoc) self.assertTrue(len(routeDoc['operations']) > 0) # Request an unknown resource's description to get an error resp = self.request(path='/describe/unknown', method='GET') self.assertStatus(resp, 400) self.assertEqual(resp.json['message'], 'Invalid resource: unknown') def testRouteOrder(self): # Check that the resources and o
perations are listed in the order we # expect resp = self.request(path='/describe/foo', method='GET') self.assertStatusOk(resp) listedRoutes = [(method['httpMethod'], route['path']) for route in resp.json['apis'] for method in route['operations']] expectedRoutes = [(method, '/foo'+testPath) for method, pathElements, testPath in OrderedRoutes] self.assertEqual(listedRoutes, expectedRoutes)
SDSG-Invenio/invenio
invenio/legacy/bibindex/engine.py
Python
gpl-2.0
101,575
0.002648
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, # 2010, 2011, 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from __future__ import print_function """ BibIndex indexing engine implementation. See bibindex executable for entry point. """ __revision__ = "$Id$" import re import sys import time import fnmatch import inspect from datetime import datetime from six import iteritems from invenio.config import CFG_SOLR_URL from invenio.legacy.bibindex.engine_config import CFG_MAX_MYSQL_THREADS, \ CFG_MYSQL_THREAD_TIMEOUT, \ CFG_CHECK_MYSQL_THREADS, \ CFG_BIBINDEX_INDEX_TABLE_TYPE, \ CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \ CFG_BIBINDEX_UPDATE_MESSAGE, \ CFG_BIBINDEX_UPDATE_MODE, \ CFG_BIBINDEX_TOKENIZER_TYPE, \ CFG_BIBINDEX_WASH_INDEX_TERMS, \ CFG_BIBINDEX_SPECIAL_TAGS from invenio.legacy.bibauthority.config import \ CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC from invenio.legacy.bibauthority.engine import get_index_strings_by_control_no,\ get_control_nos_from_recID from invenio.legacy.search_engine import perform_request_search, \ get_index_stemming_language, \ get_synonym_terms, \ search_pattern, \ search_unit_in_bibrec from invenio.legacy.dbquery import run_sql, DatabaseError, serialize_via_marshal, \ deserialize_via_marshal, wash_table_column_name from invenio.legacy.bibindex.engine_washer import wash_index_term from invenio.legacy.bibsched.bibtask import task_init, write_message, get_datetime, \ task_set_option, task_get_option, task_get_task_param, \ task_update_progress, task_sleep_now_if_required from intbitset import intbitset from invenio.ext.logging import register_exception from invenio.legacy.bibrank.adminlib import get_def_name from invenio.legacy.miscutil.solrutils_bibindex_indexer import solr_commit from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import \ CFG_JOURNAL_TAG, \ CFG_JOURNAL_PUBINFO_STANDARD_FORM, \ CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK from invenio.legacy.bibindex.termcollectors import TermCollector from invenio.legacy.bibindex.engine_utils import load_tokenizers, \ get_all_index_names_and_column_values, \ get_index_tags, \ get_field_tags, \ get_marc_tag_indexes, \ get_nonmarc_tag_indexes, \ get_all_indexes, \ get_index_virtual_indexes, \ get_virtual_index_building_blocks, \ get_index_id_from_index_name, \ run_sql_drop_silently, \ get_min_last_updated, \ remove_inexistent_indexes, \ get_all_synonym_knowledge_bases, \ get_index_remove_stopwords, \ get_index_remove_html_markup, \ get_index_remove_latex_markup, \ filter_for_virtual_indexes, \ get_records_range_for_index, \ make_prefix, \ list_union, \ recognize_marc_tag from invenio.modules.records.api import get_record from invenio.utils.memoise import Memoise from invenio.legacy.bibindex.termcollectors import \ TermCollector, \ NonmarcTermCollector if sys.hexversion < 
0x2040000: # pylint: disable=W0622 from sets import Set as set # pylint: enable=W0622 # precompile some often-used regexp for speed reasons: re_subfields = re.compile('\$\$\w') re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])") re_prefix = re.compile('__[a-zA-Z1-9]*__') nb_char_in_line = 50 # for verbose pretty printing chunksize = 1000 # default size of chunks that the records will be treated by base_process_size = 4500 # process base size _last_word_table = None _TOKENIZERS = load_tokenizers() def list_unique(_list): """Returns a _list with duplicates removed.""" _dict = {} for e in _list: _dict[e] = 1 return _dict.keys() # safety function for killing slow DB threads: def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS, thread_timeout=CFG_MYSQL_THREAD_TIMEOUT): """Check the number of DB threads and if there are more than MAX_THREADS of them, kill all threads that are in a sleeping state for more than THREAD_TIMEOUT seconds. (This is useful for working around the max_connection problem that appears during indexation in some not-yet-understood cases.) If some threads are to be killed, write info into the log file. """ res = run_sql("SHOW FULL PROCESSLIST") if len(res) > max_threads: for row in res: r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row if r_command == "Sleep" and int(r_time) > thread_timeout: run_sql("KILL %s", (r_id, )) write_message("WARNING: too many DB threads, " + \ "killing thread %s" % r_id, verbose=1) return def get_associated_subfield_value(recID, tag, value, associated_subfield_code): """Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record RECID and TAG of value VALUE. Used by fulltext indexer only. Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode), otherwise an empty string is returned. FIXME: what if many tag values have the same value but different associated_subfield_code? Better use bibrecord library for this. """ out = "" if len(tag) != 6: return out bibXXx = "bib" + tag[0] + tag[1] + "x" bibrec_bibXXx = "bi
brec_" + bibXXx query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE %%s%%""" % (bibXXx, bibrec_bibXXx) res = run_sql(query, (recID, tag[:-1])) field_number = -1 for row in res: if row[1] == tag and row[2]
== value: field_number = row[0] if field_number > 0: for row in res: if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code: out = row[2] break return out def get_author_canonical_ids_for_recid(recID): """ Return list of author canonical IDs (e.g. `J.Ellis.1') for the given record. Done by consulting BibAuthorID module. """ return [] def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"): """Atomically swap reindexed temporary table with the original one. Delete the now-old one.""" write_message("Putting new tmp index tables " + \ "for id %s into production" % index_id) run_sql( "RENAME TABLE " + "idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) + "%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) + "idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) + "%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) + "idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) + "%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) + "idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) + "%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) + "idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) + "%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) + "idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) + "%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id) ) write_message("Dropping old index tables for id %s" %
samuelmaudo/yepes
tests/validators/tests.py
Python
bsd-3-clause
13,740
0.00102
# -*- coding:utf-8 -*- from __future__ import unicode_literals from django import test from django.core.exceptions import ValidationError from yepes.validators import ( CharSetValidator, ColorValidator, FormulaValidator, IdentifierValidator, PhoneNumberValidator, PostalCodeValidator, RestrictedEmailValidator, ) class ValidatorsTest(test.SimpleTestCase): def test_charset(self): validator = CharSetValidator('abcdef') def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('abcdef') assertValid('dadadada') assertNotValid('aBcDeF') assertNotValid('DADADADA') assertNotValid('uy') assertNotValid('a-f') validator = CharSetValidator('abcdefABCDEF') def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('abcdef') assertValid('dadadada') assertValid('aBcDeF') assertValid('DADADADA') assertNotValid('uy') assertNotValid('a-f') def test_charset_with_range(self): validator = CharSetValidator('a-f') def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('abcdef') assert
Valid('dadadada') assertNotValid('aBcDeF') assertNotValid('DADADADA') assertNotValid('uy') assertNotValid('a-
f') validator = CharSetValidator('a-fA-F') def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('abcdef') assertValid('dadadada') assertValid('aBcDeF') assertValid('DADADADA') assertNotValid('uy') assertNotValid('a-f') def test_color(self): validator = ColorValidator() def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('#5DC1B9') assertValid('#5dc1b9') assertValid('#fff') assertValid('#fffFFF') assertNotValid('5DC1B9') assertNotValid('5dc1b9') assertNotValid('fff') assertNotValid('fffFFF') assertNotValid('#12') assertNotValid('#1234') assertNotValid('#12345678') assertNotValid('#hijKLM') def test_formula(self): validator = FormulaValidator() def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('1 * 3 ** 5') assertValid('a * b ** c') assertValid('x * y ** z') assertNotValid('*') assertNotValid('not') assertNotValid('* 1') assertNotValid('1 *') assertNotValid('1 |/ 1') assertNotValid('1 * (10 - 3') assertNotValid('a * b)') def test_formula_with_variables(self): validator = FormulaValidator(['a', 'b', 'c']) def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('1 * 3 ** 5') assertValid('a * b ** c') assertNotValid('x * y ** z') assertNotValid('*') assertNotValid('not') assertNotValid('* 1') assertNotValid('1 *') assertNotValid('1 |/ 1') assertNotValid('1 * (10 - 3') assertNotValid('a * b)') def test_identifier(self): validator = IdentifierValidator() def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('variable') assertValid('variable_123') assertValid('_') assertValid('_variable') assertValid('variable_') assertValid('__variable__') assertValid('UpperCamelCase') assertValid('lowerCamelCase') assertValid('UPPER_CASE_WITH_UNDERSCORES') assertValid('lower_case_with_underscores') assertValid('Mixed_Case_With_Underscores') assertNotValid('123_variable') assertNotValid('z%.# +ç@') assertNotValid('UPPER-CASE-WITH-DASHES') assertNotValid('lower-case-with-dashes') assertNotValid('Mixed-Case-With-Dashes') def test_phone_number(self): validator = PhoneNumberValidator() def assertValid(value): self.assertTrue(validator.validate(value)) validator(value) def assertNotValid(value): self.assertFalse(validator.validate(value)) with self.assertRaises(ValidationError): validator(value) assertValid('+9-999-999-9999') assertValid('999-999-999-9999') assertValid('999 999 999 9999') assertValid('999-99999') assertValid('(999) / 999-99999') assertValid('+99-99-999-99999') assertValid('99-99-99-999-99999') assertValid('999') assertValid('9999-9999999') assertValid('99999-99999') assertValid('+99-99999-99999') assertValid('9-999999999') assertValid('(9999) 9999 9999') assertValid('99999999') assertValid('999999999999') assertValid('+99 999 9999 9999') assertValid('+99 
(9999) 9999 9999') assertValid('999 9999 9999') assertValid('9999 9999') assertValid('+9999-999-999') assertValid('+999-999-9999') assertValid('+999-9999-9999') assertValid('+9999-999-9999') assertValid('9999-999-999') assertValid('+99 (9) 999 9999') assertValid('+99 (99) 999 9999') assertValid('+99 (999) 999 9999') assertValid('9 (999) 999 9999') assertValid('+99-9999-9999') assertValid('+99 9999 9999') assertValid('99 99 99 99') assertValid('99 99 99 99 99') assertValid('9 99 99 99 99') assertValid('+99 9 99 99 99 99') assertValid('99999 999999') assertValid('99999 999999-99') assertValid('+99 9999 999999') assertValid('(99999) 999999') assertValid('+99 (9999) 999999') assertValid('99999-999999') assertValid('99999/999999-99') assertValid('999 9999') assertValid('999-9999') assertValid('99-99999999') assertValid('999-9999999') assertValid('9999-9999') assertValid('+99 99 99999999') assertValid('+99 9 99999999') assertValid('999 99 999') assertValid('999-999-999') assertValid('99-999-99-99') assertValid('(99) 999-99-99') assertValid('9 9999 99-99-99') assertValid('9 (999) 999-99-99') assertValid('999 99 99 99') assertVal
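All of these assertion pairs rely on one validator protocol: a boolean validate() plus a __call__ that raises ValidationError, which also makes the instances usable as Django field validators. A minimal sketch of that shape (illustrative; not yepes' actual implementation):

import re
from django.core.exceptions import ValidationError

class PatternValidator(object):
    def __init__(self, pattern):
        # Pattern is assumed to be anchored, e.g. r'^[a-f]+$'.
        self.regex = re.compile(pattern)

    def validate(self, value):
        # Boolean form used by the tests' assertValid helpers.
        return self.regex.search(value) is not None

    def __call__(self, value):
        # Raising form, matching the assertNotValid expectations.
        if not self.validate(value):
            raise ValidationError('Enter a valid value.')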
radical-cybertools/radical.ensemblemd.mdkernels
setup.py
Python
mit
5,031
0.015703
#!/usr/bin/env python """Setup file for HT-BAC Tools. """ __author__ = "Ole Weidner" __email__ = "[email protected]" __copyright__ = "Copyright 2014, The RADICAL Project at Rutgers" __license__ = "MIT" """ Setup script. Used by easy_install and pip. """ import os import sys import subprocess from setuptools import setup, find_packages, Command #----------------------------------------------------------------------------- # def get_version(): short_version = None # 0.4.0 long_version = None # 0.4.0-9-g0684b06 try: import subprocess as sp import re srcroot = os.path.dirname (os.path.abspath (__file__)) VERSION_MATCH = re.compile (r'(([\d\.]+)\D.*)') # attempt to get version information from git p = sp.Popen ('cd %s && git describe --tags --always' % srcroot, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True) out = p.communicate()[0] if p.returncode != 0 or not out : # the git check failed -- its likely that we are called from # a tarball, so use ./VERSION instead out=open ("%s/VERSION" % ".", 'r').read().strip() # from the full string, extract short and long versions v = VERSION_MATCH.search (out) if v: long_version = v.groups ()[0] short_version = v.groups ()[1] # sanity check if we got *something* if not short_version or not long_version : sys.stderr.write ("Cannot determine version from git or ./VERSION\n") import sys sys.exit (-1) # make sure the version files exist for the runtime version inspection open ('%s/VERSION' % srcroot, 'w').write (long_version+"\n") open ('%s/src/radical/ensemblemd/mdkernels/VERSION' % srcroot, 'w').write (long_version+"\n") except Exception as e : print 'Could not extract/set version: %s' % e import sys sys.exit (-1) return short_version, long_version #short_version, long_version = get_version () #----------------------------------------------------------------------------- # check python version. 
we need > 2.5, <3.x if sys.hexversion < 0x02050000 or sys.hexversion >= 0x03000000: raise RuntimeError("Sinon requires Python 2.x (2.5 or higher)") #----------------------------------------------------------------------------- # def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read() #----------------------------------------------------------------------------- setup_args = { 'name' : 'radical.ensemblemd.mdkernels', 'version' : 0.1, 'description' : "BAC is a tool for molecular dynamics binding affinity calculations.", 'long_description' : (read('README.md') + '\n\n' + read('CHANGES.md')), 'author' : 'RADICAL Group at Rutgers University', 'author_email' : '[email protected]', 'maintainer' : "Ole Weidner", 'maintainer_email' : '[email protected]', 'url' : 'https://github.com/radical-cybertools', 'license' : 'MIT', 'keywords' : "molecular dynamics binding affinity calculations", 'classifiers' : [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Environment :: Console', 'License :: OSI Approved :: MIT', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Utilities', 'Topic :: System :: Distributed Computing', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System :: Unix' ], #'entry_points': { # 'console_scripts': # ['htbac-fecalc = radical.ensemblemd.htbac.bin.fecalc:main', # 'htbac-sim = radical.ensemblemd.htbac.bin.sim:main'] #}, #'dependency_links': ['https://github.com/saga-project/saga-pilot/tarball/master#egg=sagapilot'], 'namespace_packages': ['radical', 'radical.ensemblemd'], 'packages' : ['radical', 'radical.ensemblemd', 'radical.ensemblem
d.mdkernel
s', 'radical.ensemblemd.mdkernels.configs'], 'package_dir' : {'': 'src'}, 'package_data' : {'': ['*.sh', '*.json', 'VERSION', 'VERSION.git']}, 'install_requires' : ['radical.utils', 'setuptools>=1'], 'test_suite' : 'radical.ensemblemd.mdkernels.tests', 'zip_safe' : False, } #----------------------------------------------------------------------------- setup (**setup_args) #-----------------------------------------------------------------------------
devs1991/test_edx_docmode
lms/djangoapps/ccx/api/v0/views.py
Python
agpl-3.0
30,649
0.002545
""" API v0 views. """ import datetime import json import logging import pytz from django.contrib.auth.models import User from django.db import transaction from django.http import Http404 from rest_framework import status from rest_framework.authentication import SessionAuthentication from rest_framework.generics import GenericAPIView from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from rest_framework_oauth.authentication import OAuth2Authentication from ccx_keys.locator import CCXLocator from courseware import courses from instructor.enrollment import ( enroll_email, get_email_params, ) from opaque_keys import InvalidKeyError from opaque_keys.edx.keys
import CourseKey, UsageKey from open
edx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.lib.api import permissions from student.models import CourseEnrollment from student.roles import CourseCcxCoachRole from lms.djangoapps.ccx.models import CcxFieldOverride, CustomCourseForEdX from lms.djangoapps.ccx.overrides import ( override_field_for_ccx, ) from lms.djangoapps.ccx.utils import ( add_master_course_staff_to_ccx, assign_coach_role_to_ccx, is_email, get_course_chapters, ) from .paginators import CCXAPIPagination from .serializers import CCXCourseSerializer log = logging.getLogger(__name__) TODAY = datetime.datetime.today # for patching in tests def get_valid_course(course_id, is_ccx=False, advanced_course_check=False): """ Helper function used to validate and get a course from a course_id string. It works with both master and ccx course id. Args: course_id (str): A string representation of a Master or CCX Course ID. is_ccx (bool): Flag to perform the right validation advanced_course_check (bool): Flag to perform extra validations for the master course Returns: tuple: a tuple of course_object, course_key, error_code, http_status_code """ if course_id is None: # the ccx detail view cannot call this function with a "None" value # so the following `error_code` should be never used, but putting it # to avoid a `NameError` exception in case this function will be used # elsewhere in the future error_code = 'course_id_not_provided' if not is_ccx: log.info('Master course ID not provided') error_code = 'master_course_id_not_provided' return None, None, error_code, status.HTTP_400_BAD_REQUEST try: course_key = CourseKey.from_string(course_id) except InvalidKeyError: log.info('Course ID string "%s" is not valid', course_id) return None, None, 'course_id_not_valid', status.HTTP_400_BAD_REQUEST if not is_ccx: try: course_object = courses.get_course_by_id(course_key) except Http404: log.info('Master Course with ID "%s" not found', course_id) return None, None, 'course_id_does_not_exist', status.HTTP_404_NOT_FOUND if advanced_course_check: if course_object.id.deprecated: return None, None, 'deprecated_master_course_id', status.HTTP_400_BAD_REQUEST if not course_object.enable_ccx: return None, None, 'ccx_not_enabled_for_master_course', status.HTTP_403_FORBIDDEN return course_object, course_key, None, None else: try: ccx_id = course_key.ccx except AttributeError: log.info('Course ID string "%s" is not a valid CCX ID', course_id) return None, None, 'course_id_not_valid_ccx_id', status.HTTP_400_BAD_REQUEST # get the master_course key master_course_key = course_key.to_course_locator() try: ccx_course = CustomCourseForEdX.objects.get(id=ccx_id, course_id=master_course_key) return ccx_course, course_key, None, None except CustomCourseForEdX.DoesNotExist: log.info('CCX Course with ID "%s" not found', course_id) return None, None, 'ccx_course_id_does_not_exist', status.HTTP_404_NOT_FOUND def get_valid_input(request_data, ignore_missing=False): """ Helper function to validate the data sent as input and to build field based errors. 
Args: request_data (OrderedDict): the request data object ignore_missing (bool): whether or not to ignore fields missing from the input data Returns: tuple: a tuple of two dictionaries for valid input and field errors """ valid_input = {} field_errors = {} mandatory_fields = ('coach_email', 'display_name', 'max_students_allowed',) # checking first if all the fields are present and they are not null if not ignore_missing: for field in mandatory_fields: if field not in request_data: field_errors[field] = {'error_code': 'missing_field_{0}'.format(field)} if field_errors: return valid_input, field_errors # at this point I can assume that if the fields are present, # they must be validated, otherwise they can be skipped coach_email = request_data.get('coach_email') if coach_email is not None: if is_email(coach_email): valid_input['coach_email'] = coach_email else: field_errors['coach_email'] = {'error_code': 'invalid_coach_email'} elif 'coach_email' in request_data: field_errors['coach_email'] = {'error_code': 'null_field_coach_email'} display_name = request_data.get('display_name') if display_name is not None: if not display_name: field_errors['display_name'] = {'error_code': 'invalid_display_name'} else: valid_input['display_name'] = display_name elif 'display_name' in request_data: field_errors['display_name'] = {'error_code': 'null_field_display_name'} max_students_allowed = request_data.get('max_students_allowed') if max_students_allowed is not None: try: max_students_allowed = int(max_students_allowed) valid_input['max_students_allowed'] = max_students_allowed except (TypeError, ValueError): field_errors['max_students_allowed'] = {'error_code': 'invalid_max_students_allowed'} elif 'max_students_allowed' in request_data: field_errors['max_students_allowed'] = {'error_code': 'null_field_max_students_allowed'} course_modules = request_data.get('course_modules') if course_modules is not None: if isinstance(course_modules, list): # de-duplicate list of modules course_modules = list(set(course_modules)) for course_module_id in course_modules: try: UsageKey.from_string(course_module_id) except InvalidKeyError: field_errors['course_modules'] = {'error_code': 'invalid_course_module_keys'} break else: valid_input['course_modules'] = course_modules else: field_errors['course_modules'] = {'error_code': 'invalid_course_module_list'} elif 'course_modules' in request_data: # case if the user actually passed null as input valid_input['course_modules'] = None return valid_input, field_errors def valid_course_modules(course_module_list, master_course_key): """ Function to validate that each element in the course_module_list belongs to the master course structure. Args: course_module_list (list): A list of strings representing Block Usage Keys master_course_key (CourseKey): An object representing the master course key id Returns: bool: whether or not all the course module strings belong to the master course """ course_chapters = get_course_chapters(master_course_key) if course_chapters is None: return False return set(course_module_list).intersection(set(course_chapters)) == set(course_module_list) def make_user_coach(user, master_course_key): """ Makes an user coach on the master course. This functi
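Given a POST body, get_valid_input therefore returns a cleaned dict alongside per-field error codes; for example (illustrative data, assuming is_email accepts the address):

data = {'coach_email': '[email protected]',
        'display_name': 'CCX Demo',
        'max_students_allowed': '25'}
valid_input, field_errors = get_valid_input(data)
# valid_input == {'coach_email': '[email protected]',
#                 'display_name': 'CCX Demo',
#                 'max_students_allowed': 25}
# field_errors == {}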
CybOXProject/python-cybox
cybox/objects/win_service_object.py
Python
bsd-3-clause
2,132
0.002814
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. from mixbox import entities from mixbox import fields import cybox.bindings.win_service_object as win_service_binding from cybox.common import HashList from cybox.objects.win_process_object import WinProcess from
cybox.common import ObjectProperties, String class ServiceDescriptionList(entities.EntityList): _binding = win_service_binding _binding_class = win_service_binding.ServiceDescriptionListType _namespace = "http://
cybox.mitre.org/objects#WinServiceObject-2" description = fields.TypedField("Description", String, multiple=True) class WinService(WinProcess): _binding = win_service_binding _binding_class = win_service_binding.WindowsServiceObjectType _namespace = "http://cybox.mitre.org/objects#WinServiceObject-2" _XSI_NS = "WinServiceObj" _XSI_TYPE = "WindowsServiceObjectType" service_dll_signature_exists = fields.TypedField("service_dll_signature_exists") service_dll_signature_verified = fields.TypedField("service_dll_signature_verified") description_list = fields.TypedField("Description_List", ServiceDescriptionList) display_name = fields.TypedField("Display_Name", String) group_name = fields.TypedField("Group_Name", String) service_name = fields.TypedField("Service_Name", String) service_dll = fields.TypedField("Service_DLL", String) service_dll_certificate_issuer = fields.TypedField("Service_DLL_Certificate_Issuer", String) service_dll_certificate_subject = fields.TypedField("Service_DLL_Certificate_Subject", String) service_dll_hashes = fields.TypedField("Service_DLL_Hashes", HashList) service_dll_signature_description = fields.TypedField("Service_DLL_Signature_Description", String) startup_command_line = fields.TypedField("Startup_Command_Line", String) startup_type = fields.TypedField("Startup_Type", String) service_status = fields.TypedField("Service_Status", String) service_type = fields.TypedField("Service_Type", String) started_as = fields.TypedField("Started_As", String)
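# A minimal usage sketch, not taken from the library's docs: it assumes
# python-cybox is installed and that these TypedFields coerce plain strings,
# which mixbox does for String-typed fields.
from cybox.objects.win_service_object import WinService

svc = WinService()
svc.service_name = "wuauserv"
svc.display_name = "Windows Update"
svc.startup_type = "SERVICE_AUTO_START"
print(svc.to_xml(include_namespaces=False))  # CybOX XML fragment for the service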
winnerineast/Origae-6
origae/download_data/cifar100.py
Python
gpl-3.0
5,218
0.002491
import cPickle import os import tarfile import PIL.Image from downloader import DataDownloader class Cifar100Downloader(DataDownloader): """ See details about the CIFAR100 dataset here: http
://www.cs.toronto.edu/~kriz/cifar.html """ def urlList(self): return [ 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz', ] def uncompressData(self): filename = 'cifar-100-python.tar.gz' filepath = os.path.join(sel
f.outdir, filename) assert os.path.exists(filepath), 'Expected "%s" to exist' % filename if not os.path.exists(os.path.join(self.outdir, 'cifar-100-python')): print "Uncompressing file=%s ..." % filename with tarfile.open(filepath) as tf: tf.extractall(self.outdir) def processData(self): label_filename = 'meta' label_filepath = os.path.join(self.outdir, 'cifar-100-python', label_filename) with open(label_filepath, 'rb') as infile: pickleObj = cPickle.load(infile) fine_label_names = pickleObj['fine_label_names'] coarse_label_names = pickleObj['coarse_label_names'] for level, label_names in [ ('fine', fine_label_names), ('coarse', coarse_label_names), ]: dirname = os.path.join(self.outdir, level) self.mkdir(dirname, clean=True) with open(os.path.join(dirname, 'labels.txt'), 'w') as outfile: for name in label_names: outfile.write('%s\n' % name) for filename, phase in [ ('train', 'train'), ('test', 'test'), ]: filepath = os.path.join(self.outdir, 'cifar-100-python', filename) assert os.path.exists(filepath), 'Expected "%s" to exist' % filename self.__extractData(filepath, phase, fine_label_names, coarse_label_names) def __extractData(self, input_file, phase, fine_label_names, coarse_label_names): """ Read a pickle file at input_file and output as images Arguments: input_file -- a pickle file phase -- train or test fine_label_names -- mapping from fine_labels to strings coarse_label_names -- mapping from coarse_labels to strings """ print 'Extracting images file=%s ...' % input_file # Read the pickle file with open(input_file, 'rb') as infile: pickleObj = cPickle.load(infile) # print 'Batch -', pickleObj['batch_label'] data = pickleObj['data'] assert data.shape[1] == 3072, 'Unexpected data.shape %s' % (data.shape,) count = data.shape[0] fine_labels = pickleObj['fine_labels'] assert len(fine_labels) == count, 'Expected len(fine_labels) to be %d, not %d' % (count, len(fine_labels)) coarse_labels = pickleObj['coarse_labels'] assert len(coarse_labels) == count, 'Expected len(coarse_labels) to be %d, not %d' % ( count, len(coarse_labels)) filenames = pickleObj['filenames'] assert len(filenames) == count, 'Expected len(filenames) to be %d, not %d' % (count, len(filenames)) data = data.reshape((count, 3, 32, 32)) data = data.transpose((0, 2, 3, 1)) fine_to_coarse = {} # mapping of fine labels to coarse labels fine_dirname = os.path.join(self.outdir, 'fine', phase) os.makedirs(fine_dirname) coarse_dirname = os.path.join(self.outdir, 'coarse', phase) os.makedirs(coarse_dirname) with open(os.path.join(self.outdir, 'fine', '%s.txt' % phase), 'w') as fine_textfile, \ open(os.path.join(self.outdir, 'coarse', '%s.txt' % phase), 'w') as coarse_textfile: for index, image in enumerate(data): # Create the directory fine_label = fine_label_names[fine_labels[index]] dirname = os.path.join(fine_dirname, fine_label) self.mkdir(dirname) # Get the filename filename = filenames[index] ext = os.path.splitext(filename)[1][1:].lower() if ext != self.file_extension: filename = '%s.%s' % (os.path.splitext(filename)[0], self.file_extension) filename = os.path.join(dirname, filename) # Save the image PIL.Image.fromarray(image).save(filename) fine_textfile.write('%s %s\n' % (filename, fine_labels[index])) coarse_textfile.write('%s %s\n' % (filename, coarse_labels[index])) if fine_label not in fine_to_coarse: fine_to_coarse[fine_label] = coarse_label_names[coarse_labels[index]] # Create the coarse dataset with symlinks for fine, coarse in fine_to_coarse.iteritems(): self.mkdir(os.path.join(coarse_dirname, coarse)) os.symlink( # 
Create relative symlinks for portability os.path.join('..', '..', '..', 'fine', phase, fine), os.path.join(coarse_dirname, coarse, fine) )
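# Standalone check of the reshape/transpose step used in __extractData(),
# with random bytes standing in for a real CIFAR batch (numpy assumed):
import numpy as np

data = np.random.randint(0, 256, size=(4, 3072), dtype=np.uint8)  # 4 fake rows
images = data.reshape((4, 3, 32, 32)).transpose((0, 2, 3, 1))     # NCHW -> NHWC
print(images.shape)  # (4, 32, 32, 3): ready for PIL.Image.fromarray()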
rich-pixley/zoo-animals
statlog-rollup.py
Python
apache-2.0
3,874
0.004388
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2008 - 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # usage: %prog file [ file [ file [...]]] # This script merges the timing data from several files into a single # aggregate which is sent to stdout. class stamp: def __init__(this, time, weight): this.time = long(time) this.weight = long(weight) def weighted_time(this): return this.time * this.weight def minimum(x, y): if x < y: return x else: return y def maximum(x, y): if x > y: return x else: return y class timing_file: def __init__(this, filename = ''): this.stamps = {} this.filename = '' this.filename = filename if this.filename: f = open(filename, 'r') this.lines = f.readlines() f.close() this.lines = [ line.strip() for line in this.lines ] for line in this.lines: space_sep = line.split() if len(space_sep) != 2: raise Exception('bad timing line in %s: %s' % (this.filename, line)) star_sep = space_sep[0].split('*') if len(star_sep) == 1: weight = 1 else: weight = star_sep[1] this.stamps[space_sep[1]] = stamp(star_sep[0], weight) def write(this): for stamp in this.stamps: print '%d*%d %s' % (this.stamps[stamp].time, this.stamps[stamp].weight, stamp) def merge(this, old): new = timing_file() minmax
= ['maximum', 'minimum'] for s in this.stamps: if s in minmax: continue if s in old.stamps: total_weight = this.stamp
s[s].weight + old.stamps[s].weight
                # weighted average of the two samples, carrying the combined weight
                weighted_average_time = (this.stamps[s].weighted_time()
                                         + old.stamps[s].weighted_time()) / total_weight
                new.stamps[s] = stamp(weighted_average_time, total_weight)
            else:
                # was `this.stamps[stamp]`, which indexed by the stamp class
                new.stamps[s] = this.stamps[s]

        for s in old.stamps:
            if s in minmax:
                continue

            if s not in this.stamps:
                new.stamps[s] = old.stamps[s]

        stamps = [this.stamps[s].time for s in this.stamps] + [old.stamps[s].time for s in old.stamps]

        new.stamps['maximum'] = stamp(reduce(maximum, stamps, 0), 0)

        # compare the recorded time, not the stamp instance itself
        if new.stamps['maximum'].time > 0:
            new.stamps['minimum'] = stamp(reduce(minimum, stamps, new.stamps['maximum'].time), 0)

        return new


def option_parser():
    import optparse

    usage = "Usage: %prog file [ file [ file [...]]]"
    parser = optparse.OptionParser(usage = usage)

    general = optparse.OptionGroup(parser, 'General Options', '')
#     general.add_option('-i', '--input',
#                        type = 'string',
#                        dest = 'infile',
#                        default = '',
#                        help = 'use this as the input file [default: stdin]')
    parser.add_option_group(general)

    return parser

if __name__ == '__main__':
    import optparse

    options, args = option_parser().parse_args()

    sum = timing_file()
    for a in args:
        sum = sum.merge(timing_file(a))

    sum.write()
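# The weighted-average rule in merge(), verified by hand: two runs of 10s
# (weight 2) merged with one run of 40s (weight 1) should average to
# (10*2 + 40*1) / (2+1) = 20.
time_a, weight_a = 10, 2
time_b, weight_b = 40, 1
total_weight = weight_a + weight_b
print((time_a * weight_a + time_b * weight_b) // total_weight)  # 20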
DrChat/thermoctrl
thermoctrl/urls.py
Python
mit
645
0.006202
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views

admin.autodiscover()

import templog.urls
import control.urls
from thermoctrl import views

urlpatterns = [
    # Examples:
    # url(r'^$', 'thermoctrl.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^$', views.index, name='index'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^log/', include(templog.urls), name='log'),
    url(r'^control/', include(control.urls), name='control'),
    # `auth.views` via `from django.contrib import auth` only resolves if the
    # submodule happens to be imported elsewhere; import it explicitly instead
    url(r'^login/', auth_views.login, {"SSL": True, "template_name": "main/login.html"}, name='login'),
]
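# Hypothetical sanity check of the routes above. It needs a configured Django
# settings module; 'index' and 'login' are the names declared in urlpatterns,
# and this project predates django.urls.reverse (Django 2.0).
from django.core.urlresolvers import reverse

print(reverse('index'))  # '/'
print(reverse('login'))  # '/login/'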
tomkralidis/geonode
geonode/monitoring/__init__.py
Python
gpl-3.0
3,733
0.000804
# -*- coding: utf-8 -*- ######################################################################### # # Copyright (C) 2017 OSGeo # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### import logging from django.utils.translation import ugettext_noop as _ from django.conf import settings from
functools import wraps from six import string_types from geonode.notifications_helper import NotificationsAppConfigBase, has_notifications from django.db.models.signals import post_migrate log = logging.getLogger(__name__) def run_setup_hooks(*args, **kwargs): if not has_notifications:
        log.warning("Monitoring requires notifications app to be enabled. "
                    "Otherwise, no notifications will be sent")
    from geonode.monitoring.models import populate
    populate()


class MonitoringAppConfig(NotificationsAppConfigBase):
    name = 'geonode.monitoring'
    NOTIFICATION_NAME = 'monitoring_alert'
    NOTIFICATIONS = ((NOTIFICATION_NAME, _("Monitoring alert"),
                      _("Alert situation reported by monitoring"),
                      ),
                     )

    def ready(self):
        super(MonitoringAppConfig, self).ready()
        post_migrate.connect(run_setup_hooks, sender=self)


default_app_config = 'geonode.monitoring.MonitoringAppConfig'


def register_url_event(event_type=None):
    """
    Decorator on views, which will register a url event

    usage:

    >>> register_url_event()(TemplateView.as_view())

    """
    def _register_url_event(view):
        @wraps(view)
        def inner(*args, **kwargs):
            if settings.MONITORING_ENABLED:
                request = args[0]
                register_event(request, event_type or 'view', request.path)
            return view(*args, **kwargs)
        return inner
    return _register_url_event


def register_event(request, event_type, resource):
    """
    Wrapper function to be used inside views to collect event and resource

    @param request Request object
    @param event_type name of event type
    @param resource string (then resource type will be url) or Resource instance

    >>> from geonode.monitoring import register_event
    >>> def view(request):
            register_event(request, 'view', layer)
    """
    if not settings.MONITORING_ENABLED:
        return
    from geonode.base.models import ResourceBase
    if isinstance(resource, string_types):
        resource_type = 'url'
        resource_name = request.path
        resource_id = None
    elif isinstance(resource, ResourceBase):
        resource_type = resource.__class__._meta.verbose_name_raw
        resource_name = getattr(resource, 'alternate', None) or resource.title
        resource_id = resource.id
    else:
        raise ValueError("Invalid resource: {}".format(resource))
    if request and hasattr(request, 'register_event'):
        request.register_event(event_type, resource_type, resource_name, resource_id)


def register_proxy_event(request):
    """
    Process request to geoserver proxy.
    Extract layer and ows type
    """
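# Framework-free sketch of the decorator pattern register_url_event() uses:
# wrap the view, record the event, then delegate. All names here are made up.
from functools import wraps

def record_view_event(event_type=None):
    def decorator(view):
        @wraps(view)
        def inner(request, *args, **kwargs):
            print('event:', event_type or 'view')  # stand-in for register_event()
            return view(request, *args, **kwargs)
        return inner
    return decorator

@record_view_event('homepage')
def home(request):
    return 'ok'

print(home(object()))  # prints "event: homepage", then "ok"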
SylvainCecchetto/plugin.video.catchuptvandmore
plugin.video.catchuptvandmore/resources/lib/channels/fr/francetv.py
Python
gpl-2.0
14,146
0.001344
# -*- coding: utf-8 -*- """ Catch-up TV & More Original work (C) JUL1EN094, SPM, SylvainCecchetto Copyright (C) 2016 SylvainCecchetto This file is part of Catch-up TV & More. Catch-up TV & More is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Catch-up TV & More is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Catch-up TV & More; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """ # The unicode_literals import only has # an effect on Python 2. # It makes string literals as unicode like in Python 3 from __future__ import unicode_literals from codequick import Route, Resolver, Listitem, utils, Script from resources.lib import web_utils from resources.lib import resolver_proxy from resources.lib.menu_utils import item_post_treatment from resources.lib.addon_utils import get_item_media_path from kodi_six import xbmcplugin import re import json import time import urlquick from six.moves.html_parser import HTMLParser HTML_PARSER = HTMLParser() TAG_RE = re.compile(r'<[^>]+>') try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest """ Channels: * france.tv (https://www.france.tv/) """ URL_API_MOBILE = utils.urljoin_partial("https://api-mobile.yatta.francetv.fr/") URL_API_FRONT = utils.urljoin_partial("http://api-front.yatta.francetv.fr") @Route.register def francetv_root(plugin, **kw
args): # Channels item = Listitem() item.label = Script.localize(30006) item.set_callback(channels) item_post_treatment(item) yield item # Categories item = Listitem() item.label = Script.localize(30725) item.set_callback(categories) item_post_treatment(item) yield item # Search fea
ture item = Listitem.search(search) item_post_treatment(item) yield item @Route.register def channels(plugin, **kwargs): """ List all france.tv channels """ # (item_id, label, thumb, fanart) channels = [ ('channels/france-2', 'France 2', 'france2.png', 'france2_fanart.jpg'), ('channels/france-3', 'France 3', 'france3.png', 'france3_fanart.jpg'), ('channels/france-4', 'France 4', 'france4.png', 'france4_fanart.jpg'), ('channels/france-5', 'France 5', 'france5.png', 'france5_fanart.jpg'), ('channels/france-o', 'France Ô', 'franceo.png', 'franceo_fanart.jpg'), ('regions/outre-mer', 'Outre-mer la 1ère', 'la1ere.png', 'la1ere_fanart.jpg'), ('channels/franceinfo', 'franceinfo:', 'franceinfo.png', 'franceinfo_fanart.jpg'), ('channels/slash', 'France tv Slash', 'slash.png', 'slash_fanart.jpg'), ('channels/enfants', 'Okoo', 'okoo.png', 'okoo_fanart.jpg'), ('channels/spectacles-et-culture', 'Culturebox', 'culturebox.png', 'culturebox_fanart.jpg') ] for channel_infos in channels: item = Listitem() item.label = channel_infos[1] item.art["thumb"] = get_item_media_path('channels/fr/' + channel_infos[2]) item.art["fanart"] = get_item_media_path('channels/fr/' + channel_infos[3]) item.set_callback(channel_homepage, channel_infos[0]) item_post_treatment(item) yield item @Route.register def channel_homepage(plugin, item_id, **kwargs): """ List channel homepage elements (e.g. https://www.france.tv/france-2/) """ r = urlquick.get(URL_API_MOBILE('/apps/%s' % item_id), params={'platform': 'apps'}) j = json.loads(r.text) j = j['collections'] if 'collections' in j else j['items'] for collection in j: item = Listitem() if set_item_callback_based_on_type(item, collection['type'], collection): yield item def set_item_callback_based_on_type(item, type_, j, next_page_item=None): # First try to populate label if 'label' in j: item.label = j['label'] elif 'title' in j: item.label = j['title'] else: item.label = 'No title' if 'description' in j: item.info['plot'] = j['description'] # Second, try to populate images if 'images' in j: populate_images(item, j['images']) # Then, based on type, try to guess the correct callback # This is a new path if type_ == 'program': item.set_callback(grab_json_collections, URL_API_MOBILE('/apps/program/%s' % j['program_path'])) item_post_treatment(item) return True elif type_ == 'sous_categorie': item.set_callback(grab_json_collections, URL_API_MOBILE('/apps/sub-categories/%s' % j['url_complete'])) item_post_treatment(item) return True elif type_ == 'region': item.set_callback(outre_mer_root, j['region_path']) item_post_treatment(item) return True elif type_ == 'categories': item.label = 'Les sous-catégories' item.set_callback(list_generic_items, j['items'], next_page_item) item_post_treatment(item) return True # This is a video elif type_ == 'integrale' or type_ == 'extrait' or type_ == 'unitaire': si_id = populate_video_item(item, j) item.set_callback(get_video_url, broadcast_id=si_id) item_post_treatment(item, is_playable=True, is_downloadable=True) return True elif 'items' in j: item.set_callback(list_generic_items, j['items'], next_page_item) item_post_treatment(item) return True return False def populate_images(item, images): all_images = {} for image in images: if 'type' in image: type_ = image['type'] if type_ == 'carre': all_images['carre'] = image['urls']['w:400'] elif type_ == 'vignette_16x9': all_images['vignette_16x9'] = image['urls']['w:1024'] elif type_ == 'background_16x9': all_images['background_16x9'] = image['urls']['w:2500'] elif type_ == 'vignette_3x4': 
all_images['vignette_3x4'] = image['urls']['w:1024'] if 'vignette_3x4' in all_images: item.art['thumb'] = item.art['landscape'] = all_images['vignette_3x4'] elif 'carre' in all_images: item.art['thumb'] = item.art['landscape'] = all_images['carre'] if 'background_16x9' in all_images: item.art['fanart'] = all_images['background_16x9'] elif 'vignette_16x9' in all_images: item.art['fanart'] = all_images['vignette_16x9'] def populate_video_item(item, video): if 'episode_title' in video: item.label = video['episode_title'] else: item.label = video['title'] description = video['description'] if description: item.info['plot'] = TAG_RE.sub('', HTML_PARSER.unescape(description)) begin_date = time.strftime('%Y-%m-%d', time.localtime(video['begin_date'])) item.info.date(begin_date, "%Y-%m-%d") if 'program' in video and video['program'] is not None and 'label' in video['program']: item.label = video['program']['label'] + ' - ' + item.label type_ = video['type'] if type_ == 'extrait': item.label = '[extrait] ' + item.label # It's too bad item.info['title'] overrules item.label everywhere # so there's no difference between what is shown in the video list # and what is shown in the video details # item.info['title'] = video['title'] item.info['title'] = item.label # id_ = video['id'] rating = video['rating_csa_code'] if rating.isdigit(): rating = "-" + rating item.info['mpaa'] = rating if "text" in video and video['text']: item.info['plot'] = video['text']
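# The tag-stripping trick from populate_video_item(), shown standalone;
# html.unescape is the Python 3 replacement for HTMLParser().unescape.
import re
from html import unescape

TAG_RE = re.compile(r'<[^>]+>')
raw = '<p>Saison 2 &amp; 3</p>'
print(TAG_RE.sub('', unescape(raw)))  # Saison 2 & 3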
johncheetham/jcchess
chess/pgn.py
Python
gpl-3.0
32,586
0.00043
# -*- coding: utf-8 -*- # # This file is part of the python-chess library. # Copyright (C) 2012-2016 Niklas Fiekas <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import chess import itertools import re import logging try: import backport_collections as collections except ImportError: import collections LOGGER = logging.getLogger(__name__) NAG_NULL = 0 NAG_GOOD_MOVE = 1 """A good move. Can also be indicated by ``!`` in PGN notation.""" NAG_MISTAKE = 2 """A mistake. Can also be indicated by ``?`` in PGN notation.""" NAG_BRILLIANT_MOVE = 3 """A brilliant move. Can also be indicated by ``!!`` in PGN notation.""" NAG_BLUNDER = 4 """A blunder. Can also be indicated by ``??`` in PGN notation.""" NAG_SPECULATIVE_MOVE = 5 """A speculative move. Can also be indicated by ``!?`` in PGN notation.""" NAG_DUBIOUS_MOVE = 6 """A dubious move. Can also be indicated by ``?!`` in PGN notation.""" NAG_FORCED_MOVE = 7 NAG_SINGULAR_MOVE = 8 NAG_WORST_MOVE = 9 NAG_DRAWISH_POSITION = 10 NAG_QUIET_POSITION = 11 NAG_ACTIVE_POSITION = 12 NAG_UNCLEAR_POSITION = 13 NAG_WHITE_SLIGHT_ADVANTAGE = 14 NAG_BLACK_SLIGHT_ADVANTAGE = 15 # TODO: Add more constants for example from # https://en.wikipedia.org/wiki/Numeric_Annotation_Glyphs NAG_WHITE_MODERATE_COUNTERPLAY = 132 NAG_BLACK_MODERATE_COUNTERPLAY = 133 NAG_WHITE_DECISIVE_COUNTERPLAY = 134 NAG_BLACK_DECISIVE_COUNTERPLAY = 135 NAG_WHITE_MODERATE_TIME_PRESSURE = 136 NAG_BLACK_MODERATE_TIME_PRESSURE = 137 NAG_WHITE_SEVERE_TIME_PRESSURE = 138 NAG_BLACK_SEVERE_TIME_PRESSURE = 139 TAG_REGEX = re.compile(r"^\[([A-Za-z0-9_]+)\s+\"(.*)\"\]\s*$") MOVETEXT_REGEX = re.compile(r""" (%.*?[\n\r]) |(\{.*) |(\$[0-9]+) |(\() |(\)) |(\*|1-0|0-1|1/2-1/2) |( [NBKRQ]?[a-h]?[1-8]?[\-x]?[a-h][1-8](?:=?[nbrqkNBRQK])? |[PNBRQK]?@[a-h][1-8] |-- |O-O(?:-O)? |0-0(?:-0)? ) |([\?!]{1,2}) """, re.DOTALL | re.VERBOSE) class GameNode(object): def __init__(self): self.parent = None self.move = None self.nags = set() self.starting_comment = "" self.comment = "" self.variations = [] self.board_cached = None def board(self, _cache=True): """ Gets a board with the position of the node. It's a copy, so modifying the board will not alter the game. """ if self.board_cached: return self.board_cached.copy() board = self.parent.board(_cache=False) board.push(self.move) if _cache: self.board_cached = board return board.copy() else: return board def san(self): """ Gets the standard algebraic notation of the move leading to this node. Do not call this on the root node. """ return self.parent.board().san(self.move) def root(self): """Gets the root node, i.e. 
the game.""" node = self while node.parent: node = node.parent return node def end(self): """Follows the main variation to the end and returns the last node.""" node = self while node.variations: node = node.variations[0] return node def is_end(self): """Checks if this node is the last node in the current variation.""" return not self.variations def starts_variation(self): """ Checks if this node starts a variation (and can thus have a starting comment). The root node does not start a variation and can have no starting comment. """ if not self.parent or not self.parent.variations: return False return self.parent.variations[0] != self def is_main_line(self): """Checks if the node is in the main line of the game.""" node = self while node.parent: parent = node.parent if not parent.variations or parent.variations[0] != node: return False node = parent return True def is_main_variation(self): """ Checks if this node is the first variation from the point of view of its parent. The root node also is in the main variation. """ if not self.parent: return True return not self.parent.variations or self.parent.variations[0] == self def variation(self, move): """ Gets a child node by move or index. """ for index, variation in enumerate(self.variations): if move == variation.move or index == move or move == variation: return variation raise KeyError("variation not found") def has_variation(self, move): """Checks if the given move appears as a variation.""" return move in (variation.move for variation in self.variations) def promote_to_main(self, move): """Promotes the given move to the main variation.""" variation = self.variation(move) self.variations.remove(variation) self.variations.insert(0, variation) def promote(self, move): """Moves the given variation one up in the list of variations.""" variation = self.variation(move) i = self.variations.index(variation) if i > 0: self.variations[i - 1], self.variations[i] = self.variations[i], self.variations[i - 1] def demote(self, move): """Moves the given variation one down in the list of variations.""" variation = self.variation(move) i = self.variations.index(variation) if i < len(self.variations) - 1: self.variations[i + 1], sel
f.variations[i] = self.variations[i], self.variations[i + 1] def remove_variation(self, move): """Removes a variation by move.""" self.variations.remove(self.variation(move)) def add_variation(self, move, comment="",
starting_comment="", nags=()): """Creates a child node with the given attributes.""" node = GameNode() node.move = move node.nags = set(nags) node.parent = self node.comment = comment node.starting_comment = starting_comment self.variations.append(node) return node def add_main_variation(self, move, comment=""): """ Creates a child node with the given attributes and promotes it to the main variation. """ node = self.add_variation(move, comment=comment) self.variations.remove(node) self.variations.insert(0, node) return node def main_line(self): """Yields the moves of the main line starting in this node.""" node = self while node.variations: node = node.variations[0] yield node.move def add_line(self, moves, comment="", starting_comment="", nags=()): """ Creates a sequence of child nodes for the given list of moves. Adds *comment* and *nags* to the last node of the line and returns it. """ node = self # Add line. for move in moves: node = node.add_variation(move, starting_comment=starting_comment) starting_comment = "" # Merge comment and NAGs. if node.comment: node.comment += " " + comment else: node.comment = comment node.nags.update(nags) return node def accept(self, visitor, _board=None): """ Traverse game nodes in PG
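# Quick sanity check of the variation helpers above, assuming the published
# python-chess package is installed (this module is vendored from it).
import chess
import chess.pgn

game = chess.pgn.Game()
game.add_variation(chess.Move.from_uci("e2e4"))
game.add_variation(chess.Move.from_uci("d2d4"))
game.promote_to_main(chess.Move.from_uci("d2d4"))
print(game.variations[0].move)  # d2d4 is now the main line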
EmreAtes/spack
var/spack/repos/builtin/packages/astyle/package.py
Python
lgpl-2.1
2,548
0.000392
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Liv
ermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *

import sys


class Astyle(MakefilePackage):
    """A Free, Fast, and Small Automatic Formatter for C, C++, C++/CLI,
    Objective-C, C#, and Java Source Code.
    """

    homepage = "http://astyle.sourceforge.net/"
    url = "https://sourceforge.net/projects/astyle/files/astyle/astyle%203.0.1/astyle_3.0.1_linux.tar.gz"
    # Gentoo alternative
    # url = "http://distfiles.gentoo.org/distfiles/astyle_3.0.1_linux.tar.gz"

    maintainers = ['davydden']

    version('3.0.1', 'c301f09679efa2e1eb6e6b5fd33788b4')
    version('2.06', 'ff588e7fcede824591cf5b9085df109d')
    version('2.05.1', '4142d178047d7040da3e0e2f1b030a1a')
    version('2.04', '30b1193a758b0909d06e7ee8dd9627f6')

    parallel = False

    @property
    def build_directory(self):
        return join_path(self.stage.source_path, 'build', self.compiler.name)

    def edit(self, spec, prefix):
        makefile = join_path(self.build_directory, 'Makefile')
        filter_file(r'^CXX\s*=.*', 'CXX=%s' % spack_cxx, makefile)
        # strangely enough, `install -o $(USER) -g $(USER)` stopped working on OSX
        if sys.platform == 'darwin':
            filter_file(r'^INSTALL=.*', 'INSTALL=install', makefile)

    @property
    def install_targets(self):
        return ['install', 'prefix={0}'.format(self.prefix)]
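# What the edit() step does, reproduced with plain re.sub instead of Spack's
# filter_file (illustrative; the compiler path here is made up).
import re

makefile = "CXX = g++\nINSTALL=install -o $(USER) -g $(USER)\n"
makefile = re.sub(r'^CXX\s*=.*', 'CXX=/usr/bin/clang++', makefile, flags=re.M)
print(makefile.splitlines()[0])  # CXX=/usr/bin/clang++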
kinow-io/kinow-python-sdk
test/test_subtitle_file.py
Python
apache-2.0
747
0.001339
# coding: utf-8 """ Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.6 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absol
ute_import import os import sys import unittest import kinow_client from
kinow_client.rest import ApiException from kinow_client.models.subtitle_file import SubtitleFile class TestSubtitleFile(unittest.TestCase): """ SubtitleFile unit test stubs """ def setUp(self): pass def tearDown(self): pass def testSubtitleFile(self): """ Test SubtitleFile """ model = kinow_client.models.subtitle_file.SubtitleFile() if __name__ == '__main__': unittest.main()
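# The generated stub above only instantiates the model; a hypothetical next
# step is inspecting its generated attributes. to_dict() is the conventional
# accessor on swagger-codegen Python models.
import kinow_client

model = kinow_client.models.subtitle_file.SubtitleFile()
print(model.to_dict())  # empty/default field mapping for a fresh instance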
MichaelCurrin/twitterverse
app/lib/trends.py
Python
mit
3,038
0
""" Trends library module. """ import datetime from lib import database as db from lib.twitter_api import authentication # Global object to be used as api connection. During execution of the insert # function, this can be setup once with default app then reused later, # to avoid time calling Twitter API. It can be left as null if not needed. appApi = None def insertTrendsForWoeid(woeid, userApi=None, delete=False, verbose=True): """ Retrieve Trend data from the Twitter API for a place and insert into the database. Expects a WOEID value for a Place, gets up to 50 trend records for the Place as limited by the API and stores each of the values in the Trend table. From the API request response, we ignore the location field (which we know already) and the time field (since we just use current time as close enough). For printing of the added trend, it works normally to print the string as '...{}'.format, even if the value is 'Jonathan Garc\xeda'. This was tested in the bash console of Python Anywhere. However, when running as a cronjob and outputting to log file, it appears to be converted to ASCII and throws an error. Therefore encoding to ASCII and replacing the character is done, even though it less readable. :param woeid: Integer for WOEID value of a Place. :param userApi: tweepy API connection object. Set this with a user-authorised connection to skip the default behaviour of generating and using an app-authorised connection. :param delete: Boolean, default False. If set to True, delete item after it is inserted into db. This is useful for testing. :param verbose: Print details for each trend added. """ global appApi now = datetime.datetime.now() print(f"{now.strftime('%x %X')} Inserting trend data for WOEID {woeid}") assert isin
stance( woeid, int
), f"Expected WOEID as type `int` but got type `{type(woeid).__name__}`." if userApi: # Use user token. api = userApi else: # Use app token. if not appApi: # Set it if necessary and then reuse it next time. appApi = authentication.getAPIConnection() api = appApi response = api.trends_place(woeid)[0] trends = response["trends"] for x in trends: topic = x["name"] volume = x["tweet_volume"] t = db.Trend(topic=topic, volume=volume).setPlace(woeid) if verbose: print( "Added trend: {tweetID:4d} | {topic:25} - {volume:7,d} K |" " {woeid:10} - {place}.".format( tweetID=t.id, topic=t.topic, volume=(t.volume // 1000 if t.volume else 0), woeid=t.place.woeid, place=t.place.name, ) ) if delete: db.Trend.delete(t.id) if verbose: print(" - removed from db.") return len(trends)
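# Hypothetical call, assuming Twitter API credentials and the project's
# database are configured; WOEID 1 is Twitter's "Worldwide" place.
from lib import trends

count = trends.insertTrendsForWoeid(1, delete=True, verbose=False)
print(f"Stored (and cleaned up) {count} trends")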