| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
vishnevskiy/battlenet | tests/test_regions.py | Python | mit | 1,132 | 0.001767
import os
import battlenet
try:
import unittest2 as unittest
except ImportError:
import unittest as unittest
PUBLIC_KEY = os.environ.get('BNET_PUBLIC_KEY')
PRIVATE_KEY = os.environ.get('BNET_PRIVATE_KEY')
class RegionsTest(unittest.TestCase):
def setUp(self):
self.connection = battlenet.Connection(public_key=PUBLIC_KEY, private_key=PRIVATE_KEY)
def test_us(self):
realms = self.connection.get_all_realms(battlenet.UNITED_STATES)
self.assertTrue(len(realms) > 0)
def test_eu(self):
realms = self.connection.get_all_realms(battlenet.EUROPE)
self.assertTrue(len(realms) > 0)
def test_kr(self):
realms = self.connection.get_all_realms(battlenet.KOREA)
self.assertTrue(len(realms) > 0)
def test_tw(self):
realms = self.connection.get_all_realms(battlenet.TAIWAN)
self.assertTrue(len(realms) > 0)
def test_cn(self):
realms = self.connection.get_all_realms(battlenet.CHINA)
self.assertTrue(len(realms) > 0)
def tearDown(self):
del self.connection
if __name__ == '__main__':
unittest.main()
whymirror/unholy | decompyle/decompyle/dis_23.py | Python | mit | 6,551 | 0.005343
"""Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode_25 import *
from opcode_25 import __all__ as _opcodes_all
__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all
del _opcodes_all
def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if type(x) is types.InstanceType:
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if type(x1) in (types.MethodType,
types.FunctionType,
types.CodeType,
types.ClassType):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError, "no last traceback to disassemble"
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
"""Disassemble a code object."""
code = co.co_code
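# co_lnotab packs (bytecode_increment, line_increment) pairs as alternating
# single bytes, so the even offsets below are bytecode deltas and the odd
# offsets are line-number deltas relative to co_firstlineno.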
byte_increments = [ord(c) for c in co.co_lnotab[0::2]]
line_increments = [ord(c) for c in co.co_lnotab[1::2]]
table_length = len(byte_increments) # == len(line_increments)
lineno = co.co_firstlineno
table_index = 0
while (table_index < table_length
and byte_increments[table_index] == 0):
lineno += line_increments[table_index]
table_index += 1
addr = 0
line_incr = 0
labels = findlabels(code)
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
c = code[i]
op = ord(c)
if i >= addr:
lineno += line_incr
while table_index < table_length:
addr += byte_increments[table_index]
line_incr = line_increments[table_index]
table_index += 1
if line_incr:
break
else:
addr = sys.maxint
if i > 0:
print
print "%3d"%lineno,
else:
print ' ',
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print `i`.rjust(4),
print opname[op].ljust(20),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536L
print `oparg`.rjust(5),
if op in hasconst:
print '(' + `co.co_consts[oparg]` + ')',
elif op in hasname:
print '(' + co.co_names[oparg] + ')',
elif op in hasjrel:
print '(to ' + `i + oparg` + ')',
elif op in haslocal:
print '(' + co.co_varnames[oparg] + ')',
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print '(' + free[oparg] + ')',
print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
constants=None):
labels = findlabels(code)
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if op == opmap['SET_LINENO'] and i > 0:
print # Extra blank line
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print `i`.rjust(4),
print opname[op].ljust(15),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
print `oparg`.rjust(5),
if op in hasconst:
if constants:
print '(' + `constants[oparg]` + ')',
else:
print '(%d)'%oparg,
elif op in hasname:
if names is not None:
print '(' + names[oparg] + ')',
else:
print '(%d)'%oparg,
elif op in hasjrel:
print '(to ' + `i + oparg` + ')',
elif op in haslocal:
if varnames:
print '(' + varnames[oparg] + ')',
else:
print '(%d)' % oparg,
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
print
disco = disassemble # XXX For backwards compatibility
def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
label = -1
if op in hasjrel:
label = i+oparg
elif op in hasjabs:
label = oparg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
def _test():
"""Simple test program to disassemble a file."""
if sys.argv[1:]:
if sys.argv[2:]:
sys.stderr.write("usage: python dis.py [-|file]\n")
sys.exit(2)
fn = sys.argv[1]
if not fn or fn == "-":
fn = None
else:
fn = None
if fn is None:
f = sys.stdin
else:
f = open(fn)
source = f.read()
if fn is not None:
f.close()
else:
fn = "<stdin>"
code = compile(source, fn, "exec")
dis(code)
if __name__ == "__main__":
_test()
davislidaqing/Mcoderadius | toughradius/console/libs/pyforms/__init__.py | Python | agpl-3.0 | 12,706 | 0.009762
#coding:utf-8
import sys,os
import copy
import re
import itertools
import net
"""basic from web.py: makes web apps (http://webpy.org)"""
__version__ = "0.37"
__author__ = [
"Aaron Swartz <[email protected]>",
"Anand Chitipothu <[email protected]>"
]
__license__ = "public domain"
__contributors__ = "see https://github.com/webpy/webpy"
class Storage(dict):
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def attrget(obj, attr, value=None):
try:
if hasattr(obj, 'has_key') and obj.has_key(attr):
return obj[attr]
except TypeError:
# Handle the case where has_key takes different number of arguments.
# This is the case with Model objects on appengine. See #134
pass
if hasattr(obj, attr):
return getattr(obj, attr)
return value
def safeunicode(obj, encoding='utf-8'):
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def autoassign(self, locals):
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
class Form(object):
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.valid = True
self.note = None
self.validators = kw.pop('validators', [])
self.action = kw.pop("action","")
self.title = kw.pop("title","")
self.method = kw.pop("method","post")
self.onsubmit = kw.pop("onsubmit","")
self.error = None
def __call__(self, x=None):
o = copy.deepcopy(self)
if x: o.validates(x)
return o
def render(self):
out = ''
out += self.rendernote(self.note)
out += '<table class="formtab table table-bordered">\n'
out += '<thead ><tr class=active><th>%s</th><th class=rtd><a class="btn"\
href="javascript:history.go(-1);">%s</a></th></tr></thead>\n'%(self.title, net.websafe("返回"))
for i in self.inputs:
html = safeunicode(i.pre) + i.render() + self.rendernote(i.note) + safeunicode(i.post)
if i.is_hidden():
out += ' <tr style="display: none;"><td></td><td>%s</td></tr>\n' % (html)
else:
out += ' <tr><td>%s</td><td>%s</td></tr>\n' % ( net.websafe(i.description),html)
if self.error:
out += ' <tr><td colspan=2>%s</td></tr>\n' % ( self.rendernote(self.error))
out += "</table>"
return out
def render_css(self):
out = []
out.append(self.rendernote(self.note))
for i in self.inputs:
out.append(' <div class="form-group">\n')
if not i.is_hidden():
out.append(' <label class="col-sm-4 control-label" id="lab_%s" for="%s">%s</label>\n' % (i.id,i.id, net.websafe(i.description)))
out.append(i.pre)
out.append(' <div class="col-sm-6">\n')
out.append(" %s\n"%i.render())
out.append(' </div>\n')
if i.help:
out.append(' <a id="%s_help" href="javascript:void(0);" data-container="body" data-toggle="popover" data-trigger="focus" data-placement="top" data-content="%s">\n'%(i.id,i.help))
out.append(' <span class="input-help glyphicon glyphicon-question-sign"></span></a>\n')
out.append(self.rendernote(i.note))
out.append(i.post)
out.append(' </div>\n')
if i.hr:
out.append("<hr/>\n")
return ''.join(out)
def rendernote(self, note):
if note: return '<span class="wrong">%s</span>' % net.websafe(note)
else: return ""
def validates(self, source=None, _validate=True, **kw):
source = source or kw
out = True
for i in self.inputs:
v = attrget(source, i.name)
if _validate:
out = i.validate(v) and out
else:
i.set_value(v)
if _validate:
out = out and self._validate(source)
self.valid = out
return out
def _validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def fill(self, source=None, **kw):
return self.validates(source, _validate=False, **kw)
def __getitem__(self, i):
for x in self.inputs:
if x.name == i: return x
raise KeyError, i
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name: return x
raise AttributeError, name
def get(self, i, default=None):
try:
return self[i]
except KeyError:
return default
def _get_d(self): #@@ should really be form.attr, no?
return storage([(i.name, i.get_value()) for i in self.inputs])
d = property(_get_d)
class Input(object):
def __init__(self, name, *validators, **attrs):
self.name = name
self.validators = validators
self.attrs = attrs = AttributeList(attrs)
self.description = attrs.pop('description', name)
self.help = attrs.pop("help","")
self.value = attrs.pop('value', None)
self.pre = attrs.pop('pre', "")
self.post = attrs.pop('post', "")
self.hr = attrs.pop('hr',False)
self.note = None
self.id = attrs.setdefault('id', self.get_default_id())
if 'class_' in attrs:
attrs['class'] = attrs['class_']
del attrs['class_']
attrs['placeholder'] = self.description
for vd in self.validators:
attrs['placeholder'] += ", "+vd.msg
def is_hidden(self):
return False
def get_type(self):
raise NotImplementedError
def get_default_id(self):
return self.name
def validate(self, value):
self.set_value(value)
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def render(self):
attrs = self.attrs.copy()
attrs['type'] = self.get_type()
if self.value is not None:
attrs['value'] = self.value
attrs['name'] = self.name
return '<input %s/>' % attrs
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def addatts(self):
# add leading space for backward-compatibility
return " " + str(self.attrs)
class AttributeList(dict):
def copy(self):
return AttributeList(self)
def __str__(self):
return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in self.items()])
def __repr__(self):
return '<attrs: %s>' % repr(str(self))
class Textbox(Input):
def ge
ooici/coi-services | ion/agents/platform/platform_agent_stream_publisher.py | Python | bsd-2-clause | 10,637 | 0.003572
#!/usr/bin/env python
"""
@package ion.agents.platform.platform_agent_stream_publisher
@file ion/agents/platform/platform_agent_stream_publisher.py
@author Carlos Rueda
@brief Stream publishing support for platform agents.
"""
__author__ = 'Carlos Rueda'
import logging
import uuid
from coverage_model.parameter import ParameterDictionary
import numpy
from pyon.public import log
from pyon.core.bootstrap import get_obj_registry
from pyon.core.object import IonObjectDeserializer
from pyon.ion.stream import StreamPublisher
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from interface.objects import StreamRoute
class PlatformAgentStreamPublisher(object):
"""
Stream publishing support for platform agents.
"""
def __init__(self, agent):
self._agent = agent
self._platform_id = agent._platform_id
self.resource_id = agent.resource_id
self._pp = agent._pp
self.CFG = agent.CFG
# Dictionaries used for data publishing.
self._data_streams = {}
self._param_dicts = {}
self._stream_defs = {}
self._data_publishers = {}
self._connection_ID = None
self._connection_index = {}
# Set of parameter names received in event notification but not
# configured. Allows to log corresponding warning only once.
self._unconfigured_params = set()
stream_info = self.CFG.get('stream_config', None)
if stream_info is None:
# should not happen: PlatformAgent._validate_configuration validates this.
log.error("%r: No stream_config given in CFG", self._platform_id)
return
for stream_name, stream_config in stream_info.iteritems():
self._construct_stream_and_publisher(stream_name, stream_config)
log.debug("%r: PlatformAgentStreamPublisher complete", self._platform_id)
def _construct_stream_and_publisher(self, stream_name, stream_config):
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r: _construct_stream_and_publisher: "
"stream_name:%r, stream_config:\n%s",
self._platform_id, stream_name,
self._pp.pformat(stream_config))
decoder = IonObjectDeserializer(obj_registry=get_obj_registry())
if 'stream_def_dict' not in stream_config:
# should not happen: PlatformAgent._validate_configuration validates this.
log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
return
stream_def_dict = stream_config['stream_def_dict']
stream_def_dict['type_'] = 'StreamDefinition'
stream_def_obj = decoder.deserialize(stream_def_dict)
self._stream_defs[stream_name] = stream_def_obj
routing_key = stream_config['routing_key']
stream_id = stream_config['stream_id']
exchange_point = stream_config['exchange_point']
parameter_dictionary = stream_def_dict['parameter_dictionary']
log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)
self._data_streams[stream_name] = stream_id
self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
publisher = self._create_publisher(stream_id, stream_route)
self._data_publishers[stream_name] = publisher
log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)
def _create_publisher(self, stream_id, stream_route):
publisher = StreamPublisher(process=self._agent,
stream_id=stream_id,
stream_route=stream_route)
return publisher
def reset_connection(self):
self._connection_ID = uuid.uuid4()
self._connection_index = {stream_name : 0 for
stream_name in self._data_streams.keys()}
log.debug("%r: reset_connection: connection_id=%s, connection_index=%s",
self._platform_id, self._connection_ID.hex, self._connection_index)
def handle_attribute_value_event(self, driver_event):
if log.isEnabledFor(logging.TRACE): # pragma: no cover
# show driver_event as retrieved (driver_event.vals_dict might be large)
log.trace("%r: driver_event = %s", self._platform_id, driver_event)
log.trace("%r: vals_dict:\n%s",
self._platform_id, self._pp.pformat(driver_event.vals_dict))
elif log.isEnabledFor(logging.DEBUG): # pragma: no cover
log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())
stream_name = driver_event.stream_name
publisher = self._data_publishers.get(stream_name, None)
if not publisher:
log.warn('%r: no publisher configured for stream_name=%r. '
'Configured streams are: %s',
self._platform_id, stream_name, self._data_publishers.keys())
return
param_dict = self._param_dicts[stream_name]
stream_def = self._stream_defs[stream_name]
if isinstance(stream_def, str):
rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
stream_definition_id=stream_def)
else:
rdt = RecordDictionaryTool(stream_definition=stream_def)
self._publish_granule_with_multiple_params(publisher, driver_event,
param_dict, rdt)
def _publish_granule_with_multiple_params(self, publisher, driver_event,
param_dict, rdt):
stream_name = driver_event.stream_name
pub_params = {}
selected_timestamps = None
for param_name, param_value in driver_event.vals_dict.iteritems():
param_name = param_name.lower()
if not param_name in rdt:
if param_name not in self._unconfigured_params:
# an unrecognized attribute for this platform:
self._unconfigured_params.add(param_name)
log.warn('%r: got attribute value event for unconfigured parameter %r in stream %r'
' rdt.keys=%s',
self._platform_id, param_name, stream_name, list(rdt.iterkeys()))
continue
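# param_value is expected to be a sequence of (value, timestamp) pairs,
# so the zip(*param_value) below transposes it into two parallel tuples.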
# separate values and timestamps:
vals, timestamps = zip(*param_value)
self._agent._dispatch_value_alerts(stream_name, param_name, vals)
# Use fill_value in context to replace any None values:
param_ctx = param_dict.get_context(param_name)
if param_ctx:
fill_value = param_ctx.fill_value
log.debug("%r: param_name=%r fill_value=%s",
self._platform_id, param_name, fill_value)
# do the replacement:
vals = [fill_value if val is None else val for val in vals]
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r: vals array after replacing None with fill_value:\n%s",
self._platform_id, self._pp.pformat(vals))
else:
log.warn("%r: unexpected: parameter context not found for %r",
self._platform_id, param_name)
# Set values in rdt:
rdt[param_name] = numpy.array(vals)
pub_params[param_name] = vals
selected_timestamps = timestamps
if selected_timestamps is None:
# that is, all param_name's were unrecognized; just return:
return
self._publish_granule(stream_name, publisher, param_dict, rdt,
pub_params, selected_timestamps)
def _publish_granule(self, stream_name, publisher, param_dict, rdt,
skosukhin/spack | lib/spack/spack/cmd/__init__.py | Python | lgpl-2.1 | 9,998 | 0.0004
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import os
import re
import sys
import llnl.util.tty as tty
from llnl.util.lang import attr_setdefault, index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from llnl.util.filesystem import working_dir
import spack
import spack.config
import spack.spec
import spack.store
#
# Settings for commands that modify configuration
#
# Commands that modify configuration by default modify the *highest*
# priority scope.
default_modify_scope = spack.config.highest_precedence_scope().name
# Commands that list configuration list *all* scopes by default.
default_list_scope = None
# cmd has a submodule called "list" so preserve the python list module
python_list = list
# Patterns to ignore in the commands directory when looking for commands.
ignore_files = r'^\.|^__init__.py$|^#'
SETUP_PARSER = "setup_parser"
DESCRIPTION = "description"
command_path = os.path.join(spack.lib_path, "spack", "cmd")
commands = []
for file in os.listdir(command_path):
if file.endswith(".py") and not re.search(ignore_files, file):
cmd = re.sub(r'.py$', '', file)
commands.append(cmd)
commands.sort()
def remove_options(parser, *options):
"""Remove some options from a parser."""
for option in options:
for action in parser._actions:
if vars(action)['option_strings'][0] == option:
parser._handle_conflict_resolve(None, [(option, action)])
break
def get_python_name(name):
"""Commands can have '-' in their names, unlike Python identifiers."""
return name.replace("-", "_")
def get_module(name):
"""Imports the module for a particular command name and returns it."""
module_name = "%s.%s" % (__name__, name)
module = __import__(module_name,
fromlist=[name, SETUP_PARSER, DESCRIPTION],
level=0)
attr_setdefault(module, SETUP_PARSER, lambda *args: None) # null-op
attr_setdefault(module, DESCRIPTION, "")
fn_name = get_python_name(name)
if not hasattr(module, fn_name):
tty.die("Command module %s (%s) must define function '%s'." %
(module.__name__, module.__file__, fn_name))
return module
def get_command(name):
"""Imports the command's function from a module and returns it."""
python_name = get_python_name(name)
return getattr(get_module(python_name), python_name)
def parse_specs(args, **kwargs):
"""Convenience function for parsing arguments from specs. Handles common
exceptions and dies if there are errors.
"""
concretize = kwargs.get('concretize', False)
normalize = kwargs.get('normalize', False)
try:
specs = spack.spec.parse(args)
for spec in specs:
if concretize:
spec.concretize() # implies normalize
elif normalize:
spec.normalize()
return specs
except spack.parse.ParseError as e:
tty.error(e.message, e.string, e.pos * " " + "^")
sys.exit(1)
except spack.spec.SpecError as e:
tty.error(e.message)
sys.exit(1)
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::
elide_list([1,2,3,4,5,6], 4)
gives::
[1, 2, 3, '...', 6]
"""
if len(line_list) > max_num:
return line_list[:max_num - 1] + ['...'] + line_list[-1:]
else:
return line_list
def disambiguate_spec(spec):
matching_specs = spack.store.db.query(spec)
if not matching_specs:
tty.die("Spec '%s' matches no installed packages." % spec)
elif len(matching_specs) > 1:
args = ["%s matches multiple packages." % spec,
"Matching packages:"]
args += [colorize(" @K{%s} " % s.dag_hash(7)) +
s.cformat('$_$@$%@$=') for s in matching_specs]
args += ["Use a more specific spec."]
tty.die(*args)
return matching_specs[0]
def gray_hash(spec, length):
return colorize('@K{%s}' % spec.dag_hash(length))
def display_specs(specs, args=None, **kwargs):
"""Display human readable specs with customizable formatting.
Prints the supplied specs to the screen, formatted according to the
arguments provided.
Specs are grouped by architecture and compiler, and columnized if
possible. There are three possible "modes":
* ``short`` (default): short specs with name and version, columnized
* ``paths``: Two columns: one for specs, one for paths
* ``deps``: Dependency-tree style, like ``spack spec``; can get long
Options can add more information to the default display. Options can
be provided either as keyword arguments or as an argparse namespace.
Keyword arguments take precedence over settings in the argparse
namespace.
Args:
specs (list of spack.spec.Spec): the specs to display
args (optional argparse.Namespace): namespace containing
formatting arguments
Keyword Args:
mode (str): Either 'short', 'paths', or 'deps'
long (bool): Display short hashes with specs
very_long (bool): Display full hashes with specs (supersedes ``long``)
namespace (bool): Print namespaces along with names
show_flags (bool): Show compiler flags with specs
variants (bool): Show variants with specs
"""
def get_arg(name, default=None):
"""Prefer kwargs, then args, then default."""
if name in kwargs:
return kwargs.get(name)
elif args is not None:
return getattr(args, name, default)
else:
return default
mode = get_arg('mode', 'short')
hashes = get_arg('long', False)
namespace = get_arg('namespace', False)
flags = get_arg('show_flags', False)
full_compiler = get_arg('show_full_compiler', False)
variants = get_arg('variants', False)
hlen = 7
if get_arg('very_long', False):
hashes = True
hlen = None
nfmt = '.' if namespace else '_'
ffmt = ''
if full_compiler or flags:
ffmt += '$%'
if full_compiler:
ffmt += '@'
ffmt += '+'
vfmt = '$+' if variants else ''
format_string = '$%s$@%s%s' % (nfmt, ffmt, vfmt)
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
# Traverse the index and print out each package
for i, (architecture, compiler) in enumerate(sorted(index)):
if i > 0:
print()
header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
architecture, spack.spec.compiler_color,
compiler)
tty.hline(colorize(header), char='-')
specs = index
zhangyage/Python-oldboy | day07/ssh_client2.py | Python | apache-2.0 | 880 | 0.025074
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Use the paramiko module to manage a remote server.
Log in with a key.
'''
import paramiko
private_key_path = 'D:\workspace\Python-oldboy\day07\zhangyage_pass'
#key = paramiko.RSAKey.from_private_key_file(filename, password)
key = paramiko.RSAKey.from_private_key_file(private_key_path, '12345678') # private_key_path is the location of the key file; '12345678' is the key passphrase
ssh = paramiko.SSHClient() # instantiate a client
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # automatically answer "yes"; on a first connection an ssh client normally asks us to confirm with yes
ssh.connect('192.168.75.133', 22, username='root', pkey=key)
stdin,stdout,stderr = ssh.exec_command('ifconfig') # capture the three output streams; the call returns a tuple that is unpacked into the three variables
print stdout.read()
ssh.close()
HaBaLeS/digital-mess-cleaner | filesorter.py | Python | mit | 4,411 | 0.009522
|
import exifread
import os
import shutil
import dmcutils
from dmcutils import mylog
report = {}
trashFiles = [
'.DS_Store',
'Thumbs.db',
'.picasa.ini'
]
def processVideo(file):
vp = os.path.join(args.targetFolder,'videos')
if not os.path.exists(vp):
os.mkdir(vp)
outPath = os.path.join(vp,os.path.split(file)[1])
#check filesize ... if same then SHA -- just to save a little bit of time
while os.path.exists(outPath) :
print outPath + " exists, generating new name"
outPath = os.path.join(vp,"dif_" + os.path.split(file)[1])
move(file, outPath);
def processFile(file):
if not os.path.isfile(file):
mylog("File %s does not exist." % file)
return
if str(file).lower().endswith(('.jpg', '.jpeg')):
processImage(file)
report["processImageCount"] += 1
elif str(file).lower().endswith(('.mp4', '.mov', '.avi')):
processVideo(file)
pass
elif any(bf.lower() in str(file).lower() for bf in trashFiles):
mylog("Deleting %s because defindes as Trash" % file)
os.remove(file)
pass
else:
mylog("Unhandled %s " % file)
def scanAndProcessFolders(inputDir):
mylog("Starting in " + inputDir)
fileList = []
for root, dirs, files in os.walk(inputDir):
for file in files:
candidate = os.path.join(root, file)
fileList.append(candidate);
for candidate in fileList:
processImage(candidate);
def processImage(img):
with open(img, "rb") as f:
tags = exifread.process_file(f, stop_tag='EXIF DateTimeOriginal')
datestr = "0"
if "EXIF DateTimeOriginal" in tags:
datestr = str(tags["EXIF DateTimeOriginal"])
elif "Image DateTime" in tags:
datestr = str(tags["Image DateTime"])
if not datestr == "0" and not datestr == " ":
moveImage(img, datestr)
else:
report["processNoExif"] = report["processNoExif"] +1
if(args.requireExif):
mylog("Skip %s due missing EXIF Date" % img)
return
mylog("%s - No EXIFDate Found" % img)
ndd = os.path.join(args.targetFolder,"nodate") #maybe old directory structure could be preserved
if(not os.path.exists(ndd)):
os.mkdir(ndd)
move(img,os.path.join(ndd,os.path.split(img)[1]))
def moveImage(image,datestr):
dateList = datestr.split(':')
year, month = createDirsIfNotExist(dateList)
filename = os.path.split(image)[1]
newPath = os.path.join(args.targetFolder, year,month,filename)
if(os.path.exists(newPath)):
if(not checkForDublette(image,newPath)):
newPath = os.path.join(args.targetFolder, year, month, "dif_" + filename)
mylog("New filename for conflicting file generated %s" % newPath)
move(image,newPath)
else:
if not args.copyonly:
mylog("Deleting %s it already exists in %s" % (image,newPath))
os.remove(image)
else:
move(image,newPath)
def move(srcFile, toDir):
if args.copyonly:
mylog("copy %s to direcotry %s" % (srcFile,toDir))
shutil.copy(srcFile,toDir)
else:
mylog("move %s to direcotry %s" % (srcFile,toDir))
shutil.move(srcFile,toDir)
def checkForDublette(image,newPath):
imageHash = dmcutils.fileSha265Sum(image)
copyedHash = dmcutils.fileSha265Sum(newPath)
if(imageHash == copyedHash):
return True
else:
return False
def createDirsIfNotExist(dateList):
year = os.path.join(args.targetFolder,dateList[0].strip())
month = os.path.join(year,dateList[1].strip())
if(not os.path.exists(year)):
mylog("Create new Folder %s" % year)
os.mkdir(year)
if(not os.path.exists(month)):
mylog("Create new Folder %s" % month)
os.mkdir(month)
return year, month
def init(commandArgs):
global args
report["processImageCount"] = 0;
report["processNoExif"] = 0;
mylog("Init FileUtils")
args = commandArgs
if __name__ == '__main__':
dmcutils.init()
init(dmcutils.commandArgs)
scanAndProcessFolders(args.inputFolder)
mylog("Images processed %s" % report["processImageCount"])
mylog("Images without valid EXIF Date %s" % report["processNoExif"])
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Blast/NCBIStandalone.py | Python | apache-2.0 | 74,333 | 0.000592
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patches by Mike Poidinger to support multiple databases.
# Updated by Peter Cock in 2007 to do a better job on BLAST 2.2.15
"""Code for calling standalone BLAST and parsing plain text output (DEPRECATED).
Rather than parsing the human readable plain text BLAST output (which seems to
change with every update to BLAST), we and the NCBI recommend you parse the
XML output instead. The plain text parser in this module still works at the
time of writing, but is considered obsolete and updating it to cope with the
latest versions of BLAST is not a priority for us.
This module also provides code to work with the "legacy" standalone version of
NCBI BLAST, tools blastall, rpsblast and blastpgp via three helper functions of
the same name. These functions are very limited for dealing with the output as
files rather than handles, for which the wrappers in Bio.Blast.Applications are
preferred. Furthermore, the NCBI themselves regard these command line tools as
"legacy", and encourage using the new BLAST+ tools instead. Biopython has
wrappers for these under Bio.Blast.Applications (see the tutorial).
"""
from __future__ import print_function
from Bio import BiopythonDeprecationWarning
import warnings
warnings.warn("This module has been deprecated. Consider Bio.SearchIO for "
"parsing BLAST output instead.", BiopythonDeprecationWarning)
import os
import re
from Bio._py3k import StringIO
from Bio import File
from Bio.ParserSupport import *
from Bio.Blast import Record
from Bio.Application import _escape_filename
__docformat__ = "restructuredtext en"
_score_e_re = re.compile(r'Score +E')
class LowQualityBlastError(Exception):
"""Error caused by running a low quality sequence through BLAST.
When low quality sequences (like GenBank entries containing only
stretches of a single nucleotide) are BLASTed, they will result in
BLAST generating an error and not being able to perform the BLAST
search. This error should be raised for the BLAST reports produced
in this case.
"""
pass
class ShortQueryBlastError(Exception):
"""Error caused by running a short query sequence through BLAST.
If the query sequence is too short, BLAST outputs warnings and errors::
Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch failed.
[blastall] ERROR: [000.000] AT1G08320: Blast:
[blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at least wordsize
done
This exception is raised when that condition is detected.
"""
pass
class _Scanner(object):
"""Scan BLAST output from blastall or blastpgp.
Tested with blastall and blastpgp v2.0.10, v2.0.11
Methods:
- feed Feed data into the scanner.
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a BLAST report for scanning. handle is a file-like
object that contains the BLAST report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
# Try to fast-forward to the beginning of the blast report.
read_and_call_until(uhandle, consumer.noevent, contains='BLAST')
# Now scan the BLAST report.
self._scan_header(uhandle, consumer)
self._scan_rounds(uhandle, consumer)
self._scan_database_report(uhandle, consumer)
self._scan_parameters(uhandle, consumer)
def _scan_header(self, uhandle, consumer):
# BLASTP 2.0.10 [Aug-26-1999]
#
#
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schaf
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997),
# "Gapped BLAST and PSI-BLAST: a new generation of protein database sea
# programs", Nucleic Acids Res. 25:3389-3402.
#
# Query= test
# (140 letters)
#
# Database: sdqib40-1.35.seg.fa
# 1323 sequences; 223,339 total letters
#
# ========================================================
# This next example is from the online version of Blast,
# note there are TWO references, an RID line, and also
# the database is BEFORE the query line.
# Note the possible use of non-ASCII in the author names.
# ========================================================
#
# BLASTP 2.2.15 [Oct-15-2006]
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Sch??ffer,
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman
# (1997), "Gapped BLAST and PSI-BLAST: a new generation of
# protein database search programs", Nucleic Acids Res. 25:3389-3402.
#
# Reference: Sch??ffer, Alejandro A., L. Aravind, Thomas L. Madden, Sergei
# Shavirin, John L. Spouge, Yuri I. Wolf, Eugene V. Koonin, and
# Stephen F. Altschul (2001), "Improving the accuracy of PSI-BLAST
# protein database searches with composition-based statistics
# and other refinements", Nucleic Acids Res. 29:2994-3005.
#
# RID: 1166022616-19998-65316425856.BLASTQ1
#
#
# Database: All non-redundant GenBank CDS
# translations+PDB+SwissProt+PIR+PRF excluding environmental samples
# 4,254,166 sequences; 1,462,033,012 total letters
# Query= gi:16127998
# Length=428
#
consumer.start_header()
read_and_call(uhandle, consumer.version, contains='BLAST')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# There might be a <pre> line, for qblast output.
attempt_read_and_call(uhandle, consumer.noevent, start="<pre>")
# Read the reference(s)
while attempt_read_and_call(uhandle,
consumer.reference, start='Reference'):
# References are normally multiline terminated by a blank line
# (or, based on the old code, the RID line)
while True:
line = uhandle.readline()
if is_blank_line(line):
consumer.noevent(line)
break
elif line.startswith("RID"):
break
else:
# More of the reference
consumer.reference(line)
# Deal with the optional RID: ...
read_and_call_while(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.reference, start="RID:")
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp may have a reference for compositional score matrix
# adjustment (see Bug 2502):
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp has a Reference for composition-based statistics.
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
line = uhandle.peekline()
assert line.strip() != ""
assert not line.startswith("RID:")
if line.startswith("Query="):
# This is an old style query then database...
# Read the Query lines and the following blank line.
read_and_call(uhandle, consumer.query_info, start='Query=')
read_and_call_until(uhandle, consumer.query_info, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the database lines and the followin
vr2262/framer | vlc/vlc.py | Python | mit | 256,336 | 0.004837
#! /usr/bin/python
# Python ctypes bindings for VLC
#
# Copyright (C) 2009-2012 the VideoLAN team
# $Id: $
#
# Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
# Jean Brouwers <MrJean1 at gmail.com>
# Geoff Salmon <geoff.salmon at gmail.com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
"""This module provides bindings for the LibVLC public API, see
U{http://wiki.videolan.org/LibVLC}.
You can find the documentation and a README file with some examples
at U{http://www.advene.org/download/python-ctypes/}.
Basically, the most important class is L{Instance}, which is used
to create a libvlc instance. From this instance, you then create
L{MediaPlayer} and L{MediaListPlayer} instances.
Alternatively, you may create instances of the L{MediaPlayer} and
L{MediaListPlayer} class directly and an instance of L{Instance}
will be implicitly created. The latter can be obtained using the
C{get_instance} method of L{MediaPlayer} and L{MediaListPlayer}.
"""
import ctypes
from ctypes.util import find_library
import os
import sys
# Used by EventManager in override.py
from inspect import getargspec
__version__ = "N/A"
build_date = "Tue Jul 2 10:35:53 2013"
if sys.version_info[0] > 2:
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
PYTHON3 = True
def str_to_bytes(s):
"""Translate string or bytes to bytes.
"""
if isinstance(s, str):
return bytes(s, sys.getfilesystemencoding())
else:
return s
def bytes_to_str(b):
"""Translate bytes to string.
"""
if isinstance(b, bytes):
return b.decode(sys.getfilesystemencoding())
else:
return b
else:
str = str
unicode = unicode
bytes = str
basestring = basestring
PYTHON3 = False
def str_to_bytes(s):
"""Translate string or bytes to bytes.
"""
if isinstance(s, unicode):
return s.encode(sys.getfilesystemencoding())
else:
return s
def bytes_to_str(b):
"""Translate bytes to unicode string.
"""
if isinstance(b, str):
return unicode(b, sys.getfilesystemencoding())
else:
return b
# Internal guard to prevent internal classes from being directly
# instantiated.
_internal_guard = object()
def find_lib():
dll = None
plugin_path = None
if sys.platform.startswith('linux'):
p = find_library('vlc')
try:
dll = ctypes.CDLL(p)
except OSError: # may fail
dll = ctypes.CDLL('libvlc.so.5')
elif sys.platform.startswith('win'):
p = find_library('libvlc.dll')
if p is None:
try: # some registry settings
import _winreg as w # leaner than win32api, win32con
for r in w.HKEY_LOCAL_MACHINE, w.HKEY_CURRENT_USER:
try:
r = w.OpenKey(r, 'Software\\VideoLAN\\VLC')
plugin_path, _ = w.QueryValueEx(r, 'InstallDir')
w.CloseKey(r)
break
except w.error:
pass
except ImportError: # no PyWin32
pass
if plugin_path is None:
# try some standard locations.
for p in ('Program Files\\VideoLan\\', 'VideoLan\\',
'Program Files\\', ''):
p = 'C:\\' + p + 'VLC\\libvlc.dll'
if os.path.exists(p):
plugin_path = os.path.dirname(p)
break
if plugin_path is not None: # try loading
p = os.getcwd()
os.chdir(plugin_path)
# if chdir failed, this will raise an exception
dll = ctypes.CDLL('libvlc.dll')
# restore cwd after dll has been loaded
os.chdir(p)
else: # may fail
dll = ctypes.CDLL('libvlc.dll')
else:
plugin_path = os.path.dirname(p)
dll = ctypes.CDLL(p)
elif sys.platform.startswith('darwin'):
# FIXME: should find a means to configure path
d = '/Applications/VLC.app/Contents/MacOS/'
p = d + 'lib/libvlc.dylib'
if os.path.exists(p):
dll = ctypes.CDLL(p)
d += 'modules'
if os.path.isdir(d):
plugin_path = d
else: # hope, some PATH is set...
dll = ctypes.CDLL('libvlc.dylib')
else:
raise NotImplementedError('%s: %s not supported' % (sys.argv[0], sys.platform))
return (dll, plugin_path)
# plugin_path used on win32 and MacOS in override.py
dll, plugin_path = find_lib()
class VLCException(Exception):
"""Exception raised by libvlc methods.
"""
pass
try:
_Ints = (int, long)
except NameError: # no long in Python 3+
_Ints = int
_Seqs = (list, tuple)
# Default instance. It is used to instantiate classes directly in the
# OO-wrapper.
_default_instance = None
def get_default_instance():
"""Return the default VLC.Instance.
"""
global _default_instance
if _default_instance is None:
_default_instance = Instance()
return _default_instance
_Cfunctions = {} # from LibVLC __version__
_Globals = globals() # sys.modules[__name__].__dict__
def _Cfunction(name, flags, errcheck, *types):
"""(INTERNAL) New ctypes function binding.
"""
if hasattr(dll, name) and name in _Globals:
p = ctypes.CFUNCTYPE(*types)
f = p((name, dll), flags)
if errcheck is not None:
f.errcheck = errcheck
# replace the Python function
# in this module, but only when
# running as python -O or -OO
if __debug__:
_Cfunctions[name] = f
else:
_Globals[name] = f
return f
raise NameError('no function %r' % (name,))
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o
def _Constructor(cls, ptr=_internal_guard):
"""(INTERNAL) New wrapper from ctypes.
"""
if ptr == _internal_guard:
raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.")
if ptr is None or ptr == 0:
return None
return _Cobject(cls, ctypes.c_void_p(ptr))
class _Cstruct(ctypes.Structure):
"""(INTERNAL) Base class for ctypes structures.
"""
_fields_ = [] # list of 2-tuples ('name', ctypes.<type>)
def __str__(self):
l = [' %s:\t%s' % (n, getattr(self, n)) for n, _ in self._fields_]
return '\n'.join([self.__class__.__name__] + l)
def __repr__(self):
return '%s.%s' % (self.__class__.__module__, self)
class _Ctype(object):
"""(INTERNAL) Base class for ctypes.
"""
@staticmethod
def from_param(this): # not self
"""(INTERNAL) ctypes parameter conversion method.
"""
if this is None:
return None
return this._as_parameter_
class ListPOINTER(object):
"""Just like a POINTER but accept a list of ctype as an argument.
"""
def __init__(self, etype):
self.etype = etype
def from_param(self, param):
if isinstance(param, _Seqs):
return (self.etype * len(param))(*param)
# errcheck functions for some nati
NeuroRoboticTech/Jetduino | Software/Python/grove_thumb_joystick.py | Python | mit | 3,602 | 0.003609
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)
# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
xPin = ARD_A0
yPin = ARD_A2
jetduino.pinMode(xPin, INPUT_PIN)
jetduino.pinMode(yPin, INPUT_PIN)
# The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023
# The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis
# My joystick produces slightly different results to the specifications found on the url above
# I've listed both here:
# Specifications
# Min Typ Max Click
# X 206 516 798 1023
# Y 203 507 797
# My Joystick
# Min Typ Max Click
# X 253 513 766 1020-1023
# Y 250 505 769
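# A hedged helper sketch (not in the original script): one way to map the raw
# 0-1023 reading onto roughly -1.0 .. 1.0 around the typical centre value above:
#
#     def normalize(raw, centre=512, span=511.0):
#         return max(-1.0, min(1.0, (raw - centre) / span))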
while True:
try:
# Get X/Y coordinates
x = jetduino.analogRead(xPin)
y = jetduino.analogRead(yPin)
# Calculate X/Y resistance
Rx = (float)(1023 - x) * 10 / x
Ry = (float)(1023 - y) * 10 / y
# Was a click detected on the X axis?
click = 1 if x >= 1020 else 0
print ("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
time.sleep(.5)
except IOError:
print ("Error")
Capitains/MyCapytain | MyCapytain/errors.py | Python | mpl-2.0 | 2,639 | 0.003412
# -*- coding: utf-8 -*-
"""
.. module:: MyCapytain.errors
:synopsis: MyCapytain errors
.. moduleauthor:: Thibault Clérice <[email protected]>
"""
class MyCapytainException(BaseException):
""" Namespacing errors
"""
class JsonLdCollectionMissing(MyCapytainException):
""" Error thrown when a JSON LD contains no principle collection
Raised when a json supposed to contain collection is parsed
but nothing is found
"""
class DuplicateReference(SyntaxWarning, MyCapytainException):
""" Error generated when a duplicate is found in CtsReference
"""
class RefsDeclError(Exception, MyCapytainException):
""" Error issued when an the refsDecl does not succeed in xpath (no results)
"""
pass
class InvalidSiblingRequest(Exception, MyCapytainException):
""" This error is thrown when one attempts to get previous or next passage on a passage with a range of different
depth, ex. : 1-2.25
"""
pass
class InvalidURN(Exception, MyCapytainException):
""" This error is thrown when URN are not valid
"""
class MissingAttribute(Exception, MyCapytainException):
""" This error is thrown when an attribute is not present in the Object (missing at startup)
"""
class UnknownObjectError(ValueError, MyCapytainException):
""" This error is thrown when an object does not exist in an inventory or in an API
"""
class UnknownNamespace(ValueError, MyCapytainException):
""" This error is thrown when a namespace is unknown
"""
class UndispatchedTextError(Exception, MyCapytainException):
""" This error is thrown when a text has not been dispatched by a dispatcher
"""
class UnknownCollection(KeyError, MyCapytainException):
""" A collection is unknown to its ancestor
"""
class EmptyReference(SyntaxWarning, MyCapytainException):
""" Error generated when a CtsReference does not exist or is invalid
"""
class CitationDepthError(UnknownObjectError, MyCapytainException):
""" Error generated when the depth of a requested citation is deeper than the citation scheme of the text
"""
class MissingRefsDecl(Exception, MyCapytainException):
""" A text has no properly encoded refsDecl
"""
class PaginationBrowsingError(MyCapytainException):
""" When contacting a remote service and some part of the pages where not reachable or parsable
"""
class CapitainsXPathError(Exception):
def __init__(self, message):
super(CapitainsXPathError, self).__init__()
self.message = message
def __repr__(self):
return "CapitainsXPathError("+self.message+")"
mat128/python-ubersmith-remote-module-server | ubersmith_remote_module_server/server.py | Python | apache-2.0 | 915 | 0.002186
# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from ubersmith_remote_module_server import api, router
class Server(object):
def __init__(self, modules):
self.router = router.Router()
self.app = Flask(__name__)
self.api = api.Api(modules, self.app, self.router)
def run(self, *args, **kwargs):
self.app.run(*args, **kwargs)
levibostian/myBlanky | googleAppEngine/google/appengine/tools/old_dev_appserver.py | Python | mit | 133,803 | 0.007593
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pure-Python application server for testing applications locally.
Given a port and the paths to a valid application directory (with an 'app.yaml'
file), the external library directory, and a relative URL to use for logins,
creates an HTTP server that can be used to test an application locally. Uses
stubs instead of actual APIs when SetupStubs() is called first.
Example:
root_path = '/path/to/application/directory'
login_url = '/login'
port = 8080
server = dev_appserver.CreateServer(root_path, login_url, port)
server.serve_forever()
"""
from __future__ import with_statement
from google.appengine.tools import os_compat
import __builtin__
import BaseHTTPServer
import base64
import binascii
import calendar
import cStringIO
import cgi
import cgitb
import email.Utils
import errno
import hashlib
import heapq
import httplib
import imp
import inspect
import logging
import mimetools
import mimetypes
import os
import select
import shutil
import simplejson
import StringIO
import struct
import tempfile
import wsgiref.headers
import yaml
import re
import sre_compile
import sre_constants
import sre_parse
import socket
import sys
import time
import types
import urlparse
import urllib
import zlib
import google
try:
from google.third_party.apphosting.python.webapp2 import v2_3 as tmp
sys.path.append(os.path.dirname(tmp.__file__))
del tmp
except ImportError:
pass
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import app_logging
from google.appengine.api import blobstore
from google.appengine.api import croninfo
from google.appengine.api import datastore
from google.appengine.api import datastore_file_stub
from google.appengine.api import lib_config
from google.appengine.api import mail
from google.appengine.api import mail_stub
from google.appengine.api import namespace_manager
from google.appengine.api import request_info
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api import yaml_errors
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.remote_socket import _remote_socket_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api import rdbms_mysqldb
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_stub
from google.appengine import dist
try:
from google.appengine.runtime import request_environment
from google.appengine.runtime import runtime
except:
request_environment = None
runtime = None
from google.appengine.tools import dev_appserver_apiserver
from google.appengine.tools import dev_appserver_blobimage
from google.appengine.tools import dev_appserver_blobstore
from google.appengine.tools import dev_appserver_channel
from google.appengine.tools import dev_appserver_import_hook
from google.appengine.tools import dev_appserver_login
from google.appengine.tools import dev_appserver_multiprocess as multiprocess
from google.appengine.tools import dev_appserver_oauth
from google.appengine.tools import dev_appserver_upload
from google.storage.speckle.python.api import rdbms
CouldNotFindModuleError = dev_appserver_import_hook.CouldNotFindModuleError
FakeAccess = dev_appserver_import_hook.FakeAccess
FakeFile = dev_appserver_import_hook.FakeFile
FakeReadlink = dev_appserver_import_hook.FakeReadlink
FakeSetLocale = dev_appserver_import_hook.FakeSetLocale
FakeUnlink = dev_appserver_import_hook.FakeUnlink
GetSubmoduleName = dev_appserver_import_hook.GetSubmoduleName
HardenedModulesHook = dev_appserver_import_hook.HardenedModulesHook
SDK_ROOT = dev_appserver_import_hook.SDK_ROOT
PYTHON_LIB_VAR = '$PYTHON_LIB'
DEVEL_CONSOLE_PATH = PYTHON_LIB_VAR + '/google/appengine/ext/admin'
REMOTE_API_PATH = (PYTHON_LIB_VAR +
'/google/appengine/ext/remote_api/handler.py')
FILE_MISSING_EXCEPTIONS = frozenset([errno.ENOENT, errno.ENOTDIR])
MAX_URL_LENGTH = 2047
DEFAULT_ENV = {
'GATEWAY_INTERFACE': 'CGI/1.1',
'AUTH_DOMAIN': 'gmail.com',
'USER_ORGANIZATION': '',
'TZ': 'UTC',
}
DEFAULT_SELECT_DELAY = 30.0
for ext, mime_type in mail.EXTENSION_MIME_MAP.iteritems():
mimetypes.add_type(mime_type, '.' + ext)
MAX_RUNTIME_RESPONSE_SIZE = 32 << 20
MAX_REQUEST_SIZE = 32 * 1024 * 1024
COPY_BLOCK_SIZE = 1 << 20
API_VERSION = '1'
VERSION_FILE = '../../VERSION'
DEVEL_PAYLOAD_HEADER = 'HTTP_X_APPENGINE_DEVELOPMENT_PAYLOAD'
DEVEL_PAYLOAD_RAW_HEADER = 'X-AppEngine-Development-Payload'
DEVEL_FAKE_IS_ADMIN_HEADER = 'HTTP_X_APPENGINE_FAKE_IS_ADMIN'
DEVEL_FAKE_IS_ADMIN_RAW_HEADER = 'X-AppEngine-Fake-Is-Admin'
FILE_STUB_DEPRECATION_MESSAGE = (
"""The datastore file stub is deprecated, and
will stop being the default in a future release.
Append the --use_sqlite flag to use the new SQLite stub.
You can port your existing data using the --port_sqlite_data flag or
purge your previous test data with --clear_datastore.
""")
NON_PUBLIC_CACHE_CONTROLS = frozenset(['private', 'no-cache', 'no-store'])
class Error(Exception):
"""Base-class for exceptions in this module."""
class InvalidAppConfigError(Error):
"""The supplied application configuration file is invalid."""
class AppConfigNotFoundError(Error):
"""Application configuration file not found."""
class CompileError(Error):
"""Application could not be compiled."""
def __init__(self, text):
self.text = text
class ExecuteError(Error):
"""Application could not be executed."""
def __init__(self, text, log):
self.text = text
self.log = log
def MonkeyPatchPdb(pdb):
"""Given a reference to the pdb module, fix its set_trace function.
This will allow the standard trick of setting a breakpoint in your
code by inserting a call to pdb.set_trace() to work properly, as
long as the original stdin and stdout of dev_appserver.py are
connected to a console or shell window.
"""
def NewSetTrace():
"""Replacement for set_trace() that uses the original i/o streams.
This is necessary because by the time the user code that might
invoke pdb.set_trace() runs, the default sys.stdin and sys.stdout
are redirected to the HTTP request and response streams instead,
so that pdb will encounter garbage (or EOF) in its input, and its
output will garble the HTTP response. Fortunately, sys.__stdin__
and sys.__stderr__ retain references to the original streams --
this is a standard Python feature. Also, fortunately, as of
Python 2.5, the Pdb class lets you easily override stdin and
stdout. The original set_trace() function does essentially the
same thing as the code here except
|
hackshel/metaCollecter
|
src/metaManager/modules/defines.py
|
Python
|
bsd-3-clause
| 312
| 0.032051
|
PORT = {}
PORT['metaManager'] = 10087
CGI_SOCK = {}
CGI_SOCK[ 'metaManager' ] = '/tmp/metaManager_fcgi_sock'
SOCK = {}
SOCK[ 'logqueue' ] = '/tmp/logqueue_sock'
PIDPATH = {}
PIDPATH[ 'metaManager' ] = '/var/run/metaManager.server.pid'
PIDPATH[ 'managerChewer' ] = '/var/run/data.chewer.pid'
|
oriel-hub/api
|
deploy/bootstrap.py
|
Python
|
gpl-2.0
| 2,753
| 0.00109
|
#!/usr/bin/env python3
"""
bootstrap.py will set up a virtualenv for you and update it as required.
Usage:
bootstrap.py # update virtualenv
bootstrap.py fake # just update the virtualenv timestamps
bootstrap.py clean # delete the virtualenv
bootstrap.py -h | --help # print this message and exit
Options for the plain command:
-f, --force # do the virtualenv update even if it is up to date
-r, --full-rebuild # delete the virtualenv before rebuilding
-q, --quiet # don't ask for user input
"""
# a script to set up the virtualenv so we can use fabric and tasks
import sys
import getopt
import ve_mgr
def print_help_text():
print(__doc__)
def print_error_msg(error_msg):
print(error_msg)
print_help_text()
return 2
def main(argv):
# check python version is high enough
ve_mgr.check_python_version(2, 6, __file__)
force_update = False
full_rebuild = False
fake_update = False
clean_ve = False
devel = False
if argv:
try:
            opts, args = getopt.getopt(argv[1:], 'hfqrd',
['help', 'force', 'quiet', 'full-rebuild', 'dev'])
        except getopt.error as msg:
            return print_error_msg('Bad options: %s' % msg)
    # process options
for o, a in opts:
if o in ("-h", "--help"):
print_help_text()
return 0
if o in ("-f", "--force"):
force_update = True
if o in ("-r", "--full-rebuild"):
full_rebuild = True
if o in ("-d", "--dev"):
devel = True
if len(args) > 1:
return print_error_msg(
"Can only have one argument - you had %s" % (' '.join(args)))
if len(args) == 1:
if args[0] == 'fake':
fake_update = True
elif args[0] == 'clean':
clean_ve = True
# check for incompatible flags
if force_update and fake_update:
return print_error_msg("Cannot use --force with fake")
if full_rebuild and fake_update:
return print_error_msg("Cannot use --full-rebuild with fake")
if full_rebuild and clean_ve:
return print_error_msg("Cannot use --full-rebuild with clean")
environment = 'dev' if devel is True else None
updater = ve_mgr.UpdateVE(environment=environment)
if fake_update:
return updater.update_ve_timestamp()
elif clean_ve:
return updater.delete_virtualenv()
else:
updater.update_git_submodule()
return updater.update_ve(full_rebuild, force_update)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
cp16net/trove
|
trove/guestagent/datastore/mysql/service.py
|
Python
|
apache-2.0
| 43,814
| 0.000023
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from collections import defaultdict
import os
import re
import uuid
from oslo_log import log as logging
import sqlalchemy
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from trove.common import cfg
from trove.common import configurations
from trove.common import exception
from trove.common.exception import PollTimeOut
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.common import sql_query
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg
ADMIN_USER_NAME = "os_admin"
LOG = logging.getLogger(__name__)
FLUSH = text(sql_query.FLUSH)
ENGINE = None
DATADIR = None
PREPARING = False
UUID = False
TMP_MYCNF = "/tmp/my.cnf.tmp"
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
OS_NAME = operating_system.get_os()
MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf",
operating_system.DEBIAN: "/etc/mysql/my.cnf",
operating_system.SUSE: "/etc/my.cnf"}[OS_NAME]
MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"]
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf"
MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp"
MYCNF_REPLMASTER = "/etc/mysql/conf.d/0replmaster.cnf"
MYCNF_REPLSLAVE = "/etc/mysql/conf.d/1replslave.cnf"
MYCNF_REPLCONFIG_TMP = "/tmp/replication.cnf.tmp"
# Create a package impl
packager = pkg.Package()
def clear_expired_password():
"""
    Some MySQL installations generate a random root password
    and save it in /root/.mysql_secret. This password is
    expired and should be changed by a client that supports expired passwords.
"""
LOG.debug("Removing expired password.")
secret_file = "/root/.mysql_secret"
try:
out, err = utils.execute("cat", secret_file,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("/root/.mysql_secret does not exist."))
return
m = re.match('# The random password set for the root user at .*: (.*)',
out)
if m:
try:
out, err = utils.execute("mysqladmin", "-p%s" % m.group(1),
"password", "", run_as_root=True,
root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("Cannot change mysql password."))
return
operating_system.remove(secret_file, force=True, as_root=True)
LOG.debug("Expired password removed.")
def get_auth_password():
pwd, err = utils.execute_with_timeout(
"sudo",
"awk",
"/password\\t=/{print $3; exit}",
MYSQL_CONFIG)
if err:
LOG.error(err)
raise RuntimeError("Problem reading my.cnf! : %s" % err)
return pwd.strip()
def get_engine():
"""Create the default engine with the updated admin user."""
# TODO(rnirmal):Based on permissions issues being resolved we may revert
# url = URL(drivername='mysql', host='localhost',
# query={'read_default_file': '/etc/mysql/my.cnf'})
global ENGINE
if ENGINE:
return ENGINE
pwd = get_auth_password()
ENGINE = sqlalchemy.create_engine("mysql://%s:%s@localhost:3306" %
(ADMIN_USER_NAME, pwd.strip()),
pool_recycle=7200,
echo=CONF.sql_query_logging,
listeners=[KeepAliveConnection()])
return ENGINE
def load_mysqld_options():
# find mysqld bin
for bin in MYSQL_BIN_CANDIDATES:
if os.path.isfile(bin):
mysqld_bin = bin
break
else:
return {}
try:
out, err = utils.execute(mysqld_bin, "--print-defaults",
run_as_root=True, root_helper="sudo")
arglist = re.split("\n", out)[1].split()
args = defaultdict(list)
for item in arglist:
if "=" in item:
key, value = item.split("=", 1)
args[key.lstrip("--")].append(value)
else:
args[item.lstrip("--")].append(None)
return args
except exception.ProcessExecutionError:
return {}
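# Illustrative example (not from the source): if the second line of the --print-defaults
# output were
#   "--user=mysql --datadir=/var/lib/mysql --skip-external-locking"
# the parsing above would yield
#   {'user': ['mysql'], 'datadir': ['/var/lib/mysql'], 'skip-external-locking': [None]}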
def read_mycnf():
with open(MYSQL_CONFIG, 'r') as file:
config_contents = file.read()
return config_contents
def get_datadir(reset_cache=False):
"""Return the data directory currently used by Mysql."""
global DATADIR
if not reset_cache and DATADIR:
return DATADIR
mycnf_contents = read_mycnf()
# look for datadir parameter in my.cnf
mycnf = dict(configurations.MySQLConfParser(mycnf_contents).parse())
DATADIR = mycnf['datadir']
return DATADIR
class MySqlAppStatus(service.BaseDbStatus):
@classmethod
def get(cls):
if not cls._instance:
cls._instance = MySqlAppStatus()
return cls._instance
def _get_actual_db_status(self):
try:
out, err = utils.execute_with_timeout(
"/usr/bin/mysqladmin",
"ping", run_as_root=True, root_helper="sudo",
log_output_on_error=True)
LOG.info(_("MySQL Service Status is RUNNING."))
return rd_instance.ServiceStatuses.RUNNING
except exception.ProcessExecutionError:
LOG.exception(_("Failed to get database status."))
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
|
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
                LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') %
                         {'pid': pid})
return rd_instance.ServiceStatuses.BLOCKED
except exception.ProcessExecutionError:
LOG.exception(_("Process execution failed."))
mysql_args = load_mysqld_options()
pid_file = mysql_args.get('pid_file',
['/var/run/mysqld/mysqld.pid'])[0]
if os.path.exists(pid_file):
LOG.info(_("MySQL Service Status is CRASHED."))
return rd_instance.ServiceStatuses.CRASHED
else:
LOG.info(_("MySQL Service Status is SHUTDOWN."))
return rd_instance.ServiceStatuses.SHUTDOWN
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions."""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
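    # Usage sketch (illustrative, not from the source): the wrapper is meant to be used as a
    # context manager around an engine, e.g.
    #
    #   with LocalSqlClient(get_engine()) as client:
    #       client.execute(FLUSH)
    #
    # The specific statement executed here is an assumption; only the with-pattern is implied.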
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.r
|
erikhvatum/RisWidget
|
ris_widget/examples/simple_point_picker.py
|
Python
|
mit
| 6,323
| 0.003163
|
# The MIT License (MIT)
#
# Copyright (c) 2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <[email protected]>
from PyQt5 import Qt
from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE
class PointItem(Qt.QGraphicsRectItem):
    # Omitting .type() or failing to return a unique value causes PyQt to return a wrapper of the wrong type when retrieving an instance of this item as a base
# class pointer from C++. For example, if this item has a child and that child calls self.parentItem(), it would receive a Python object of type
# Qt.QGraphicsRectItem rather than PointItem unless PointItem has a correct .type() implementation.
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
def __init__(self, picker, x, y, w, h, parent_item):
super().__init__(x, y, w, h, parent_item)
self.picker = picker
flags = self.flags()
self.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges # Necessary in order for .itemChange to be called when item is moved
)
def itemChange(self, change, value):
if change == Qt.QGraphicsItem.ItemPositionHasChanged:
self.picker.point_item_position_has_changed.emit(self)
return super().itemChange(change, value)
def keyPressEvent(self, event):
if event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.picker.delete_selected()
def type(self):
return self.QGRAPHICSITEM_TYPE
# NB: deriving from Qt.QGraphicsObject is necessary in order to be a scene event filter target
class SimplePointPicker(Qt.QGraphicsObject):
"""ex:
from ris_widget.ris_widget import RisWidget
from ris_widget.examples.simple_point_picker import SimplePointPicker
rw = RisWidget()
simple_point_picker = SimplePointPicker(rw.main_view, rw.main_scene.layer_stack_item)"""
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
point_item_position_has_changed = Qt.pyqtSignal(PointItem)
point_item_list_content_reset = Qt.pyqtSignal()
def __init__(self, general_view, parent_item, points=None):
super().__init__(parent_item)
self.view = general_view
self.view.viewport_rect_item.size_changed.connect(self.on_viewport_size_changed)
self.point_items = []
self.pen = Qt.QPen(Qt.Qt.red)
self.pen.setWidth(2)
color = Qt.QColor(Qt.Qt.yellow)
color.setAlphaF(0.5)
self.brush = Qt.QBrush(color)
self.brush_selected = Qt.QBrush(Qt.QColor(255, 0, 255, 127))
parent_item.installSceneEventFilter(self)
if points:
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
def boundingRect(self):
return Qt.QRectF()
def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None):
pass
def type(self):
return self.QGRAPHICSITEM_TYPE
def make_and_store_point_item(self, pos):
point_item = PointItem(self, -7, -7, 15, 15, self.parentItem())
point_item.setScale(1 / self.view.transform().m22())
point_item.setPen(self.pen)
point_item.setBrush(self.brush)
flags = point_item.flags()
point_item.setFlags(
flags |
            Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
            Qt.QGraphicsItem.ItemIsSelectable |
            Qt.QGraphicsItem.ItemIsMovable |
            Qt.QGraphicsItem.ItemSendsGeometryChanges
)
point_item.installSceneEventFilter(self)
self.point_items.append(point_item)
point_item.setPos(pos)
def delete_selected(self):
for idx, item in reversed(list(enumerate((self.point_items)))):
if item.isSelected():
self.scene().removeItem(item)
del self.point_items[idx]
self.point_item_list_content_reset.emit()
def sceneEventFilter(self, watched, event):
if watched is self.parentItem():
if event.type() == Qt.QEvent.GraphicsSceneMousePress and event.button() == Qt.Qt.RightButton:
self.make_and_store_point_item(event.pos())
return True
if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.delete_selected()
return False
def on_viewport_size_changed(self):
scale = 1 / self.view.transform().m22()
for point_item in self.point_items:
point_item.setScale(scale)
def clear(self):
for point_item in self.point_items:
self.view.scene().removeItem(point_item)
self.point_items = []
self.point_item_list_content_reset.emit()
@property
def points(self):
return [(point_item.pos().x(), point_item.pos().y()) for point_item in self.point_items]
@points.setter
def points(self, points):
self.clear()
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
|
nesdis/djongo
|
djongo/sql2mongo/sql_tokens.py
|
Python
|
agpl-3.0
| 12,682
| 0.000552
|
import abc
import re
from typing import Union as U, Iterator, Optional as O
from pymongo import ASCENDING, DESCENDING
from sqlparse import tokens, parse as sqlparse
from sqlparse.sql import Token, Identifier, Function, Comparison, Parenthesis, IdentifierList, Statement
from . import query as query_module
from ..exceptions import SQLDecodeError, NotSupportedError
all_token_types = U['SQLConstIdentifier',
'djongo.sql2mongo.functions.CountFunc',
'djongo.sql2mongo.functions.SimpleFunc',
'SQLIdentifier',
'SQLComparison',
'SQLPlaceholder']
class SQLToken:
@abc.abstractmethod
def __init__(self,
token: Token,
query: 'query_module.BaseQuery'):
self._token = token
self.query = query
def __repr__(self):
return f'{self._token}'
@staticmethod
def tokens2sql(token: Token,
query: 'query_module.BaseQuery'
) -> Iterator[all_token_types]:
from .functions import SQLFunc
if isinstance(token, Identifier):
# Bug fix for sql parse
if isinstance(token[0], Parenthesis):
try:
int(token[0][1].value)
except ValueError:
yield SQLIdentifier(token[0][1], query)
else:
yield SQLConstIdentifier(token, query)
elif isinstance(token[0], Function):
yield SQLFunc.token2sql(token, query)
else:
yield SQLIdentifier(token, query)
elif isinstance(token, Function):
yield SQLFunc.token2sql(token, query)
elif isinstance(token, Comparison):
yield SQLComparison(token, query)
elif isinstance(token, IdentifierList):
for tok in token.get_identifiers():
yield from SQLToken.tokens2sql(tok, query)
elif isinstance(token, Parenthesis):
yield SQLPlaceholder(token, query)
else:
raise SQLDecodeError(f'Unsupported: {token.value}')
@staticmethod
def token2sql(token: Token,
query: 'query_module.BaseQuery'
) -> all_token_types:
return next(SQLToken.tokens2sql(token, query))
@staticmethod
def placeholder_index(token) -> int:
return int(re.match(r'%\(([0-9]+)\)s', token.value, flags=re.IGNORECASE).group(1))
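    # Example (illustrative, not from the source): a placeholder token whose value is '%(3)s'
    # is mapped by placeholder_index() above to the integer index 3.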
class AliasableToken(SQLToken):
@abc.abstractmethod
def __init__(self, *args):
super().__init__(*args)
self.token_alias: 'query_module.TokenAlias' = self.query.token_alias
if self.alias:
self.token_alias.alias2token[self.alias] = self
self.token_alias.token2alias[self] = self.alias
if self.is_explicit_alias():
self.token_alias.aliased_names.add(self.alias)
def __hash__(self):
if self.is_explicit_alias():
return hash(self._token[0].value)
return hash(self._token.value)
def __eq__(self, other):
return hash(self) == hash(other)
def is_explicit_alias(self):
return len(self._token.tokens) == 5 and self._token[2].match(tokens.Keyword, 'AS')
@property
def alias(self) -> str:
# bug fix sql parse
if not self._token.get_ordering():
return self._token.get_alias()
class SQLIdentifier(AliasableToken):
def __init__(self, *args):
super().__init__(*args)
self._ord = None
if self._token.get_ordering():
# Bug fix for sql parse
self._ord = self._token.get_ordering()
self._token = self._token[0]
@property
def order(self):
if self._ord is None:
raise SQLDecodeError
return ORDER_BY_MAP[self._ord]
@property
def field(self) -> str:
if self.given_table in self.query.token_alias.aliased_names:
return self.given_table
if self.table == self.query.left_table:
return self.column
else:
return f'{self.table}.{self.column}'
@property
def table(self) -> str:
name = self.given_table
alias2token = self.token_alias.alias2token
try:
return alias2token[name].table
except KeyError:
return name
@property
def given_table(self) -> str:
name = self._token.get_parent_name()
if name is None:
name = self._token.get_real_name()
if name is None:
raise SQLDecodeError
return name
@property
def column(self) -> str:
name = self._token.get_real_name()
if name is None:
raise SQLDecodeError
return name
class SQLConstIdentifier(AliasableToken):
def __init__(self, *args):
super().__init__(*args)
@property
def value(self) -> int:
return int(self._token[0][1].value)
def to_mongo(self) -> dict:
return {'$literal': self.value}
class SQLComparison(SQLToken):
@property
def left_table(self):
lhs = SQLIdentifier(self._token.left, self.query)
return lhs.table
@property
def left_column(self):
lhs = SQLIdentifier(self._token.left, self.query)
return lhs.column
@property
def right_table(self):
rhs = SQLIdentifier(self._token.right, self.query)
return rhs.table
@property
def right_column(self):
rhs = SQLIdentifier(self._token.right, self.query)
return rhs.column
@property
def rhs_indexes(self):
if not self._token.right.ttype == tokens.Name.Placeholder:
if self._token.right.match(tokens.Keyword, 'NULL'):
return None
raise SQLDecodeError
index = self.placeholder_index(self._token.right)
return index
class SQLPlaceholder(SQLToken):
def __iter__(self):
tok = self._token[1:-1][0]
|
if isinstance(tok, IdentifierList):
for aid in tok.get_identifiers():
yield self.get_value(aid)
else:
yield self.get_value(tok)
def __init
|
__(self, token: Token, query: 'query_module.BaseQuery'):
super().__init__(token, query)
def get_value(self, tok: Token):
if tok.ttype == tokens.Name.Placeholder:
return self.placeholder_index(tok)
elif tok.match(tokens.Keyword, 'NULL'):
return None
elif tok.match(tokens.Keyword, 'DEFAULT'):
return 'DEFAULT'
else:
raise SQLDecodeError
class SQLStatement:
@property
def current_token(self) -> Token:
return self._statement[self._tok_id]
def __init__(self, statement: U[Statement, Token]):
self._statement = statement
self._tok_id = 0
self._gen_inst = self._generator()
def __getattr__(self, item):
return getattr(self._statement, item)
def __iter__(self) -> Token:
yield from self._gen_inst
def __repr__(self):
return str(self._statement)
def __getitem__(self, item: slice):
start = (item.start or 0) + self._tok_id
stop = item.stop and self._tok_id + item.stop
sql = ''.join(str(tok) for tok in self._statement[start:stop])
sql = sqlparse(sql)[0]
return SQLStatement(sql)
def next(self) -> O[Token]:
# self._tok_id, token = self._statement.token_next(self._tok_id)
try:
return next(self._gen_inst)
except StopIteration:
return None
def skip(self, num):
self._tok_id += num
@property
def prev_token(self) -> Token:
return self._statement.token_prev(self._tok_id)[1]
@property
def next_token(self) -> Token:
return self._statement.token_next(self._tok_id)[1]
def _generator(self):
token = self._statement[self._tok_id]
while self._tok_id is not None:
yield token
self._tok_id, token = self._statement.token_next(self._tok_id)
class SQLColumnDef:
not_null = object()
unique = object()
a
|
helifu/kudu
|
examples/python/basic-python-example/basic_example.py
|
Python
|
apache-2.0
| 2,977
| 0.00168
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import argparse
import kudu
from kudu.client import Partitioning
# Parse arguments
parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.')
parser.add_argument('--masters', '-m', nargs='+', default='localhost',
help='The master address(es) to connect to Kudu.')
parser.add_argument('--ports', '-p', nargs='+', default='7051',
help='The master server port(s) to connect to Kudu.')
args = parser.parse_args()
# Connect to Kudu master server(s).
client = kudu.connect(host=args.masters, port=args.ports)
# Define a schema for a new table.
builder = kudu.schema_builder()
builder.add_column('key').type(kudu.int64).nullable(False).primary_key()
builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4')
schema = builder.build()
# Define the partitioning schema.
partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3)
# Delete table if it already exists.
if client.table_exists('python-example'):
client.delete_table('python-example')
# Create a new table.
client.create_table('python-example', schema, partitioning)
# Open a table.
table = client.table('python-example')
# Create a new session so that we can apply write operations.
session = client.new_session()
# Insert a row.
op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()})
session.apply(op)
# Upsert a row.
op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"})
session.apply(op)
# Update a row.
op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")})
session.apply(op)
# Delete a row.
op = table.new_delete({'key': 2})
session.apply(op)
# Flush write operations, if failures occur, print them.
try:
session.flush()
except kudu.KuduBadStatus:
print(session.get_pending_errors())
# Create a scanner and add a predicate.
scanner = table.scanner()
scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
# Open scanner and print all tuples.
# Note: This doesn't scale for large scans
# The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))]
print(scanner.open().read_all_tuples())
|
h2020-endeavour/iSDX
|
pctrl/policy_loader.py
|
Python
|
apache-2.0
| 1,264
| 0.015032
|
import os
import json
from ss_rule_scheme import update_outbound_rules, init_inbound_rules, init_outbound_rules, msg_clear_all_outbound, ss_process_policy_change
base_path = os.path.abspath(os.path.join(os.path.realpath(__file__),
".."))
test_file = os.path.join(base_path, "blackholing_test.py")
with open(test_file, 'r') as f:
data = json.load(f)
inbound_policies = []
outbound_policies = []
for element in data['policy']:
if 'inbound' in element:
inbound_policies = element
if 'outbound' in element:
outbound_policies = element
#print inbound_policies
final_switch = "main-in"
rule_msgs = init_inbound_rules(1, inbound_policies, [], final_switch)
print "Rule Messages to be removed INBOUND:: "+str(rule_msgs)
#rule_msgs2 = init_outbound_rules(1, outbound_policies, [], final_switch)
#print ("Rule Messages OUTBOUND:: "+str(rule_msgs2))
#if 'changes' in rule_msgs2:
# if 'changes' not in rule_msgs:
# rule_msgs['changes'] = []
# rule_msgs['changes'] += rule_msgs2['changes']
#TODO: Initialize Outbound Policies from RIB
print ("Rule Messages:: "+str(rule_msgs))
for rule in rule_msgs['changes']:
rule['mod_type'] = "remove"
print ("XRS_Test: Rule Msgs: %s" % rule_msgs)
|
CompassionCH/compassion-accounting
|
donation_report_compassion/__init__.py
|
Python
|
agpl-3.0
| 409
| 0
|
##############################################################################
#
# Copyright (C) 2018-2020 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import reports
|
KarolBedkowski/wxgtd
|
wxgtd/wxtools/validators/__init__.py
|
Python
|
gpl-2.0
| 638
| 0.004724
|
# -*- coding: utf-8 -*-
""" Validators for wx widgets.
Copyright (c) Karol Będkowski, 2006-2013
This file is part of wxGTD
This is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, version 2.
"""
__author__ = "Karol Będkowski"
__copyright__ = "Copyright (c) Karol Będkowski, 2006-2013"
__version__ = '2013-04-21'
__all__ = ['ValidatorDv', 'Validator', 'ValidatorDate', 'ValidatorTime',
'ValidatorColorStr']
from .validator import Validator, ValidatorDv, ValidatorDate, ValidatorTime, \
ValidatorColorStr
|
showa-yojyo/notebook
|
source/_sample/apgl/distribution.py
|
Python
|
mit
| 553
| 0
|
#!/usr/bin/env python
import scipy.sparse as sps
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.SparseGraph import SparseGraph
numVertices = 10
vList = GeneralVertexList(numVertices)
Wght = sps.lil_matrix((numVertices, numVertices))
graph = SparseGraph(vList, W=Wght, undirected=False)
# Add some edges to the graph.
# Vertices are indexed starting from 0.
graph[0, 1] = 1
graph[0, 2] = 1
# Set the labels of vertices 0 and 1.
graph.setVertex(0, "abc")
graph.setVertex(1, 123)
print(graph.inDegreeDistribution())
|
diogo149/autocause
|
autocause/autocause_settings.py
|
Python
|
mit
| 7,414
| 0.001619
|
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in feature size growing at faster than a linear rate
"""
AGGREGATORS = [
to_aggregator("max"),
to_aggregator("min"),
to_aggregator("median"),
to_aggregator("mode"),
to_aggregator("mean"),
to_aggregator("sum"),
]
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=NUMERICAL_TO_NUMERICAL["identity"],
B=BINARY_TO_NUMERICAL["identity"],
C=CATEGORICAL_TO_NUMERICAL["binarize"],
)
CATEGORICAL_CONVERTERS = dict(
    N=NUMERICAL_TO_CATEGORICAL["discretizer10"],
    B=BINARY_TO_CATEGORICAL["identity"],
C=CATEGORICAL_TO_CATEGORICAL["identity"],
)
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converters can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
|
bdang2012/taiga-back-casting
|
taiga/projects/custom_attributes/services.py
|
Python
|
agpl-3.0
| 2,749
| 0.001092
|
# Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
@transaction.atomic
def bulk_update_userstory_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1
where custom_attributes_userstorycustomattribute.id = $2 and
custom_attributes_userstorycustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
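# Usage sketch (illustrative; the ids are made up): `data` is an iterable of
# (custom_attribute_id, new_order) pairs, matching the loop above, e.g.
#
#   bulk_update_userstory_custom_attribute_order(project, user, data=[(7, 1), (9, 2)])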
@transaction.atomic
def bulk_update_task_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1
where custom_attributes_taskcustomattribute.id = $2 and
custom_attributes_taskcustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1
where custom_attributes_issuecustomattribute.id = $2 and
custom_attributes_issuecustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
|
oxc/Flexget
|
flexget/plugins/metainfo/subtitles_check.py
|
Python
|
mit
| 2,678
| 0.003361
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import os
import logging
import tempfile
from flexget import plugin
from flexget.event import event
log = logging.getLogger('check_subtitles')
class MetainfoSubs(object):
"""
Set 'subtitles' field for entries, if they are local video files with subs.
The field is a list of language codes (3-letter ISO-639-3) for each subtitles
file found on disk and/or subs track found inside video (for MKVs).
Special "und" code is for unidentified language (i.e. files without language
code before extension).
"""
schema = {'type': 'boolean'}
def on_task_start(self, task, config):
try:
import subliminal
except ImportError as e:
log.debug('Error importing Subliminal: %s' % e)
raise plugin.DependencyError('subliminal', 'subliminal',
'Subliminal module required. ImportError: %s' % e)
from subliminal.cli import MutexLock
        from dogpile.cache.exception import RegionAlreadyConfigured
try:
subliminal.region.configure('dogpile.cache.dbm',
arguments={'filename': os.path.join(tempfile.gettempdir(), 'cachefile.dbm'),
'lock_factory': MutexLock})
except RegionAlreadyConfigured:
pass
logging.getLogger("subliminal").setLevel(logging.CRITICAL)
logging.getLogger("enzyme").setLevel(logging.WARNING)
def on_task_metainfo(self, task, config):
# check if explicitly disabled (value set to false)
if config is False:
return
for entry in task.entries:
entry.register_lazy_func(self.get_subtitles, ['subtitles'])
def get_subtitles(self, entry):
if entry.get('subtitles', eval_lazy=False) or not ('location' in entry) or \
('$RECYCLE.BIN' in entry['location']) or not os.path.exists(entry['location']):
return
from subliminal.core import search_external_subtitles
try:
subtitles = list(search_external_subtitles(entry['location']).values())
if subtitles:
entry['subtitles'] = subtitles
log.debug('Found subtitles %s for %s', '/'.join(subtitles), entry['title'])
except Exception as e:
log.debug('Error checking local subtitles for %s: %s' % (entry['title'], e))
@event('plugin.register')
def register_plugin():
plugin.register(MetainfoSubs, 'check_subtitles', api_ver=2)
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/paris/household_x_neighborhood/hhnper2_nbnper2.py
|
Python
|
gpl-2.0
| 1,879
| 0.031932
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
# This is a simple test variable for the interaction of gridcells and households.
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
class hhnper2_nbnper2(Variable):
"""Test variable for the interaction of neighborhoods and households.
    Computes household.nper2 * neighborhood.nper2_m."""
def dependencies(self):
return [attribute_label("neighborhood", "nper2_m"),
attribute_label("household", "nper2")]
def compute(self, dataset_pool):
return self.get_dataset().multiply("nper2", "nper2_m")
#if __name__=='__main__':
#from opus_core.tests import opus_unittest
#from urbansim.variable_test_toolbox import VariableTestToolbox
#from numpy import array
#from numpy import ma
#class Tests(opus_unittest.OpusTestCase):
#variable_name = "urbansim.household_x_neighborhood.hhrich_nbpoor"
#def test_full_tree(self):
#dept = array([10, 20, 30])
#prev_dept = array([10, 4, 20, 30])
#values = VariableTestToolbox().compute_variable(self.variable_name,
#{"neighborhood":{
#"dept":dept},
#"household":{
#"prev_dept":prev_dept}},
#dataset = "household_x_neighborhood")
#should_be = array([[1, 0, 0],
#[0, 0, 0],
#[0, 1, 0],
#[0, 0, 1]])
#self.assertEqual(ma.allclose(values, should_be, rtol=1e-20),
#True, msg = "Error in " + self.variable
|
_name)
#opus_unittest.main()
|
IntelLabs/hpat
|
examples/series/series_lt.py
|
Python
|
bsd-2-clause
| 1,748
| 0
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_lt():
s1 = pd.Series([5, 4, 3, 2, 1])
s2 = pd.Series([0, 2, 3, 6, 8])
return s1.lt(s2) # Expect series of False, False, False, True, True
print(series_lt())
|
criswell/noink
|
src/tests/test_DelEntry.py
|
Python
|
agpl-3.0
| 740
| 0.005405
|
#!/usr/bin/env python
'''
##BOILERPLATE_COPYRIGHT
##BOILERPLATE_COPYRIGHT_END
'''
import unittest, copy
from testRoot import RootClass
from noink.user_db import UserDB
from noink.entry_db import EntryDB
class AddEntry(RootClass):
def test_AddEntry(self):
userDB = UserDB()
entryDB = EntryDB()
u = userDB.add("jontest", "pass", "Jon Q. Testuser")
title = 'Little Buttercup'
entry = 'There once was a man from Nantucket,' + \
'who kept his wife in a Bucket.' + \
"Wait... how'd she fit in that bucket anyway?"
        e = entryDB.add(copy.deepcopy(title), entry, u)
self.assertTrue(e.title == title)
if __name__ == '__main__':
unittest.main()
|
b-mueller/mythril
|
tests/mythril/mythril_leveldb_test.py
|
Python
|
mit
| 2,235
| 0.003579
|
import io
import pytest
from contextlib import redirect_stdout
from mock import patch
from mythril.mythril import MythrilLevelDB, MythrilConfig
from mythril.exceptions import CriticalError
@patch("mythril.ethereum.interface.leveldb.client.EthLevelDB.search")
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_code_search(mock_leveldb, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
leveldb_search.search_db("code#PUSH#")
mock_leveldb.assert_called()
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_incorrect_input(f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
    leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
with pytest.raises(CriticalError):
leveldb_search.contract_hash_to_address("0x23")
@patch(
"mythril.ethereum.interface.leveldb.client.EthLevelDB.contract_hash_to_address",
return_value="0xddbb615cb2ffaff7233d8a6f3601621de94795e1",
)
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", ret
|
urn_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_correct_input(mock_hash_to_address, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
f = io.StringIO()
with redirect_stdout(f):
leveldb_search.contract_hash_to_address(
"0x0464e651bcc40de28fc7fcde269218d16850bac9689da5f4a6bd640fd3cdf6aa"
)
out = f.getvalue()
mock_hash_to_address.assert_called()
assert out == "0xddbb615cb2ffaff7233d8a6f3601621de94795e1\n"
|
ibackus/testdust
|
testdust/diffusion/plot.py
|
Python
|
mit
| 4,000
| 0.009
|
# -*- coding: utf-8 -*-
"""
Contains functions to plot the results of the dustydiffusion test.
@author: ibackus
"""
import matplotlib.pyplot as plt
import numpy as np
import pynbody
import diskpy
#sim, epsEstimator, ts, runpars = analyze.loadSim(simdir)
def crossSection(sim, ts, crossSectionTimes=[0, 1, 10]):
"""
Reproduces the cross-section plot of dust density of
Price & Laibe 2015, fig. 5
Note, sim and ts can be loaded with analyze.loadSim(...)
Parameters
----------
sim : list
List of SimSnaps for the simulation
ts : array-like
Snapshot times
crossSectionTimes : array-like
(optional) Sim times to plot (approximate)
"""
# Select times to plot at
crossSectionTimes = np.asarray(crossSectionTimes)
crossSectionTimes = crossSectionTimes.reshape(crossSectionTimes.size)
if np.ndim(crossSectionTimes) == 0:
crossSectionTimes = crossSectionTimes[None]
nPlots = len(crossSectionTimes)
# Plot
axs = diskpy.plot.gridplot(1, nPlots, square=True)
fig = plt.gcf()
for iPlot in range(nPlots):
ax = axs[iPlot]
iTime = abs(ts - crossSectionTimes[iPlot]).argmin()
t = ts[iTime]
f = sim[iTime]
im=pynbody.plot.sph.image(f, 'dustFrac', width=1, log=False, vmin=0,
vmax = 0.11, cmap='cubehelix_r',
show_cbar=False, subplot=ax, ret_im=True)
ax.set_xlabel('t={:.2g}'.format(float(t)))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.2, 0.05, 0.6])
fig.colorbar(im, cax=cbar_ax)
fig.set_size_inches(8.5, 3.4, forward=True)
plt.suptitle('Cross section of dust fraction in z=0 plane\n'\
'See Price & Laibe (2015)')
def dustFracProfile(sim, ts, epsEstimator,
epsPlotTimes=[0., 0.1, 0.3, 1, 3, 10], nr=200,
colorcode=True, legend=True, rasterized=True):
"""
Note, sim and ts and epsEstimator can be loaded with analyze.loadSim(...)
Parameters
----------
sim : list
List of SimSnaps for the simulation
ts : array-like
Snapshot times
epsEstimator : function
A function of (r, t) that returns the analytic estimate of the dust
fraction density profile of P&L15 dustydiffusion
epsPlotTimes : array-like
Approximate times to plot at
nr : int
Number of radial bins
colorcode : bool
Color-code the times
legend : bool
Display legend
rasterized : bool
Rasterize the dots. Useful for saving figures as vector graphics
"""
# Make plot times an array
epsPlotTimes = np.asarray(epsPlotTimes)
epsPlotTimes = epsPlotTimes.reshape(epsPlotTimes.size)
    nt = len(epsPlotTimes)
actualPlotTimes = np.zeros(nt)
title = 'plot times: '
if colorcode:
markercolor = None
else:
markercolor = 'k'
for iPlot in range(nt):
iTime = abs(ts - epsPlotTimes[iPlot]).argmin()
# Calculate stuff
f = sim[iTime]
        t = ts[[iTime]]
        actualPlotTimes[iPlot] = t
        print(t)
r = np.linspace(0, f['r'].max(), nr)
epsAnalytic = epsEstimator(r, t)
# Plot
scatter=plt.plot(f['r'], f['dustFrac'], 'o', markersize=3,
markeredgecolor='none', label='t={:.2g}'.format(float(t)),
color=markercolor, rasterized=rasterized)
line=plt.plot(r, epsAnalytic, 'r')
if colorcode:
# Make lines and points the same color
line[0].set_color(scatter[0].get_color())
title += '{:.2g}, '.format(float(t))
# Set-up plot
plt.ylim(0, 0.11)
plt.xlim(0, 0.5)
plt.ylabel('Dust fraction')
plt.xlabel('r')
if legend:
plt.legend(loc='best', markerscale=2)
plt.title(title)
|
Suwmlee/XX-Net
|
Python3/lib/ctypes/test/test_byteswap.py
|
Python
|
bsd-2-clause
| 11,726
| 0.000768
|
import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).decode().upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
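# Quick illustration of those attributes (a sketch mirroring the assertions below):
#
#   big_int = c_int.__ctype_be__       # c_int fixed to big-endian byte order
#   little_int = c_int.__ctype_le__    # c_int fixed to little-endian byte order
#   big_int(0x12345678)                # stores bytes 12 34 56 78 regardless of host order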
class Test(unittest.TestCase):
@unittest.skip('test disabled')
def test_X(self):
print(sys.byteorder, file=sys.stderr)
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_slots(self):
class BigPoint(BigEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
class LowPoint(LittleEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
big = BigPoint()
little = LowPoint()
big.x = 4
big.y = 2
little.x = 2
little.y = 4
with self.assertRaises(AttributeError):
big.z = 42
with self.assertRaises(AttributeError):
little.z = 24
def test_endian_short(self):
if sys.byteorder == "little":
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, places=6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
        self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
        self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class
|
grafeas/client-python
|
grafeas/models/discovery_discovered_details.py
|
Python
|
apache-2.0
| 3,542
| 0.000282
|
# coding: utf-8
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiscoveryDiscoveredDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operation': 'GooglelongrunningOperation'
}
attribute_map = {
'operation': 'operation'
}
def __init__(self, operation=None): # noqa: E501
"""DiscoveryDiscoveredDetails - a model defined in Swagger""" # noqa: E501
self._operation = None
self.discriminator = None
if operation is not None:
self.operation = operation
@property
def operation(self):
"""Gets the operation of this DiscoveryDiscoveredDetails. # noqa: E501
Output only. An operation that indicates the status of the current scan. # noqa: E501
        :return: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:rtype: GooglelongrunningOperation
"""
return self._operation
@operation.setter
def operation(self, operation):
"""Sets the operation of this DiscoveryDiscoveredDetails.
Output only. An operation that indicates the status of the current scan. # noqa: E501
:param operation: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:type: GooglelongrunningOperation
"""
self._operation = operation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DiscoveryDiscoveredDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscoveryDiscoveredDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
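# A minimal usage sketch, assuming nothing beyond the generated class above:
# the operation is left as None here because the real payload would be a
# GooglelongrunningOperation model defined elsewhere in the client.
def _example_discovered_details_usage():
    details = DiscoveryDiscoveredDetails(operation=None)
    other = DiscoveryDiscoveredDetails(operation=None)
    assert details == other                         # __eq__ compares __dict__
    assert details.to_dict() == {'operation': None}
    return details.to_str()                         # pprint-formatted dict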
|
quarkslab/irma
|
common/src/ftp/sftpv2.py
|
Python
|
apache-2.0
| 3,911
| 0
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import stat
import socket
from irma.common.base.exceptions import IrmaSFTPv2Error
from irma.common.ftp.ftp import FTPInterface
from ssh2.session import Session
from ssh2.sftp import LIBSSH2_FXF_CREAT, LIBSSH2_FXF_WRITE,\
LIBSSH2_SFTP_S_IRUSR, LIBSSH2_SFTP_S_IWUSR,\
LIBSSH2_SFTP_S_IRGRP, LIBSSH2_SFTP_S_IROTH,\
LIBSSH2_SFTP_S_IXUSR
class IrmaSFTPv2(FTPInterface):
"""Irma SFTPv2 handler
This class handles the connection with a sftp server
functions for interacting with it.
"""
_Exception = IrmaSFTPv2Error
# ==================================
# Constructor and Destructor stuff
# ==================================
def __init__(self, host, port, auth, key_path, user, passwd,
dst_user=None, upload_path='uploads', hash_check=False,
autoconnect=True):
self._sess = None
self._client = None
super().__init__(host, port, auth, key_path, user, passwd, dst_user,
upload_path, hash_check, autoconnect)
def connected(self):
return self._sess is not None
# ============================
# Overridden private methods
# ============================
def _connect(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._host, self._port))
self._sess = Session()
self._sess.handshake(sock)
if self._auth == 'key':
# self._pubkey_path must be generated from private key
# s.userauth_publickey_fromfile(self._user, self._pubkey_path,
# self._key_path, '')
raise IrmaSFTPv2Error("Pub key authentication not implemented")
else:
self._sess.userauth_password(self._user, self._passwd)
self._client = self._sess.sftp_init()
def _disconnect(self, *, force=False):
self._client = None
if not force:
self._sess.disconnect()
self._sess = None
def _upload(self, remote, fobj):
mode = LIBSSH2_SFTP_S_IRUSR | LIBSSH2_SFTP_S_IWUSR | \
LIBSSH2_SFTP_S_IRGRP | LIBSSH2_SFTP_S_IROTH
opt = LIBSSH2_FXF_CREAT | LIBSSH2_FXF_WRITE
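        # Stream the local file object to the remote path in 1 MiB chunks;
        # iter() with the b"" sentinel stops the loop at end of file.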
with self._client.open(remote, opt, mode) as rfh:
for chunk in iter(lambda: fobj.read(1024*1024), b""):
rfh.write(chunk)
def _download(self, remote, fobj):
with self._client.open(remote, 0, 0) as rfh:
for size, data in rfh:
fobj.write(data)
def _ls(self, remote):
with self._client.opendir(remote) as rfh:
paths = (p[1].decode('utf-8') for p in rfh.readdir())
return [p for p in paths if p not in ['.', '..']]
def _is_file(self, remote):
return not self._is_dir(remote)
def _is_dir(self, remote):
st = self._client.stat(remote)
return stat.S_ISDIR(st.st_mode)
def _rm(self, remote):
self._client.unlink(remote)
def _rmdir(self, remote):
self._client.rmdir(remote)
def _mkdir(self, remote):
mode = LIBSSH2_SFTP_S_IRUSR | \
LIBSSH2_SFTP_S_IWUSR | \
LIBSSH2_SFTP_S_IXUSR
self._client.mkdir(remote, mode)
def _mv(self, oldremote, newremote):
self._client.rename(oldremote, newremote)
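# A wiring sketch, assuming password authentication, a reachable server, and
# that autoconnect=False leaves opening the connection to the caller; the
# host, credentials and upload path are placeholders.  Real transfers go
# through the public helpers of the FTPInterface base class, which are not
# shown in this file, so only the pieces defined above are exercised here.
def _example_sftpv2_setup():
    ftp = IrmaSFTPv2("sftp.example.com", 22, "password", None,
                     "irma", "secret", upload_path="uploads",
                     autoconnect=False)
    ftp._connect()   # opens the TCP socket, SSH session and SFTP channel
    return ftp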
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Applications/ParaView/Testing/Python/CTHAMRClip.py
|
Python
|
gpl-3.0
| 1,836
| 0.003813
|
#!/usr/bin/env python
import QtTesting
import QtTestingImage
object1 = 'pqClientMainWindow/MainControlsToolbar/actionOpenData'
QtTesting.playCommand(object1, 'activate', '')
object2 = 'pqClientMainWindow/FileOpenDialog'
QtTesting.playCommand(object2, 'filesSelected', '$PARAVIEW_DATA_ROOT/SPCTH/Dave_Karelitz_Small/spcth_a')
object3 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CellArrayStatus/1QHeaderView0'
QtTesting.playCommand(object3, 'mousePress', '1,1,0,0,0,0')
QtTesting.playCommand(object3, 'mouseRelease', '1,0,0,0,0,0')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object4, 'activate', '')
object5 = 'pqClientMainWindow/representationToolbar/displayRepresentation/comboBox'
QtTesting.playCommand(object5, 'set_string', 'Surface')
object6 = 'pqClientMainWindow/variableToolbar/displayColor/Variables'
QtTesting.playCommand(object6, 'set_string', 'Pressure (dynes/cm^2^)')
object7 = 'pqClientMainWindow/cameraToolbar/actionPositiveX'
QtTesting.playCommand(object7, 'activate', '')
object8 = 'pqClientMainWindow/menubar/menuFilters/pqProxyGroupMenuManager0/Cut'
QtTesting.playCommand(object8, 'activate', '')
QtTesting.playCommand(object4, 'activate', '')
object9 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CutFunction/pqImplicitPlaneWidget/show3DWidget'
QtTesting.playCommand(object9, 'set_boolean', 'false')
# DO_IMAGE_COMPARE
snapshotWidget = 'pqClientMainWindow/centralwidget/MultiViewWidget/CoreWidget/qt_tabwidget_stackedwidget/MultiViewWidget1/Frame.0/Viewport'
QtTestingImage.compareImage(snapshotWidget, 'CTHAMRClip.png', 300, 300)
|
weichen2046/IntellijPluginDevDemo
|
enterprise-repo/enterprepo/pluginrepo/apps.py
|
Python
|
apache-2.0
| 160
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PluginrepoConfig(AppConfig):
name = 'pluginrepo'
|
FancyRice/RitoAPI
|
ritoapi/tests/test_stress.py
|
Python
|
mit
| 655
| 0.003053
|
from ritoapi.endpoints.match_v3 import MatchV3
import threading
def _load_matches(match_v3, sample_region, sample_match_id, count):
for i in range(count):
data = match_v3.matches(sample_region, sample_match_id)
assert(data['gameId'] == sample_match_id)
def test_matches_stress(sample_api_key, sample_rate_limit, sample_region, sample_match_id):
    match_v3 = MatchV3(sample_api_key, sample_rate_limit)
threads = []
for i in range(10):
t = threading.Thread(target=_load_matches, args=(match_v3, sample_region, sample_match_id, 20))
threads.append(t)
t.start()
for t in threads:
t.join()
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/scheduler.py
|
Python
|
isc
| 4,153
| 0.001445
|
from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
        self._thr.daemon = True
self._started = False
def start(self):
"""
        Start the scheduler which will start the internal thread for scheduling
jobs. Please do tear_down when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
        Stop the scheduler which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
                    log.logger.warn("Scheduler saturation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
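# A hypothetical job sketch, inferred only from the calls the scheduler above
# makes on its jobs (get_expiration, get_interval, update_expiration,
# set_initial_due_time, stop/stopped, dict-style get and __call__) and from
# the fact that jobs live in a SortedSet, which needs an ordering.  The real
# job type shipped with this library may look different.
class _ExampleJob(object):
    def __init__(self, func, interval, priority=0):
        self._func = func
        self._interval = interval   # 0 means "run once"
        self.priority = priority
        self._when = 0.0
        self._stopped = False

    def __call__(self):
        self._func()

    def __lt__(self, other):
        # The SortedSet keeps jobs ordered by due time.
        return self.get_expiration() < other.get_expiration()

    def get(self, key, default=None):
        # get_ready_jobs() sorts ready jobs with job.get("priority", 0).
        return getattr(self, key, default)

    def get_interval(self):
        return self._interval

    def get_expiration(self):
        return self._when

    def set_initial_due_time(self, when):
        self._when = when

    def update_expiration(self):
        self._when += self._interval

    def stop(self):
        self._stopped = True

    def stopped(self):
        return self._stopped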
|
khalibartan/pgmpy
|
pgmpy/tests/test_factors/test_discrete/test_Factor.py
|
Python
|
mit
| 56,244
| 0.004054
|
import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as np_test
from pgmpy.extern.six.moves import range
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
from pgmpy.factors import factor_divide
from pgmpy.factors import factor_product
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
class TestFactorInit(unittest.TestCase):
def test_class_init(self):
phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
self.assertEqual(phi.variables, ['x1', 'x2', 'x3'])
np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
np_test.assert_array_equal(phi.values, np.ones(8).reshape(2, 2, 2))
def test_class_init1(self):
phi = DiscreteFactor([1, 2, 3], [2, 3, 2], np.arange(12))
self.assertEqual(phi.variables, [1, 2, 3])
np_test.assert_array_equal(phi.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(phi.values, np.arange(12).reshape(2, 3, 2))
def test_class_init_sizeerror(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))
def test_class_init_typeerror(self):
self.assertRaises(TypeError, DiscreteFactor, 'x1', [3], [1, 2, 3])
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))
def test_init_size_var_card_not_equal(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2'], [2], np.ones(2))
class TestFactorMethods(unittest.TestCase):
def setUp(self):
self.phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
self.phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
self.phi2 = DiscreteFactor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
        # This larger factor (phi3) caused a bug in reduce
card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
self.phi3 = DiscreteFactor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
card3, np.arange(np.prod(card3), dtype=np.float))
self.tup1 = ('x1', 'x2')
self.tup2 = ('x2', 'x3')
self.tup3 = ('x3', (1, 'x4'))
        self.phi4 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], np.random.uniform(3, 10, size=24))
self.phi5 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], range(24))
self.card6 = [4, 2, 1, 3, 5, 6]
self.phi6 = DiscreteFactor([self.tup1, self.tup2, self.tup3, self.tup1 + self.tup2,
self.tup2 + self.tup3, self.tup3 + self.tup1], self.card6,
np.arange(np.prod(self.card6), dtype=np.float))
self.var1 = 'x1'
self.var2 = ('x2', 1)
self.var3 = frozenset(['x1', 'x2'])
self.phi7 = DiscreteFactor([self.var1, self.var2], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi8 = DiscreteFactor([self.var2, self.var3], [2, 2], [2, 1, 5, 6])
self.phi9 = DiscreteFactor([self.var1, self.var3], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi10 = DiscreteFactor([self.var3], [2], [3, 6])
def test_scope(self):
self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi4.scope(), [self.tup1, self.tup2, self.tup3])
def test_assignment(self):
self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
[('x1', 1), ('x2', 0), ('x3', 1)],
[('x1', 1), ('x2', 1), ('x3', 0)]])
self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])), [[('x1', 0), ('x2', 2), ('x3', 0)],
[('x1', 0), ('x2', 2), ('x3', 1)],
[('x1', 1), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi4.assignment(np.array([11, 12, 23])),
[[(self.tup1, 0), (self.tup2, 2), (self.tup3, 3)],
[(self.tup1, 1), (self.tup2, 0), (self.tup3, 0)],
[(self.tup1, 1), (self.tup2, 2), (self.tup3, 3)]])
def test_assignment_indexerror(self):
self.assertRaises(IndexError, self.phi.assignment, [10])
self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
self.assertRaises(IndexError, self.phi4.assignment, [2, 24])
self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30]))
def test_get_cardinality(self):
self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
self.assertEqual(self.phi4.get_cardinality([self.tup1, self.tup3]),
{self.tup1: 2, self.tup3: 4})
def test_get_cardinality_scopeerror(self):
self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2, 'x4'))])
def test_get_cardinality_typeerror(self):
self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')
def test_marginalize(self):
self.phi1.marginalize(['x1'])
np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
[10, 12],
[14, 16]]))
self.phi1.marginalize(['x2'])
np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
self.phi1.marginalize(['x3'])
np_test.assert_array_equal(self.phi1.values, np.array(66))
self.phi5.marginalize([self.tup1])
np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18],
[20, 22, 24, 26],
[28, 30, 32, 34]]))
self.phi5.marginalize([self.tup2])
np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78]))
self.phi5.marginalize([self.tup3])
np_test.assert_array_equal(self.phi5.values, np.array([276]))
def test_marginalize_scopeerror(self):
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.phi.marginalize(['x1'])
self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')])
self.phi4.marginalize([self.tup2])
self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2])
def test_marginalize_typeerror(self):
self.assertRaises(TypeError, self.phi.marginalize, 'x1')
def test_marginalize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_mar = self.phi3.marginalize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality)
phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False)
np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality)
self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inp
|
albertz/music-player
|
src/Events.py
|
Python
|
bsd-2-clause
| 2,934
| 0.039877
|
from collections import deque
from threading import RLock, Condition, currentThread
import sys
import time
class OnRequestQueue:
ListUsedModFunctions = ("append", "popleft")
class QueueEnd:
def __init__(self, queueList=None):
if queueList is not None:
self.q = queueList
else:
self.q = deque()
self.cond = Condition()
self.cancel = False
def __repr__(self):
			with self.cond:
return "<QueueEnd %r>" % self.q
def put(self, item):
with self.cond:
if self.cancel: return False
self.q.append(item)
self.cond.notifyAll()
def setCancel(self):
with self.cond:
self.cancel = True
self.cond.notifyAll()
def __init__(self):
self.queues = set()
def put(self, item):
for q in list(self.queues):
q.put(item)
def cancelAll(self):
for q in list(self.queues):
q.setCancel()
	def read(self, *otherQueues, **kwargs):
q = self.QueueEnd(**kwargs)
thread = currentThread()
thread.waitQueue = q
if thread.cancel:
# This is to avoid a small race condition for the case
# that the thread which wants to join+cancel us was faster
			# and didn't get the waitQueue. In that case, it would
# have set the cancel already to True.
return
for reqqu in otherQueues: assert(isinstance(reqqu, OnRequestQueue))
reqQueues = (self,) + otherQueues
for reqqu in reqQueues: reqqu.queues.add(q)
while True:
with q.cond:
# Note on cancel-behavior:
# Earlier, we always still yielded all left items in the queue
# before breaking out here. This behavior doesn't fit if you
# want to cancel as fast as possible and when you have a
# persistent queue anyway - while you might hang at some entry.
if q.cancel: break
l = list(q.q)
if not l:
q.cond.wait()
for item in l:
if q.cancel: break
yield item
with q.cond:
popitem = q.q.popleft()
assert popitem is item
for reqqu in reqQueues: reqqu.queues.remove(q)
class EventCallback:
def __init__(self, targetQueue, name=None, reprname=None, extraCall=None):
self.targetQueue = targetQueue
self.name = name
self.reprname = reprname
self.extraCall = extraCall
def __call__(self, *args, **kwargs):
if not "timestamp" in kwargs:
kwargs["timestamp"] = time.time()
if self.extraCall:
self.extraCall(*args, **kwargs)
self.targetQueue.put((self, args, kwargs))
def __repr__(self):
if self.reprname:
return self.reprname
else:
return "<EventCallback %s>" % self.name
class Event:
def __init__(self):
self.lock = RLock()
self.targets = []
def push(self, *args):
with self.lock:
targets = self.targets
for weakt in targets:
t = weakt() # resolve weakref
if t: t(*args)
else: self.targets.remove(weakt)
def register(self, target):
assert sys.getrefcount(target) > 1, "target will be weakrefed, thus we need more references to it"
import weakref
with self.lock:
self.targets.append(weakref.ref(target))
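# A minimal usage sketch, assuming the caller keeps a strong reference to the
# registered callback: Event.register() only stores a weakref, so a temporary
# lambda or bound method would be dropped as soon as it goes out of scope.
def _exampleEventWiring():
	queue = OnRequestQueue()
	onSong = EventCallback(queue, name="onSong")
	ev = Event()
	ev.register(onSong)     # onSong must stay referenced by the caller
	ev.push("some-song")    # forwards to onSong, which enqueues (cb, args, kwargs)
	return ev, onSong, queue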
|
weylin/CloudBot
|
plugins/google.py
|
Python
|
gpl-3.0
| 1,653
| 0.00242
|
import random
from cloudbot.util import http, formatting
def api_get(kind, query):
"""Use the RESTful Google Search API"""
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=moderate'
return http.get_json(url % kind, q=query)
# @hook.command("googleimage", "gis", "image")
def googleimage(text):
"""<query> - returns the first google image result for <query>"""
parsed = api_get('images', text)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
# @hook.command("google", "g", "search")
def google(text):
"""<query> - returns the first google search result for <query>"""
parsed = api_get('web', text)
if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
        return 'No results found.'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
title = formatting.truncate_str(title, 60)
content = http.unescape(result['content'])
if not content:
content = "No description available."
else:
content = http.html.fromstring(content).text_content()
content = formatting.truncate_str(content, 150).replace('\n', '')
return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
|
svaarala/duktape
|
util/filter_test262_log.py
|
Python
|
mit
| 3,455
| 0.002894
|
#!/usr/bin/env python2
import os
import sys
import json
import yaml
def main():
with open(sys.argv[1], 'rb') as f:
known_issues = yaml.safe_load(f.read())
skipstrings = [
'passed in strict mode',
'passed in non-strict mode',
'failed in strict mode as expected',
'failed in non-strict mode as expected'
]
in_failed_tests = False
tofix_count = 0 # count of bugs that will be fixed (no uncertainty about proper behavior etc)
known_errors = []
diagnosed_errors = []
unknown_errors = []
other_errors = []
for line in sys.stdin:
if len(line) > 1 and line[-1] == '\n':
line = line[:-1]
# Skip success cases
skip = False
for sk in skipstrings:
if sk in line:
skip = True
if skip:
continue
# Augment error list with "known bugs"
print(line) # print error list as is, then refined version later
if 'failed tests' in line.lower():
in_failed_tests = True
continue
if in_failed_tests and line.strip() == '':
in_failed_tests = False
continue
if in_failed_tests:
# " intl402/ch12/12.2/12.2.3_c in non-strict mode"
tmp = line.strip().split(' ')
test = tmp[0]
matched = False
for kn in known_issues:
if kn.get('test', None) != test:
continue
if kn.has_key('diagnosed'):
tofix_count += 1
                    diagnosed_errors.append(line + ' // diagnosed: ' + kn['diagnosed'])
elif kn.has_key('knownissue'):
                    # Don't bump tofix_count, as testcase expected result is not certain
known_errors.append(line + ' // KNOWN: ' + kn['knownissue'])
else:
tofix_count += 1
unknown_errors.append(line + ' // ??? (rule matches)')
kn['used'] = True # mark rule used
matched = True
break
if matched:
continue
# no match, to fix
other_errors.append(line)
tofix_count += 1
print('')
print('=== CATEGORISED ERRORS ===')
print('')
# With ES2015+ semantic changes to ES5 there are too many known
# issues to print by default.
#for i in known_errors:
# print(i)
for i in diagnosed_errors:
print(i)
for i in unknown_errors:
print(i)
for i in other_errors:
print(i)
# Check for unused rules (e.g. bugs fixed)
print('')
for kn in known_issues:
if not kn.has_key('used'):
print('WARNING: unused rule: ' + json.dumps(kn))
# Used by testclient
if len(unknown_errors) > 0 or len(other_errors) > 0:
print('TEST262 FAILED')
elif len(known_errors) > 0 or len(diagnosed_errors) > 0:
# Known and diagnosed errors don't indicate test failure
# as far as GitHub status is concerned.
print('TEST262 SUCCESS')
else:
print('TEST262 SUCCESS')
# To fix count
print('')
print('KNOWN ISSUE COUNT: ' + str(len(known_errors)))
print('TO-FIX COUNT: ' + str(tofix_count))
print(' = test case failures which need fixing (Duktape bugs, uninvestigated)')
if __name__ == '__main__':
main()
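# A sketch of the known-issues YAML this filter expects, based only on the
# keys read above ('test' plus either 'diagnosed' or 'knownissue'); the test
# names and explanations below are placeholders, not real Duktape entries.
#
#   - test: intl402/ch12/12.2/12.2.3_c
#     diagnosed: "Intl is not supported, case fails during setup"
#   - test: ch15/15.4/15.4.4/example_case
#     knownissue: "expected result is unclear under ES2015+ semantics"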
|
the-zebulan/CodeWars
|
tests/kyu_6_tests/test_longest_2_character_substring.py
|
Python
|
mit
| 1,191
| 0
|
import unittest
from katas.kyu_6.longest_2_character_substring import substring
class SubstringTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(substring(''), '')
def test_equals_2(self):
self.assertEqual(substring('a'), 'a')
def test_equals_3(self):
self.assertEqual(substring('aa'), 'aa')
def test_equals_4(self):
self.assertEqual(substring('aaa'), 'aaa')
def test_equals_5(self):
self.assertEqual(substring('ab'), 'ab')
def test_equals_6(self):
self.assertEqual(substring('aba'), 'aba')
def test_equals_7(self):
self.assertEqual(substring('abc'), 'ab')
def test_equals_8(self):
self.assertEqual(substring('abacd'), 'aba')
def test_equals_9(self):
self.assertEqual(substring('abcba'), 'bcb')
def test_equals_10(self):
self.assertEqual(substring('bbacc'), 'bba')
def test_equals_11(self):
self.assertEqual(substring('ccddeeff'), 'ccdd')
def test_equals_12(self):
self.assertEqual(substring('abacddcd'), 'cddcd')
def test_equals_13(self):
self.assertEqual(substring('cefageaacceaccacca'), 'accacca')
|
portfors-lab/sparkle
|
test/tests/gui/__init__.py
|
Python
|
gpl-3.0
| 599
| 0.013356
|
import sip
sip.setdestroyonexit(0)
import os, shutil
import numpy as np
from sparkle.QtWrapper import QtGui
tempfolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), u"tmp")
app = None
# executes once before all tests
def setup():
if not os.path.exists(tempfolder):
os.mkdir(tempfolder)
np.warnings.filterwarnings('ignore', "All-NaN axis encountered", RuntimeWarning)
global app
app = QtGui.QApplication([])
def teardown():
    shutil.rmtree(tempfolder, ignore_errors=True)
np.warnings.resetwarnings()
global app
app.exit(0)
app = None
|
phborba/dsgtoolsop
|
numericalVertexEdit/resources.py
|
Python
|
gpl-2.0
| 7,700
| 0.000649
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.7.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x05\x96\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x18\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x47\x49\x53\x20\x69\x63\x6f\x6e\
\x20\x74\x68\x65\x6d\x65\x20\x30\x2e\x32\xee\x53\xa0\xa0\x00\x00\
\x00\x18\x74\x45\x58\x74\x41\x75\x74\x68\x6f\x72\x00\x52\x6f\x62\
\x65\x72\x74\x20\x53\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x5f\x56\
\xb1\x08\x00\x00\x00\x27\x74\x45\x58\x74\x44\x65\x73\x63\x72\x69\
\x70\x74\x69\x6f\x6e\x00\x68\x74\x74\x70\x3a\x2f\x2f\x72\x6f\x62\
\x65\x72\x74\x2e\x73\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x2e\x70\
\x6c\x90\x59\x48\x60\x00\x00\x00\x18\x74\x45\x58\x74\x43\x72\x65\
\x61\x74\x69\x6f\x6e\x20\x54\x69\x6d\x65\x00\x32\x30\x30\x38\x2d\
\x31\x32\x2d\x31\x32\x58\x2e\x3b\xbf\x00\x00\x00\x52\x74\x45\x58\
\x74\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x00\x43\x43\x20\x41\x74\
\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x2d\x53\x68\x61\x72\x65\x41\
\x6c\x69\x6b\x65\x20\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\
\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\
\x6c\x69\x63\x65\x6e\x73\x65\x73\x2f\x62\x79\x2d\x73\x61\x2f\x33\
\x2e\x30\x2f\x5e\x83\x5a\xbc\x00\x00\x04\x16\x49\x44\x41\x54\x48\
\x89\x95\x93\x6d\x6c\x53\x55\x18\xc7\x7f\xf7\xde\xde\x7b\xdb\xb5\
\xb7\x4c\xde\xc6\x14\x37\x32\x12\x08\x0b\x64\xf8\x01\xb2\xb8\xad\
\xc9\x3e\x30\x12\x83\xd9\x10\x05\xb7\x25\x0c\x21\x42\x0c\x0c\x48\
\xcc\x28\x16\x0d\x01\x92\x45\x8a\x1f\xe4\x25\x82\xa0\x4b\xc0\x28\
\xcc\x28\x26\xc2\x17\x08\xfb\x00\x63\xc9\xfa\xc1\xc0\x4c\xd4\xac\
\xdb\x9c\x61\x71\x05\x9c\xb3\x8c\x5b\xa0\x5d\x6f\x8f\x1f\x48\x47\
\x5b\xee\xdc\xfc\x27\x27\xf7\xe5\xf9\x9f\xf3\x3b\xcf\xf3\x9c\x23\
\xbd\xb5\x61\xdd\x25\x24\x0a\x99\x4a\x82\xc8\xb7\x17\xbe\x7b\x7d\
\x4a\x5f\x8e\x1c\x48\x14\xbe\xdd\xb0\x01\xc3\x6d\x00\xf0\x30\xf6\
\x90\x0b\xdf\xb4\x63\xf3\x6f\xea\x4d\xd8\x02\x60\x62\xa1\x47\x4f\
\xc6\x67\x27\x92\xca\x3c\x21\x39\xee\x1a\x6e\x63\x24\x6d\x4a\xc7\
\x27\xd3\x9a\x1d\x07\x36\x63\x59\x01\x14\xa5\xf5\xf2\x89\xfd\x6d\
\x99\x31\x39\xf3\xe3\x71\x7c\x7c\xde\xec\x39\x73\x74\x21\x29\xff\
\x6f\xb7\x96\xb5\xbf\x65\x4f\xcb\x42\x2c\x2b\x90\x1b\x92\xe1\x69\
\x09\x00\x5c\xba\x7a\xf7\xf7\xc1\x41\x00\x69\xcc\x1c\x93\xd2\xa6\
\x74\xdc\x4e\xd5\xd5\x07\x1c\xe3\x56\xd2\x71\xf8\xe3\xc3\xa0\x28\
\xad\xb9\x71\x07\x82\x48\x46\x7d\x47\xc6\x51\x8b\x9d\x4e\x5d\x39\
\x7f\xfe\xfb\x17\x65\xac\x3f\x27\x9c\x82\x88\x1d\x40\x29\x36\x0f\
\xce\x9f\xbf\x60\x46\xb8\x37\x4c\xe7\xd7\x47\xdb\x9e\x33\x08\x21\
\xb2\x46\x65\xc3\x4e\x71\x2d\x3c\x2a\x56\x6d\xf1\xc7\x2a\x1a\x9b\
\xcb\x73\xe3\x99\xa3\xa2\xb1\xb9\x7c\xd5\x16\x7f\xec\x5a\x78\x54\
\x54\x36\xec\x14\x76\x9e\xac\x1e\xac\xd9\x71\x60\xb3\xe1\x31\xe8\
\x1f\x18\xa0\xbe\xbe\x3e\xcf\xa9\xea\x17\xab\xd7\x6f\xf7\xd8\x96\
\x66\xfd\x76\x8f\x53\xd5\x2f\xd6\xd7\xd7\xe7\xf5\x0f\x0c\x60\x78\
\x8c\xa7\xcd\xce\x51\x16\x00\xcb\x0a\xf8\xf7\xfa\xe9\xbc\x7e\x83\
\xd2\xd2\x52\xca\xca\x96\xe7\x2b\x86\xfb\x94\x6d\x69\x0c\xf7\xa9\
\xb2\xb2\xe5\xf9\xa5\xa5\xa5\x74\x5e\xbf\x81\x7f\xaf\x9f\x49\x9b\
\xfc\x6c\x96\xd2\x1a\x0c\x06\x1f\x18\x5e\x4f\x12\xa0\x6e\x6d\x9d\
\xcb\xa9\xeb\x75\xbe\xc6\x5d\xb5\x99\x36\x5f\xe3\xae\x5a\xa7\xae\
\xd7\xd5\xad\xad\x73\x01\x18\x5e\x4f\x32\x18\x0c\x3e\xb0\x6b\x72\
\x16\xe0\xf2\x89\xfd\x6d\xd1\xd8\xc8\xa2\x70\xb8\x2f\x61\x9a\x26\
\x9a\xa6\xd1\xd4\xb4\xc9\xad\x6a\xda\xd9\xf2\x86\xdd\x05\x00\xe5\
\x0d\xbb\x0b\x54\x4d\x3b\xdb\xd4\xb4\xc9\xad\x69\x1a\xa6\x69\x12\
\x0e\xf7\x25\xa2\xb1\x91\x45\xb9\x77\xe0\xf9\x0c\x80\xae\x73\x27\
\xef\xcb\x92\xdc\xd6\xd1\xd1\x11\x07\x28\x2a\x2a\xc2\xe7\xab\xca\
\x33\x9c\x7a\xbb\x24\x49\x92\xe1\xd4\xdb\x7d\xbe\xaa\xbc\xa2\xa2\
\x22\x00\x3a\x3a\x3a\xe2\xb2\x24\xb7\x75\x9d\x3b\x79\xdf\xae\x94\
\xcf\x01\x00\x1e\x25\xc7\x0e\x85\x42\x21\xcb\x34\x4d\x00\x6a\x6a\
\x56\xab\x86\xd7\x5b\xfe\xda\xb6\x7d\x31\xc3\xeb\x2d\xaf\xa9\x59\
\x
ad\x02\x98\xa6\x49\x28\x14\xb2\x1e\x25\xc7\x0e\xd9\xad\x03\x20\
\x09\x21\x6c\x03\xab\x36\xfb\x8f\xaf\x58\xb9\xe2\xdd\xda\xda\x5a\
\x1d\xe0\xd8\xd1\x63\x6c\xdd\xb6\x95\xd3\x9f\x9f\xa6\xf9\x9d\x4a\
\x52\x7d\x1f\x91\xf8\xbb\x0b\x91\x4a\x24\x34\x39\xf9\xd8\x66\x89\
\x90\x80\xc0\xa4\x80\x8a\x8d\xef\xcd\x75\x2a\x9e\xc1\x40\x20\x90\
\xe7\xf1\x78\xb8\xdd\xd3\xc3\xcd\xce\x4e\x2a\xab\xaa\x58\x16\xdf\
\x8d\x14\xfb\x19\x97\x51\x82\x2c\x3b\x91\x15\x27\x92\xa2\x21\xac\
\x04\x42\x58\xcc\x5a\xd8\xc8\x9d\x9f
\x3e\xc0\x4a\x44\x6f\x4e\x0a\
\xb0\xcb\x22\xad\xe4\x55\x0d\x63\x6e\x05\x0e\xed\x85\x2c\xbf\xaa\
\xcf\xa6\x70\xe9\xfb\xb8\xf2\x97\x32\x32\xf0\x15\xfd\x37\x37\xda\
\xf7\x20\xad\xdc\x5e\x64\x4a\x76\x64\xdf\x3f\xe7\x8c\xc5\xcc\x7f\
\xe5\x20\xae\xfc\xa5\xc4\xcd\x41\x46\x87\x2e\x3d\xf5\xfd\x17\x20\
\xf7\x44\x65\x01\x64\x75\xe2\xdd\x53\xe0\xe3\xa5\x65\x01\x34\xf7\
\xcb\x24\xe3\xa3\xdc\xfd\xf5\x18\xc2\x7a\x3c\x35\xc0\x2e\x8b\xdc\
\x6c\xbc\xf3\xaa\x29\x5c\xd2\x8c\x43\x9f\x49\xca\x8a\xf3\x57\xdf\
\x97\x3c\x79\xd8\xff\x6c\x23\x53\x01\x72\xb3\x48\x3f\xa3\xc3\x57\
\x70\xcf\x59\x49\x74\xf8\x2a\xff\x0c\xfd\x08\x08\x22\xbf\x7c\xc2\
\x9d\x5b\xfb\x88\x0e\x5f\x21\x3a\x7c\x65\x7a\x80\xcc\x2c\x22\x91\
\x08\xa1\x50\xc8\x02\x40\xa4\x98\x55\xfc\x26\x25\xaf\x9e\xe6\x5e\
\xef\x29\x06\xbb\x77\x30\x74\xeb\x43\x44\x6a\x7c\x62\x4c\x1b\xd0\
\x75\xee\xe4\x7d\x4d\xd5\xbb\xbf\x38\x73\x06\x4d\xd5\xbb\x01\x66\
\x2d\x58\x8f\x6b\xc6\x12\x24\x49\x61\x66\xf1\x1b\xdc\xeb\xfd\xcc\
\x76\xee\xb4\x00\x00\x8a\x22\x97\xb4\xec\xd9\x83\xa2\xc8\x25\x48\
\xf4\xa8\xae\x02\x06\xbb\xb7\x73\xfb\x87\x45\xfc\x11\x6a\xb6\x9f\
\x24\xd1\xe3\x98\x2e\x00\x45\x39\x74\x24\x78\x24\x80\xa2\xb4\x92\
\x62\x28\xf2\xdb\xa7\xc7\x11\x2c\x9e\xd4\x2f\xd1\x4b\x8a\x96\x7f\
\x01\xb3\x71\xdb\xcb\x12\x7d\x31\x70\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x08\
\x09\xcb\x6f\x53\
\x00\x44\
\x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\
\x00\x0a\
\x0b\x6f\x47\xe0\
\x00\x44\
\x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\x00\x4f\x00\x70\
\x00\x0d\
\x01\xed\x72\x73\
\x00\x4d\
\x00\x69\x00\x6c\x00\x69\x00\x74\x00\x61\x00\x72\x00\x79\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\
\x00\x13\
\x0c\xc0\x02\x64\
\x00\x6e\
\x00\x75\x00\x6d\x00\x65\x00\x72\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x56\x00\x65\x00\x72\x00\x74\x00\x65\x00\x78\x00\x45\x00\x64\
\x00\x69\x00\x74\
\x00\x18\
\x0a\x0d\x3f\x47\
\x00\x76\
\x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x65\x00\x64\x00\x69\x00\x74\x00\x2d\x00\x6b\x00\x65\x00\x79\x00\x62\x00\x6f\
\x00\x61\x00\x72\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x2a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x44\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\
\x00\x00\x00\x64\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
maikodaraine/EnlightenmentUbuntu
|
bindings/python/python-efl/tests/evas/test_04_object_box.py
|
Python
|
unlicense
| 1,720
| 0.001163
|
#!/usr/bin/env python
from efl import evas
import unittest
class TestBoxBasics(unittest.TestCase):
def setUp(self):
self.canvas = evas.Canvas(method="buffer",
size=(400, 500),
viewport=(0, 0, 400, 500))
self.canvas.engine_info_set(self.canvas.engine_info_get())
def tearDown(self):
self.canvas.delete()
def testConstructor(self):
box = evas.Box(self.canvas)
self.assertEqual(type(box), evas.Box)
box.delete()
def testConstructorBaseParameters(self):
size = (20, 30)
pos = (40, 50)
geometry = (60, 70, 80, 90)
color = (110, 120, 130, 140)
# create box using size/pos
box1 = evas.Box(self.canvas, name="box1", color=color, size=size, pos=pos)
self.assertEqual(box1.name, "box1")
self.assertEqual(box1.color, color)
self.assertEqual(box1.size, size)
self.assertEqual(box1.pos, pos)
box1.delete()
        # create box2 using geometry
box2 = evas.Box(self.canvas, name="box2", color=color, geometry=geometry)
self.assertEqual(box2.name, "box2")
self.assertEqual(box2.color, color)
self.assertEqual(box2.geometry, geometry)
box2.delete()
def testRemoveAll(self):
box = evas.Box(self.canvas)
r1 = evas.Rectangle(self.canvas)
r2 = evas.Rectangle(self.canvas)
box.append(r1)
box.append(r2)
box.remove_all(True)
self.assertEqual(r1.is_deleted(), True)
        self.assertEqual(r2.is_deleted(), True)
box.delete()
if __name__ == '__main__':
unittest.main(verbosity=2)
evas.shutdown()
|
hagabbar/pycbc_copy
|
pycbc/types/array.py
|
Python
|
gpl-3.0
| 33,597
| 0.004643
|
# Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a device-independent Array class based on PyCUDA and Numpy.
"""
BACKEND_PREFIX="pycbc.types.array_"
import h5py
import os as _os
from decorator import decorator
import lal as _lal
import numpy as _numpy
from numpy import float32, float64, complex64, complex128, ones
from numpy.linalg import norm
import pycbc.scheme as _scheme
from pycbc.scheme import schemed, cpuonly
from pycbc.types.aligned import ArrayWithAligned
#! FIXME: the uint32 datatype has not been fully tested,
# we should restrict any functions that do not allow an
# array of uint32 integers
_ALLOWED_DTYPES = [_numpy.float32, _numpy.float64, _numpy.complex64,
_numpy.complex128, _numpy.uint32, _numpy.int32, _numpy.int]
try:
_ALLOWED_SCALARS = [int, long, float, complex] + _ALLOWED_DTYPES
except NameError:
_ALLOWED_SCALARS = [int, float, complex] + _ALLOWED_DTYPES
def _convert_to_scheme(ary):
if not isinstance(ary._scheme, _scheme.mgr.state.__class__):
converted_array = Array(ary, dtype=ary._data.dtype)
ary._data = converted_array._data
ary._scheme = _scheme.mgr.state
@decorator
def _convert(fn, self, *args):
# Convert this array to the current processing scheme
_convert_to_scheme(self)
return fn(self, *args)
@decorator
def _nocomplex(fn, self, *args):
if self.kind == 'real':
return fn(self, *args)
else:
raise TypeError( fn.__name__ + " does not support complex types")
@decorator
def _noreal(fn, self, *args):
if self.kind == 'complex':
return fn(self, *args)
else:
raise TypeError( fn.__name__ + " does not support real types")
def force_precision_to_match(scalar, precision):
if _numpy.iscomplex(scalar):
        if precision == 'single':
return _numpy.complex64(scalar)
else:
return _numpy.complex128(scalar)
else:
        if precision == 'single':
return _numpy.float32(scalar)
else:
return _numpy.float64(scalar)
def common_kind(*dtypes):
for dtype in dtypes:
        if dtype.kind == 'c':
return dtype
return dtypes[0]
@schemed(BACKEND_PREFIX)
def _to_device(array):
""" Move input to device """
@schemed(BACKEND_PREFIX)
def _copy_base_array(array):
""" Copy a backend array"""
@schemed(BACKEND_PREFIX)
def _scheme_matches_base_array(array):
""" Check that input matches array type for scheme """
class Array(object):
    """Array used to do numeric calculations on various compute
    devices. It is a convenience wrapper around numpy and
    pycuda.
    """
__array_priority__ = 1000
def __init__(self, initial_array, dtype=None, copy=True):
""" initial_array: An array-like object as specified by NumPy, this
also includes instances of an underlying data type as described in
section 3 or an instance of the PYCBC Array class itself. This
object is used to populate the data of the array.
dtype: A NumPy style dtype that describes the type of
encapsulated data
|
(float32,compex64, etc)
copy: This defines whether the initial_array is copied to instantiate
the array or is simply referenced. If copy is false, new data is not
created, and so all arguments that would force a copy are ignored.
The default is to copy the given object.
"""
self._scheme=_scheme.mgr.state
self._saved = {}
#Unwrap initial_array
if isinstance(initial_array, Array):
            initial_array = initial_array._data
if not copy:
if not _scheme_matches_base_array(initial_array):
raise TypeError("Cannot avoid a copy of this array")
elif issubclass(type(self._scheme), _scheme.CPUScheme):
# ArrayWithAligned does not copy its memory; all
# the following does is add the 'isaligned' flag
# in case initial_array was a true numpy array
self._data = ArrayWithAligned(initial_array)
else:
self._data = initial_array
# Check that the dtype is supported.
if self._data.dtype not in _ALLOWED_DTYPES:
raise TypeError(str(self._data.dtype) + ' is not supported')
if dtype and dtype != self._data.dtype:
raise TypeError("Can only set dtype when allowed to copy data")
if copy:
# First we will check the dtype that we are given
if not hasattr(initial_array, 'dtype'):
initial_array = _numpy.array(initial_array)
# Determine the dtype to use
if dtype is not None:
dtype = _numpy.dtype(dtype)
if dtype not in _ALLOWED_DTYPES:
raise TypeError(str(dtype) + ' is not supported')
if dtype.kind != 'c' and initial_array.dtype.kind == 'c':
raise TypeError(str(initial_array.dtype) + ' cannot be cast as ' + str(dtype))
elif initial_array.dtype in _ALLOWED_DTYPES:
dtype = initial_array.dtype
else:
if initial_array.dtype.kind == 'c':
dtype = complex128
else:
dtype = float64
# Cast to the final dtype if needed
if initial_array.dtype != dtype:
initial_array = initial_array.astype(dtype)
#Create new instance with initial_array as initialization.
if issubclass(type(self._scheme), _scheme.CPUScheme):
if hasattr(initial_array, 'get'):
self._data = ArrayWithAligned(_numpy.array(initial_array.get()))
else:
self._data = ArrayWithAligned(_numpy.array(initial_array,
dtype=dtype, ndmin=1))
elif _scheme_matches_base_array(initial_array):
self._data = _copy_base_array(initial_array) # pylint:disable=assignment-from-no-return
else:
initial_array = _numpy.array(initial_array, dtype=dtype, ndmin=1)
self._data = _to_device(initial_array) # pylint:disable=assignment-from-no-return
@decorator
def _memoize_single(fn, self, arg):
badh = str(arg)
if badh in self._saved:
return self._saved[badh]
res = fn(self, arg) # pylint:disable=not-callable
self._saved[badh] = res
return res
@decorator
def _returnarray(fn, self, *args):
return Array(fn(self, *args), copy=False) # pylint:disable=not-callable
@decorator
def _returntype(fn, self, *args):
ary = fn(self,*args) # pylint:disable=not-callable
if ary is NotImplemented:
return NotImplemented
return self._return(ary)
def _return(self, ary):
"""Wrap the ary to return an Array type """
if isinstance(ary, Array):
|
marco-c/pluotsorbet
|
tests/sslEchoServer.py
|
Python
|
gpl-2.0
| 2,382
| 0.00084
|
#!/usr/bin/env python
import socket, ssl
# This is a copy of _RESTRICTED_SERVER_CIPHERS from the current tip of ssl.py
# <https://hg.python.org/cpython/file/af793c7580f1/Lib/ssl.py#l174> except that
# RC4 has been added back in, since it was removed in Python 2.7.10,
# but SSLStreamConnection only supports RC4 ciphers.
CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:RC4'
)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', 54443))
s.listen(5)
while True:
newsocket, fromaddr = s.accept()
try:
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile="cert.pem",
keyfile="cert.pem",
ciphers=CIPHERS)
except ssl.SSLError as e:
# Catch occurrences of:
# ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:581)
#
# In theory, setting ssl_version to ssl.PROTOCOL_TLSv1 will resolve
# the problem, but it didn't do so for me, and it caused the error:
        # ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:581)
#
# Whereas the SSLEOFError doesn't prevent the server from working
# (it seems to happen only when the server is first started, and it
# stops happening if we simply ignore it and try again a few times)
        # so we leave ssl_version at ssl.PROTOCOL_SSLv3 and ignore that error.
#
# If we catch SSLEOFError specifically, then Travis fails with:
# AttributeError: 'module' object has no attribute 'SSLEOFError'
# So we catch the more general exception SSLError.
continue
try:
data = connstream.read()
while data:
connstream.write(data)
data = connstream.read()
finally:
try:
connstream.shutdown(socket.SHUT_RDWR)
except socket.error as e:
# On Mac, if the other side has already closed the connection,
# then socket.shutdown will fail, but we can ignore this failure.
pass
connstream.close()
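# A minimal client sketch, assuming the echo server above is already running
# on localhost:54443 and that certificate verification is skipped (acceptable
# only for this self-signed local test, never for production).
def example_echo_client(message=b"hello"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_NONE)
    conn.connect(('localhost', 54443))
    conn.write(message)      # the server echoes back whatever it reads
    echoed = conn.read()
    conn.close()
    return echoed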
|
thedrow/streamparse
|
examples/wordcount/multilang/resources/sentence_splitter.py
|
Python
|
apache-2.0
| 274
| 0
|
from pystorm.bolt import BasicBolt
class SentenceSplitterBolt(BasicBolt):
def process(self, tup):
sentence = tup.values[0]
        for word in sentence.split(' '):
            self.emit([word])
if __name__ == '__main__':
SentenceSplitterBolt().run()
|
coxmediagroup/googleads-python-lib
|
examples/adwords/adwords_appengine_demo/views/add_campaign_view.py
|
Python
|
apache-2.0
| 2,387
| 0.005027
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles request to add a Campaign to a client account."""
__author__ = 'Mark Saniscalchi'
import os
from handlers.api_handler import APIHandler
from handlers.ndb_handler import InitUser
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
class AddCampaign(webapp2.RequestHandler):
"""View that either adds a Campaign or displays an error message."""
def post(self):
"""Handle post request."""
client_customer_id = self.request.get('clientCustomerId')
campaign_name = self.request.get('campaignName')
ad_channel_type = self.request.get('adChannelType')
budget = self.request.get('budget')
template_values = {
'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id,
'back_msg': 'View Campaigns',
'logout_url': users.create_logout_url('/'),
'user_nickname': users.get_current_user().nickname()
}
try:
app_user = InitUser()
# Load Client instance.
handler = APIHandler(app_user.client_id,
app_user.client_secret,
app_user.refresh_token,
app_user.mcc_cid,
app_user.developer_token)
# Create new campaign.
handler.AddCampaign(client_customer_id, campaign_name,
ad_channel_type, budget)
self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id)
except Exception, e:
template_values['error'] = str(e)
# Use template to write output to the page.
path = os.path.join(os.path.dirname(__file__),
'../templates/base_template.html')
self.response.out.write(template.render(path, template_values))
|
gilt/incubator-airflow
|
airflow/migrations/versions/947454bf1dff_add_ti_job_id_index.py
|
Python
|
apache-2.0
| 1,041
| 0.001921
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 947454bf1dff
Revises: bdaa763e6c56
Create Date: 2017-08-15 15:12:13.845074
"""
# revision identifiers, used by Alembic.
revision = '947454bf1dff'
down_revision = 'bdaa763e6c56'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
geo2tag-logistics/Geo2Logistics
|
logistics/migrations/0001_initial.py
|
Python
|
apache-2.0
| 5,033
| 0.004769
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-08 19:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Driver',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('is_online', models.BooleanField(default=False)),
('last_seen', models.DateTimeField(blank=True, null=True)),
('auto_back', models.CharField(blank=True, max_length=50, null=True)),
('auto_model', models.CharField(blank=True, max_length=50, null=True)),
('auto_manufacturer', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='DriverStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('driver', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')),
],
),
migrations.CreateModel(
name='Fleet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('creation_date', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Owner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('is_confirmed', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Trip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('passenger_phone', models.CharField(blank=True, max_length=50, null=True)),
('passenger_name', models.CharField(blank=True, max_length=50, null=True)),
('start_position', models.CharField(blank=True, max_length=50, null=True)),
('end_position', models.CharField(blank=True, max_length=50, null=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField(blank=True, null=True)),
('is_finished', models.BooleanField(default=False)),
('problem', models.IntegerField(choices=[(1, 'none'), (2, 'crash'), (3, 'jam'), (4, 'other')], default=1)),
('problem_description', models.CharField(blank=True, max_length=50, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')),
('fleet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Fleet')),
],
),
migrations.CreateModel(
name='TripStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trip', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Trip')),
],
),
migrations.AddField(
model_name='fleet',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Owner'),
),
migrations.AddField(
model_name='driver',
name='fleets',
field=models.ManyToManyField(blank=True, related_name='fleets', to='logistics.Fleet'),
),
migrations.AddField(
model_name='driver',
name='pending_fleets',
field=models.ManyToManyField(blank=True, related_name='pending_fleets', to='logistics.Fleet'),
),
migrations.AddField(
model_name='driver',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
afreeorange/rosalind
|
SUBS/motif.py
|
Python
|
mit
| 684
| 0.001462
|
import sys
def find_motif_locations(dna, motif):
motif_locations = []
if len(dna) < len(motif):
        raise ValueError('Motif can\'t be longer than sequence')
if len(motif) == len(dna) and motif != dna:
return motif_locations
for _ in range(len(dna) - len(motif) + 1):
if dna[_:_ + len(motif)] == motif:
motif_locations.append(_ + 1)
return motif_locations
if __name__ == '__main__':
    sequences = open(sys.argv[1]).read().split()
print(
' '.join(
str(_)
for _ in
find_motif_locations(
sequences[0],
sequences[1]
)
)
)
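# A small worked example using the classic Rosalind SUBS sample dataset: the
# motif ATAT starts at 1-based positions 2, 4 and 10 of the sequence below.
def _example():
    assert find_motif_locations('GATATATGCATATACTT', 'ATAT') == [2, 4, 10]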
|
paalge/scikit-image
|
doc/examples/features_detection/plot_blob.py
|
Python
|
bsd-3-clause
| 2,997
| 0
|
"""
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maximas in this cube. Detecting
larger blobs is especially slower because of larger kernel sizes during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of the LoG approach. In this case the image is
blurred with increasing standard deviations and the difference between
two successively blurred images is stacked up in a cube. This method
suffers from the same disadvantage as the LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image, interpolation='nearest')
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
|
yusaira-khan/un-iife-ize
|
tests/vars.py
|
Python
|
mit
| 2,836
| 0.000705
|
__author__ = 'yusaira-khan'
import unittest
import un_iife_ize.un_iife_ize as un_iife_ize
class CheckVar(unittest.TestCase):
def test_simple(self):
statement = [('var hello,world=5;', 0)]
exp = [('hello=undefined,world=5;', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_multiple(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0)]
exp = [('hello=undefined,world=5;', 0), ('bye=undefined,nope=undefined;', 19)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_sections(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('hello=undefined,world=5;', 0),
('bye=undefined,nope=undefined;', 19),
('hello=undefined,world=5;', 30),
('bye=undefined,nope=undefined;', 49)]
|
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_deliberate_iife(self):
statement = [('var hello=function(){;}', 0)]
exp = [('hello=function(){;}', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertE
|
qual(ret, exp)
def test_deliberate_iife_barc(self):
statement = [('var hello = (function(){;}())', 0)]
exp = [(' hello = (function(){;}())', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret, len(exp[0][0]), len(ret[0][0]))
self.assertEqual(ret, exp)
def test_double_assignment(self):
statement = [('var hello=wow=;', 0)]
exp = [('hello=wow=', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertEqual(ret, exp)
def test_inside_function(self):
statement = [('function(a){var hello=5;}', 30)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("woadh", ret, v.unmodified)
self.assertEqual(ret, statement)
def test_sections_unmodified(self):
statement = [('var hello,world=5;\nfunction(){}\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('\nfunction(){}\n', 18), ('', len(statement[0][0]) + statement[0][1]),
('\n', 48), ('', len(statement[1][0]) + statement[1][1])]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("ret", ret)
print("expt", exp)
self.assertEqual(ret, exp)
if __name__ == '__main__':
unittest.main()
|
JacobJacob/pyew
|
jdisasm.py
|
Python
|
gpl-2.0
| 19,835
| 0.013511
|
#! /usr/bin/python2.4
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""jdisasm.py: a Java .class file disassembler
by [email protected] at Sun Apr 26 20:36:26 CEST 2009
jdisasm can display a Java .class file in a human readable form, showing the
class name, the field names and types, the method names, types and codes
(including instruction memonics). For each item shown, the file offset is
prepended. (Neither javap or jad can display the file offset.)
jdisasm is based on the documentation
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html
"""
__author__ = '[email protected]'
import struct
import sys
ACCH = {
'PUBLIC': 0x1,
'PRIVATE': 0x2,
'PROTECTED': 0x4,
'STATIC': 0x8,
'FINAL': 0x10,
'SYNCHRONIZED': 0x20, # same as ACC_SUPER
'VOLATILE': 0x40,
'TRANSIENT': 0x80,
'NATIVE': 0x100,
'ABSTRACT': 0x400,
'STRICT': 0x800,
}
TAG_TO_CONSTANT_TYPE = {
7: 'Class_info',
9: 'Fieldref',
10: 'Methodref',
11: 'InterfaceMethodref',
8: 'String',
3: 'Integer',
4: 'Float',
5: 'Long',
6: 'Double',
12: 'NameAndType',
1: 'Utf8',
}
INSTRUCTIONS = {
50: ('aaload', 1),
83:
|
('aastore
|
', 1),
1: ('aconst_null', 1),
25: ('aload', 2),
42: ('aload_0', 1),
43: ('aload_1', 1),
44: ('aload_2', 1),
45: ('aload_3', 1),
189: ('anewarray', 3),
176: ('areturn', 1),
176: ('areturn', 1),
190: ('arraylength', 1),
190: ('arraylength', 1),
58: ('astore', 2),
75: ('astore_0', 1),
76: ('astore_1', 1),
77: ('astore_2', 1),
78: ('astore_3', 1),
191: ('athrow', 1),
51: ('baload', 1),
84: ('bastore', 1),
16: ('bipush', 2),
52: ('caload', 1),
85: ('castore', 1),
192: ('checkcast', 3),
144: ('d2f', 1),
142: ('d2i', 1),
143: ('d2l', 1),
99: ('dadd', 1),
49: ('daload', 1),
82: ('dastore', 1),
152: ('dcmpg', 1),
151: ('dcmpl', 1),
14: ('dconst_0', 1),
    15: ('dconst_1', 1),
111: ('ddiv', 1),
24: ('dload', 2),
24: ('dload', 2),
38: ('dload_0', 1),
39: ('dload_1', 1),
40: ('dload_2', 1),
41: ('dload_3', 1),
107: ('dmul', 1),
119: ('dneg', 1),
115: ('drem', 1),
175: ('dreturn', 1),
57: ('dstore', 2),
71: ('dstore_0', 1),
72: ('dstore_1', 1),
73: ('dstore_2', 1),
74: ('dstore_3', 1),
103: ('dsub', 1),
89: ('dup', 1),
90: ('dup_x1', 1),
91: ('dup_x2', 1),
92: ('dup2', 1),
93: ('dup2_x1', 1),
141: ('f2d', 1),
139: ('f2i', 1),
140: ('f2l', 1),
98: ('fadd', 1),
48: ('faload', 1),
81: ('fastore', 1),
150: ('fcmpg', 1),
149: ('fcmpl', 1),
11: ('fconst_0', 1),
12: ('fconst_1', 1),
13: ('fconst_2', 1),
110: ('fdiv', 1),
23: ('fload', 2),
34: ('fload_0', 1),
35: ('fload_1', 1),
36: ('fload_2', 1),
37: ('fload_3', 1),
106: ('fmul', 1),
118: ('fneg', 1),
114: ('frem', 1),
174: ('freturn', 1),
56: ('fstore', 2),
67: ('fstore_0', 1),
68: ('fstore_1', 1),
69: ('fstore_2', 1),
70: ('fstore_3', 1),
102: ('fsub', 1),
180: ('getfield', 3),
178: ('getstatic', 3),
167: ('goto', 3),
200: ('goto_w', 5),
145: ('i2b', 1),
146: ('i2c', 1),
135: ('i2d', 1),
134: ('i2f', 1),
133: ('i2l', 1),
147: ('i2s', 1),
96: ('iadd', 1),
46: ('iaload', 1),
126: ('iand', 1),
79: ('iastore', 1),
2: ('iconst_m1', 1),
3: ('iconst_0', 1),
4: ('iconst_1', 1),
5: ('iconst_2', 1),
6: ('iconst_3', 1),
7: ('iconst_4', 1),
8: ('iconst_5', 1),
108: ('idiv', 1),
165: ('if_acmpeq', 3),
166: ('if_acmpne', 3),
159: ('if_icmpeq', 3),
160: ('if_icmpne', 3),
161: ('if_icmplt', 3),
162: ('if_icmpge', 3),
163: ('if_icmpgt', 3),
164: ('if_icmple', 3),
153: ('ifeq', 3),
154: ('ifne', 3),
155: ('iflt', 3),
    156: ('ifge', 3),
157: ('ifgt', 3),
158: ('ifle', 3),
199: ('ifnonnull', 3),
198: ('ifnull', 3),
132: ('iinc', 3),
21: ('iload', 2),
26: ('iload_0', 1),
27: ('iload_1', 1),
28: ('iload_2', 1),
29: ('iload_3', 1),
104: ('imul', 1),
116: ('ineg', 1),
193: ('instanceof', 3),
185: ('invokeinterface', 5),
183: ('invokespecial', 3),
184: ('invokestatic', 3),
182: ('invokevirtual', 3),
128: ('ior', 1),
112: ('irem', 1),
172: ('ireturn', 1),
120: ('ishl', 1),
122: ('ishr', 1),
54: ('istore', 2),
59: ('istore_0', 1),
60: ('istore_1', 1),
61: ('istore_2', 1),
62: ('istore_3', 1),
100: ('isub', 1),
124: ('iushr', 1),
130: ('ixor', 1),
168: ('jsr', 3),
201: ('jsr_w', 5),
138: ('l2d', 1),
137: ('l2f', 1),
136: ('l2i', 1),
97: ('ladd', 1),
47: ('laload', 1),
127: ('land', 1),
80: ('lastore', 1),
148: ('lcmp', 1),
9: ('lconst_0', 1),
10: ('lconst_1', 1),
18: ('ldc', 2),
19: ('ldc_w', 3),
20: ('ldc2_w', 3),
109: ('ldiv', 1),
22: ('lload', 2),
30: ('lload_0', 1),
31: ('lload_1', 1),
32: ('lload_2', 1),
33: ('lload_3', 1),
105: ('lmul', 1),
117: ('lneg', 1),
171: ('lookupswitch', None), # variable length
129: ('lor', 1),
113: ('lrem', 1),
173: ('lreturn', 1),
121: ('lshl', 1),
123: ('lshr', 1),
55: ('lstore', 2),
63: ('lstore_0', 1),
64: ('lstore_1', 1),
65: ('lstore_2', 1),
66: ('lstore_3', 1),
101: ('lsub', 1),
125: ('lushr', 1),
131: ('lxor', 1),
194: ('monitorenter', 1),
195: ('monitorexit', 1),
197: ('multianewarray', 4),
187: ('new', 3),
188: ('newarray', 2),
0: ('nop', 1),
87: ('pop', 1),
88: ('pop2', 1),
181: ('putfield', 3),
179: ('putstatic', 3),
169: ('ret', 2),
177: ('return', 1),
53: ('saload', 1),
86: ('sastore', 1),
17: ('sipush', 3),
95: ('swap', 1),
170: ('tableswitch', None), # variable length
196: ('wide', None), # variable length, 6 for iinc=132, 4 otherwise
254: ('impdep1', 1),
255: ('impdep2', 1),
202: ('breakpoint', 1),
}
"""Maps an opcode to a (mnemonic, ilength) list.
ilength is the instruction size in bytes, including the opcode.
"""
def FormatAccessFlags(acc, is_class=False):
if not isinstance(acc, int):
raise TypeError
items = []
for name in sorted(ACCH):
if acc & ACCH[name]:
if is_class and name == 'SYNCHRONIZED':
items.append('ACC_SUPER')
else:
items.append('ACC_' + name)
acc &= ~ACCH[name]
if acc:
items.append('0x%x' % acc)
if not items:
    items.append('0')
return '|'.join(items)
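# Illustrative example (not part of the original file): a public class with
# the ACC_SUPER bit set (access_flags == 0x0021) formats as:
#   FormatAccessFlags(0x21, is_class=True)   # -> 'ACC_PUBLIC|ACC_SUPER'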
def DumpCode(s, i, iend, constant_class, constant_utf8,
constant_name_and_type, constant_method_ref,
constant_interface_method_ref):
max_stack, = struct.unpack('>H', s[i : i + 2])
print '0x%08x max_stack=%d' % (i, max_stack)
i += 2
max_locals, = struct.unpack('>H', s[i : i + 2])
print '0x%08x max_locals=%d' % (i, max_locals)
i += 2
code_length, = struct.unpack('>L', s[i : i + 4])
i += 4
code_ofs = i
print '0x%08x code:' % i
j = i
i += code_length
while j < i:
opcode = ord(s[j])
mnemonic, ilength = INSTRUCTIONS[opcode]
if opcode == 185: # invokeinterface
j0 = j
j += 1
interface_method_ref_index, count = struct.unpack('>HB', s[j : j + 3])
j += 4
class_index, name_and_type_index = constant_interface_method_ref[
interface_method_ref_index]
name_index,
|
GreenJoey/My-Simple-Programs
|
python/500L Async Scraper/simple-url-getter.py
|
Python
|
gpl-2.0
| 1,959
| 0.001021
|
import socket
from selectors import DefaultSelector, EVENT_WRITE, EVENT_READ
sock = socket.socket()
sock.setblocking(False)
|
selector = DefaultSelector()
urls_todo = set(['/'])
seen_urls = set(['/'])
stopped = False
class Fetcher:
def __init__(self, url):
|
self.response = b''
self.url = url
self.sock = None
def fetch(self):
# This method fetches the url
self.sock = socket.socket()
self.sock.setblocking(False)
try:
            self.sock.connect(('xkcd.com', 80))
except BlockingIOError:
pass
selector.register(self.sock.fileno(),
EVENT_WRITE,
self.connected)
def connected(self, key, mask):
print('connected!')
selector.unregister(key.fd)
request = 'GET {} HTTP/1.0\r\nHost: xkcd.com\r\n\r\n'.format(self.url)
self.sock.send(request.encode('ascii'))
# Register the next callback
selector.register(key.fd,
EVENT_READ,
self.read_response)
def read_response(self, key, mask):
global stopped
chunk = self.sock.recv(4096) # 4K chunks of data
if chunk:
self.response += chunk
else:
selector.unregister(key.fd) # Done reading
links = self.parse_links()
# Set logic
for link in links.difference(seen_urls):
urls_todo.add(link)
Fetcher(link).fetch()
seen_urls.update(links)
urls_todo.remove(self.url)
            if not urls_todo:
stopped = True
def parse_links(self):
pass
if __name__ == '__main__':
fetcher = Fetcher('/353')
fetcher.fetch()
while not stopped:
events = selector.select()
for event_key, event_mask in events:
callback = event_key.data
callback(event_key, event_mask)
|
google/burst-denoising
|
kpn_data_provider.py
|
Python
|
apache-2.0
| 21,051
| 0.018241
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains code for loading and preprocessing the MNIST data."""
import os
import tensorflow as tf
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
dataset = slim.dataset
queues = slim.queues
gfile = tf.gfile
import demosaic_utils
def make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter):
y = tf.random_uniform([1], jitter * upscale, tf.shape(image)[0]-height - jitter*upscale, tf.int32)
x = tf.random_uniform([1], jitter * upscale, tf.shape(image)[1]-width - jitter*upscale, tf.int32)
y, x = y[0], x[0]
demosaic = tf.reshape(image[y:y+height, x:x+width, :], (1, height, width, 1, 3))
delta = tf.random_uniform([BURST_LENGTH-1,2], -jitter*upscale, jitter*upscale+1, tf.int32)
# delta_big = tf.random_uniform([BURST_LENGTH-1,2], -20, 20, tf.int32)
shift_mask = tf.random_uniform([BURST_LENGTH-1, 1], 0., BURST_LENGTH-1., tf.float32) * to_shift
shift_mask = tf.where(shift_mask > BURST_LENGTH-2., tf.ones([BURST_LENGTH-1, 1]), tf.zeros([BURST_LENGTH-1, 1]))
delta = delta # + tf.cast(tf.tile(shift_mask, [1, 2]), tf.int32) * delta_big
shift_mask = tf.reshape(shift_mask, [1, BURST_LENGTH-1])
for d in range(BURST_LENGTH-1):
y_ = (y + delta[d,0]) # % (tf.shape(image)[0]-height)
x_ = (x + delta[d,1]) # % (tf.shape(image)[1]-width)
demosaic2 = tf.reshape(image[y_:y_+height, x_:x_+width, :], (1, height, width, 1, 3))
demosaic = tf.concat([demosaic, demosaic2], axis=3)
return demosaic, shift_mask
def make_stack_demosaic(image, height, width, depth, BURST_LENGTH, to_shift, upscale, jitter):
height = height * upscale
width = width * upscale
v_error = tf.maximum(height + 2 * jitter * upscale - tf.shape(image)[0] + 1, 0)
h_error = tf.maximum(width + 2 * jitter * upscale - tf.shape(image)[1] + 1, 0)
image = tf.pad(image, [[0,v_error],[0,h_error],[0,0]])
demosaic_stack, shift_stack = make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter)
for i in range(depth-1):
demosaic, shift_mask = make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter)
demosaic_stack = tf.concat((demosaic_stack, demosaic), axis=0)
shift_stack = tf.concat((shift_stack, shift_mask) , axis=0)
dt = tf.reshape(tf.transpose(demosaic_stack, [0, 3, 1, 2, 4]), [-1, height, width, 3])
height = height // upscale
width = width // upscale
dt = tf.image.resize_images(dt, [height, width], method=tf.image.ResizeMethod.AREA)
demosaic_stack = tf.transpose(tf.reshape(dt, [depth, BURST_LENGTH, height, width, 3]), [0, 2, 3, 1, 4])
mosaic = tf.stack((demosaic_stack[:,::2,::2,:,0],demosaic_stack[:,::2,1::2,:,1],demosaic_stack[:,1::2,::2,:,1],demosaic_stack[:,1::2,1::2,:,2]), axis=-1)
mosaic = demosaic_utils.tf22reshape2(mosaic, BURST_LENGTH)
mosaic = tf.reshape(mosaic, (depth, height, width, BURST_LENGTH))
return mosaic, demosaic_stack, shift_stack
def load_batch_demosaic(BURST_LENGTH, dataset_dir, batch_size=32, height=64, width=64, degamma=1., to_shift=1., upscale=1, jitter=1):
filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
filename_queue = tf.train.string_input_producer(filenames)
mosaic = None
while mosaic == None:
_, image_file = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_image(image_file)
mosaic, demosaic, shift = make_stack_demosaic((tf.cast(image[0], tf.float32) / 255.)**degamma,
height, width, 128, BURST_LENGTH, to_shift, upscale, jitter)
# Batch it up.
mosaic, demosaic, shift = tf.train.shuffle_batch(
[mosaic, demosaic, shift],
batch_size=batch_size,
num_threads=2,
capacity=500 + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=100)
return mosaic, demosaic, shift
def make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width,
to_shift, u
|
pscale, jitter, smalljitter):
# patches is [BURST_LENGTH, h_up, w_up, 3]
j_up = jitter * upscale
h_up = height * upscale # + 2 * j_up
w_up = width * upscale # + 2 * j_up
bigj_patches = patches
delta_up = (jitter - smalljitter) * upscale
smallj_patches = patches[:, delta_up:-delta_up, delta_up:-delta_up, ...]
unique = batch_size//repeats
batch = []
for i in range(unique):
for j in range(repeats):
curr = [patches[i, j_up:-j_up, j_up:-j_up, :]]
|
prob = tf.minimum(tf.cast(tf.random_poisson(1.5, []), tf.float32)/BURST_LENGTH, 1.)
for k in range(BURST_LENGTH - 1):
flip = tf.random_uniform([])
p2use = tf.cond(flip < prob, lambda : bigj_patches, lambda : smallj_patches)
curr.append(tf.random_crop(p2use[i, ...], [h_up, w_up, 3]))
curr = tf.stack(curr, axis=0)
curr = tf.image.resize_images(curr, [height, width], method=tf.image.ResizeMethod.AREA)
curr = tf.transpose(curr, [1,2,3,0])
batch.append(curr)
batch = tf.stack(batch, axis=0)
return batch
def make_stack_hqjitter(image, height, width, depth, BURST_LENGTH, to_shift, upscale, jitter):
j_up = jitter * upscale
h_up = height * upscale + 2 * j_up
w_up = width * upscale + 2 * j_up
v_error = tf.maximum((h_up - tf.shape(image)[0] + 1) // 2, 0)
h_error = tf.maximum((w_up - tf.shape(image)[1] + 1) // 2, 0)
image = tf.pad(image, [[v_error, v_error],[h_error,h_error],[0,0]])
stack = []
for i in range(depth):
stack.append(tf.random_crop(image, [h_up, w_up, 3]))
stack = tf.stack(stack, axis=0)
return stack
def load_batch_hqjitter(dataset_dir, patches_per_img=32, min_queue=8, BURST_LENGTH=1, batch_size=32,
repeats=1, height=64, width=64, degamma=1.,
to_shift=1., upscale=1, jitter=1, smalljitter=1):
filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
filename_queue = tf.train.string_input_producer(filenames)
_, image_file = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_image(image_file)
patches = make_stack_hqjitter((tf.cast(image[0], tf.float32) / 255.)**degamma,
height, width, patches_per_img, BURST_LENGTH, to_shift, upscale, jitter)
unique = batch_size//repeats
# Batch it up.
patches = tf.train.shuffle_batch(
[patches],
batch_size=unique,
num_threads=2,
capacity=min_queue + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=min_queue)
print('PATCHES =================',patches.get_shape().as_list())
patches = make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width, to_shift, upscale, jitter, smalljitter)
return patches
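# Hypothetical usage sketch (the dataset directory is an assumption; the call
# only builds graph ops and still needs a session with queue runners):
#   patches = load_batch_hqjitter('/data/train_images', BURST_LENGTH=8,
#                                 batch_size=32, height=64, width=64)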
def make_noised(image, height, width, sig_range):
y = tf.random_uniform([1], 0, tf.shape(image)[0]-height, tf.int32)
x = tf.random_uniform([1], 0, tf.shape(image)[1]-width, tf.int32)
y, x = y[0], x[0]
noised = tf.reshape(image[y:y+height, x:x+width, :], (1, height, width, 1, 3))
denoised = noised
sig = tf.random_uniform([1], 0, sig_range, tf.float32)
noised = tf.clip_by_value(noised + tf.random_normal(tf.shape(noised),mean=0.,stddev=sig[0]),0.,1.)
return noised, denoised, tf.reshape(sig, [1,1])
def make_stack_noised(image, height, width, depth, sig_range):
v_error = tf.maximum(height - tf.shape(image)[0] + 1, 0)
h_error = tf.maximum(width - tf.shape(image)[1] + 1, 0)
image = tf.pad(image, [[0,v_error],[0,h_error],[0,0]])
noised_stack, denoised_stack, sig_stack =
|
Secretions/zmdomainexport
|
zimbrasoap/__init__.py
|
Python
|
gpl-2.0
| 144
| 0.013889
|
#!/usr/bin/python
__version__ = '0.0.1'
import pysimplesoap.client
import pysimplesoap.simplexml
from zimbrasoap.soap import soap,
|
admin,mai
|
l
|
darth-dodo/project_lumos
|
lumos/migrations/0010_softskills_slug.py
|
Python
|
mit
| 428
| 0
|
# -*- coding: utf-8 -*-
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lumos', '0009_proglang_slug'),
]
operations = [
migrations.AddField(
model_name='softskills',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
|
),
]
|
vswamy/nightson
|
nightson/managers/base_entity_manager.py
|
Python
|
apache-2.0
| 1,373
| 0.002185
|
from __future__ import absolute_import
import momoko
from tornado import
|
gen
from psycopg2.extras import RealDictConnection
def initialize_database():
db = momoko.Pool(
dsn='''dbname=nightson user=vswamy password=vswamy host=localhost port=5432''',
size=5,
connection_factory=RealDictConnection,
)
db.connect()
return db
class BaseEntityManager(object):
db = initialize_database()
|
    def __init__(self, request=None):
        self.request = request
@gen.coroutine
def execute_sql(self, sql):
''' Executes an sql statement and returns the value '''
cursor = yield BaseEntityManager.db.execute(sql)
raise gen.Return(cursor)
def get_value(self, key):
''' Gets a value given dictionary like arguments'''
params = {}
if(self.request.method == 'GET'):
params = self.request.query_arguments
elif(self.request.method == 'POST'):
params = self.request.body_arguments
elif(self.request.method == 'PUT'):
params = self.request.arguments
elif(self.request.method == 'DELETE'):
params = self.request.body_arguments
if(key not in params):
return None
''' Params will always be of the form key:[values] '''
return params.get(key)[0]
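# Hypothetical usage sketch (the handler wiring and key name are assumptions,
# not part of this module):
#   manager = BaseEntityManager(self.request)
#   user_id = manager.get_value('user_id')            # None if absent
#   cursor = yield manager.execute_sql('SELECT 1;')    # inside a coroutine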
|
zero-os/0-orchestrator
|
pyclient/zeroos/orchestrator/client/EnumContainerStatus.py
|
Python
|
apache-2.0
| 139
| 0
|
from enum import Enum
class EnumContainerStatus(Enum):
running
|
= "running"
halted = "halted
|
"
networkKilled = "networkKilled"
|
spirrello/spirrello-pynet-work
|
applied_python/lib/python2.7/site-packages/pysmi/reader/localfile.py
|
Python
|
gpl-3.0
| 4,425
| 0.003164
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2016, Ilya Etingof <[email protected]>
# License: http://pysmi.sf.net/license.html
#
import os
import sys
import time
from pysmi.reader.base import AbstractReader
from pysmi.mibinfo import MibInfo
from pysmi.compat import decode
from pysmi import debug
from pysmi import error
class FileReader(AbstractReader):
"""Fetch ASN.1 MIB text by name from local file.
*FileReader* class instance tries to locate ASN.1 MIB files
by name, fetch and return their contents to caller.
"""
useIndexFile = True # optional .index file mapping MIB to file name
indexFile = '.index'
def __init__(self, path, recursive=True, ignoreErrors=True):
"""Create an in
|
stance of *FileReader* serving a directory.
Args:
path (str): directory to search MIB files
Keyword Args:
recursive (bool): whether to include subdirectories
ignoreErrors (bool): ignore filesystem access errors
"""
self._path = os.path.normpath(path)
self._recursive = recursive
self._ignoreErrors = ignoreErrors
|
self._indexLoaded = False
def __str__(self): return '%s{"%s"}' % (self.__class__.__name__, self._path)
def getSubdirs(self, path, recursive=True, ignoreErrors=True):
if not recursive:
return [path]
dirs = [path]
try:
subdirs = os.listdir(path)
except OSError:
if ignoreErrors:
return dirs
else:
raise error.PySmiError('directory %s access error: %s' % (path, sys.exc_info()[1]))
for d in subdirs:
d = os.path.join(decode(path), decode(d))
if os.path.isdir(d):
dirs.extend(self.getSubdirs(d, recursive))
return dirs
def loadIndex(self, indexFile):
mibIndex = {}
if os.path.exists(indexFile):
try:
mibIndex = dict(
[x.split()[:2] for x in open(indexFile).readlines()]
)
debug.logger & debug.flagReader and debug.logger('loaded MIB index map from %s file, %s entries' % (indexFile, len(mibIndex)))
except IOError:
pass
return mibIndex
def getMibVariants(self, mibname):
if self.useIndexFile:
if not self._indexLoaded:
self._mibIndex = self.loadIndex(
os.path.join(self._path, self.indexFile)
)
self._indexLoaded = True
if mibname in self._mibIndex:
debug.logger & debug.flagReader and debug.logger('found %s in MIB index: %s' % (mibname, self._mibIndex[mibname]))
return [(mibname, self._mibIndex[mibname])]
return super(FileReader, self).getMibVariants(mibname)
def getData(self, mibname):
debug.logger & debug.flagReader and debug.logger('%slooking for MIB %s' % (self._recursive and 'recursively ' or '', mibname))
for path in self.getSubdirs(self._path, self._recursive,
self._ignoreErrors):
for mibalias, mibfile in self.getMibVariants(mibname):
f = os.path.join(decode(path), decode(mibfile))
debug.logger & debug.flagReader and debug.logger('trying MIB %s' % f)
if os.path.exists(f) and os.path.isfile(f):
try:
mtime = os.stat(f)[8]
debug.logger & debug.flagReader and debug.logger('source MIB %s mtime is %s, fetching data...' % (f, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime))))
return MibInfo(path='file://%s' % f, file=mibfile, name=mibalias, mtime=mtime), decode(open(f, mode='rb').read(self.maxMibSize))
except (OSError, IOError):
debug.logger & debug.flagReader and debug.logger('source file %s open failure: %s' % (f, sys.exc_info()[1]))
if not self._ignoreErrors:
raise error.PySmiError('file %s access error: %s' % (f, sys.exc_info()[1]))
raise error.PySmiReaderFileNotModifiedError('source MIB %s is older than needed' % f, reader=self)
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
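# Hypothetical usage sketch (the MIB directory path is an assumption):
#   reader = FileReader('/usr/share/snmp/mibs', recursive=True)
#   mibinfo, text = reader.getData('SNMPv2-MIB')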
|
tiancj/emesene
|
emesene/gui/gtkui/RichWidget.py
|
Python
|
gpl-3.0
| 4,161
| 0.004086
|
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import xml.parsers.expat
import e3
import logging
class RichWidget(object):
'''a base widget that allows to add formatted text based on a
xhtml subset'''
def put_text(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined by the
optional parameters'''
raise NotImplementedError('Not implemented')
def put_formatted(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text'''
try:
result = e3.common.XmlParser.XmlParser(
#'<span>' + text.replace('\n', '') + '</span>').result
'<span>' + text + '</span>').result
except xml.parsers.expat.ExpatError:
logging.getLogger("gtkui.RichWidget").debug("cant parse '%s'" % \
(text, ))
return
dct = e3.common.XmlParser.DictObj(result)
self._put_formatted(dct, fg_color, bg_color, font, size,
bold, italic, underline, strike)
def _put_formatted(self, dct, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text, using the parsed structure stored on dct'''
# override the values if defined, keep the old ones if no new defined
bold = dct.tag == 'b' or dct.tag == 'strong' or bold
italic = dct.tag == 'i' or dct.tag == 'em' or italic
underline = dct.tag == 'u' or underline
strike = dct.tag == 's' or strike
if dct.tag == 'span' and dct.style:
style = e3.common.XmlParser.parse_css(dct.style)
font = style.font_family or font
try:
# TODO: handle different units?
size = int(style.font_size) or size
except ValueError:
pass
except TypeError:
pass
fg_color = style.color or fg_color
bg_color = style.background_color or bg_color
if dct.childs is None:
return
for child in dct.childs:
if isinstance(child, basestring):
self.put_text(child, fg_color, bg_color, font, size,
bold, italic, underline, strike)
elif child.tag == 'img':
self.put_image(child.src, child.alt)
elif child.tag == 'br':
self.new_line()
elif child.tag == 'a':
|
self.put_link(child.href)
else:
self._put_formatted(child, fg_color, bg_color, font, size,
bold, italic, underline,
|
strike)
def put_image(self, path, tip=None):
'''insert an image at the current position
tip it's the alt text on mouse over'''
raise NotImplementedError('Not implemented')
def new_line(self):
'''put a new line on the text'''
raise NotImplementedError('Not implemented')
def put_link(self, link):
'''insert a link at the current position'''
raise NotImplementedError('Not implemented')
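# Illustrative sketch (not part of the original file): a concrete subclass
# implementing put_text(), put_image(), new_line() and put_link() can then
# render a fragment of the supported xhtml subset, e.g.
#   widget.put_formatted('<b>hello</b> <i>world</i><br/>')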
|
fish2000/django-signalqueue
|
signalqueue/settings/__init__.py
|
Python
|
bsd-3-clause
| 4,884
| 0.006347
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('My Name', '[email protected]'),
)
MANAGERS = ADMINS
import tempfile, os
from django import contrib
tempdata = tempfile.mkdtemp()
approot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
adminroot = os.path.join(contrib.__path__[0], 'admin')
DATABASES = {
'default': {
'NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'TEST_NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
}
}
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
MEDIA_ROOT = os.path.join(approot, 'static')
MEDIA_URL = '/face/'
STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')[0]
STATIC_URL = '/staticfiles/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'signalqueue.settings.urlconf'
TEMPLATE_DIRS = (
os.path.join(approot, 'templates'),
os.path.join(adminroot, 'templates'),
os.path.join(adminroot, 'templates', 'admin'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
#"django.core.context_processors.i18n", this is AMERICA
"django.core.context_processors.media",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_nose',
'djcelery',
'delegate',
'signalqueue',
)
LOGGING = dict(
version=1,
disable_existing_loggers=False,
formatters={ 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, },
handlers={
'default': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter':'standard', },
'nil': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', },
},
loggers={
'signalqueue': { 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
},
root={ 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
)
SQ_QUEUES = {
'default': { # you need at least one dict named 'default' in SQ_QUEUES
'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # required - full path to a QueueBase subclass
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'listqueue': {
'ENGINE': 'signalqueue.worker.backends.RedisQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'db': {
'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy',
'INTERVAL': 30, # 1/3 sec
|
'OPTIONS': dict(app_label='signalqueue',
modl_name='EnqueuedSignal'),
},
'celery': {
'ENGINE': 'signalqueue.wor
|
ker.celeryqueue.CeleryQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(celery_queue_name='inactive',
transport='redis', port=8356),
},
}
SQ_ADDITIONAL_SIGNALS=['signalqueue.tests']
SQ_WORKER_PORT = 11201
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
try:
from kombu import Queue
except ImportError:
pass
else:
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_QUEUES = (
Queue('default', routing_key='default.#'),
Queue('yodogg', routing_key='yodogg.#'),
)
CELERY_ALWAYS_EAGER = True
BROKER_URL = 'redis://localhost:8356/0'
BROKER_HOST = "localhost"
BROKER_BACKEND = "redis"
REDIS_PORT = 8356
REDIS_HOST = "localhost"
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "0"
REDIS_DB = 0
REDIS_CONNECT_RETRY = True
CELERY_SEND_EVENTS = True
CELERY_RESULT_BACKEND = "redis://localhost:8356/0"
CELERY_TASK_RESULT_EXPIRES = 10
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
try:
import djcelery
except ImportError:
pass
else:
djcelery.setup_loader()
# package path-extension snippet.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
ShaolongHu/lpts
|
tests/fio.py
|
Python
|
gpl-2.0
| 4,566
| 0.007627
|
# -*- coding:utf-8 -*-
'''
Execution script for the fio test tool
'''
import os,shutil,re,time,sys,copy
from test import BaseTest
from lpt.lib.error import *
from
|
lpt.lib import lptxml
from lpt.lib import lptlog
from lpt.lib.s
|
hare import utils
from lpt.lib import lptreport
class TestControl(BaseTest):
'''
    Inherits the attributes and methods of BaseTest
'''
def __init__(self, jobs_xml, job_node, tool, tarball='fio-2.1.10.tar.bz2'):
super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)
def setup(self):
        '''Compile the source code and set up the program
'''
if not self.check_bin(self.processBin):
self.tar_src_dir = self.extract_bar()
os.chdir(self.tar_src_dir)
self.compile(configure_status=True, make_status=True)
os.chdir(self.lpt_root)
def run(self):
tool_node = self.check_tool_result_node()
self.config_file = os.path.join(self.tar_src_dir, self.get_config_value(tool_node, "config_file", "./fio-mixed.job", valueType=str))
lptlog.info("使用配置文件: %s" % self.config_file)
self.result_tmp_file = os.path.join(self.tmp_dir, "fio_output")
self.filesize = self.get_config_value(tool_node, "filesize", "100M", valueType=str)
lptlog.info("测试读写文件大小: %s" % self.filesize)
f = open(self.config_file,'r')
lines = f.read()
f.close()
f = open(self.config_file,'w')
lines = re.sub('size=(\d+)M', 'size=%s'%self.filesize, lines)
f.write(lines)
f.close()
self.mainParameters["parameters"] = "./fio --output %s %s"%(self.result_tmp_file, self.config_file)
lptlog.info("----------开始测试")
os.chdir(self.tar_src_dir)
utils.system("./fio --output %s %s"%(self.result_tmp_file, self.config_file))
def create_result(self):
lptlog.info("----------创建结果")
self.result_list = self.__match_index(self.result_tmp_file)
def __match_index(self, file):
if not os.path.isfile(file):
return []
lptlog.debug("在%s中搜索测试指标" % file)
results_lines = utils.read_all_lines(file)
labels = ('io', 'aggrb', 'minb', 'maxb', 'mint','maxt')
parallel_template = {'parallels': '1,2,3,4', 'parallel': '1', 'iter': '1', 'times': '2'}
result_list = []
count = 0
for line in results_lines:
if 'READ:' in line:
tmp_list = []
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2 + 1)
parallel_dict['iter'] = 'READ'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_generator(labels,line))
result_list.append(tmp_list)
count = count + 1
elif 'WRITE:' in line:
tmp_list = []
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2 + 1)
parallel_dict['iter'] = 'WRITE'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_generator(labels,line))
result_list.append(tmp_list)
count = count + 1
if count in [2,4,6,8]:
tmp_list = []
dict2 = result_list[-1][1]
dict1 = result_list[-2][1]
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2)
parallel_dict['iter'] = 'Average'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_average(dict1, dict2))
result_list.append(tmp_list)
return result_list
def dict_generator(self, labels, line):
result_dict = {}
line = line.replace(',','')
line = line.split()
for l,v in zip(labels, (line[1].split('=')[1][:-2], line[2].split('=')[1][:-4], line[3].split('=')[1][:-4], line[4].split('=')[1][:-4], line[5].split('=')[1][:-4], line[6].split('=')[1][:-4])):
result_dict[l] = "%s" % v
return result_dict
def dict_average(self, dict1, dict2):
result_dict = {}
for k,v in dict1.items():
try:
result_dict[k] = str((float(dict1[k]) * 0.33 + float(dict2[k]) * 0.67))
            except Exception as e:
raise e
sys.exit()
return result_dict
|
booya-at/freecad_glider
|
freecad/freecad_glider/tools/line_tool.py
|
Python
|
lgpl-2.1
| 36,337
| 0.001431
|
from __future__ import division
import sys
if sys.version_info.major > 2:
from importlib import reload
from PySide import QtGui, QtCore
import traceback
import numpy as np
import FreeCAD as App
import FreeCADGui as Gui
from ._tools import BaseTool, input_field, text_field
from ._glider import draw_glider, draw_lines
from .pivy_primitives_new import vector3D
from .pivy_primitives_new import InteractionSeparator, Object3D, Arrow
from .pivy_primitives_new import Line as _Line
from .pivy_primitives_new import Marker as _Marker
from .pivy_primitives_new import coin
from openglider.glider.parametric.lines import UpperNode2D, LowerNode2D, \
BatchNode2D, Line2D, LineSet2D
from openglider.lines.line_types import LineType
import numpy as np
def refresh():
pass
class Line(_Line):
def set_disabled(self):
super(Line, self).set_disabled()
points = np.array(self.points)
points.T[2] = -1
self.points = points
def set_enabled(self):
super(Line, self).set_enabled()
points = np.array(self.points)
points.T[2] = 0
self.points = points
class Marker(_Marker):
def set_disabled(self):
super(Marker, self).set_disabled()
points = np.array(self.points)
points.T[2] = -1
self.points = points
def set_enabled(self):
super(Marker, self).set_enabled()
points = np.array(self.points)
points.T[2] = 0
self.points = points
class LineContainer(InteractionSeparator):
def Select(self, obj, multi=False):
if not multi:
for o in self.selected_objects:
o.unselect()
self.selected_objects = []
if obj:
if obj in self.selected_objects:
self.selected_objects.remove(obj)
elif obj.enabled:
self.selected_objects.append(obj)
self.ColorSelected()
self.selection_changed()
def select_all_cb(self, event_callback):
event = event_callback.getEvent()
if (event.getKey() == ord('a')):
if event.getState() == event.DOWN:
if self.selected_objects:
for o in self.selected_objects:
o.unselect()
self.selected_objects = []
else:
for obj in self.objects:
if obj.dynamic and obj.enabled:
self.selected_objects.append(obj)
self.ColorSelected()
self.selection_changed()
# all line info goes into the tool.
# the lineset will be totally reloaded after the tool work is ready
# if an error occurs nothing will happen
# 1: create markers from existing lineset
# 2: create lines from existing lineset
# 3: eventhandler for adding and connecting lines
class LineTool(BaseTool):
widget_name = 'Line Tool'
def __init__(self, obj):
super(LineTool, self).__init__(obj)
# get the parametric shape
_shape = self.parametric_glider.shape.get_half_shape()
self.ribs = _shape.ribs
self.front = _shape.front
self.back = _shape.back
self.xpos = self.parametric_glider.shape.rib_x_values
self.disabled_color = (0.5, 0.5, 0.5)
# setup the GUI
self.setup_widget()
self.setup_pivy()
def setup_pivy(self):
# pivy helper line
self.helper_line = coin.SoSeparator()
self.temp_point = coin.SoSeparator()
# pivy lines, points, shape
self.shape = LineContainer()
self.shape.selection_changed = self.selection_changed
self.shape.setName('shape')
self.shape.register(self.view)
self.task_separator += [self.shape, self.helper_line]
self.task_separator += [self.temp_point]
self.draw_shape()
self.update_layer_selection()
self.update_helper_line()
self.setup_cb()
def setup_widget(self):
# qt helper line
self.Qhl_pos = QtGui.QDoubleSpinBox()
# qt element widget
self.tool_widget = QtGui.QStackedWidget()
self.tool_layout = QtGui.QFormLayout(self.tool_widget)
# qt layer widget
self.layer_widget = QtGui.QWidget()
self.layer_layout = QtGui.QFormLayout(self.layer_widget)
self.layer_selection = LayerComboBox(self.layer_widget)
self.layer_combobox = LayerComboBox(self.layer_widget)
self.layer_color_button = QtGui.QPushButton('select color')
self.layer_color_dialog = QtGui.QColorDialog()
self.tool_widget.setWindowTitle('object properties')
self.layer_widget.setWindowTitle('layers')
self.form.append(self.layer_widget)
self.form.append(self.tool_widget)
# temp_wid = QtGui.QWidget()
# temp_lay = QtGui.QHBoxLayout(temp_wid)
# self.layout.setWidget(1, input_field, temp_wid)
self.none_widget = QtGui.QWidget()
self.line_widget = QtGui.QWidget()
self.lw_att_wid = QtGui.QWidget()
self.up_att_wid = QtGui.QWidget()
self.Qline_list = QtGui.QListWidget()
for _type in LineType.types.values():
self.Qline_list.addItem(QLineType_item(_type))
self.Qline_list.sortItems()
self.up_att_lay = QtGui.QFormLayout(self.up_att_wid)
self.lw_att_lay = QtGui.QFormLayout(self.lw_att_wid)
self.line_layout = QtGui.QFormLayout(self.line_widget)
self.none_layout = QtGui.QFormLayout(self.none_widget)
self.target_length = QtGui.QDoubleSpinBox()
self.target_length.setDecimals(5)
self.line_layout.setWidget(
0, text_field, QtGui.QLabel('target length: '))
self.line_layout.setWidget(0, input_field, self.target_length)
self.line_layout.setWidget(1, text_field, QtGui.QLabel('line type: '))
self.line_layout.setWidget(1, input_field, self.Qline_list)
self.target_length.valueChanged.connect(self.update_target_length)
self.Qline_list.currentItemChanged.connect(self.update_line_type)
self.QLineName = QtGui.QLineEdit()
self.line_layout.setWidget(2, text_field, QtGui.QLabel('name'))
self.line_layout.setWidget(2, input_field, self.QLineName)
self.QLineName.textChanged.connect(self.line_name_changed)
self.attach_x_val = QtGui.QDoubleSpinBox()
self.attach_y_val = QtGui.QDoubleSpinBox()
self.attach_z_val = QtGui.QDoubleSpinBox()
for spinbox in [
self.at
|
tach_x_val, self.attach_y_val, self.attach_z_val]:
spinbox.setMaximum(10.)
spinbox.setMinimum(-10.)
spinbox.valueChanged.connect(self.update_lw_att_pos)
self.lw_att_lay.addWidget(self.attach_x_val)
self.lw_att_lay.addWidget(self.attach_y_val)
self.lw_att_lay.addWidget(self.attach_z_val)
self.up_att_force =
|
QtGui.QDoubleSpinBox()
self.up_att_force.setSingleStep(0.1)
self.up_att_lay.setWidget(0, text_field, QtGui.QLabel('force'))
self.up_att_lay.setWidget(0, input_field, self.up_att_force)
self.up_att_force.valueChanged.connect(self.update_up_att_force)
self.up_att_rib = QtGui.QSpinBox()
self.up_att_rib.setMinimum(0)
self.up_att_rib.setMaximum(self.parametric_glider.shape.half_rib_num - 1)
self.up_att_lay.setWidget(1, text_field, QtGui.QLabel('rib nr'))
self.up_att_lay.setWidget(1, input_field, self.up_att_rib)
self.up_att_rib.valueChanged.connect(self.update_up_att_rib)
self.up_att_pos = QtGui.QDoubleSpinBox()
self.up_att_pos.setMinimum(0)
self.up_att_pos.setMaximum(1)
self.up_att_pos.setSingleStep(0.01)
self.up_att_lay.setWidget(2, text_field, QtGui.QLabel('position'))
self.up_att_lay.setWidget(2, input_field, self.up_att_pos)
self.up_att_pos.valueChanged.connect(self.update_up_att_pos)
self.tool_widget.addWidget(self.none_widget)
self.tool_widget.addWidget(self.line_widget)
self.tool_widget.addWidget(self.lw_att_wid)
self.tool_widget.addWidget(self.up_att_wid)
sel
|
jlongever/RackHD
|
test/benchmark/api_v2_0/discovery_tests.py
|
Python
|
apache-2.0
| 142
| 0.007042
|
from proboscis import test
@test(groups=['benchmark.discovery'])
class BenchmarkDiscoveryTests(object):
def __init_
|
_(self):
p
|
ass
|
donlee888/JsObjects
|
Python/ComplexPaths02/src/main/mypackage/MyPackageMod01.py
|
Python
|
mit
| 122
| 0.02459
|
'''
Cre
|
ated on May 26, 2012
@author: Charlie
'''
class MyPackageMod01(object):
def
|
__init__(self):
pass
|
ColoradoSchoolOfMines/acm-website
|
mozzarella/controllers/__init__.py
|
Python
|
gpl-3.0
| 50
| 0
|
"""
|
Controllers for the mozzarella application.""
|
"
|
amadeusproject/amadeuslms
|
themes/migrations/0003_auto_20170112_1408.py
|
Python
|
gpl-2.0
| 560
| 0.001786
|
# -*- coding: utf-
|
8 -*-
# Generated by Django 1.10 on 2017-01-12 17:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('themes', '0002_auto_20170110_1809'),
]
operations = [
migrations.AlterField(
model_name='themes',
|
name='css_style',
field=models.CharField(choices=[('green', 'Green'), ('red', 'Red'), ('black', 'Black')], default='green', max_length=50, verbose_name='Css Style'),
),
]
|
jonathanmeier5/teamstore
|
saleor/checkout/views/shipping.py
|
Python
|
bsd-3-clause
| 3,737
| 0.00107
|
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from ..forms import AnonymousUserShippingForm, ShippingAddressesForm
from ...userprofile.forms import get_address_form
from ...userprofile.models import Address
from ...teamstore.utils import get_team
def anonymous_user_shipping_address_view(request, checkout):
team = get_team(request.session['team'])
if team.group_shipping:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=team.shipping_address)
else:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=checkout.shipping_address)
user_form = AnonymousUserShippingForm(
not preview and request.POST or None, initial={'email': checkout.email}
if not preview else request.POST.dict())
if team.group_shipping and user_form.is_valid():
checkout.shipping_address = team.shipping_address
checkout.email = user_form.cleaned_data['email']
return redirect('checkout:shipping-method')
elif all([user_form.is_valid(), address_form.is_valid()]):
|
checkout.shipping_address = address_form.instance
checkout.email = user_form.c
|
leaned_data['email']
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': user_form,
'group_shipping': team.group_shipping, 'checkout': checkout})
def user_shipping_address_view(request, checkout):
data = request.POST or None
additional_addresses = request.user.addresses.all()
checkout.email = request.user.email
shipping_address = checkout.shipping_address
if shipping_address is not None and shipping_address.id:
address_form, preview = get_address_form(
data, country_code=request.country.code,
initial={'country': request.country})
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses,
initial={'address': shipping_address.id})
elif shipping_address:
address_form, preview = get_address_form(
data, country_code=shipping_address.country.code,
instance=shipping_address)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
else:
address_form, preview = get_address_form(
data, initial={'country': request.country},
country_code=request.country.code)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
if addresses_form.is_valid() and not preview:
if addresses_form.cleaned_data['address'] != ShippingAddressesForm.NEW_ADDRESS:
address_id = addresses_form.cleaned_data['address']
checkout.shipping_address = Address.objects.get(id=address_id)
return redirect('checkout:shipping-method')
elif address_form.is_valid():
checkout.shipping_address = address_form.instance
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': addresses_form,
'checkout': checkout, 'additional_addresses': additional_addresses})
|
rohitranjan1991/home-assistant
|
homeassistant/components/rachio/switch.py
|
Python
|
mit
| 19,041
| 0.00084
|
"""Integration with the Rachio Iro sprinkler system controller."""
from abc import abstractmethod
from contextlib import suppress
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, ATTR_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import as_timestamp, now, parse_datetime, utc_from_timestamp
from .const import (
CONF_MANUAL_RUN_MINS,
DEFAULT_MANUAL_RUN_MINS,
DOMAIN as DOMAIN_RACHIO,
KEY_CUSTOM_CROP,
KEY_CUSTOM_SHADE,
KEY_CUSTOM_SLOPE,
KEY_DEVICE_ID,
KEY_DURATION,
KEY_ENABLED,
KEY_ID,
KEY_IMAGE_URL,
KEY_NAME,
KEY_ON,
KEY_RAIN_DELAY,
KEY_RAIN_DELAY_END,
KEY_SCHEDULE_ID,
KEY_SUBTYPE,
KEY_SUMMARY,
KEY_TYPE,
KEY_ZONE_ID,
KEY_ZONE_NUMBER,
SCHEDULE_TYPE_FIXED,
SCHEDULE_TYPE_FLEX,
SERVICE_SET_ZONE_MOISTURE,
SERVICE_START_MULTIPLE_ZONES,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
SIGNAL_RACHIO_SCHEDULE_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE,
SLOPE_FLAT,
SLOPE_MODERATE,
SLOPE_SLIGHT,
SLOPE_STEEP,
)
from .entity import RachioDevice
from .webhooks import (
SUBTYPE_RAIN_DELAY_OFF,
SUBTYPE_RAIN_DELAY_ON,
SUBTYPE_SCHEDULE_COMPLETED,
SUBTYPE_SCHEDULE_STARTED,
SUBTYPE_SCHEDULE_STOPPED,
SUBTYPE_SLEEP_MODE_OFF,
SUBTYPE_SLEEP_MODE_ON,
|
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_PAUSED,
SUBTYPE_ZONE_STARTED,
SUBTYPE_ZONE_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = "duration"
ATTR_PERCENT = "percent"
ATTR_SCHEDULE_SUMMARY = "Summary"
ATTR_SCHEDULE_ENABLED = "Enabled"
ATTR_SCHEDULE_DURATION = "Duration"
ATTR_SCHEDULE_TYPE = "Type"
ATTR_SORT_ORDER = "sortOrder"
ATTR_ZONE_NUMBER = "Zone number"
ATTR_ZONE_SHADE = "Shade"
ATTR_ZONE_SLOPE = "Slope"
ATTR_ZONE_SUMMARY = "Summary"
ATTR_ZONE_TYPE
|
= "Type"
START_MULTIPLE_ZONES_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_DURATION): cv.ensure_list_csv,
}
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Rachio switches."""
zone_entities = []
has_flex_sched = False
entities = await hass.async_add_executor_job(_create_entities, hass, config_entry)
for entity in entities:
if isinstance(entity, RachioZone):
zone_entities.append(entity)
if isinstance(entity, RachioSchedule) and entity.type == SCHEDULE_TYPE_FLEX:
has_flex_sched = True
async_add_entities(entities)
_LOGGER.info("%d Rachio switch(es) added", len(entities))
def start_multiple(service: ServiceCall) -> None:
"""Service to start multiple zones in sequence."""
zones_list = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
entity_id = service.data[ATTR_ENTITY_ID]
duration = iter(service.data[ATTR_DURATION])
default_time = service.data[ATTR_DURATION][0]
entity_to_zone_id = {
entity.entity_id: entity.zone_id for entity in zone_entities
}
for (count, data) in enumerate(entity_id):
if data in entity_to_zone_id:
# Time can be passed as a list per zone,
# or one time for all zones
time = int(next(duration, default_time)) * 60
zones_list.append(
{
ATTR_ID: entity_to_zone_id.get(data),
ATTR_DURATION: time,
ATTR_SORT_ORDER: count,
}
)
if len(zones_list) != 0:
person.start_multiple_zones(zones_list)
_LOGGER.debug("Starting zone(s) %s", entity_id)
else:
raise HomeAssistantError("No matching zones found in given entity_ids")
hass.services.async_register(
DOMAIN_RACHIO,
SERVICE_START_MULTIPLE_ZONES,
start_multiple,
schema=START_MULTIPLE_ZONES_SCHEMA,
)
if has_flex_sched:
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_ZONE_MOISTURE,
{vol.Required(ATTR_PERCENT): cv.positive_int},
"set_moisture_percent",
)
def _create_entities(hass, config_entry):
entities = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
# Fetch the schedule once at startup
# in order to avoid every zone doing it
for controller in person.controllers:
entities.append(RachioStandbySwitch(controller))
entities.append(RachioRainDelay(controller))
zones = controller.list_zones()
schedules = controller.list_schedules()
flex_schedules = controller.list_flex_schedules()
current_schedule = controller.current_schedule
for zone in zones:
entities.append(RachioZone(person, controller, zone, current_schedule))
for sched in schedules + flex_schedules:
entities.append(RachioSchedule(person, controller, sched, current_schedule))
_LOGGER.debug("Added %s", entities)
return entities
class RachioSwitch(RachioDevice, SwitchEntity):
"""Represent a Rachio state that can be toggled."""
def __init__(self, controller):
"""Initialize a new Rachio switch."""
super().__init__(controller)
self._state = None
@property
def name(self) -> str:
"""Get a name for this switch."""
return f"Switch on {self._controller.name}"
@property
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
@callback
def _async_handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._async_handle_update(args, kwargs)
@abstractmethod
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook data."""
class RachioStandbySwitch(RachioSwitch):
"""Representation of a standby status/button."""
@property
def name(self) -> str:
"""Return the name of the standby switch."""
return f"{self._controller.name} in standby mode"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and purpose."""
return f"{self._controller.controller_id}-standby"
@property
def icon(self) -> str:
"""Return an icon for the standby switch."""
return "mdi:power"
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON:
self._state = True
elif args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF:
self._state = False
self.async_write_ha_state()
def turn_on(self, **kwargs) -> None:
"""Put the controller in standby mode."""
self._controller.rachio.device.turn_off(self._controller.controller_id)
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.turn_on(self._controller.controller_id)
async def async_added_to_hass(self):
"""Subscribe to updates."""
if KEY_ON in self._controller.init_data:
self._state = not self._controller.init_data[KEY_ON]
sel
|
LinuCC/sturo
|
src/lib/usonic.py
|
Python
|
gpl-2.0
| 3,134
| 0.021378
|
#!/usr/bin/python
import time
import RPi.GPIO as GPIO
# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
def reading(sensor):
# Disable any warning message such as GPIO pins in use
GPIO.setwarnings(False)
# use the values of the GPIO pins, and not the actual pin number
# so if you connect to GPIO 25 which is on pin number 22, the
# reference in this code is 25, which is the number of the GPIO
# port and not the number of the physical pin
GPIO.setmode(GPIO.BCM)
if sensor == 0:
# point the software to the GPIO pins the sensor is using
# change these values to the pins you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
GPIO.setup(22,GPIO.OUT)
GPIO.setup(27,GPIO.IN)
GPIO.output(22, GPIO.LOW)
# found that the sensor can crash if there isn't a delay here
# no idea why. If you have odd crashing issues, increase delay
time.sleep(0.3)
        # sensor manual says a pulse length of 10Us will trigger the
# sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
# wait for the reflected ultrasonic burst to be received
# to get a pulse length of 10Us we need to start the pulse, then
# wait for 10 microseconds, then stop the pulse. This will
# result in the pulse length being 10Us.
# start the pulse on the GPIO pin
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(22, True)
# wait 10 micro seconds (this is 0.00001 seconds) so the pulse
# length is 10Us as the sensor expects
time.sleep(0.00001)
# stop the pulse after the time above has passed
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(22, False)
# listen to the input pin. 0 means nothing is happening. Once a
# signal is received the value will be 1 so the while loop
# stops and has the last recorded time the signal was 0
# change this value to the pin you are using
        # GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 0:
signaloff = time.time()
# listen to the input pin. Once a signal is received, record the
# time the signal came through
# change this value to the pin you are using
        # GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 1:
signalon = time.time()
# work out the difference in the two recorded times above to
# calculate the distance of an object in front of the sensor
timepassed = signalon - signaloff
# we now have our distance but it's not in a useful unit of
# measurement. So now we convert this distance into centimetres
distance = timepassed * 17000
        # we're no longer using the GPIO, so tell software we're done
        # (this must happen before the return, otherwise it is never reached)
        GPIO.cleanup()
        # return the distance of an object in front of the sensor in cm
        return distance
else:
print "Incorrect usonic() function varible."
|
Stiliyan92/accounting-system
|
common/config_parser.py
|
Python
|
gpl-2.0
| 828
| 0.001208
|
import configparser
CONFIG_PATH = 'accounting.conf'
class MyConfigParser():
def __init__(self, config_path=CONFIG_PATH):
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read(config_path)
def config_section_map(self, section):
""" returns all configuration options in 'section' in a dict with
key: config_option and value: the read value in the file"""
dict1 = {}
options = self.config.options(section)
for option in options:
            try:
                dict1[option] = self.config.get(section, option)
                if dict1[option] == -1:
                    print("skip: %s" % option)
            except Exception:
                dict1[option] = None
return dict1
# getint(section, option)
# getboolean(section, option)
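# A minimal usage sketch (illustrative only; assumes 'accounting.conf' exists
# next to this module and contains a hypothetical '[database]' section).
if __name__ == '__main__':
    parser = MyConfigParser()
    print(parser.config_section_map('database'))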
|
fahadsultan/CausalRelations
|
to_txt.py
|
Python
|
apache-2.0
| 551
| 0.039927
|
import os
import pandas as pd
from bs4 import BeautifulSoup
count = pd.DataFrame(columns=['filename', 'count'])
for folder, subs, files in os.walk('data/xml'):
for filename in files:
try:
            if ('.xml' in filename) and (filename[0] != '.'):
f = open(os.path.join(folder, filename))
soup = BeautifulSoup(f.read())
tokens = soup.findAll('token')
tokens_arr = [token.text for token in tokens]
text = ' '.join(tokens_arr)
f = open('data/text/'+filename, 'w')
                f.write(text)
f.close()
except Exception as e:
print e
continue
|
kyon-bll/.dotfiles
|
.emacs.d/elpa/jedi-core-20191011.1750/jediepcserver.py
|
Python
|
mit
| 15,847
| 0.000252
|
#!/usr/bin/env python
"""
Jedi EPC server.
Copyright (C) 2012 Takafumi Arakaki
Author: Takafumi Arakaki <aka.tkf at gmail.com>
This file is NOT part of GNU Emacs.
Jedi EPC server is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Jedi EPC server is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Jedi EPC server.
If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import glob
import itertools
import logging
import logging.handlers
import os
import re
import site
import sys
from collections import namedtuple
import jedi
import jedi.api
import epc
import epc.server
import sexpdata
logger = logging.getLogger('jediepcserver')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__)
parser.add_argument(
'--address', default='localhost')
parser.add_argument(
'--port', default=0, type=int)
parser.add_argument(
'--port-file', '-f', default='-', type=argparse.FileType('wt'),
    help='file to write port on. default is stdout.')
parser.add_argument(
'--sys-path', '-p', default=[], action='append',
help='paths to be inserted at the top of `sys.path`.')
parser.add_argument(
'--sys-path-append', default=[], action='append',
help='paths to be appended at the end of `sys.path`.')
parser.add_argument(
'--virtual-env', '-v', default=[], action='append',
help='paths to be used as if VIRTUAL_ENV is set to it.')
parser.add_argument(
'--log', help='Save server log to this file.')
parser.add_argument(
'--log-level',
choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
help='Logging level for log file.')
parser.add_argument(
'--log-rotate-max-size', default=0, type=int,
help='Rotate log file after it reaches this size',
)
parser.add_argument(
'--log-rotate-max-count', default=3, type=int,
help='Max number of log rotations before removal',
)
parser.add_argument(
'--log-traceback', action='store_true', default=False,
help='Include traceback in logging output.')
parser.add_argument(
'--pdb', dest='debugger', const='pdb', action='store_const',
help='start pdb when error occurs.')
parser.add_argument(
'--ipdb', dest='debugger', const='ipdb', action='store_const',
help='start ipdb when error occurs.')
PY3 = (sys.version_info[0] >= 3)
NEED_ENCODE = not PY3
LogSettings = namedtuple(
'LogSettings',
[
'log_file',
'log_level',
'log_rotate_max_size',
'log_rotate_max_count',
],
)
try:
jedi.create_environment
except AttributeError:
jedi_create_environment = None
else:
_cached_jedi_environments = {}
def jedi_create_environment(venv, safe=False):
"""Cache jedi environments to avoid startup cost."""
try:
return _cached_jedi_environments[venv]
except KeyError:
logger.info('Creating jedi environment: %s', venv)
if venv is None:
jedienv = jedi.api.environment.get_default_environment()
else:
jedienv = jedi.create_environment(venv, safe=safe)
_cached_jedi_environments[venv] = jedienv
return jedienv
def get_venv_sys_path(venv):
if jedi_create_environment is not None:
return jedi_create_environment(venv).get_sys_path()
from jedi.evaluate.sys_path import get_venv_path
return get_venv_path(venv)
class JediEPCHandler(object):
def __init__(self, sys_path=(), virtual_envs=(), sys_path_append=()):
self.script_kwargs = self._get_script_path_kwargs(
sys_path=sys_path,
virtual_envs=virtual_envs,
sys_path_append=sys_path_append,
)
def get_sys_path(self):
environment = self.script_kwargs.get('environment')
if environment is not None:
return environment.get_sys_path()
sys_path = self.script_kwargs.get('sys_path')
if sys_path is not None:
return sys_path
return sys.path
@classmethod
def _get_script_path_kwargs(cls, sys_path, virtual_envs, sys_path_append):
result = {}
if jedi_create_environment:
# Need to specify some environment explicitly to workaround
# https://github.com/davidhalter/jedi/issues/1242. Otherwise jedi
# will create a lot of child processes.
if virtual_envs:
primary_env, virtual_envs = virtual_envs[0], virtual_envs[1:]
primary_env = path_expand_vars_and_user(primary_env)
else:
primary_env = None
try:
result['environment'] = jedi_create_environment(primary_env)
except Exception:
logger.warning(
'Cannot create environment for %r', primary_env, exc_info=1
)
if primary_env is not None:
result['environment'] = jedi_create_environment(None)
if not sys_path and not virtual_envs and not sys_path_append:
# No additional path customizations.
return result
# Either multiple environments or custom sys_path extensions are
# specified, or jedi version doesn't support environments.
final_sys_path = []
final_sys_path.extend(path_expand_vars_and_user(p) for p in sys_path)
for p in virtual_envs:
final_sys_path.extend(get_venv_sys_path(path_expand_vars_and_user(p)))
final_sys_path.extend(
path_expand_vars_and_user(p) for p in sys_path_append
)
dupes = set()
def not_seen_yet(val):
if val in dupes:
return False
dupes.add(val)
return True
result['sys_path'] = [p for p in final_sys_path if not_seen_yet(p)]
return result
def jedi_script(self, source, line, column, source_path):
if NEED_ENCODE:
source = source.encode('utf-8')
source_path = source_path and source_path.encode('utf-8')
return jedi.Script(
source, line, column, source_path or '', **self.script_kwargs
)
def complete(self, *args):
def _wrap_completion_result(comp):
try:
docstr = comp.docstring()
except Exception:
logger.warning(
"Cannot get docstring for completion %s", comp, exc_info=1
)
docstr = ""
return dict(
word=comp.name,
doc=docstr,
description=candidates_description(comp),
symbol=candidate_symbol(comp),
)
return [
_wrap_completion_result(comp)
for comp in self.jedi_script(*args).completions()
]
def get_in_function_call(self, *args):
sig = self.jedi_script(*args).call_signatures()
call_def = sig[0] if sig else None
if not call_def:
return []
return dict(
            # p.description should do the job. But jedi-vim uses replace.
# So follow what jedi-vim does...
params=[PARAM_PREFIX_RE.sub('', p.description).replace('\n', '')
for p in call_def.params],
index=call_def.index,
call_name=call_def.name,
)
def _goto(self, method, *args):
"""
Helper function for `goto_assignments` and `usages`.
:arg method: `jedi.Script.goto_assignments` or `jedi.Script.usages`
:arg args: Arguments to `jedi_script`
"""
# `definitions` is a list. Each element is an instances of
# `jedi.api_classes.BaseOutput` subclass, i.e.,
# `jedi.api_classes.RelatedName` or `jedi.ap
|
diegocepedaw/oncall
|
e2e/test_schedules.py
|
Python
|
bsd-2-clause
| 6,315
| 0.002375
|
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib.parse
import requests
from testutils import prefix, api_v0
HOUR = 60 * 60
DAY = HOUR * 24
@prefix('test_v0_schedules')
def test_api_v0_schedules(team, roster, role):
tuesday9am = 2 * DAY + 9 * HOUR
tuesday9pm = tuesday9am + 12 * HOUR
wednesday9am = tuesday9pm + 12 * HOUR
wednesday9pm = wednesday9am + 12 * HOUR
team_name = team.create()
team_name_2 = team.create()
roster_name = roster.create(team_name)
roster_name_2 = roster.create(team_name_2)
role_name = role.create()
role_name_2 = role.create()
# test create schedule
events = [{'start': tuesday9am, 'duration': 12 * HOUR},
{'start': tuesday9pm, 'duration': 12 * HOUR},
{'start': wednesday9am, 'duration': 12 * HOUR},
{'start': wednesday9pm, 'duration': 12 * HOUR}]
re = requests.post(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)),
json={
'role': role_name,
'events': events,
'advanced_mode': 1
})
assert re.status_code == 201
schedule_id = str(re.json()['id'])
# verify schedule created properly
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)))
assert re.status_code == 200
data = re.json()
assert len(data) == 1
schedule = data[0]
assert schedule['role'] == role_name
# check consecutive events have been merged
    assert len(schedule['events']) == 1
assert schedule['events'][0]['start'] == tuesday9am
assert schedule['events'][0]['duration'] == 48 * HOUR
assert schedule['advanced_mode'] == 1
# test 'schedule' endpoint
re = requests.get(api_v0('schedules/%s' % (schedule_id)))
assert re.status_code == 200
assert re.json() == data[0]
updated_events = [{'start': 0, 'duration': 100}, {'start': 150, 'duration': 200}]
# verify schedule updates properly
re = requests.put(api_v0('schedules/' + schedule_id),
json={'role': role_name_2,
'team': team_name_2,
'roster': roster_name_2,
'auto_populate_threshold': 28,
'events': updated_events,
'advanced_mode': 1})
assert re.status_code == 200
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2)))
assert re.status_code == 200
data = re.json()
assert len(data) == 1
schedule = data[0]
assert schedule['roster'] == roster_name_2
assert schedule['role'] == role_name_2
assert schedule['auto_populate_threshold'] == 28
assert schedule['events'] == updated_events
assert schedule['advanced_mode'] == 1
re = requests.put(api_v0('schedules/' + schedule_id), json={'team': team_name, 'roster': roster_name})
assert re.status_code == 200
# test delete schedule
re = requests.delete(api_v0('schedules/' + schedule_id))
assert re.status_code == 200
# verify schedule was deleted
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2)))
assert re.status_code == 200
data = re.json()
assert data == []
@prefix('test_v0_advanced_schedule')
def test_api_v0_advanced_schedule(team, roster, role, schedule):
team_name = team.create()
roster_name = roster.create(team_name)
role_name = role.create()
schedule_id = schedule.create(team_name,
roster_name,
{'role': role_name,
'events': [{'start': 0, 'duration': 100},
{'start': 200, 'duration': 300}],
'advanced_mode': 1})
# check invalid schedule updates
re = requests.put(api_v0('schedules/%d' % schedule_id), json={'events': [{'start': 0, 'duration': 100},
{'start': 150, 'duration': 300}],
'advanced_mode': 0})
assert re.status_code == 400
re = requests.put(api_v0('schedules/%d' % schedule_id), json={'advanced_mode': 0})
assert re.status_code == 400
@prefix('test_v0_invalid_schedule_event')
def test_api_v0_invalid_schedule_event(team, roster, role, schedule):
team_name = team.create()
roster_name = roster.create(team_name)
role_name = role.create()
api_url = api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name))
re = requests.post(api_url, json={
'role': role_name,
'events': [{'duration': 100},
{'start': 150, 'duration': 300}],
'advanced_mode': 1
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': [{'start': 150}],
'advanced_mode': 1
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': [{'start': 150, 'duration': 300}],
'advanced_mode': 0
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': 7 * [{'start': 150, 'duration': 300}],
'advanced_mode': 0
})
assert re.status_code == 400
@prefix('test_v0_schedules_spaces')
def test_api_v0_schedules_with_spaces_in_roster_name(team):
team_name = 'test_v0 spaces team foo'
roster_name = 'test_v0 spaces roster foo'
re = requests.post(api_v0('teams'), json={'name': team_name, 'scheduling_timezone': 'UTC'})
assert re.status_code == 201
team.mark_for_cleaning(team_name)
re = requests.post(api_v0('teams/%s/rosters' % team_name),
json={'name': roster_name})
assert re.status_code == 201
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' %
(team_name, urllib.parse.quote(roster_name, safe=''))))
assert re.status_code == 200
|
igemsoftware/SYSU-Software2013
|
project/Python27_32/Lib/site-packages/pypm/common/util.py
|
Python
|
mit
| 7,802
| 0.003204
|
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.common.util
~~~~~~~~~~~~~~~~
Assorted utility code
"""
import os
from os import path as P
import sys
import re
from contextlib import contextmanager
import logging
import time
import textwrap
from datetime import datetime
from pkg_resources import Requirement
from pkg_resources import resource_filename
import six
import pypm
from zclockfile import LockFile
LOG = logging.getLogger(__name__)
# Language/library utilities
#####################################################################
def wrapped(txt, prefix='', **options):
"""Return wrapped text suitable for printing to terminal"""
MAX_WIDTH=70 # textwrap.wrap's default
return '\n'.join([
'{0}{1}'.format(prefix, line)
for line in textwrap.wrap(txt, width=MAX_WIDTH-len(prefix), **options)])
def lazyproperty(func):
"""A property decorator for lazy evaluation"""
cache = {}
def _get(self):
"""Return the property value from cache once it is calculated"""
try:
return cache[self]
except KeyError:
cache[self] = value = func(self)
return value
return property(_get)
def memoize(fn):
"""Memoize functions that take simple arguments
    The arguments of this function must be 'hashable'
Keywords are not supported
"""
memo = {}
def wrapper(*args):
key = tuple(args)
if key not in memo:
memo[key] = fn(*args)
return memo[key]
return wrapper
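# Example (illustrative only, not part of the original module): memoize caches
# the result for each distinct tuple of positional arguments, so a pure
# function is computed only once per input.
#
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)   # later calls with the same n return the cached value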
class ConfigParserNamedLists(object):
"""Parse a named mapping from the configuration file.
Example input (config file):
[packages]
free = http://pypm-free.as.com
be = http://pypm-be.as.com
staging = http://pypm-staging.as.com
default = be free
QA = staging default
What this class produces (self.mapping):
{
'free': [factory('free', 'http://pypm-free.as.com')],
'be': [factory('be', 'http://pypm-be.as.com')],
'staging': [factory('staging', 'http://pypm-staging.as.com')],
'default': [factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
'QA': [factory('staging', 'http://pypm-staging.as.com'),
factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
}
"""
    VALUE_SEP = re.compile(r'[\s,]+')
def __init__(self, option_items, factory, is_sentinel):
"""
- option_items: ConfigParser.items('yoursection')
- factory: a function that produces the value object
        - is_sentinel: a function that returns True for sentinels
"""
self.option_items = option_items
self.factory = factory
self.is_sentinel = is_sentinel
self.mapping = {}
self._init()
def _init(self):
for name, value in self.option_items:
if name in self.mapping:
raise ValueError('duplicate option key found: {0}'.format(name))
else:
self.mapping[name] = value
# substitute references
_processed = set()
for name in self.mapping:
self._expand_rvalue(name, _processed)
def _expand_rvalue(self, name, processed):
if name in processed:
return
value = self.mapping[name]
if isinstance(value, list):
processed.add(name)
return
if name not in self.mapping:
raise ValueError('unknown option reference: {0}'.format(name))
if self.is_sentinel(value):
self.mapping[name] = [self.factory(name, value)]
else:
self.mapping[name] = []
for part in self.VALUE_SEP.split(value):
self._expand_rvalue(part, processed)
self.mapping[name].extend(self.mapping[part])
# System routines
######################################################################
@contextmanager
def locked(lockfile):
"""'with' context to lock a file"""
lock = LockFile(lockfile)
try:
yield
finally:
lock.close()
@contextmanager
def dlocked(directory):
"""Lock based on a directory
    You need this function if you do not want more than one process to be
operating on a directory
"""
if not P.exists(directory):
os.makedirs(directory)
lockfile = P.join(directory, '.lock')
with locked(lockfile):
yield
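# Example (illustrative only; '/tmp/pypm-work' is a placeholder): serialising
# access to a shared directory so two pypm processes never touch it at once.
#
#     with dlocked('/tmp/pypm-work'):
#         pass  # only one process at a time executes this block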
def get_user_agent(default):
"""Return an user agent string representing PyPM
Retain the default user-agent for backward-compat
"""
return '{0} (PyPM {1.__version__})'.format(default, pypm)
# Path routines
# ########################################################################
def existing(path):
"""Return path, but assert its presence first"""
assert isinstance(path, (six.string_types, six.text_type)), \
'not of string type: %s <%s>' % (path, type(path))
assert P.exists(path), 'file/directory not found: %s' % path
return path
def concise_path(pth):
"""Return a concise, but human-understandable, version of ``pth``
Compresses %HOME% and %APPDATA%
"""
aliases = [
('%APPDATA%', os.getenv('APPDATA', None)),
('~', P.expanduser('~')),
]
for alias, pthval in aliases:
if pthval and pth.startswith(pthval):
return P.join(alias, P.relpath(pth, pthval))
return pth
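# Example (illustrative only; '/home/alice' stands in for the real home
# directory):
#
#     concise_path('/home/alice/.pypm/cache')   # -> '~/.pypm/cache'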
def abs2rel(absolute_path):
"""Convert an absolute path to relative path assuming the topmost directory
is the bast dir.
>>> strip_abs_root('/opt/ActivePython/')
'opt/ActivePython/'
>>> strip_abs_root('/opt/ActivePython')
'opt/ActivePython'
"""
assert os.path.isabs(absolute_path), \
'`%s` is not a absolute path' % absolute_path
if sys.platform.startswith('win'):
assert absolute_path[1:3] == ':\\'
return absolute_path[3:] # remove the DRIVE
else:
assert absolute_path[0] == '/'
return absolute_path[1:] # remove the '/'
def url_join(url, components):
"""Join URL components .. always with a forward slash"""
assert type(components) is list
assert '\\' not in url, \
'URL is not supposed to contain backslashes. Is this windows path? '+url
return url + '/' + '/'.join(components)
def path_to_url(path):
"""Convert local path to remote url
"""
if sys.platform.startswith('win'):
assert '/' not in path, \
'windows path cannot contain forward slash: '+path
drive, path = os.path.splitdrive(path)
return url_join('file:///' + drive,
path.split('\\'))
else:
return 'file://' + P.abspath(path)
def pypm_file(*paths):
"""Return absolute path to a file residing inside the pypm package using
pkg_resources API"""
return resource_filename(Requirement.parse('pypm'), P.join(*paths))
|
class BareDateTime(six.text_type):
"""Wrapper around the DateTime object with our own s
|
tandard string
representation
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
@classmethod
def to_string(cls, dt):
"""Convert the datetime object `dt` to a string
with format as defind by this class
"""
return dt.strftime(cls.FORMAT)
@classmethod
def to_datetime(cls, dt_string):
"""Convert dt_string, formatted by `to_string()` method above"""
ts = time.mktime(time.strptime(dt_string, cls.FORMAT))
return datetime.fromtimestamp(ts)
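# A small round-trip sketch (illustrative only): datetime -> string -> datetime
# using the fixed "%Y-%m-%d %H:%M:%S" format defined above. Microseconds are
# dropped because the format does not carry them.
if __name__ == '__main__':
    now = datetime.now().replace(microsecond=0)
    encoded = BareDateTime.to_string(now)
    assert BareDateTime.to_datetime(encoded) == now
    print(encoded)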
|
google/merge_pyi
|
testdata/heuristics.comment.py
|
Python
|
apache-2.0
| 631
| 0.006339
|
from typing import Any
# Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)
# If not annotate_pep484, info in pyi files is augmented with heuristics to decide if un-annotated
# arguments are "Any" or "" (like "self")
class B(object):
def __init__(self):
pass
def f(self, x):
# type: (e1) -> None
pass
class C(object):
def __init__(self, x):
# type: (e2) -> None
pass
@staticmethod
def f2():
pass
@staticmethod
def f3(x, y):
# type: (Any, e3) -> None
pass
    @classmethod
def f4(cls):
pass
|
vmindru/ansible
|
lib/ansible/context.py
|
Python
|
gpl-3.0
| 2,049
| 0.003416
|
# Copyright: (c) 2018, Toshio Kuratomi <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Context of the running Ansible.
In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
with different contexts but that is currently out of scope as the Ansible library is just for
running the ansible command line tools.
These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
"""
from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
__all__ = ('CLIARGS',)
# Note: this is not the singleton version. The Singleton is only created once the program has
# actually parsed the args
CLIARGS = CLIArgs({})
# This should be called immediately after cli_args are processed (parsed, validated, and any
# normalization performed on them). No other code should call it
def _init_global_context(cli_args):
"""Initialize the global context objects"""
global CLIARGS
CLIARGS = GlobalCLIArgs.from_options(cli_args)
def cliargs_deferred_get(key, default=None, shallowcopy=False):
"""Closure over getting a key from CLIARGS with shallow copy functionality
Primarily used in ``FieldAttribute`` where we need to defer setting the default
until after the CLI arguments have been parsed
This function is not directly bound to ``CliArgs`` so that it works with
``CLIARGS`` being replaced
"""
def inner():
value = CLIARGS.get(key, default=default)
if not shallowcopy:
return value
elif is_sequence(value):
return value[:]
elif isinstance(value, (Mapping, Set)):
return value.copy()
return value
return inner
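# A minimal sketch of the deferred lookup described above (illustrative only;
# it assigns CLIARGS directly, as the module default above does, instead of
# going through _init_global_context()).
if __name__ == '__main__':
    get_verbosity = cliargs_deferred_get('verbosity', default=0)
    print(get_verbosity())  # -> 0, CLIARGS is still the empty default
    CLIARGS = CLIArgs({'verbosity': 3})
    print(get_verbosity())  # -> 3, the closure re-reads the replaced CLIARGS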
|
puttarajubr/commcare-hq
|
corehq/apps/app_manager/tests/test_case_meta.py
|
Python
|
bsd-3-clause
| 2,721
| 0.00147
|
from django.test.testcases import SimpleTestCase
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module, OpenCaseAction, ParentSelect, OpenSubCaseAction, \
AdvancedModule, LoadUpdateAction, AdvancedOpenCaseAction
from mock import patch
class CaseMetaTest(SimpleTestCase):
def setUp(self):
self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use')
self.is_usercase_in_use_mock = self.is_usercase_in_use_patch.start()
def tearDown(self):
self.is_usercase_in_use_patch.stop()
def _make_module(self, app, module_id, case_type):
m = app.add_module(Module.new_module('Module{}'.format(module_id), lang='en'))
m.case_type = case_type
mf = app.new_form(module_id, 'form {}'.format(case_type), lang='en')
mf.actions.open_case = OpenCaseAction(name_path="/data/question1", external_id=None)
mf.actions.open_case.condition.type = 'always'
return m
    def test_hierarchy(self):
app, expected_hierarchy = self.get_test_app()
meta = app.get_case_metadata()
self.assertDictEqual(meta.type_hierarchy, expected_hierarchy)
def get_test_app(self):
app = Application.new_app('domain', 'New App', APP_V2)
app.version = 1
m0 = self._make_module(app, 0, 'parent')
m0.get_form(0).actions.subcases.append(OpenSubCaseAction(
case_type='child',
reference_id='parent'
))
m1 = self._make_module(app, 1, 'child')
m1.get_form(0).actions.subcases.append(OpenSubCaseAction(
case_type='grand child',
reference_id='parent'
))
m2 = self._make_module(app, 2, 'grand child')
m3 = app.add_module(AdvancedModule.new_module('Module3', lang='en'))
m3.case_type = 'other grand child'
m3f0 = m3.new_form('other form', 'en')
m3f0.actions.load_update_cases.append(LoadUpdateAction(
case_type='child',
case_tag='child'))
m3f0.actions.open_cases.append(AdvancedOpenCaseAction(
name_path='/data/question1',
case_type='other grand child',
parent_tag='child'
))
m3f0.actions.open_cases[0].open_condition.type = 'always'
m2.parent_select = ParentSelect(active=True, module_id=m1.unique_id)
m1.parent_select = ParentSelect(active=True, module_id=m0.unique_id)
expected_hierarchy = {
'parent': {
'child': {
'grand child': {},
'other grand child': {}
}
}
}
return app, expected_hierarchy
|
viniciusd/DCO1008---Digital-Signal-Processing
|
projeto3/aiff.py
|
Python
|
mit
| 913
| 0.001095
|
import aifc
import sndhdr
import utils
class Aiff:
def __init__(self, filename):
assert sndhdr.what(filename).filetype == 'aiff'
x = aifc.open(filename)
data = x.readframes(x.getnframes())
self.nchannels = x.getnchannels()
self.sampwidth = x.getsampwidth()
self.framerate = x.getframerate()
self.sig = utils.from_buffer(data).reshape(-1, x.getnchannels())
def save(self, filename):
y = aifc.open(filename, 'wb')
y.setnchannels(self.nchannels)
y.setsampwidth(self.sampwidth)
y.setframerate(self.framerate)
y.writeframes(self.sig.flatten().tobytes())
def save_channel(self, filename, channel):
y = aifc.open(filename, 'wb')
y.setnchannels(1)
y.setsampwidth(self.sampwidth)
y.setframerate(self.framerate)
        y.writeframes(self.sig[:, channel].flatten().tobytes())
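# A minimal usage sketch (illustrative only; 'input.aiff' is a placeholder path
# and `utils.from_buffer` is this project's own helper for decoding the frames).
if __name__ == '__main__':
    audio = Aiff('input.aiff')
    print('%d channel(s) at %d Hz' % (audio.nchannels, audio.framerate))
    audio.save_channel('channel0.aiff', 0)  # write channel 0 out on its own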
|
wexi/python-for-android
|
pythonforandroid/recipes/asn1crypto/__init__.py
|
Python
|
mit
| 426
| 0.002347
|
from pythonforandroid.recipe import PythonRecipe
class Asn1cryptoRecipe(PythonRecipe):
name = 'asn1crypto'
    version = '0.23.0'
url = 'https://pypi.python.org/packages/31/53/8bca924b30cb79d6d70dbab6a99e8731d1e4dd3b090b7f3d8412a8d8ffbc/asn1crypto-0.23.0.tar.gz#md5=97d54665c397b72b165768398dfdd876'
depends = ['python2', 'setuptools']
    call_hostpython_via_targetpython = False
recipe = Asn1cryptoRecipe()
|
ESOedX/edx-platform
|
common/lib/xmodule/xmodule/video_module/video_module.py
|
Python
|
agpl-3.0
| 51,311
| 0.002904
|
# -*- coding: utf-8 -*-
"""Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
from __future__ import absolute_import
import copy
import json
import logging
from collections import OrderedDict, defaultdict
from operator import itemgetter
import six
from django.conf import settings
from lxml import etree
from opaque_keys.edx.locator import AssetLocator
from web_fragments.fragment import Fragment
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from openedx.core.djangoapps.video_config.models import HLSPlaybackEnabledFlag, CourseYoutubeBlockedFlag
from openedx.core.djangoapps.video_pipeline.config.waffle import DEPRECATE_YOUTUBE, waffle_flags
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.license import LicenseMixin
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingMixin, TabsEditingMixin
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.raw_module import EmptyDataRawMixin
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.video_module import manage_video_subtitles_save
from xmodule.x_module import (
PUBLIC_VIEW, STUDENT_VIEW,
HTMLSnippet, ResourceTemplates, shim_xmodule_js,
XModuleMixin, XModuleToXBlockMixin, XModuleDescriptorToXBlockMixin,
)
from xmodule.xml_module import XmlMixin, deserialize_field, is_pointer_tag, name_to_pathname
from .bumper_utils import bumperize
from .transcripts_utils import (
Transcript,
VideoTranscriptsMixin,
clean_video_id,
get_html5_ids,
get_transcript_for_video,
subs_filename
)
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from .video_utils import create_youtube_string, format_xml_exception_message, get_poster, rewrite_video_url
from .video_xfields import VideoFields
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoBlock should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoBlock out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoBlock should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoBlock tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
EXPORT_IMPORT_COURSE_DIR = u'course'
EXPORT_IMPORT_STATIC_DIR = u'static'
@XBlock.wants('settings', 'completion', 'i18n', 'request_cache')
class VideoBlock(
VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers, VideoStudentViewHandlers,
TabsEditingMixin, EmptyDataRawMixin, XmlMixin, EditingMixin,
XModuleDescriptorToXBlockMixin, XModuleToXBlockMixin, HTMLSnippet, ResourceTemplates, XModuleMixin,
LicenseMixin):
"""
XML source example:
<video show_captions="true"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
url_name="lecture_21_3" display_name="S19V3: Vacancies"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</video>
"""
has_custom_completion = True
completion_mode = XBlockCompletionMode.COMPLETABLE
video_time = 0
icon_class = 'video'
show_in_read_only_mode = True
tabs = [
{
'name': _("Basic"),
'template': "video/transcripts.html",
'current': True
},
{
'name': _("Advanced"),
'template': "tabs/metadata-edit-tab.html"
}
]
uses_xmodule_styles_setup = True
requires_per_student_anonymous_id = True
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
            lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(list(languages.items()), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
        return track_url, transcript_language, sorted_languages
@property
def youtube_deprecated(self):
"""
Return True if youtube is deprecated and hls as primary playback is enabled else False
"""
# Return False if `hls` playback feature is disabled.
if not HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key):
return False
# check if youtube has been deprecated and hls as primary playb
|
Cinntax/home-assistant
|
homeassistant/components/device_tracker/__init__.py
|
Python
|
apache-2.0
| 5,372
| 0.000931
|
"""Provide functionality to keep track of devices."""
import asyncio
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.components import group
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.const import ATTR_GPS_ACCURACY, STATE_HOME
from . import legacy, setup
from .config_entry import ( # noqa # pylint: disable=unused-import
async_setup_entry,
async_unload_entry,
)
from .legacy import DeviceScanner # noqa # pylint: disable=unused-import
from .const import (
ATTR_ATTRIBUTES,
ATTR_BATTERY,
ATTR_CONSIDER_HOME,
ATTR_DEV_ID,
ATTR_GPS,
ATTR_HOST_NAME,
ATTR_LOCATION_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_AWAY_HIDE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_SCAN_INTERVAL,
CONF_TRACK_NEW,
DEFAULT_AWAY_HIDE,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
PLATFORM_TYPE_LEGACY,
SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
)
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format("all_devices")
SERVICE_SEE = "see"
SOURCE_TYPES = (
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_BLUETOOTH_LE,
)
NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any(
None,
vol.Schema(
{
vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
}
),
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SCAN_INTERVAL): cv.time_period,
vol.Optional(CONF_TRACK_NEW): cv.boolean,
vol.Optional(CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_NEW_DEVICE_DEFAULTS, default={}): NEW_DEVICE_DEFAULTS_SCHEMA,
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID),
{
ATTR_MAC: cv.string,
ATTR_DEV_ID: cv.string,
ATTR_HOST_NAME: cv.string,
ATTR_LOCATION_NAME: cv.string,
ATTR_GPS: cv.gps,
ATTR_GPS_ACCURACY: cv.positive_int,
ATTR_BATTERY: cv.positive_int,
ATTR_ATTRIBUTES: dict,
ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES),
ATTR_CONSIDER_HOME: cv.time_period,
# Temp workaround for iOS app introduced in 0.65
vol.Optional("battery_status"): str,
vol.Optional("hostname"): str,
},
)
)
@bind_hass
def is_on(hass: HomeAssistantType, entity_id: str = None):
"""Return the state if any or a specified device is home."""
entity = entity_id or ENTITY_ID_ALL_DEVICES
return hass.states.is_state(entity, STATE_HOME)
def see(
hass: HomeAssistantType,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy=None,
battery: int = None,
attributes: dict = None,
):
"""Call service to notify you see device."""
data = {
key: value
for key, value in (
(ATTR_MAC, mac),
(ATTR_DEV_ID, dev_id),
(ATTR_HOST_NAME, host_name),
(ATTR_LOCATION_NAME, location_name),
(ATTR_GPS, gps),
(ATTR_GPS_ACCURACY, gps_accuracy),
(ATTR_BATTERY, battery),
)
        if value is not None
}
if attributes:
        data[ATTR_ATTRIBUTES] = attributes
hass.services.call(DOMAIN, SERVICE_SEE, data)
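# Example (illustrative only; "paulus_phone" is a hypothetical device id):
# reporting a GPS fix through the helper above. The service schema requires at
# least a mac or a dev_id; everything else is optional.
#
#     see(hass, dev_id="paulus_phone", gps=(52.37, 4.89), gps_accuracy=20,
#         battery=71, attributes={"speed": 0})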
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the device tracker."""
tracker = await legacy.get_tracker(hass, config)
legacy_platforms = await setup.async_extract_config(hass, config)
setup_tasks = [
legacy_platform.async_setup_legacy(hass, tracker)
for legacy_platform in legacy_platforms
]
if setup_tasks:
await asyncio.wait(setup_tasks)
tracker.async_setup_group()
async def async_platform_discovered(p_type, info):
"""Load a platform."""
platform = await setup.async_create_platform_type(hass, config, p_type, {})
if platform is None or platform.type != PLATFORM_TYPE_LEGACY:
return
await platform.async_setup_legacy(hass, tracker, info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
# Clean up stale devices
async_track_utc_time_change(
hass, tracker.async_update_stale, second=range(0, 60, 5)
)
async def async_see_service(call):
"""Service to see a device."""
# Temp workaround for iOS, introduced in 0.65
data = dict(call.data)
data.pop("hostname", None)
data.pop("battery_status", None)
await tracker.async_see(**data)
hass.services.async_register(
DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA
)
# restore
await tracker.async_setup_tracked_device()
return True
|
richtier/imhotep
|
imhotep/repomanagers.py
|
Python
|
mit
| 4,477
| 0
|
import logging
import os
from tempfile import mkdtemp
from .repositories import Repository, AuthenticatedRepository
log = logging.getLogger(__name__)
class RepoManager(object):
"""
Manages creation and deletion of `Repository` objects.
"""
to_cleanup = {}
def __init__(self, authenticated=False, cache_directory=None,
tools=None, executor=None, shallow_clone=False):
        self.should_cleanup = cache_directory is None
self.authenticated = authenticated
self.cache_directory = cache_directory
self.tools = tools or []
self.executor = executor
self.shallow = shallow_clone
def get_repo_class(self):
if self.authenticated:
return AuthenticatedRepository
return Repository
def clone_dir(self, repo_name):
dired_repo_name = repo_name.replace('/', '__')
if not self.cache_directory:
dirname = mkdtemp(suffix=dired_repo_name)
else:
dirname = os.path.abspath("%s/%s" % (
self.cache_directory, dired_repo_name))
return dirname
def fetch(self, dirname, remote_name, ref):
log.debug("Fetching %s %s", remote_name, ref)
self.executor("cd %s && git fetch --depth=1 %s %s" % (dirname,
remote_name,
ref))
def pull(self, dirname):
log.debug("Pulling all %s", dirname)
self.executor("cd %s && git pull --all" % dirname)
def add_remote(self, dirname, name, url):
log.debug("Adding remote %s url: %s", name, url)
self.executor("cd %s && git remote add %s %s" % (dirname,
name,
url))
def set_up_clone(self, repo_name, remote_repo):
"""Sets up the working directory and returns a tuple of
(dirname, repo )"""
dirname = self.clone_dir(repo_name)
self.to_cleanup[repo_name] = dirname
klass = self.get_repo_class()
repo = klass(repo_name,
dirname,
self.tools,
self.executor,
shallow=self.shallow_clone)
return (dirname, repo)
def clone_repo(self, repo_name, remote_repo, ref):
"""Clones the given repo and returns the Repository object."""
self.shallow_clone = False
dirname, repo = self.set_up_clone(repo_name, remote_repo)
if os.path.isdir("%s/.git" % dirname):
log.debug("Updating %s to %s", repo.download_location, dirname)
self.executor(
"cd %s && git checkout master" % dirname)
self.pull(dirname)
else:
log.debug("Cloning %s to %s", repo.download_location, dirname)
self.executor(
"git clone %s %s" % (repo.download_location, dirname))
if remote_repo is not None:
log.debug("Pulling remote branch from %s", remote_repo.url)
self.add_remote(dirname,
remote_repo.name,
remote_repo.url)
self.pull(dirname)
return repo
def cleanup(self):
if self.should_cleanup:
for repo_dir in self.to_cleanup.values():
log.debug("Cleaning up %s", repo_dir)
self.executor('rm -rf %s' % repo_dir)
class ShallowRepoManager(RepoManager):
def __init__(self, *args, **kwargs):
super(ShallowRepoManager, self).__init__(*args, **kwargs)
def clone_repo(self, repo_name, remote_repo, ref):
self.shallow_clone = True
dirname, repo = self.set_up_clone(repo_name, remote_repo)
remote_name = 'origin'
log.debug("Shallow cloning.")
download_location = repo.download_location
log.debug("Creating stub git repo at %s" % (dirname))
self.executor("mkdir -p %s" % (dirname, ))
self.executor("cd %s && git init" % (dirname, ))
log.debug("Adding origin repo %s " % (download_location))
self.add_remote(dirname, 'origin', download_location)
if remote_repo:
self.add_remote(dirname, remote_repo.name, remote_repo.url)
remote_name = remote_repo.name
self.fetch(dirname, 'origin', 'HEAD')
self.fetch(dirname, remote_name, ref)
return repo
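# A minimal usage sketch (illustrative only; the executor is just a callable
# that runs a shell command and 'someorg/somerepo' is a placeholder name).
if __name__ == '__main__':
    import subprocess
    manager = RepoManager(cache_directory='/tmp/imhotep-cache',
                          executor=lambda cmd: subprocess.check_call(cmd, shell=True))
    manager.clone_repo('someorg/somerepo', remote_repo=None, ref='master')
    print(manager.clone_dir('someorg/somerepo'))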
|
ASCIT/donut-python
|
donut/default_config.py
|
Python
|
mit
| 472
| 0
|
"""Default website configur
|
ations, used only for testing.
"""
from donut import environment
# Public Test Database
TEST = environment.Environment(
db_hostname="localhost",
db_name="donut_test",
db_user="donut_test",
db_password="public",
debug=True,
testing=True,
secret_key="1234567890",
imgur_api={
"id": "b579f690cacf867",
"secret
|
": "****************************************"
},
restricted_ips=r"127\.0\.0\.1")
|
rm-hull/luma.core
|
luma/core/sprite_system.py
|
Python
|
mit
| 8,896
| 0.000787
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Simplified sprite animation framework.
.. note:: This module is an evolving "work-in-progress" and should be treated
as such until such time as this notice disappears.
"""
from time import sleep, perf_counter
from PIL import Image
class dict_wrapper(object):
"""
Helper class to turn dictionaries into objects.
"""
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [dict_wrapper(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, dict_wrapper(b) if isinstance(b, dict) else b)
class spritesheet(object):
"""
A sprite sheet is a series of images (usually animation frames) combined
into a larger image. A dictionary is usually spread into the object
constructor parameters with the following top-level attributes:
:param image: A path to a sprite map image.
:type image: str
:param frames: A dictionary of settings that defines how to extract
individual frames from the supplied image, as follows
- ``width`` & ``height`` are required and specify the dimensions of
the frames
- ``regX`` & ``regY`` indicate the registration point or "origin" of
the frames
- ``count`` allows you to specify the total number of frames in the
spritesheet; if omitted, this will be calculated based on the
dimensions of the source images and the frames. Frames will be
assigned indexes based on their position in the source images
(left to right, top to bottom).
:type frames: dict
:param animations: A dictionary of key/value pairs where the key is the
        name of the animation sequence, and the value is the settings that
        define an animation sequence as follows:
        - ``frames`` is a list of frames to show in sequence. Usually this
          consists of frame numbers, but can refer to other animation
          sequences (which are handled much like a subroutine call).
        - ``speed`` determines how quickly the animation frames are cycled
          through compared to how often the animation sequence yields.
- ``next`` is optional, but if supplied, determines what happens when
the animation sequence is exhausted. Typically this can be used to
self-reference, so that it forms an infinite loop, but can hand off
to any other animation sequence.
:type animations: dict
Loosely based on https://www.createjs.com/docs/easeljs/classes/SpriteSheet.html
"""
def __init__(self, image, frames, animations):
with open(image, 'rb') as fp:
self.image = Image.open(fp)
self.image.load()
self.frames = dict_wrapper(frames)
self.animations = dict_wrapper(animations)
# Reframe the sprite map in terms of the registration point (if set)
regX = self.frames.regX if hasattr(self.frames, "regX") else 0
regY = self.frames.regY if hasattr(self.frames, "regY") else 0
self.image = self.image.crop((regX, regY, self.image.width - regX, self.image.height - regY))
        self.width, self.height = self.image.size
assert(self.width % self.frames.width == 0)
assert(self.height % self.frames.height == 0)
self.frames.size = (self.frames.width, self.frames.height)
if not hasattr(self.frames, 'count'):
self.frames.count = (self.width * self.height) // (self.frames.width * self.frames.height)
self.cache = {}
def __getitem__(self, frame_index):
"""
Returns (and caches) the frame for the given index.
:param frame_index: The index of the frame.
:type frame_index: int
:returns: A Pillow image cropped from the main image corresponding to
the given frame index.
:raises TypeError: if the ``frame_index`` is not numeric
:raises IndexError: if the ``frame_index`` is less than zero or more
than the largest frame.
"""
if not isinstance(frame_index, int):
raise TypeError("frame index must be numeric")
if frame_index < 0 or frame_index > self.frames.count:
raise IndexError("frame index out of range")
cached_frame = self.cache.get(frame_index)
if cached_frame is None:
offset = frame_index * self.frames.width
left = offset % self.width
top = (offset // self.width) * self.frames.height
right = left + self.frames.width
bottom = top + self.frames.height
bounds = [left, top, right, bottom]
cached_frame = self.image.crop(bounds)
self.cache[frame_index] = cached_frame
return cached_frame
def __len__(self):
"""
The number of frames in the sprite sheet
"""
return self.frames.count
def animate(self, seq_name):
"""
Returns a generator which "executes" an animation sequence for the given
``seq_name``, inasmuch as the next frame for the given animation is
yielded when requested.
:param seq_name: The name of a previously defined animation sequence.
:type seq_name: str
:returns: A generator that yields all frames from the animation
sequence.
:raises AttributeError: If the ``seq_name`` is unknown.
"""
while True:
index = 0
anim = getattr(self.animations, seq_name)
speed = anim.speed if hasattr(anim, "speed") else 1
num_frames = len(anim.frames)
while index < num_frames:
frame = anim.frames[int(index)]
index += speed
if isinstance(frame, int):
yield self[frame]
else:
for subseq_frame in self.animate(frame):
yield subseq_frame
if not hasattr(anim, "next"):
break
seq_name = anim.next
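# Example (illustrative only; "walk.png" and the display call are placeholders):
# a strip of four 32x32 frames played as a looping animation, paced by the
# framerate_regulator defined below.
#
#     sheet = spritesheet(image="walk.png",
#                         frames={"width": 32, "height": 32, "count": 4},
#                         animations={"walk": {"frames": [0, 1, 2, 3],
#                                              "speed": 0.5, "next": "walk"}})
#     regulator = framerate_regulator(fps=15)
#     for frame in sheet.animate("walk"):
#         with regulator:
#             pass  # e.g. device.display(frame)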
class framerate_regulator(object):
"""
Implements a variable sleep mechanism to give the appearance of a consistent
frame rate. Using a fixed-time sleep will cause animations to be jittery
(looking like they are speeding up or slowing down, depending on what other
    work is occurring), whereas this class keeps track of when the ``sleep()``
    method was last called, and calculates a sleep period to smooth out
the jitter.
:param fps: The desired frame rate, expressed numerically in
frames-per-second. By default, this is set at 16.67, to give a frame
render time of approximately 60ms. This can be overridden as necessary,
and if no FPS limiting is required, the ``fps`` can be set to zero.
:type fps: float
"""
def __init__(self, fps=16.67):
if fps == 0:
fps = -1
self.max_sleep_time = 1.0 / fps
self.total_transit_time = 0
self.called = 0
self.start_time = None
self.last_time = None
def __enter__(self):
self.enter_time = perf_counter()
if not self.start_time:
self.start_time = self.enter_time
self.last_time = self.enter_time
return self
def __exit__(self, *args):
"""
Sleeps for a variable amount of time (dependent on when it was last
called), to give a consistent frame rate. If it cannot meet the desired
frame rate (i.e. too much time has occurred since the last call), then
it simply exits without blocking.
"""
self.called += 1
self.total_transit_time += perf_counter() - self.enter_time
if self.max_sleep_time >= 0:
elapsed = perf_counter() - self.last_time
sleep_for = self.max_sleep_time - elapsed
if sleep_for > 0:
sleep(sleep_for)
self.last_time = perf_counter()
def effective_FPS(self):
"""
Calculates th
|
BartoszCichecki/onlinepython
|
onlinepython/pypy-2.4.0-win32/lib_pypy/cffi/_pycparser/c_parser.py
|
Python
|
gpl-2.0
| 59,384
| 0.001381
|
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2013, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='cffi._pycparser.lextab',
yacc_optimize=True,
yacctab='cffi._pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
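    # Example (illustrative only): parsing a small C snippet and printing the
    # resulting AST with the c_ast show() helper.
    #
    #     parser = CParser()
    #     ast = parser.parse("int main(void) { return 0; }", filename="<stdin>")
    #     ast.show()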
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
            self._parse_error(
                "Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
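    #        (Illustrative aside, not in the original comment: assuming
    #        pycparser's usual nesting, the declaration above ends up as
    #        ArrayDecl(PtrDecl(TypeDecl('c', int))), read outside-in as
    #        "c is an array of 5 pointers to int".)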
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending
|
tensorflow/tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/activity_test.py
|
Python
|
apache-2.0
| 23,693
| 0.006078
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module."""
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
QN = qual_names.QN
NodeAnno = annos.NodeAnno
global_a = 7
global_b = 17
class ScopeTest(test.TestCase):
def assertMissing(self, qn, scope):
self.assertNotIn(qn, scope.read)
self.assertNotIn(qn, scope.modified)
def assertReadOnly(self, qn, scope):
self.assertIn(qn, scope.read)
self.assertNotIn(qn, scope.modified)
def assertWriteOnly(self, qn, scope):
self.assertNotIn(qn, scope.read)
self.assertIn(qn, scope.modified)
def assertReadWrite(self, qn, scope):
self.assertIn(qn, scope.read)
self.assertIn(qn, scope.modified)
def test_copy_from(self):
scope = activity.Scope(None)
scope.modified.add(QN('foo'))
other = activity.Scope(None)
other.copy_from(scope)
self.assertWriteOnly(QN('foo'), other)
scope.modified.add(QN('bar'))
scope.copy_from(other)
self.assertMissing(QN('bar'), scope)
def test_merge_from(self):
scope = activity.Scope(None)
other = activity.Scope(None)
for col in (scope.modified, scope.read, scope.bound, scope.deleted):
col.add(QN('foo'))
for col in (other.modified, other.read, other.bound, other.deleted):
      col.add(QN('foo'))
col.add(QN('bar'))
scope.merge_from(other)
self.assertReadWrite(QN('foo'), scope)
    self.assertReadWrite(QN('bar'), scope)
self.assertIn(QN('foo'), scope.bound)
self.assertIn(QN('bar'), scope.bound)
self.assertIn(QN('foo'), scope.deleted)
self.assertIn(QN('bar'), scope.deleted)
def test_copy_of(self):
scope = activity.Scope(None)
scope.read.add(QN('foo'))
other = activity.Scope.copy_of(scope)
self.assertReadOnly(QN('foo'), other)
child_scope = activity.Scope(scope)
child_scope.read.add(QN('bar'))
other = activity.Scope.copy_of(child_scope)
self.assertReadOnly(QN('bar'), other)
def test_referenced(self):
scope = activity.Scope(None)
scope.read.add(QN('a'))
child = activity.Scope(scope)
child.read.add(QN('b'))
child2 = activity.Scope(child, isolated=False)
child2.read.add(QN('c'))
child2.finalize()
child.finalize()
scope.finalize()
self.assertIn(QN('c'), child2.referenced)
self.assertIn(QN('b'), child2.referenced)
self.assertIn(QN('a'), child2.referenced)
self.assertIn(QN('c'), child.referenced)
self.assertIn(QN('b'), child.referenced)
self.assertIn(QN('a'), child.referenced)
class ActivityAnalyzerTestBase(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node, entity_info
def assertSymbolSetsAre(self, expected, actual, name):
expected = set(expected)
actual = set(str(s) for s in actual)
self.assertSetEqual(
expected, actual, 'for symbol set: %s\n'
' Expected: %s\n'
' Got: %s\n'
' Missing: %s\n'
' Extra: %s\n' % (name.upper(), expected, actual,
expected - actual, actual - expected))
def assertScopeIs(self, scope, used, modified):
"""Assert the scope contains specific used, modified & created variables."""
self.assertSymbolSetsAre(used, scope.read, 'read')
self.assertSymbolSetsAre(modified, scope.modified, 'modified')
class ActivityAnalyzerTest(ActivityAnalyzerTestBase):
def test_import(self):
def test_fn():
import a, b.x, y as c, z.u as d # pylint:disable=g-multiple-import,g-import-not-at-top,unused-variable
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a', 'b', 'c', 'd'))
def test_import_from(self):
def test_fn():
from x import a # pylint:disable=g-import-not-at-top,unused-variable
from y import z as b # pylint:disable=g-import-not-at-top,unused-variable
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a',))
scope = anno.getanno(node.body[1], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('b',))
def test_print_statement(self):
def test_fn(a):
b = 0
c = 1
print(a, b)
return c
node, _ = self._parse_and_analyze(test_fn)
print_node = node.body[2]
if isinstance(print_node, gast.Print):
# Python 2
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
else:
# Python 3
assert isinstance(print_node, gast.Expr)
# The call node should be the one being annotated.
print_node = print_node.value
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(print_args_scope, ('a', 'b'), ())
def test_call_args(self):
def test_fn(a):
b = 0
c = 1
foo(a, b) # pylint:disable=undefined-variable
return c
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'b'), ())
def test_call_args_attributes(self):
def foo(*_):
pass
def test_fn(a):
a.c = 0
foo(a.b, a.c)
return a.d
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[1].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'a.b', 'a.c'), ())
def test_call_args_subscripts(self):
def foo(*_):
pass
def test_fn(a):
b = 1
c = 2
foo(a[0], a[b])
return a[c]
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE),
('a', 'a[0]', 'a[b]', 'b'), ())
def test_while(self):
def test_fn(a):
b = a
while b > 0:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
while_node = node.body[1]
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.COND_SCOPE), ('b',), ())
def test_for(self):
def test_fn(a):
b = a
for _ in a:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
for_node = node.body[1]
|
shankari/e-mission-server
|
emission/net/usercache/formatters/ios/transition.py
|
Python
|
bsd-3-clause
| 3,275
| 0.006107
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.wrapper.transition as et
import emission.net.usercache.formatters.common as fc
import attrdict as ad
state_map = {
"STATE_START": et.State.START,
"STATE_WAITING_FOR_TRIP_START": et.State.WAITING_FOR_TRIP_START,
"STATE_ONGOING_TRIP": et.State.ONGOING_TRIP,
"STATE_TRACKING_STOPPED": et.State.TRACKING_STOPPED
}
transition_map = {
"booted": et.TransitionType.BOOTED,
"T_INITIALIZE": et.TransitionType.INITIALIZE,
"T_INIT_COMPLETE": et.TransitionType.INIT_COMPLETE,
"T_EXITED_GEOFENCE": et.TransitionType.EXITED_GEOFENCE,
"T_TRIP_STARTED": et.TransitionType.TRIP_STARTED,
"T_RECEIVED_SILENT_PUSH": et.TransitionType.RECEIVED_SILENT_PUSH,
"T_TRIP_END_DETECTED": et.TransitionType.TRIP_END_DETECTED,
"T_TRIP_RESTARTED": et.TransitionType.TRIP_RESTARTED,
"T_END_TRIP_TRACKING": et.TransitionType.END_TRIP_TRACKING,
"T_DATA_PUSHED": et.TransitionType.DATA_PUSHED,
"T_TRIP_ENDED": et.TransitionType.STOPPED_MOVING,
"T_FORCE_STOP_TRACKING": et.TransitionType.STOP_TRACKING,
"T_TRACKING_STOPPED": et.TransitionType.TRACKING_STOPPED,
"T_VISIT_STARTED": et.TransitionType.VISIT_STARTED,
"T_VISIT_ENDED": et.TransitionType.VISIT_ENDED,
"T_NOP": et.TransitionType.NOP,
"T_START_TRACKING": et.TransitionType.START_TRACKING
}
def format(entry):
formatted_entry = ad.AttrDict()
formatted_entry["_id"] = entry["_id"]
formatted_entry.user_id = entry.user_id
m = entry.metadata
fc.expand_metadata_times(m)
formatted_entry.metadata = m
data = ad.AttrDict()
data.curr_state = state_map[entry.data.currState].value
logging.debug("Mapped %s -> %s" % (entry.data.currState, data.curr_state))
# The iOS state diagram is significantly more complex than the android state diagram
# So there are a lot more transitions. But some of the intermediate states are
# not interesting, so it seems like it should be possible to collapse them to the
    # simple 2-state android state machine. But that requires looking at a window of
# transitions, which we don't have here. Let's focus on simply mapping here and
# deal with collapsing later
# data.transition_raw = entry.data.transition
if entry.data.transition is not None:
data.transition = transition_map[entry.data.transition].value
else:
data.transition = None
logging.debug("Mapped %s -> %s" % (entry.data.transition, data.transition))
if "ts" not in data:
data.ts = formatted_entry.metadata.write_ts
logging.debug("No existing timestamp, copyied from metadata%s" % data.ts)
data.local_dt = formatted_entry.metadata.write_local_dt
data.fmt_time = formatted_entry.metadata.write_fmt_time
else:
logging.debug("Retaining existing timestamp %s" % data.ts)
        fc.expand_data_times(data, m)
formatted_entry.data = data
return formatted_entry
|
simonluijk/aws-ecs-service-discovery
|
register.py
|
Python
|
mit
| 6,803
| 0.001029
|
#!/usr/bin/env python
"""
A toolkit for identifying and advertising service resources.
Uses a specific naming convention for the Task Definition of services. If you
name the Task Definition ending with "-service", no configuration is needed.
This also requires that you not use that naming convention for task definitions
that are not services.
For example:
A Task Definition with the family name of 'cache-service' will have its
hosting Container Instance's internal ip added to a Route53 private Zone as
cache.local and other machines on the same subnet can address it that way.
"""
import argparse
import logging
import os
import re
import json
import boto
import boto.ec2
import boto.route53
import requests
from etcd.client import Client
from time import sleep
region = os.environ.get('ECS_REGION', 'us-east-1')
ecs = boto.connect_ec2containerservice(
host='ecs.{0}.amazonaws.com'.format(region))
ec2 = boto.ec2.connect_to_region(region)
route53 = boto.route53.connect_to_region(region)
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%Y/%m/%d/ %I:%M:%S %p')
if 'ECS_CLUSTER' in os.environ:
cluster = os.environ['ECS_CLUSTER']
elif os.path.exists('/etc/ecs/ecs.config'):
pat = re.compile(r'\bECS_CLUSTER\b\s*=\s*(\w*)')
cluster = pat.findall(open('/etc/ecs/ecs.config').read())[-1]
else:
cluster = None
def get_task_arns(family):
"""
Get the ARN of running task, given the family name.
"""
response = ecs.list_tasks(cluster=cluster, family=family)
arns = response['ListTasksResponse']['ListTasksResult']['taskArns']
if len(arns) == 0:
return None
return arns
def get_ec2_interface(container_instance_arn):
"""
Get the ec2 interface from an container instance ARN.
"""
response = ecs.describe_container_instances(container_instance_arn, cluster=cluster)
ec2_instance_id = response['DescribeContainerInstancesResponse'] \
['DescribeContainerInstancesResult']['containerInstances'] \
[0]['ec2InstanceId']
response = ec2.get_all_instances(filters={'instance-id': ec2_instance_id})
return response[0].instances[0].interfaces[0]
def get_zone_for_vpc(vpc_id):
"""
Identify the Hosted Zone for the given VPC.
Assumes a 1 to 1 relationship.
NOTE: There is an existing bug.
https://github.com/boto/boto/issues/3061
When that changes, I expect to have to search ['VPCs'] as a list of
dictionaries rather than a dictionary. This has the unfortunate side
effect of not working for Hosted Zones that are associated with more than
one VPC. (But, why would you expect internal DNS for 2 different private
networks to be the same anyway?)
"""
response = route53.get_all_hosted_zones()['ListHostedZonesResponse']
for zone in response['HostedZones']:
zone_id = zone['Id'].split('/')[-1]
detail = route53.get_hosted_zone(zone_id)['GetHostedZoneResponse']
try:
if detail['VPCs']['VPC']['VPCId'] == vpc_id:
return {'zone_id': zone_id, 'zone_name': zone['Name']}
except KeyError:
pass
def get_service_info(service_name):
info = {
"name": service_name,
"tasks": []
}
if service_name[-8:] == '-service':
info['name'] = service_name[:-8]
task_arns = get_task_arns(service_name)
if not task_arns:
logging.info('{0} is NOT RUNNING'.format(service_name))
return None
else:
logging.info('{0} is RUNNING'.format(service_name))
data = ecs.describe_tasks(task_arns, cluster=cluster)
tasks = data['DescribeTasksResponse']['DescribeTasksResult']['tasks']
for task in tasks:
interface = get_ec2_interface(task['containerInstanceArn'])
task_info = {
'ip': interface.private_ip_address,
'ports': {}
}
for container in task['containers']:
if container['networkBindings']:
for port in container['networkBindings']:
if port['protocol'] == 'tcp':
task_info['ports'][port['containerPort']] = port['hostPort']
info['tasks'].append(task_info)
info['vpc_id'] = interface.vpc_id
return info
def update_dns(zone_id, zone_name, service_name, service_ips, ttl=20):
"""
Insert or update DNS record.
"""
host_name = '.'.join([service_name, zone_name])
record_set = boto.route53.record.ResourceRecordSets(route53, zone_id)
record = record_set.add_change('UPSERT', host_name, 'A', ttl)
for service_ip in service_ips:
record.add_value(service_ip)
record_set.commit()
return record_set
def update_service(service_name, method, prefix):
    """
    Update DNS to allow discovery of properly named task definitions.
"""
info = get_service_info(service_name)
if not info:
return None
if method == 'dns':
network = get_zone_for_vpc(info["vpc_id"])
ips = [t['ip'] for t in info['tasks']]
logging.info('Registering {0}.{1} as {2}'.format(
info['name'], network['zone_name'], ','.join(ips)))
update_dns(network['zone_id'], network['zone_name'],
info['name'], ips)
elif method == 'etcd':
data = json.dumps(info['tasks'])
logging.info('Registering {0} as {1}'.format(
info['name'], data))
host = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").content
client = Client(host=host, port=4001)
key = '/' + '/'.join([i for i in ['tasks', prefix, info['name']] if i])
client.node.set(key, data)
def main():
"""
Main function that handles running the command.
"""
parser = argparse.ArgumentParser()
parser.add_argument('service_name', nargs=1,
help='list of services to start')
parser.add_argument('method', nargs=1,
help='method of registering service')
parser.add_argument('-p', '--prefix', action='store', default=False,
help='prefix when saving to etcd')
parser.add_argument('-q', '--quiet', action='store_true',
help='suppress output')
parser.add_argument('-r', '--rerun', action='store_true',
help='run again after a 60 second pause')
args = parser.parse_args()
if not args.quiet:
logging.getLogger().setLevel(logging.INFO)
update_service(args.service_name[0], args.method[0], args.prefix)
if args.rerun:
sleep(60)
update_service(args.service_name[0], args.method[0], args.prefix)
if __name__ == '__main__':
main()
|
carlosvin/pricecalculator
|
data_input/__init__.py
|
Python
|
apache-2.0
| 1,020
| 0.015686
|
# -*- coding: utf8 -*-
from urllib.request import Request, urlopen
import logging
import parsing
__author__ = 'carlos'
class Downloader(object):
def __init__(self, url):
self.url = url
def read(self):
request = Request( self.url )
request.add_header('Accept-encoding', 'text/html')
response = urlopen(request)
charset = response.headers.get('charset')
data = response.read()
logging.debug('Read %u bytes from %s (%s)' % (len(data), self.url, charset))
return data
class StocksInfoUpdater(object):
def __init__(self, url):
self.downloader = Downloader(url)
self.parser = parsing.StockParser()
def update(self):
dataread = self.downloader.read()
self.parser.feed(dataread)
return self.parser.stocks
@property
def stocks(self):
return self.parser.stocks
@property
def url(self):
return self.downloader.url
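# Hypothetical usage sketch (the URL is made up; behaviour depends on
# parsing.StockParser):
#
#     updater = StocksInfoUpdater('http://example.com/stocks.html')
#     stocks = updater.update()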
|
AllanYangZhou/oppia
|
core/controllers/suggestion.py
|
Python
|
apache-2.0
| 5,721
| 0.001049
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import suggestion_services
from core.platform import models
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class SuggestionHandler(base.BaseHandler):
""""Handles operations relating to suggestions."""
@acl_decorators.can_suggest_changes
def post(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
suggestion_services.create_suggestion(
self.payload.get('suggestion_type'),
self.payload.get('target_type'), self.payload.get('target_id'),
self.payload.get('target_version_at_submission'),
self.user_id, self.payload.get('change_cmd'),
self.payload.get('description'),
self.payload.get('final_reviewer_id'))
self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
"""Handles actions performed on suggestions to explorations."""
ACTION_TYPE_ACCEPT = 'accept'
ACTION_TYPE_REJECT = 'reject'
    # TODO (nithesh): Add permissions for users with enough scores to review
    # Will be added as part of milestone 2 of the generalized review system
# project.
@acl_decorators.can_edit_exploration
def put(self, exploration_id, suggestion_id):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
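        # The expected shape of suggestion_id, as implied by the checks below,
        # is roughly 'exploration.<exploration_id>.<suffix>' (illustrative).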
if len(suggestion_id.split('.')) != 3:
raise self.InvalidInputException('Invalid format for suggestion_id.'
' It must contain 3 parts'
' separated by \'.\'')
if suggestion_id.split('.')[0] != 'exploration':
raise self.InvalidInputException('This handler allows actions only'
' on suggestions to explorations.')
if suggestion_id.split('.')[1] != exploration_id:
raise self.InvalidInputException('The exploration id provided does '
'not match the exploration id '
'present as part of the '
'suggestion_id')
action = self.payload.get('action')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if action == self.ACTION_TYPE_ACCEPT:
suggestion_services.accept_suggestion(
suggestion, self.user_id, self.payload.get('commit_message'),
self.payload.get('review_message'))
elif action == self.ACTION_TYPE_REJECT:
suggestion_services.reject_suggestion(
suggestion, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class SuggestionListHandler(base.BaseHandler):
"""Handles list operations on suggestions."""
LIST_TYPE_AUTHOR = 'author'
LIST_TYPE_ID = 'id'
LIST_TYPE_REVIEWER = 'reviewer'
LIST_TYPE_STATUS = 'status'
LIST_TYPE_SUGGESTION_TYPE = 'type'
LIST_TYPE_TARGET_ID = 'target'
LIST_TYPES_TO_SERVICES_MAPPING = {
LIST_TYPE_AUTHOR: suggestion_services.get_suggestions_by_author,
LIST_TYPE_ID: suggestion_services.get_suggestion_by_id,
LIST_TYPE_REVIEWER: suggestion_services.get_suggestions_reviewed_by,
LIST_TYPE_STATUS: suggestion_services.get_suggestions_by_status,
LIST_TYPE_SUGGESTION_TYPE: suggestion_services.get_suggestion_by_type,
LIST_TYPE_TARGET_ID: suggestion_services.get_suggestions_by_target_id
}
PARAMS_FOR_LIST_TYPES = {
LIST_TYPE_AUTHOR: ['author_id'],
LIST_TYPE_ID: ['suggestion_id'],
LIST_TYPE_REVIEWER: ['reviewer_id'],
LIST_TYPE_STATUS: ['status'],
LIST_TYPE_SUGGESTION_TYPE: ['suggestion_type'],
LIST_TYPE_TARGET_ID: ['target_type', 'target_id']
}
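    # Illustrative example (not part of the original source): a request with
    # ?list_type=author&author_id=<id> resolves to
    # suggestion_services.get_suggestions_by_author(<id>).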
def get_params_from_request(self, request, list_type):
return [request.get(param_name)
for param_name in self.PARAMS_FOR_LIST_TYPES[list_type]]
@acl_decorators.open_access
def get(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
list_type = self.request.get('list_type')
if list_type not in self.LIST_TYPES_TO_SERVICES_MAPPING:
raise self.InvalidInputException('Invalid list type.')
params = self.get_params_from_request(self.request, list_type)
suggestions = self.LIST_TYPES_TO_SERVICES_MAPPING[list_type](*params)
# When querying by ID, only a single suggestion is retrieved, so we make
# it a list.
if list_type == self.LIST_TYPE_ID:
suggestions = [suggestions]
self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
self.render_json(self.values)
|
ademariag/kapitan
|
kapitan/validator/base.py
|
Python
|
apache-2.0
| 369
| 0.00271
|
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <[email protected]>
#
# SPDX-License-Identifier: Apache-2.0
import logging
logger = logging.getLogger(__name__)
class Validator(object):
def __init__(self, cache_dir, **kwargs):
self.cache_dir = cache_dir
def validate(self, validate_obj, **kwargs):
raise NotImplementedError
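# A concrete validator is expected to subclass Validator and override
# validate(); a minimal sketch (hypothetical, not part of kapitan):
#
#     class NoopValidator(Validator):
#         def validate(self, validate_obj, **kwargs):
#             logger.debug("validating %s", validate_obj)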
|
mlperf/training_results_v0.5
|
v0.5.0/nvidia/submission/code/translation/pytorch/fairseq/modules/grad_multiply.py
|
Python
|
apache-2.0
| 550
| 0
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
class GradMultiply(torch.autograd.Function):
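    # Returns a copy of its input in the forward pass and scales the incoming
    # gradient by `scale` in the backward pass. Hypothetical usage (not in the
    # original file):
    #     y = GradMultiply.apply(x, 0.5)   # gradients w.r.t. x are halved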
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
        return grad * ctx.scale, None
|
kaedroho/wagtail
|
wagtail/tests/modeladmintest/apps.py
|
Python
|
bsd-3-clause
| 251
| 0
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class WagtailTestsAppConfig(AppConfig):
    name = 'wagtail.tests.modeladmintest'
label = 'modeladmintest'
verbose_name = _("Test Wagtail Model Admin")
|
tensorflow/tensorflow
|
tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
|
Python
|
apache-2.0
| 8,944
| 0.007603
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlices(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with sparse tensor in the appropriate order.
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
        results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
      combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlicesInReverse(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlices(self):
"""Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
def testFromSparseTensorSlicesError(self):
with self.assertRaises(AttributeError):
dataset_ops.Dataset.from_sparse_tensor_slices(None)
class FromSparseTensorSlicesCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_sparse_tensor_slice_dataset(self, slices):
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
    values = np.array([val for s in slices for val in s], dtype=np.float64)
|
mccdaq/mcculw
|
mcculw/device_info/daq_device_info.py
|
Python
|
mit
| 4,437
| 0
|
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import (BoardInfo, InfoType, ErrorCode, EventType,
ExpansionInfo)
from .ai_info import AiInfo
from .ao_info import AoInfo
from .ctr_info import CtrInfo
from .daqi_info import DaqiInfo
from .daqo_info import DaqoInfo
from .dio_info import DioInfo
class DaqDeviceInfo:
"""Provides hardware information for the DAQ device configured with the
specified board number.
NOTE: This class is primarily used to provide hardware information for the
library examples and may change some hardware configuration values. It is
recommended that values provided by this class be hard-coded in production
code.
Parameters
----------
board_num : int
The board number associated with the device when created with
:func:`.create_daq_device` or configured with Instacal.
"""
def __init__(self, board_num):
self._board_num = board_num
self._board_type = ul.get_config(InfoType.BOARDINFO, board_num, 0,
BoardInfo.BOARDTYPE)
if self._board_type == 0:
raise ULError(ErrorCode.BADBOARD)
self._ai_info = AiInfo(self._board_num)
self._ao_info = AoInfo(self._board_num)
self._ctr_info = CtrInfo(self._board_num)
self._daqi_info = DaqiInfo(self._board_num)
self._daqo_info = DaqoInfo(self._board_num)
self._dio_info = DioInfo(self._board_num)
@property
def board_num(self): # -> int
return self._board_num
@property
def product_name(self): # -> str
return ul.get_board_name(self._board_num)
@property
def unique_id(self): # -> str
return ul.get_config_string(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DEVUNIQUEID, 32)
@property
def supports_analog_input(self): # -> boolean
return self._ai_info.is_supported
@property
def supports_temp_input(self): # -> boolean
return self._ai_info.temp_supported
def get_ai_info(self): # -> AiInfo
return self._ai_info
@property
def supports_analog_output(self): # -> boolean
return self._ao_info.is_supported
def get_ao_info(self): # -> AoInfo
return self._ao_info
@property
def supports_counters(self): # -> boolean
return self._ctr_info.is_supported
def get_ctr_info(self): # -> CtrInfo
return self._ctr_info
@property
def supports_daq_input(self): # -> boolean
return self._daqi_info.is_supported
def get_daqi_info(self): # -> DaqiInfo
return self._daqi_info
@property
def supports_daq_output(self): # -> boolean
return self._daqo_info.is_supported
def get_daqo_info(self): # -> DaqoInfo
return self._daqo_info
@property
def supports_digital_io(self): # -> boolean
return self._dio_info.is_supported
def get_dio_info(self): # -> DioInfo
return self._dio_info
@property
def supported_event_types(self): # -> list[EventType]
event_types = []
for event_type in EventType:
try:
ul.disable_event(self._board_num, event_type)
event_types.append(event_type)
except ULError:
pass
return event_types
@property
def num_expansions(self): # -> int
return ul.get_config(InfoType.BOARDINFO, self.board_num, 0,
BoardInfo.NUMEXPS)
@property
def exp_info(self): # -> list[ExpInfo]
exp_info = []
for expansion_num in range(self.num_expansions):
exp_info.append(ExpInfo(self._board_num, expansion_num))
return exp_info
class ExpInfo:
    def __init__(self, board_num, expansion_num):
self._board_num = board_num
self._expansion_num = expansion_num
@property
def board_type(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.BOARDTYPE)
@property
def mux_ad_chan(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.MUX_AD_CHAN1)
|