| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
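The schema above describes a fill-in-the-middle (FIM) style code corpus: each sample stores a source file split into a prefix, a short masked middle span (3-512 characters), and a suffix, together with repository metadata and a quality score. As a minimal sketch of how such rows might be consumed, the snippet below reassembles the full file text and builds a FIM-style prompt from one row; the field names follow the schema above, while the JSONL path and the sentinel tokens are illustrative assumptions, not part of the dataset.

```python
import json

# Hypothetical local dump of the rows shown below, one JSON object per line,
# with the schema fields: repo_name, path, language, license, size, score,
# prefix, middle, suffix.
ROWS_PATH = "code_fim_rows.jsonl"  # assumed path, not part of the dataset

# Sentinel tokens are illustrative; real FIM setups define their own markers.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"


def full_text(row):
    """Reassemble the original file content from the three text columns."""
    return row["prefix"] + row["middle"] + row["suffix"]


def fim_prompt(row):
    """Build a prefix-suffix-middle prompt; the model is expected to emit `middle`."""
    return f'{FIM_PREFIX}{row["prefix"]}{FIM_SUFFIX}{row["suffix"]}{FIM_MIDDLE}'


if __name__ == "__main__":
    with open(ROWS_PATH) as fh:
        for line in fh:
            row = json.loads(line)
            print(row["repo_name"], row["path"], row["license"], len(full_text(row)))
```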
| li-xirong/jingwei | util/simpleknn/bigfile.py | Python | mit | 3,569 | 0.009526 |
import os, sys, array
import numpy as np
class BigFile:
def __init__(self, datadir):
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
def read(self, requested, isname=True):
requested = set(requested)
if isname:
index_name_array = [(self.name2index[x], x) for x in requested if x in self.name2index]
else:
assert(min(requested)>=0)
assert(max(requested)<len(self.names))
index_name_array = [(x, self.names[x]) for x in requested]
if len(index_name_array) == 0:
return [], []
index_name_array.sort(key=lambda v:v[0])
sorted_index = [x[0] for x in index_name_array]
nr_of_images = len(index_name_array)
vecs = [None] * nr_of_images
offset = np.float32(1).nbytes * self.ndims
res = array.array('f')
fr = open(self.binary_file, 'rb')
fr.seek(index_name_array[0][0] * offset)
res.fromfile(fr, self.ndims)
previous = index_name_array[0][0]
for next in sorted_index[1:]:
move = (next-1-previous) * offset
#print next, move
fr.seek(move, 1)
res.fromfile(fr, self.ndims)
previous = next
fr.close()
return [x[1] for x in index_name_array], [ res[i*self.ndims:(i+1)*self.ndims].tolist() for i in range(nr_of_images) ]
def read_one(self, name):
renamed, vectors = self.read([name])
return vectors[0]
def shape(self):
return [self.nr_of_images, self.ndims]
class StreamFile:
def __init__(self, datadir):
self.feat_dir = datadir
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
self.fr = None
self.current = 0
def open(self):
self.fr = open(os.path.join(self.feat_dir,'feature.bin'), 'rb')
self.current = 0
def close(self):
if self.fr:
self.fr.close()
self.fr = None
def __iter__(self):
return self
def next(self):
if self.current >= self.nr_of_images:
self.close()
raise StopIteration
else:
res = array.array('f')
res.fromfile(self.fr, self.ndims)
_id = self.names[self.current]
self.current += 1
return _id, res.tolist()
if __name__ == '__main__':
bigfile = BigFile('toydata/FeatureData/f1')
imset = str.split('b z a a b c')
renamed, vectors = bigfile.read(imset)
for name,vec in zip(renamed, vectors):
print name, vec
| jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py | Python | mit | 2,390 | 0 |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
def test_graph_breadth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
def test_graph_breadth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
| TeamADEA/Hunger_Games | HG_Driver.py | Python | mit | 1,353 | 0.01626 |
from HG_Code.HG_Test import UnitTest as UT
from HG_Code import Model as mo
from HG_Code.Visualize import Visualizer
from HG_Code.SimManager import sim_manager
from HG_Code.Hunger_Grid import hunger_grid
from HG_Code.Kat import Kat
from HG_Code import hg_settings
from HG_Code import Mutate as mu
unitTest = UT.Run_Unit_Test()
unitTest.run_test()
#mo.run_model(from_lava = .02, # START LAVA CHANCE
# to_lava = .02, # END FROM LAVA CHANCE
# from_berry = .05, # START BERRY CHANCE
# to_berry = .05, # END BERRY CHANCE
# from_mut=10, # START MUTATION CHANCE
# to_mut=10, # END MUTATION CHANCE
# from_gen = 33, # START GENERATE CHANCE
# to_gen = 33, # END GENERATE CHANCE
#t_name = 'Default' # TITLE OF TEST
# frames = -1 # Defaults to -1 (-1:Don't, 0:Only Last, N:every N)
#mo.run_model() #Default
mo.run_model(.02,.5,.05,.05, 10, 10, 33, 33, 'Lava World', -1)
#mo.run_model(.2,.2,.05,.01, 10, 50, 33, 33, 'Nuclear Wasteland')
#mo.run_model(.02,.5,.05,.5, 10, 10, 33, 33, 'Berry World')
#mo.run_model(.00,.00,.1,.1, 10, 10, 33, 33, "No Lava")
#mo.run_model(.1,.1,0.0,0.0, 10, 10, 33, 33, "No Berries")
#mo.run_model(.1,.1,.1,.1,10,10,33,33,"Lava & Berries")
| theolind/pymysensors | mysensors/cli/gateway_tcp.py | Python | mit | 1,102 | 0 |
"""Start a tcp gateway."""
import click
from mysensors.cli.helper import (
common_gateway_options,
handle_msg,
run_async_gateway,
run_gateway,
)
from mysensors.gateway_tcp import AsyncTCPGateway, TCPGateway
def common_tcp_options(func):
"""Supply common tcp gateway options."""
func = click.option(
"-p",
"--port",
default=5003,
show_default=True,
type=int,
help="TCP port of the connection.",
)(func)
func = click.option(
"-H", "--host", required=True, help="TCP address of the gateway."
)(func)
return func
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def tcp_gateway(**kwargs):
"""Start a tcp gateway."""
gateway = TCPGateway(event_callback=handle_msg, **kwargs)
run_gateway(gateway)
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def async_tcp_gateway(**kwargs):
"""Start an async tcp gateway."""
gateway = AsyncTCPGateway(event_callback=handle_msg, **kwargs)
run_async_gateway(gateway)
| sunfishcode/cretonne | lib/cretonne/meta/cdsl/typevar.py | Python | apache-2.0 | 30,326 | 0.000132 |
"""
Type variables for Parametric polymorphism.
Cretonne instructions and instruction transformations can be specified to be
polymorphic by using type variables.
"""
from __future__ import absolute_import
import math
from . import types, is_power_of_two
from copy import copy
try:
from typing import Tuple, Union, Iterable, Any, Set, TYPE_CHECKING # noqa
if TYPE_CHECKING:
from srcgen import Formatter # noqa
Interval = Tuple[int, int]
# An Interval where `True` means 'everything'
BoolInterval = Union[bool, Interval]
# Set of special types: None, False, True, or iterable.
SpecialSpec = Union[bool, Iterable[types.SpecialType]]
except ImportError:
pass
MAX_LANES = 256
MAX_BITS = 64
MAX_BITVEC = MAX_BITS * MAX_LANES
def int_log2(x):
# type: (int) -> int
return int(math.log(x, 2))
def intersect(a, b):
# type: (Interval, Interval) -> Interval
"""
Given two `(min, max)` inclusive intervals, compute their intersection.
Use `(None, None)` to represent the empty interval on input and output.
"""
if a[0] is None or b[0] is None:
return (None, None)
lo = max(a[0], b[0])
assert lo is not None
hi = min(a[1], b[1])
assert hi is not None
if lo <= hi:
return (lo, hi)
else:
return (None, None)
def is_empty(intv):
# type: (Interval) -> bool
return intv is None or intv is False or intv == (None, None)
def encode_bitset(vals, size):
# type: (Iterable[int], int) -> int
"""
Encode a set of values (each between 0 and size) as a bitset of width size.
"""
res = 0
assert is_power_of_two(size) and size <= 64
for v in vals:
assert 0 <= v and v < size
res |= 1 << v
return res
def pp_set(s):
# type: (Iterable[Any]) -> str
"""
Return a consistent string representation of a set (ordering is fixed)
"""
return '{' + ', '.join([repr(x) for x in sorted(s)]) + '}'
def decode_interval(intv, full_range, default=None):
# type: (BoolInterval, Interval, int) -> Interval
"""
Decode an interval specification which can take the following values:
True
Use the `full_range`.
`False` or `None`
An empty interval
(lo, hi)
An explicit interval
"""
if isinstance(intv, tuple):
# mypy bug here: 'builtins.None' object is not iterable
lo, hi = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
assert lo >= full_range[0]
assert hi <= full_range[1]
return intv
if intv:
return full_range
else:
return (default, default)
def interval_to_set(intv):
# type: (Interval) -> Set
if is_empty(intv):
return set()
(lo, hi) = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
return set([2**i for i in range(int_log2(lo), int_log2(hi)+1)])
def legal_bool(bits):
# type: (int) -> bool
"""
True iff bits is a legal bit width for a bool type.
bits == 1 || bits \in { 8, 16, .. MAX_BITS }
"""
return bits == 1 or \
(bits >= 8 and bits <= MAX_BITS and is_power_of_two(bits))
class TypeSet(object):
"""
A set of types.
We don't allow arbitrary subsets of types, but use a parametrized approach
instead.
Objects of this class can be used as dictionary keys.
Parametrized type sets are specified in terms of ranges:
- The permitted range of vector lanes, where 1 indicates a scalar type.
- The permitted range of integer types.
- The permitted range of floating point types, and
- The permitted range of boolean types.
The ranges are inclusive from smallest bit-width to largest bit-width.
A typeset representing scalar integer types `i8` through `i32`:
>>> TypeSet(ints=(8, 32))
TypeSet(lanes={1}, ints={8, 16, 32})
Passing `True` instead of a range selects all available scalar types:
>>> TypeSet(ints=True)
TypeSet(lanes={1}, ints={8, 16, 32, 64})
>>> TypeSet(floats=True)
TypeSet(lanes={1}, floats={32, 64})
>>> TypeSet(bools=True)
TypeSet(lanes={1}, bools={1, 8, 16, 32, 64})
Similarly, passing `True` for the lanes selects all possible scalar and
vector types:
>>> TypeSet(lanes=True, ints=True)
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={8, 16, 32, 64})
Finally, a type set can contain special types (derived from `SpecialType`)
which can't appear as lane types.
:param lanes: `(min, max)` inclusive range of permitted vector lane counts.
:param ints: `(min, max)` inclusive range of permitted scalar integer
widths.
:param floats: `(min, max)` inclusive range of permitted scalar floating
point widths.
:param bools: `(min, max)` inclusive range of permitted scalar boolean
widths.
:param bitvecs : `(min, max)` inclusive range of permitted bitvector
widths.
:param specials: Sequence of special types to appear in the set.
"""
def __init__(
self,
lanes=None, # type: BoolInterval
ints=None, # type: BoolInterval
floats=None, # type: BoolInterval
bools=None, # type: BoolInterval
bitvecs=None, # type: BoolInterval
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.lanes = interval_to_set(decode_interval(lanes, (1, MAX_LANES), 1))
self.ints = interval_to_set(decode_interval(ints, (8, MAX_BITS)))
self.floats = interval_to_set(decode_interval(floats, (32, 64)))
self.bools = interval_to_set(decode_interval(bools, (1, MAX_BITS)))
self.bools = set(filter(legal_bool, self.bools))
self.bitvecs = interval_to_set(decode_interval(bitvecs,
(1, MAX_BITVEC)))
# Allow specials=None, specials=True, specials=(...)
self.specials = set() # type: Set[types.SpecialType]
if isinstance(specials, bool):
if specials:
self.specials = set(types.ValueType.all_special_types)
elif specials:
self.specials = set(specials)
def copy(self):
# type: (TypeSet) -> TypeSet
"""
Return a copy of our self.
"""
n = TypeSet()
n.lanes = copy(self.lanes)
n.ints = copy(self.ints)
n.floats = copy(self.floats)
n.bools = copy(self.bools)
n.bitvecs = copy(self.bitvecs)
n.specials = copy(self.specials)
return n
def typeset_key(self):
# type: () -> Tuple[Tuple, Tuple, Tuple, Tuple, Tuple, Tuple]
"""Key tuple used for hashing and equality."""
return (tuple(sorted(list(self.lanes))),
tuple(sorted(list(self.ints))),
tuple(sorted(list(self.floats))),
tuple(sorted(list(self.bools))),
tuple(sorted(list(self.bitvecs))),
tuple(sorted(s.name for s in self.specials)))
def __hash__(self):
# type: () -> int
h = hash(self.typeset_key())
assert h == getattr(self, 'prev_hash', h), "TypeSet changed!"
self.prev_hash = h
return h
def __eq__(self, other):
# type: (object) -> bool
if isinstance(other, TypeSet):
return self.typeset_key() == other.typeset_key()
else:
return False
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
def __repr__(self):
# type: () -> str
s = 'TypeSet(lanes={}'.format(pp_set(self.lanes))
if len(self.ints) > 0:
s += ', ints={}'.format(pp_set(self.ints))
if len(self.floats) > 0:
s += ', floats={}'.format(pp_set(self.floats))
if len(self.bools) > 0:
s += ', bools={}'.format(pp_set(self.bools))
if len(self.bitvecs) > 0:
s += ', bitvecs={}'.format(pp_set(self.bitvecs))
if len(self.specials) > 0:
| amife/carpool | service/src/call2ride/interface.py | Python | gpl-3.0 | 62,939 | 0.018145 |
"""
Open CarPool is a free and open source carpooling/dynamic ride sharing
system, open to be connected to other car pools and public transport.
Copyright (C) 2009-2014 Julian Rath, Oliver Pintat
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
In case this software interacts with other software tools, other
licenses may apply.
Module for the core functionality for the call2ride pilot and Open CarPool
"""
import psycopg2
import datetime
import pytz
import logging
from sms_providers import sms_trade_de
import md5
DEFAULT_COUNTRY_CODE = '49'
## Max Minutes a time can be in the past before adding a day
MINUTES_DELTA = 10
logger = logging.getLogger(__name__)
def sms_mail(receiver, message, config):
logger.info("sending mail: %s" % message)
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
# Open a plain text file for reading. For this example, assume that
# the text file contains only ASCII characters.
#fp = open(textfile, 'rb')
# Create a text/plain message
msg = MIMEText(message)
#fp.close()
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'Call2Ride Text Message to %s' % receiver
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(config.get('mail', 'smtp'))
s.ehlo()
s.login(config.get('mail', 'user'), config.get('mail', 'password'))
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
def c2r_check_appointment_in_future(start_date, tm, tzname):
## if no date, then result is always true
if start_date==None or start_date=='':
return True
time = datetime.time(int(tm[0:2]), int(tm[2:4]))
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
tz = pytz.timezone(tzname)
now = datetime.datetime.now(tz)
timenow = datetime.time(now.hour, now.minute)
timeadd = datetime.timedelta(minutes=MINUTES_DELTA)
timeplus = now - timeadd
timeplus2 = datetime.time(timeplus.hour, timeplus.minute)
resultdate = datetime.datetime.combine(date, time)
if resultdate > now.replace(tzinfo=None):
return True
return False
def c2r_time_to_datetime(start_date, tm, tzname, checkonly=False, force=False):
"""
@param tm: 4 digit str
@return: datetime.datetime object with date of today and combined time
"""
time = datetime.time(int(tm[0:2]), int(tm[2:4]))
date = datetime.date.today()
start_date_bool = True
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
start_date_bool = False
tz = pytz.timezone(tzname)
now = datetime.datetime.now(tz)
timenow = datetime.time(now.hour, now.minute)
timeadd = datetime.timedelta(minutes=MINUTES_DELTA)
timeplus = now - timeadd
timeplus2 = datetime.time(timeplus.hour, timeplus.minute)
resultdate = datetime.datetime.combine(date, time)
## Check Datetime, if in the past use tomorrow
if (timeplus2 > time or force) and start_date_bool:
if checkonly:
return True
aday = datetime.timedelta(days=1)
resultdate = resultdate + aday
if checkonly:
return False
resultdate = tz.localize(resultdate)
return resultdate
def normalize_caller_id(clip):
"""
@raise: ValueError if wrong format
This converts a clip (eg 0712123123123, +49712123123123 or
0049712123123123) to the international form starting with +XX.
If the number is starting with only one 0 zero, germany(+49) is assumed.
"""
clip = clip.strip()
if clip.startswith('+'):
numerical_rep = clip[1:]
elif clip.startswith('00'):
numerical_rep = clip
elif clip.startswith('0'):
numerical_rep = DEFAULT_COUNTRY_CODE+clip[1:]
else:
raise ValueError, 'CLIP format unsupported "%s"' % clip
##test if it is a real number
numerical_rep = long(numerical_rep)
return '+%s' % ( numerical_rep)
def verify_user_id(user_id, db_con):
"""
verify the format of the id and if it is existing in database
"""
user_id = int(user_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM users WHERE id = %d" % user_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def verify_user_number_id(user_number_id, db_con):
"""
verify the format of the id and if it is existing in database
"""
user_number_id = int(user_number_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM user_number WHERE id = %d" % user_number_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def verify_route_id(route_id, db_con):
"""
verify the format of the id and if it is existing in database
"""
user_id = int(route_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM routes WHERE id = %d" % route_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def get_user_id_from_number_id(number_id, db_con):
cur = db_con.cursor()
sql = 'select user_id from user_number where id = %d'
cur.execute(sql % number_id)
user_id, = cur.fetchone()
cur.close()
return user_id
def get_active_offer_ids(user_number_id, route_id, reverse, db_con, start_date=None):
"""
returns the latest active offer ids
"""
user_id = get_user_id_from_number_id(user_number_id, db_con)
date = datetime.date.today()
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
sqldate = ' date_trunc(\'day\', start_time)=TIMESTAMP \'%s\' and ' % date.strftime('%Y-%m-%d')
cur = db_con.cursor()
rev = 'reverse=FALSE'
if reverse == 1:
rev = 'reverse=TRUE'
sql = "SELECT id FROM ride_offers WHERE %s user_number_id in (select id from user_number where user_id = %s) AND route_id = %s AND status = 'open' and %s" % (sqldate, user_id, route_id, rev)
cur.execute(sql)
res = cur.fetchall()
if res != []:
return [c[0] for c in res]
else:
return None
def get_active_request_ids(user_number_id, start_point, end_point, db_con, start_date=None):
"""
returns the latest active offer ids
"""
date = datetime.date.today()
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
sqldate = ' date_trunc(\'day\', earliest_start_time)=TIMESTAMP \'%s\' and ' % date.strftime('%Y-%m-%d')
user_id = get_user_id_from_number_id(user_number_id, db_con)
cur = db_con.cursor()
cur.execute("SELECT id FROM ride_requests WHERE %s user_number_id in (select id from user_number where user_id = %s) AND start_point = %s AND end_point = %s AND status = 'open'" % (sqldate, user_id, start_point, end_point))
res = cur.fetchall()
if res != []:
return [c[0] for c in res]
else:
return None
class Call2RideError(Exception):
"""
General call2ride exception
"""
pass
class Call2Ride(object):
"""
Main class implementing core functions for call2ride, may offered by
webservices or used directly
"""
_INSERT_OFFER_STATMENT = '''INSERT INTO ride_offers
(user_number_id, route_id, start_time, status, reverse) VALUES(%i, %i, '%s', 'ope
| scls19fr/openchrono | openchrono/databuffer.py | Python | gpl-3.0 | 1,914 | 0.009404 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
CSV_DEFAULT_SEP = ','
CSV_DEFAULT_LF = '\n'
def append_same_length(columns, data):
if columns != []:
return len(columns) == len(data)
return True
class DataBuffer(object):
def __init__(self, csv_filename, columns=None, csv_sep=CSV_DEFAULT_SEP, csv_lf=CSV_DEFAULT_LF):
self._fd = open(csv_filename, "w")
self.csv_sep = csv_sep
self.csv_lf = csv_lf
if columns is None:
self._columns = []
else:
self._columns = columns
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
self._columns = value
self._append_columns()
def _append_columns(self):
if self.columns != []:
self.append(*self.columns)
def append(self, *data):
assert append_same_length(self.columns, data), "data and columns must have same length"
s = self.csv_sep.join(map(lambda x: str(x), data)) + self.csv_lf
self._fd.write(s)
def close(self):
self._fd.close()
#def to_csv(self, filename):
# pass
def __enter__(self):
return self
def __exit__(self, typ, value, traceback):
self.close()
def main():
data = DataBuffer("data.csv")
data.columns = ["a", "b", "c"]
data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
data.close()
def main_with_context_manager():
"""
Using a context manager (with ...) will
automatically close file descriptor
"""
with DataBuffer("data.csv") as data:
data.columns = ["a", "b", "c"]
data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
if __name__ == '__main__':
main_with_context_manager()
| OSSOS/MOP | src/ossos/core/ossos/pipeline/update_astrometry.py | Python | gpl-3.0 | 19,113 | 0.006017 |
#!python
"""
Update the astrometric and photometric measurements of an mpc observation based on the header contents of the
observation.
"""
from copy import deepcopy
import time
from astropy import units
import os
import math
import numpy
import logging
import re
import mp_ephem
from ossos import storage, wcs, astrom
from ossos import orbfit
import argparse
__author__ = 'jjk'
# Maximum allowed change in angle during re-measure
TOLERANCE = 2.00 * units.arcsec
def _flipped_ccd(ccd):
"""
Is this CCD likely flipped?
The MegaCam imager on CFHT has E/N flipped for some of their CCDs.
@param ccd:
@return:
"""
return ccd < 18 or ccd in [36, 37]
def remeasure(mpc_in, reset_pixel_coordinates=True):
"""
Compute the RA/DEC of the line based on the X/Y in the comment and the WCS of the associated image.
Comment of supplied astrometric line (mpc_in) must be in OSSOSComment format.
@param mpc_in: An line of astrometric measurement to recompute the RA/DEC from the X/Y in the comment.
@type mpc_in: mp_ephem.Observation
@param reset_pixel_coordinates: try and determine correct X/Y if X/Y doesn't map to correct RA/DEC value
@type reset_pixel_coordinates: bool
"""
if mpc_in.null_observation:
return mpc_in
mpc_obs = deepcopy(mpc_in)
logging.debug("rm start: {}".format(mpc_obs.to_string()))
if not isinstance(mpc_obs.comment, mp_ephem.ephem.OSSOSComment):
logging.error("Failed to convert comment line")
return mpc_in
parts = re.search('(?P<expnum>\d{7})(?P<type>\S)(?P<ccd>\d\d)', str(mpc_obs.comment.frame))
if not parts:
logging.error("Failed to parse expnum from frame info in comment line")
return mpc_in
ccd = int(parts.group('ccd'))
expnum = int(parts.group('expnum'))
exp_type = parts.group('type')
try:
header = _connection_error_wrapper(storage._get_sghead, expnum)[ccd+1]
except IOError as ioerr:
logging.error(str(ioerr))
logging.error("Failed to get astrometric header for: {}".format(mpc_obs))
return mpc_in
this_wcs = wcs.WCS(header)
coordinate = this_wcs.xy2sky(mpc_obs.comment.x, mpc_obs.comment.y, usepv=True)
mpc_obs.coordinate = coordinate[0].to('degree').value, coordinate[1].to('degree').value
sep = mpc_in.coordinate.separation(mpc_obs.coordinate)
if sep > TOLERANCE*20 and mpc_in.discovery and _flipped_ccd(ccd):
logging.warn("Large ({}) offset using X/Y in comment line to compute RA/DEC".format(sep))
if reset_pixel_coordinates:
logging.info("flipping/flopping the discovery x/y position recorded.")
x = header['NAXIS1'] - mpc_obs.comment.x + 1
y = header['NAXIS2'] - mpc_obs.comment.y + 1
new_coordinate = this_wcs.xy2sky(x, y, usepv=True)
new_sep = mpc_in.coordinate.separation(new_coordinate)
if new_sep < TOLERANCE*2:
mpc_obs.coordinate = new_coordinate
mpc_obs.comment.x = x
mpc_obs.comment.y = y
sep = new_sep
if sep > TOLERANCE:
# use the old header RA/DEC to predict the X/Y and then use that X/Y to get new RA/DEC
logging.warn("sep: {} --> large offset when using comment line X/Y to compute RA/DEC".format(sep))
if reset_pixel_coordinates:
logging.warn("Using RA/DEC and original WCS to compute X/Y and replacing X/Y in comment.".format(sep))
header2 = _connection_error_wrapper(storage.get_astheader, expnum, ccd)
image_wcs = wcs.WCS(header2)
(x, y) = image_wcs.sky2xy(mpc_in.coordinate.ra.degree, mpc_in.coordinate.dec.degree, usepv=False)
mpc_obs.coordinate = this_wcs.xy2sky(x, y, usepv=True)
mpc_obs.comment.x = x
mpc_obs.comment.y = y
logging.info("Coordinate changed: ({:5.2f},{:5.2f}) --> ({:5.2f},{:5.2f})".format(mpc_obs.comment.x,
mpc_obs.comment.y,
x, y))
if mpc_obs.comment.mag_uncertainty is not None:
try:
merr = float(mpc_obs.comment.mag_uncertainty)
fwhm = float(_connection_error_wrapper(storage.get_fwhm, expnum, ccd))
centroid_err = merr * fwhm * header['PIXSCAL1']
logging.debug("Centroid uncertainty: {} {} => {}".format(merr, fwhm, centroid_err))
except Exception as err:
logging.error(str(err))
logging.error("Failed to compute centroid_err for observation:\n"
"{}\nUsing default of 0.2".format(mpc_obs.to_string()))
centroid_err = 0.2
else:
centroid_err = 0.2
mpc_obs.comment.astrometric_level = header.get('ASTLEVEL', "0")
try:
asterr = float(header['ASTERR'])
residuals = (asterr ** 2 + centroid_err ** 2) ** 0.5
logging.debug("Residuals: {} {} => {}".format(asterr, centroid_err, residuals))
except Exception as err:
logging.error(str(err))
logging.error("Failed while trying to compute plate uncertainty for\n{}".format(mpc_obs.to_string()))
logging.error('Using default of 0.25')
residuals = 0.25
mpc_obs.comment.plate_uncertainty = residuals
logging.debug("sending back: {}".format(mpc_obs.to_string()))
return mpc_obs
def _connection_error_wrapper(func, *args, **kwargs):
"""
Wrap a call to func in a try/except that repeats on ConnectionError
@param func:
@param args:
@param kwargs:
@return:
"""
counter = 0
while counter < 5:
try:
result = func(*args, **kwargs)
return result
except Exception as ex:
time.sleep(5)
counter += 1
logging.warning(str(ex))
def recompute_mag(mpc_in, skip_centroids=False):
"""
Get the mag of the object given the mp_ephem.ephem.Observation
"""
# TODO this really shouldn't need to build a 'reading' to get the cutout...
from ossos.downloads.cutouts import downloader
dlm = downloader.ImageCutoutDownloader()
mpc_obs = deepcopy(mpc_in)
assert isinstance(mpc_obs, mp_ephem.ephem.Observation)
assert isinstance(mpc_obs.comment, mp_ephem.ephem.OSSOSComment)
if mpc_obs.null_observation:
return mpc_obs
parts = re.search('(?P<expnum>\d{7})(?P<type>\S)(?P<ccd>\d\d)', mpc_obs.comment.frame)
if parts is None:
return mpc_obs
expnum = parts.group('expnum')
ccd = parts.group('ccd')
file_type = parts.group('type')
observation = astrom.Observation(expnum, file_type, ccd)
assert isinstance(observation, astrom.Observation)
ast_header = _connection_error_wrapper(storage._get_sghead, int(expnum))[int(ccd)+1]
filter_value = None
for keyword in ['FILTER', 'FILT1 NAME']:
filter_value = ast_header.get(keyword, None)
if filter_value is not None:
if filter_value.startswith('gri'):
filter_value = 'w'
else:
filter_value = filter_value[0]
break
# The ZP for the current astrometric lines is the pipeline one. The new ZP is in the astheader file.
new_zp = ast_header.get('PHOTZP')
# The .zeropoint.used value is likely the one used for the original photometry.
old_zp = _connection_error_wrapper(storage.get_zeropoint, int(expnum), int(ccd))
reading = astrom.SourceReading(float(mpc_obs.comment.x), float(mpc_obs.comment.y), float(mpc_obs.comment.x),
float(mpc_obs.comment.y), mpc_obs.coordinate.ra.degree,
mpc_obs.coordinate.dec.degree, float(mpc_obs.comment.x), float(mpc_obs.comment.y),
observation, ssos=True, from_input_file=True, null_observation=False,
discovery=mpc_obs.discovery)
cutout = _connection_error_wrapper(dlm.download_cutout, reading, needs_apcor=True)
cutout._zmag = new_zp
if ma
| cowboysmall/rosalind | src/textbook/rosalind_ba4l.py | Python | mit | 569 | 0.015817 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import table
import genetics
def main(argv):
int_mass = table.integer_mass(argv[0])
lines = files.read_lines(argv[1])
leaderboard = [([int_mass[p] for p in peptide], peptide) for peptide in lines[0].split()]
spectrum = [int(m) for m in lines[1].split()]
N = int(lines[2])
print ' '.join(leader[1] for leader in genetics.trim_leaderboard(leaderboard, spectrum, N))
if __name__ == "__main__":
main(sys.argv[1:])
| fsxfreak/club-suite | clubsuite/suite/models/__init__.py | Python | mit | 216 | 0 |
from .mdl_user import *
from .mdl_club import *
from .mdl_event import *
from .mdl_receipt import *
from .mdl_budget import *
from .mdl_division import *
from .mdl_eventsignin import *
from .mdl_joinrequest import *
| fabricehong/zim-desktop | zim/__init__.py | Python | gpl-2.0 | 7,256 | 0.008131 |
# -*- coding: utf-8 -*-
# Copyright 2008-2014 Jaap Karssenberg <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''
This is the development documentation of zim.
B{NOTE:} There is also some generic development documentation in the
"HACKING" folder in the source distribution. Please also have a look
at that if you want to help with zim development.
In this API documentation many of the methods with names starting with
C{do_} and C{on_} are not documented. The reason is that these are
signal handlers that are not part of the external API. They act upon
a signal but should never be called directly by other objects.
Overview
========
The script C{zim.py} is a thin wrapper around the C{main()} function
defined in L{zim.main}. This main function constructs a C{Command}
object that implements a specific commandline command. The C{Command}
object then either connects to a running instance of zim, or executes
the application.
To execute the application, the command typically constructs a
C{Notebook} object, a C{PluginManager} and a C{ConfigManager}. Then
depending on the command the graphical interface is constructed, a
webserver is started or some other action is executed on the notebook.
The C{Notebook} object is found in L{zim.notebook} and implements the
API for accessing and storing pages, attachments and other data in
the notebook folder.
The notebook works together with an C{Index} object which keeps a
SQLite database of all the pages to speed up notebook access and allows
to e.g. show a list of pages in the side pane of the user interface.
Another aspect of the notebook is the parsing of the wiki text in the
pages such that it can be shown in the interface or exported to another
format. See L{zim.formats} for implementations of different parsers.
All classes related to configuration are located in L{zim.config}.
The C{ConfigManager} handles looking up config files and provides them
for all components.
Plugins are defined as sub-modules of L{zim.plugins}. The
C{PluginManager} manages the plugins that are loaded and objects that
can be extended by plugins.
The graphical user interface is implemented in the L{zim.gui} module
and it's sub-modules. The webinterface is implemented in L{zim.www}.
The graphical interface uses a background process to coordinate
between running instances, this is implemented in L{zim.ipc}.
Functionality for exporting content is implemented in L{zim.exporter}.
And search functionality can be found in L{zim.search}.
Many classes in zim have signals which allow other objects to connect
to and listen for specific events. This allows for an event driven chain
of control, which is mainly used in the graphical interface, but is
also used elsewhere. If you are not familiar with event driven programs
please refer to a Gtk manual.
Infrastructure classes
----------------------
All functions and objects to interact with the file system can be
found in L{zim.fs}.
For executing external applications see L{zim.applications} or
L{zim.gui.applications}.
Some generic base classes and functions can be found in L{zim.utils}
@newfield signal: Signal, Signals
@newfield emits: Emits, Emits
@newfield implementation: Implementation
'''
# New epydoc fields defined above are intended as follows:
# @signal: signal-name (param1, param2): description
# @emits: signal
# @implementation: must implement / optional for sub-classes
# Bunch of meta data, used at least in the about dialog
__version__ = '0.62'
__url__='http://www.zim-wiki.org'
__author__ = 'Jaap Karssenberg <[email protected]>'
__copyright__ = 'Copyright 2008 - 2014 Jaap Karssenberg <[email protected]>'
__license__='''\
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
import os
import sys
import gettext
import logging
import locale
logger = logging.getLogger('zim')
#: This parameter can be set by ./setup.py, can be e.g. "maemo"
PLATFORM = None
########################################################################
## Note: all init here must happen before importing any other zim
## modules, so can not use zim.fs utilities etc.
## therefore ZIM_EXECUTABLE is a string, not an object
## Check executable and relative data dir
## (sys.argv[0] should always be correct, even for compiled exe)
if os.name == "nt":
# See notes in zim/fs.py about encoding expected by abspath
ZIM_EXECUTABLE = os.path.abspath(
unicode(sys.argv[0], sys.getfilesystemencoding())
)
else:
ZIM_EXECUTABLE = unicode(
os.path.abspath(sys.argv[0]),
sys.getfilesystemencoding()
)
## Initialize locale (needed e.g. for natural_sort)
locale.setlocale(locale.LC_ALL, '')
## Initialize gettext (maybe make this optional later for module use ?)
if os.name == "nt" and not os.environ.get('LANG'):
# Set locale config for gettext (other platforms have this by default)
# Using LANG because it is lowest prio - do not override other params
lang, enc = locale.getlocale()
os.environ['LANG'] = lang + '.' + enc
logging.info('Locale set to: %s', os.environ['LANG'])
_localedir = os.path.join(os.path.dirname(ZIM_EXECUTABLE), 'locale')
if not os.name == "nt":
_localedir = _localedir.encode(sys.getfilesystemencoding())
if os.path.isdir(_localedir):
# We are running from a source dir - use the locale data included there
gettext.install('zim', _localedir, unicode=True, names=('_', 'gettext', 'ngettext'))
else:
# Hope the system knows where to find the data
gettext.install('zim', None, unicode=True, names=('_', 'gettext', 'ngettext'))
########################################################################
## Now we are allowed to import sub modules
import zim.environ # initializes environment parameters
import zim.config
# Check if we can find our own data files
_file = zim.config.data_file('zim.png')
if not (_file and _file.exists()): #pragma: no cover
raise AssertionError(
'ERROR: Could not find data files in path: \n'
'%s\n'
'Try setting XDG_DATA_DIRS'
% map(str, zim.config.data_dirs())
)
def get_zim_revision():
'''Returns multiline string with bazaar revision info, if any.
Otherwise a string saying no info was found. Intended for debug
logging.
'''
try:
from zim._version import version_info
return '''\
Zim revision is:
branch: %(branch_nick)s
revision: %(revno)s %(revision_id)s
date: %(date)s''' % version_info
except ImportError:
return 'No bzr version-info found'
| crepererum/intbitset | tests/test_intbitset.py | Python | gpl-2.0 | 24,902 | 0.002972 |
# -*- coding: utf-8 -*-
##
## This file is part of intbitset.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013, 2014 CERN.
##
## intbitset is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## intbitset is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with intbitset; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the intbitset data structure."""
__revision__ = "$Id$"
import sys
import zlib
import six
import re
import pkg_resources
import unittest
class IntbitsetTest(unittest.TestCase):
"""Test functions related to intbitset data structure."""
if sys.version_info < (2, 7):
def assertIn(self, test_value, expected_set, msg=None):
if msg is None:
msg = "%s did not occur in %s" % (test_value, expected_set)
self.assert_(test_value in expected_set, msg)
def setUp(self):
from intbitset import intbitset
self.intbitset = intbitset
with open('tests/intbitset_example.int', 'rb') as f:
CFG_INTBITSET_BIG_EXAMPLE = f.read()
self.sets = [
[1024],
[10, 20],
[10, 40],
[60, 70],
[60, 80],
[10, 20, 60, 70],
[10, 40, 60, 80],
[1000],
[10000],
[23, 45, 67, 89, 110, 130, 174, 1002, 2132, 23434],
[700, 2000],
list(range(1000, 1100)),
[30], [31], [32], [33],
[62], [63], [64], [65],
[126], [127], [128], [129]
]
self.fncs_list = [
(intbitset.__and__, set.__and__, int.__and__, False),
(intbitset.__or__, set.__or__, int.__or__, False),
(intbitset.__xor__, set.__xor__, int.__xor__, False),
(intbitset.__sub__, set.__sub__, int.__sub__, False),
(intbitset.__iand__, set.__iand__, int.__and__, True),
(intbitset.__ior__, set.__ior__, int.__or__, True),
(intbitset.__ixor__, set.__ixor__, int.__xor__, True),
(intbitset.__isub__, set.__isub__, int.__sub__, True),
]
self.cmp_list = [
(intbitset.__eq__, set.__eq__, lambda x, y: cmp(x, y) == 0),
(intbitset.__ge__, set.__ge__, lambda x, y: cmp(x, y) >= 0),
(intbitset.__gt__, set.__gt__, lambda x, y: cmp(x, y) > 0),
(intbitset.__le__, set.__le__, lambda x, y: cmp(x, y) <= 0),
(intbitset.__lt__, set.__lt__, lambda x, y: cmp(x, y) < 0),
(intbitset.__ne__, set.__ne__, lambda x, y: cmp(x, y) != 0),
]
self.big_examples = [list(self.intbitset(CFG_INTBITSET_BIG_EXAMPLE))]
self.corrupted_strdumps = [
six.b("ciao"),
six.b(self.intbitset([2, 6000000]).strbits()),
six.b("djflsdkfjsdljfsldkfjsldjlfk"),
]
def tearDown(self):
del self.big_examples
del self.corrupted_strdumps
def _helper_sanity_test(self, intbitset1, msg=''):
wordbitsize = intbitset1.get_wordbitsize()
size1 = intbitset1.get_size()
allocated1 = intbitset1.get_allocated()
creator_list = intbitset1.extract_finite_list()
up_to1 = creator_list and max(creator_list) or -1
self.assertTrue(up_to1 <= size1 * wordbitsize < allocated1 * wordbitsize, "up_to1=%s, size1=%s, allocated1=%s while testing %s during %s" % (up_to1, size1 * wordbitsize, allocated1 * wordbitsize, intbitset1, msg))
tmp = self.intbitset(intbitset1.fastdump())
size2 = tmp.get_size()
allocated2 = tmp.get_allocated()
creator_list = tmp.extract_finite_list()
up_to2 = creator_list and max(creator_list) or -1
self.assertTrue(up_to2 <= size2 * wordbitsize < allocated2 * wordbitsize, "After serialization up_to2=%s, size2=%s, allocated2=%s while testing %s during %s" % (up_to2, size2 * wordbitsize, allocated2 * wordbitsize, intbitset1, msg))
def _helper_test_via_fncs_list(self, fncs, intbitset1, intbitset2):
orig1 = self.intbitset(intbitset1)
orig2 = self.intbitset(intbitset2)
msg = "Testing %s(%s, %s)" % (fncs[0].__name__, repr(intbitset1), repr(intbitset2))
trailing1 = intbitset1.is_infinite()
trailing2 = intbitset2.is_infinite()
if fncs[3]:
fncs[0](intbitset1, intbitset2)
trailing1 = fncs[2](trailing1, trailing2) > 0
up_to = intbitset1.extract_finite_list() and max(intbitset1.extract_finite_list()) or -1
else:
intbitset3 = fncs[0](intbitset1, intbitset2)
trailing3 = fncs[2](trailing1, trailing2) > 0
up_to = intbitset3.extract_finite_list() and max(intbitset3.extract_finite_list()) or -1
set1 = set(orig1.extract_finite_list(up_to))
set2 = set(orig2.extract_finite_list(up_to))
if fncs[3]:
fncs[1](set1, set2)
else:
set3 = fncs[1](set1, set2)
self._helper_sanity_test(intbitset1, msg)
self._helper_sanity_test(intbitset2, msg)
if fncs[3]:
self.assertEqual(set1 & set(intbitset1.extract_finite_list(up_to)), set(intbitset1.extract_finite_list(up_to)), "%s not equal to %s after executing %s(%s, %s)" % (set1, set(intbitset1.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(set1 | set(intbitset1.extract_finite_list(up_to)), set1, "%s not equal to %s after executing %s(%s, %s)" % (set1, set(intbitset1.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(trailing1, intbitset1.is_infinite(), "%s is not %s as it is supposed to be after executing %s(%s, %s)" % (intbitset1, trailing1 and 'infinite' or 'finite', fncs[0].__name__, repr(orig1), repr(orig2)))
else:
self._helper_sanity_test(intbitset3, msg)
self.assertEqual(set3 & set(intbitset3.extract_finite_list(up_to)), set(intbitset3.extract_finite_list(up_to)), "%s not equal to %s after executing %s(%s, %s)" % (set3, set(intbitset3.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(set3 | set(intbitset3.extract_finite_list(up_to)), set3, "%s not equal to %s after executing %s(%s, %s)" % (set3, set(intbitset3.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(trailing3, intbitset3.is_infinite(), "%s is not %s as it is supposed to be after executing %s(%s, %s)" % (intbitset3, trailing3 and 'infinite' or 'finite', fncs[0].__name__, repr(orig1), repr(orig2)))
def _helper_test_normal_set(self, fncs):
for set1 in self.sets:
for set2 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset(set2))
def _helper_test_empty_set(self, fncs):
for set1 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset([]))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset(set1))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset([]))
def _helper_test_inifinite_set(self, fncs):
for set1 in self.sets:
for set2 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset(set2, trailing_bits=True))
self._helper_test_via_fncs_list(fncs, self.intbitset(set1, trailing_bits=True), self.intbitset(set2))
self._helper_test_via_fncs_list(fncs, self.intbitset(set1, trailing_bits=True), self.intbitset(set2, trailing_bits=True))
def _helper_test_infinite_vs_em
| GreatFruitOmsk/nativeconfig | nativeconfig/configs/registry_config.py | Python | mit | 5,235 | 0.004585 |
import logging
import winreg
from nativeconfig.configs.base_config import BaseConfig
LOG = logging.getLogger('nativeconfig')
ERROR_NO_MORE_ITEMS = 259
ERROR_NO_MORE_FILES = 18
def traverse_registry_key(key, sub_key):
"""
Traverse registry key and yield one by one.
@raise WindowsError: If key cannot be opened (e.g. does not exist).
"""
current_key = winreg.OpenKey(key, sub_key, 0, winreg.KEY_ALL_ACCESS)
try:
i = 0
while True:
next_key = winreg.EnumKey(current_key, i)
for k in traverse_registry_key(key, r'{}\{}'.format(sub_key, next_key)):
yield k
i += 1
except OSError:
yield sub_key
class RegistryConfig(BaseConfig):
"""
Store config in Windows Registry.
@cvar REGISTRY_KEY: Key in the registry where config will be stored.
@cvar REGISTRY_PATH: Path relative to REGISTRY_KEY that points to the config.
"""
LOG = LOG.getChild('RegistryConfig')
REGISTRY_KEY = winreg.HKEY_CURRENT_USER
def __init__(self):
k = winreg.CreateKey(self.REGISTRY_KEY, self.REGISTRY_PATH)
winreg.CloseKey(k)
super(RegistryConfig, self).__init__()
#{ BaseConfig
def get_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
try:
value, value_type = winreg.QueryValueEx(app_key, name)
if not value_type == winreg.REG_SZ:
raise ValueError("value must be a REG_SZ")
return value
except OSError:
pass
except:
self.LOG.exception("Unable to get \"%s\" from the registry:", name)
return None
def set_value_cache_free(self, name, raw_value):
try:
if raw_value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_SZ, raw_value)
else:
self.del_value_cache_free(name)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def del_value_cache_free(self, name):
try:
try:
for k in traverse_registry_key(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)):
winreg.DeleteKey(self.REGISTRY_KEY, k)
except OSError:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_ALL_ACCESS) as app_key:
winreg.DeleteValue(app_key, name)
except:
self.LOG.info("Unable to delete \"%s\" from the registry:", name)
def get_array_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
value, value_type = winreg.QueryValueEx(app_key, name)
if not value_type == winreg.REG_MULTI_SZ:
raise ValueError("value must be a REG_MULTI_SZ")
return value
except:
self.LOG.info("Unable to get array \"%s\" from the registry:", name, exc_info=True)
return None
def set_array_value_cache_free(self, name, value):
try:
if value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_MULTI_SZ, value)
else:
self.del_value_cache_free(name)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def get_dict_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name), 0, winreg.KEY_ALL_ACCESS) as app_key:
v = {}
try:
i = 0
while True:
name, value, value_type = winreg.EnumValue(app_key, i)
if value_type != winreg.REG_SZ:
raise ValueError("value must be a REG_SZ")
if value is not None:
v[name] = value
i += 1
except OSError as e:
if e.winerror != ERROR_NO_MORE_ITEMS and e.winerror != ERROR_NO_MORE_FILES:
raise
else:
pass # end of keys
return v
except:
self.LOG.info("Unable to get dict '%s' from the registry:", name, exc_info=True)
return None
def set_dict_value_cache_free(self, name, value):
try:
self.del_value_cache_free(name)
if value is not None:
with winreg.CreateKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)) as app_key:
for k, v in value.items():
winreg.SetValueEx(app_key, k, 0, winreg.REG_SZ, v)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
#}
| ychen820/microblog | y/google-cloud-sdk/platform/gsutil/third_party/oauth2client/oauth2client/tools.py | Python | bsd-3-clause | 8,468 | 0.006731 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__author__ = '[email protected] (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
import webbrowser
from six.moves import BaseHTTPServer
from six.moves import urllib
from oauth2client import client
from oauth2client import util
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(urllib.parse.parse_qsl(query))
self.server.query_params = query
self.wfile.write("<html><head><title>Authentication Status</title></head>")
self.wfile.write("<body><p>The authentication flow has completed.</p>")
self.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization.;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
The tools module defines an ArgumentParser that already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print('Failed to start a local webserver listening on either port 8080')
print('or port 9090. Please check your firewall settings and locally')
print('running programs that may be blocking or using those ports.')
print()
print('Falling back to --noauth_local_webserver and continuing with')
print('authorization.')
print()
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print('Your browser has been opened to visit:')
print()
print(' ' + authorize_url)
print()
print('If your browser is on a different machine then exit and re-run this')
print('application with the command-line parameter ')
print()
print(' --noauth_local_webserver')
print()
else:
print('Go to the following link in your browser:')
print()
print(' ' + authorize_url)
print()
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError as e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print('Authentication successful.')
return credential
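# A minimal usage sketch, assuming a client_secrets.json file, a read-only Drive
# scope and a local credentials.dat store (all placeholder assumptions), showing
# how the flags described in the docstring above are typically wired up:
#
#   import argparse
#   from oauth2client import client, tools
#   from oauth2client.file import Storage
#
#   parser = argparse.ArgumentParser(parents=[tools.argparser])
#   flags = parser.parse_args()
#   flow = client.flow_from_clientsecrets(
#       'client_secrets.json',
#       scope='https://www.googleapis.com/auth/drive.metadata.readonly')
#   storage = Storage('credentials.dat')
#   credentials = tools.run_flow(flow, storage, flags)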
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from oauth2client.old_run impo
|
mass-project/mass_server
|
mass_flask_config/app.py
|
Python
|
mit
| 2,133
| 0.002344
|
import os
import subprocess
from pymongo import MongoClient
from flask import Flask, redirect, url_for, request, flash
from flask_bootstrap import Bootstrap
from flask_mongoengine import MongoEngine
from flask_modular_auth import AuthManager, current_authenticated_entity, SessionBasedAuthProvider, KeyBasedAuthProvider
from .reverse_proxy import ReverseProxied
# Initialize app
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Generate or load secret key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
app.secret_key = open(SECRET_FILE).read().strip()
except IOError:
try:
import random
app.secret_key = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
secret = open(SECRET_FILE, 'w')
secret.write(app.secret_key)
secret.close()
except IOError:
raise Exception('Please create a %s file with random characters \
to generate your secret key!' % SECRET_FILE)
# Load config
config_path = os.getenv('CONFIG_PATH', 'mass_flask_config.config_development.DevelopmentConfig')
app.config.from_object(config_path)
# Init db
db = MongoEngine(app)
# Init flask-bootstrap
Bootstrap(app)
# Init auth system
def setup_session_auth(user_loader):
app.session_provider = SessionBasedAuthProvider(user_loader)
auth_manager.register_auth_provider(app.session_provider)
def setup_key_based_auth(key_loader):
app.key_based_provider = KeyBasedAuthProvider(key_loader)
auth_manager.register_auth_provider(app.key_based_provider)
def unauthorized_callback():
if current_authenticated_entity.is_authenticated:
flash('You are not authorized to access this resource!', 'warning')
return redirect(url_for('mass_flask_webui.index'))
else:
return redirect(url_for('mass_flask_webui.login', next=request.url))
auth_manager = AuthManager(app, unauthorized_callback=unauthorized_callback)
# Set the version number. For the future we should probably read it from a file.
app.version = '1.0-alpha1'
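# A minimal sketch of how the helpers above are meant to be called during app
# setup, assuming MongoEngine User and ApiKey documents looked up by id and key
# respectively (the model names, fields and import path are placeholder assumptions):
#
#   from mass_flask_core.models import User, ApiKey
#
#   setup_session_auth(lambda user_id: User.objects(id=user_id).first())
#   setup_key_based_auth(lambda key: ApiKey.objects(api_key=key).first())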
|
ferchault/iago
|
docs/conf.py
|
Python
|
mit
| 8,700
| 0.006897
|
# -*- coding: utf-8 -*-
#
# iago documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 28 18:57:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx']
autodoc_mock_imports = ['pint',]
intersphinx_mapping = {
'python': ('https://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
'mdanalysis': ('http://pythonhosted.org/MDAnalysis', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'iago'
copyright = u'2016, Guido Falk von Rudorff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'iagodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'iago.tex', u'iago Documentation',
u'Guido Falk von Rudorff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iago', u'iago Documentation',
[u'Guido Falk von Rudorff'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, c
|
malmiron/incubator-airflow
|
tests/operators/test_http_operator.py
|
Python
|
apache-2.0
| 1,793
| 0
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import requests_mock
from airflow.operators.http_operator import SimpleHttpOperator
try:
from unittest import mock
except ImportError:
import mock
class SimpleHttpOpTests(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HTTP_EXAMPLE'] = 'http://www.example.com'
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
the log contains the mocked response text
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
mock_info.assert_called_with('Example.com fake response')
|
sakura-internet/saklient.python
|
saklient/cloud/models/model_routerplan.py
|
Python
|
mit
| 3,430
| 0.0117
|
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.routerplan import RouterPlan
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_routerplan
class Model_RouterPlan(Model):
## A class providing functionality to search router bandwidth plans.
## @private
# @return {str}
def _api_path(self):
return "/product/internet"
## @private
# @return {str}
def _root_key(self):
return "InternetPlan"
## @private
# @return {str}
def _root_key_m(self):
return "InternetPlans"
## @private
# @return {str}
def _class_name(self):
return "RouterPlan"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return RouterPlan(self._client, obj, wrapped)
## Specifies the starting offset of the next list to fetch.
#
# @param {int} offset offset
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
## Specifies the maximum number of records of the next list to fetch.
#
# @param {int} count maximum number of records
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def limit(self, count):
Util.validate_type(count, "int")
return self._limit(count)
## Directly specifies the filtering settings for the Web API.
#
# @param {str} key key
# @param {any} value value
# @param {bool} multiple=False Set to true to pass an array as value and perform an exact-match OR search. Normally value is a scalar and a fuzzy search is performed.
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
## Discards all state configured for the next request.
#
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def reset(self):
return self._reset()
## Fetches the unique resource with the specified ID.
#
# @param {str} id
# @return {saklient.cloud.resources.routerplan.RouterPlan} resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
## Executes the resource search request and returns the results as a list.
#
# @return {saklient.cloud.resources.routerplan.RouterPlan[]} array of resource objects
def find(self):
return self._find()
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_RouterPlan, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
|
lipk/pyzertz
|
pyzertz/table_view.py
|
Python
|
apache-2.0
| 3,483
| 0.014643
|
from tile_view import *
from table import *
from game_state import *
from circ_button import *
class TableView:
GREY = (100,100,100)
BLACK = (0,0,0)
WHITE = (255,255,255)
BGCOLOR = (60,60,100)
TILE_COLOR = (90, 255, 90)
TILE_RADIUS = 30
TABLE_POS = (245, 90)
# table : Table
# pl1, pl2: Player
# game_board: list(TileView)
# pl1_view pl2_view
# marble_stack: the available marbles
def __init__(self, state: State, surface: pygame.Surface):
pl1 = state.pl1
pl2 = state.pl2
table = state.t
self.game_board = []
r = TableView.TILE_RADIUS
for i in range(-3,4):
for j in range(-3,4):
if math.fabs(i+j)<=3:
self.game_board.append(TileView(table.get(i,j),\
TableView.TABLE_POS[0], TableView.TABLE_POS[1],\
r, TableView.TILE_COLOR))#, lambda btn : print(btn.col, btn.row)))
W = surface.get_width()
H = surface.get_height()
self.pl1_view = PlayerView(pl1, (0,0))
self.pl2_view = PlayerView(pl2, (W-TableView.TILE_RADIUS*2,0))
self.marble_stack = []
self.marble_stack.append(CircleButton(int(W/2-r*3), H-r*2, r, \
TableView.WHITE, str(table.marbles[0])))
self.marble_stack.append(CircleButton(int(W/2), H-r*2, r, \
TableView.GREY, str(table.marbles[1])))
self.marble_stack.append(CircleButton(int(W/2+r*3), H-r*2, r, \
TableView.BLACK, str(table.marbles[2])))
def draw(self, surface: pygame.Surface, state: State):
surface.fill(TableView.BGCOLOR)
for tile in self.game_board:
tile.draw_button(surface,state.t.get(tile.col, tile.row))
self.pl1_view.draw(surface, state.pl1)
self.pl2_view.draw(surface, state.pl2)
for i in range(len(state.t.marbles)):
btn = self.marble_stack[i]
btn.text = str(state.t.marbles[i])
btn.draw_button(surface)
def get_pressed_tile(self, pos):
for tile in self.game_board:
if tile.pressed(pos):
return (tile.col, tile.row)
return None
def get_pressed_marble(self, pos):
for (i,marble) in enumerate(self.marble_stack):
if marble.pressed(pos):
return i
return None
class PlayerView:
def __init__(self, pl: Player, pos: (int, int)):
self.pos = pos
r = int(TableView.TILE_RADIUS/2)
self.buttons = [CircleButton(pos[0]+r*2, \
r*3, r, \
TableView.WHITE, str(pl.marbles[0]))]
self.buttons.append(CircleButton(pos[0]+r*2, \
r*6, r, \
TableView.GREY, str(pl.marbles[1])))
self.buttons.append(CircleButton(pos[0]+r*2, \
r*9, r, \
TableView.BLACK, str(pl.marbles[2])))
def draw(self, surface: pygame.Surface, player: Player):
for i in range(len(self.buttons)):
btn = self.buttons[i]
btn.text = str(player.marbles[i])
btn.draw_button(surface)
font_size = int(TableView.TILE_RADIUS*3//len(player.name))
myFont = pygame.font.SysFont("Calibri", font_size)
myText = myFont.render(player.name, 1, (0,0,0))
surface.blit(myText, (self.pos[0]+TableView.TILE_RADIUS/2,\
self.pos[1] + TableView.TILE_RADIUS/2))
|
carcinogoy/MemeRepo
|
MemeRepo/delete.py
|
Python
|
mit
| 517
| 0.009671
|
import flask
import MemeRepo.db as db
import MemeRepo.funcs as fnc
from MemeRepo.config import config
def handle(code, uri):
result = db.get_file(uri)
if result == None:
return flask.render_template("error.html", msg="That file does not exist", code="400"), 400
else:
if result['owner'] == code:
db.delete_file(uri)
return 'deleted'
else:
return flask.render_template("error.html", msg="You do not own that file", code="403"), 403
|
sxjscience/tvm
|
apps/topi_recipe/gemm/android_gemm_square.py
|
Python
|
apache-2.0
| 4,440
| 0.00045
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication on Android Phone."""
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import util, ndk
import numpy as np
# Set to be address of tvm proxy.
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
def ngflops(N):
return 2.0 * float(N * N * N) / (10 ** 9)
dtype = "float32"
def evaluate(func, ctx, N, times):
a_np = np.random.uniform(size=(N, N)).astype(dtype)
b_np = np.random.uniform(size=(N, N)).astype(dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((N, N), dtype=dtype), ctx)
time_f = func.time_evaluator(func.entry_name, ctx, number=times)
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.asnumpy(), a_np.dot(b_np), decimal=2)
def test_gemm_gpu(N, times, bn, num_block, num_thread):
assert bn <= N
assert num_thread * num_thread * 16 <= N
assert num_block * num_block * 2 <= N
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="Btmp")
k = te.reduce_axis((0, N), name="k")
packedB = te.compute((N, N / bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")
C = te.compute(
(N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj / bn, jj % bn], axis=k), name="C"
)
s = te.create_schedule(C.op)
CC = s.cache_write(C, "local")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
s[packedB].bind(pby, thread_y)
s[packedB].bind(pbx, thread_x)
pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
s[packedB].vectorize(pbk)
by, yi = s[C].split(C.op.axis[0], nparts=num_block)
bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)
s[C].bind(by, block_y)
s[C].bind(bx, thread_y)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_block)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, block_x)
s[C].bind(tx, thread_x)
xyi, xxi = s[C].split(xi, factor=8)
s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
s[C].vectorize(xxi)
s[CC].compute_at(s[C], yi)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
xo, xi = s[CC].split(xo, factor=8)
s[CC].vectorize(xi)
ko, ki = s[CC].split(k, factor=2)
s[CC].unroll(ki)
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], "opencl", target_host=target, name="gemm_gpu")
temp = util.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
# connect to the proxy
remote = rpc.connect(proxy_host, proxy_port, key=key)
ctx = remote.cl(0)
remote.upload(path_dso)
f = remote.load_module("gemm_gpu.so")
evaluate(f, ctx, N, times)
if __name__ == "__main__":
test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
|
warrieka/portal4argis_tools
|
portal/csvportal.py
|
Python
|
mit
| 4,761
| 0.018694
|
import arcpy, os, json, csv
from portal import additem, shareItem, generateToken, getUserContent, updateItem, getGroupID, deleteItem, getGroupContent
from metadata import metadata
from ESRImapservice import ESRImapservice
class csvportal(object):
def __init__(self, user, password, portal, worksspace, groups=[]):
"""Connect to portal with username and pasword, also set the local workspace"""
self.user = user
self.password = password
self.portal = portal
self.groups = groups
self.token = generateToken(self.user, self.password, self.portal)
self.groupIDs = [getGroupID(g, self.token, self.portal) for g in self.groups]
if len(self.groupIDs) == 0:
self.userContent = getUserContent(user, '', self.token, self.portal )
else:
self.userContent = getGroupContent(self.groups[0], self.token, self.portal)
self.existingIDs = { n['title'] : n['id'] for n in self.userContent["items"]}
self.LayersFoundinMXD = []
self.ws = worksspace
if worksspace: arcpy.env.workspace = worksspace
def updateToken(self):
"""refresh the token, might be necessary if becomes invalid"""
self.token = generateToken(self.user, self.password, self.portal)
return self.token
def uploadCsv(self, csvpath, sep=";", headerlines=1, nameCol=0, pathCol=1, urlCol=2):
"""upload every row in a csv"""
with open( csvpath , 'rb') as csvfile:
nr = 0
csv_reader = csv.reader(csvfile, dialect=csv.excel, delimiter=sep)
for n in range(headerlines): csv_reader.next()
for row in csv_reader:
line = [unicode(cell, 'latin-1') for cell in row]
name, ds, url = (line[nameCol], line[pathCol], line[urlCol])
if self.ws and os.path.dirname(ds).endswith('.sde'):
ds = os.path.join(self.ws , os.path.basename(ds) )
self.addLyr(ds, name, url, self.groupIDs)
#generate new token every 50 uses
if not nr%50 : self.token = generateToken(self.user, self.password, self.portal)
nr += 1
##TODO: DELETE layers in group and not in csv
def addLyr(self, dataSource, name, serviceUrl, groupIDs=[]):
"""Add *dataSource* to *portal* for *user* , as a item with *name*
representing a layer in *service* """
meta = metadata.metadataFromArcgis( dataSource )
author = meta.credits if len( meta.credits ) else "Stad Antwerpen"
descrip = ( "<strong>"+ meta.title +"</strong> <div><em>"+
meta.orgname + "</em></div> " + meta.description +
"\n<br/> Creatiedatum: " + meta.createDate +
"\n<br/> Publicatiedatum: " + meta.pubDate +
"\n<br/> Revisiedatum: " + meta.reviseDate +
"\n<br/> Beheer: " + meta.contacts +
"\n<br/> Contact: " + meta.eMails )
if name in self.existingIDs.keys():
self.LayersFoundinMXD.append(name)
arcpy.AddMessage( "updating " + name )
item = updateItem(self.user, self.token, self.portal, self.existingIDs[name], serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags))
else:
arcpy.AddMessage( "adding " + name )
item = additem(self.user, self.token, self.portal, serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags) )
if "success" in item.keys() and item["success"]:
id = item["id"]
arcpy.AddMessage( shareItem(id, self.token, self.portal, True, True, groupIDs) )
elif "success" in item.keys() and not item["success"]:
raise Exception( "Error uploading "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result))
def delLyr(self, name):
if name in self.existingIDs.keys():
result = deleteItem(self.existingIDs[name] , self.token, self.portal, self.user)
if "success" in result.keys() and result["success"]:
arcpy.AddMessage("Deleted layer: " + name )
elif "success" in result.keys() and not result["success"]:
raise Exception( "Error deleting "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result))
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/win32com/gen_py/C866CA3A-32F7-11D2-9602-00C04F8EE628x0x5x4.py
|
Python
|
gpl-3.0
| 308,803
| 0.056677
|
# -*- coding: mbcs -*-
# Created by makepy.py version 0.5.01
# By python version 2.7.9 (default, Dec 10 2014, 12:28:03) [MSC v.1500 64 bit (AMD64)]
# From type library '{C866CA3A-32F7-11D2-9602-00C04F8EE628}'
# On Fri Jun 30 10:48:37 2017
'Microsoft Speech Object Library'
makepy_version = '0.5.01'
python_version = 0x20709f0
import win32com.client.CLSIDToClass, pythoncom, pywintypes
import win32com.client.util
from pywintypes import IID
from win32com.client import Dispatch
# The following 3 lines may need tweaking for the particular server
# Candidates are pythoncom.Missing, .Empty and .ArgNotFound
defaultNamedOptArg=pythoncom.Empty
defaultNamedNotOptArg=pythoncom.Empty
defaultUnnamedArg=pythoncom.Empty
CLSID = IID('{C866CA3A-32F7-11D2-9602-00C04F8EE628}')
MajorVersion = 5
MinorVersion = 4
LibraryFlags = 8
LCID = 0x0
class constants:
DISPID_SRGCmdLoadFromFile =7 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromMemory =10 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromObject =8 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromProprietaryGrammar=11 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromResource =9 # from enum DISPIDSPRG
DISPID_SRGCmdSetRuleIdState =13 # from enum DISPIDSPRG
DISPID_SRGCmdSetRuleState =12 # from enum DISPIDSPRG
DISPID_SRGCommit =6 # from enum DISPIDSPRG
DISPID_SRGDictationLoad =14 # from enum DISPIDSPRG
DISPID_SRGDictationSetState =16 # from enum DISPIDSPRG
DISPID_SRGDictationUnload =15 # from enum DISPIDSPRG
DISPID_SRGId =1 # from enum DISPIDSPRG
DISPID_SRGIsPronounceable =19 # from enum DISPIDSPRG
DISPID_SRGRecoContext =2 # from enum DISPIDSPRG
DISPID_SRGReset =5 # from enum DISPIDSPRG
DISPID_SRGRules =4 # from enum DISPIDSPRG
DISPID_SRGSetTextSelection =18 # from enum DISPIDSPRG
DISPID_SRGSetWordSequenceData =17 # from enum DISPIDSPRG
DISPID_SRGState =3 # from enum DISPIDSPRG
DISPIDSPTSI_ActiveLength =2 # from enum DISPIDSPTSI
DISPIDSPTSI_ActiveOffset =1 # from enum DISPIDSPTSI
DISPIDSPTSI_SelectionLength =4 # from enum DISPIDSPTSI
DISPIDSPTSI_SelectionOffset =3 # from enum DISPIDSPTSI
DISPID_SABufferInfo =201 # from enum DISPID_SpeechAudio
DISPID_SABufferNotifySize =204 # from enum DISPID_SpeechAudio
DISPID_SADefaultFormat =202 # from enum DISPID_SpeechAudio
DISPID_SAEventHandle =205 # from enum DISPID_SpeechAudio
DISPID_SASetState =206 # from enum DISPID_SpeechAudio
DISPID_SAStatus =200 # from enum DISPID_SpeechAudio
DISPID_SAVolume =203 # from enum DISPID_SpeechAudio
DISPID_SABIBufferSize =2 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SABIEventBias =3 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SABIMinNotification =1 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SAFGetWaveFormatEx =3 # from enum DISPID_SpeechAudioFormat
DISPID_SAFGuid =2 # from enum DISPID_SpeechAudioFormat
DISPID_SAFSetWaveFormatEx =4 # from enum DISPID_SpeechAudioFormat
DISPID_SAFType =1 # from enum DISPID_SpeechAudioFormat
DISPID_SASCurrentDevicePosition=5 # from enum DISPID_SpeechAudioStatus
DISPID_SASCurrentSeekPosition =4 # from enum DISPID_SpeechAudioStatus
DISPID_SASFreeBufferSpace =1 # from enum DISPID_SpeechAudioStatus
DISPID_SASNonBlockingIO =2 # from enum DISPID_SpeechAudioStatus
DISPID_SASState =3 # from enum DISPID_SpeechAudioStatus
DISPID_SBSFormat =1 # from enum DISPID_SpeechBaseStream
DISPID_SBSRead =2 # from enum DISPID_SpeechBaseStream
DISPID_SBSSeek =4 # from enum DISPID_SpeechBaseStream
DISPID_SBSWrite =3 # from enum DISPID_SpeechBaseStream
DISPID_SCSBaseStream =100 # from enum DISPID_SpeechCustomStream
DISPID_SDKCreateKey =8 # from enum DISPID_SpeechDataKey
DISPID_SDKDeleteKey =9 # from enum DISPID_SpeechDataKey
DISPID_SDKDeleteValue =10 # from enum DISPID_SpeechDataKey
DISPID_SDKEnumKeys =11 # from enum DISPID_SpeechDataKey
DISPID_SDKEnumValues =12 # from enum DISPID_SpeechDataKey
DISPID_SDKGetBinaryValue =2 # from enum DISPID_SpeechDataKey
DISPID_SDKGetStringValue =4 # from enum DISPID_SpeechDataKey
DISPID_SDKGetlongValue =6 # from enum DISPID_SpeechDataKey
DISPID_SDKOpenKey =7 # from enum DISPID_SpeechDataKey
DISPID_SDKSetBinaryValue =1 # from enum DISPID_SpeechDataKey
DISPID_SDKSetLongValue =5 # from enum DISPID_SpeechDataKey
DISPID_SDKSetStringValue =3 # from enum DISPID_SpeechDataKey
DISPID_SFSClose =101 # from enum DISPID_SpeechFileStream
DISPID_SFSOpen =100 # from enum DISPID_SpeechFileStream
DISPID_SGRAddResource =6 # from enum DISPID_SpeechGrammarRule
DISPID_SGRAddState =7 # from enum DISPID_SpeechGrammarRule
DISPID_SGRAttributes =1 # from enum DISPID_SpeechGrammarRule
DISPID_SGRClear =5 # from enum DISPID_SpeechGrammarRule
DISPID_SGRId =4 # from enum DISPID_SpeechGrammarRule
DISPID_SGRInitialState =2 # from enum DISPID_SpeechGrammarRule
DISPID_SGRName =3 # from enum DISPID_SpeechGrammarRule
DISPID_SGRSAddRuleTransition =4 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSAddSpecialTransition=5 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSAddWordTransition =3 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSRule =1 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSTransitions =2 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSTNextState =8 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyId =6 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyName =5 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyValue =7 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTRule =3 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTText =2 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTType =1 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTWeight =4 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTsCount =1 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRSTsItem =0 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRSTs_NewEnum =-4 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRsAdd =3 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCommit =4 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCommitAndSave =5 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCount =1 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsDynamic =2 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsFindRule =6 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsItem =0 # from enum DISPID_SpeechGrammarRules
DISPID_SGRs_NewEnum =-4 # from enum DISPID_SpeechGrammarRules
DISPID_SLAddPronunciation =3 # from enum DISPID_SpeechLexicon
DISPID_SLAddPronunciationByPhoneIds=4 # from enum DISPID_SpeechLexicon
DI
|
ovresko/erpnext
|
erpnext/setup/doctype/supplier_group/supplier_group.py
|
Python
|
gpl-3.0
| 639
| 0.017214
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
class SupplierGroup(NestedSet):
nsm_parent_field = 'parent_supplier_group'
def validate(self):
if not self.parent_supplier_group:
self.parent_supplier_group = get_root_of("Supplier Group")
def on_update(self):
NestedSet.on_update(self)
self.validate_one_root()
def on_trash(self):
NestedSet.validate_if_child_exists(self)
frappe.utils.nestedset.update_nsm(self)
|
bottero/IMCMCrun
|
examples/Synthetic1/createSyntheticRandomProfile.py
|
Python
|
mit
| 6,749
| 0.017188
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 21:31:53 2015
Create random synthetic velocity profile + linear first guesses
@author: alex
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
plt.close('all')
random.seed(2)
X = range(256)
Z = np.linspace(0,1000,256)
pos = 50
posVpVs = 1.7
L = np.array([pos])
vpvs = np.array([posVpVs])
for x in X[1:]:
pos += random.choice((-0.9,1)) #random.choice((-1,1))
posVpVs += random.choice((-0.02,0.02))
L=np.append(L,pos)
vpvs = np.append(vpvs,posVpVs)
L=70*L
Vp = savitzky_golay(L, 51, 3) # window size 51, polynomial order 3
A = np.array([ Z, np.ones(256)])
Vs = Vp/savitzky_golay(vpvs, 51, 3) # window size 51, polynomial order 3
w = np.linalg.lstsq(A.T,Vp)[0] # obtaining the parameters
# plotting the line
lineP = w[0]*Z+w[1]+500 # regression line
w = np.linalg.lstsq(A.T,Vs)[0] # obtaining the parameters
# plotting the line
lineS = w[0]*Z+w[1]-250 # regression line
plt.figure()
plt.hold(True)
plt.plot(L,Z,label="Random walk")
plt.plot(Vp,Z,linewidth=4,label="P wave velocity from this random walk")
plt.plot(lineP,Z,linewidth=4,label="First guess")
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.legend()
plt.figure()
plt.hold(True)
plt.plot(vpvs,Z,linewidth=4,label="Random walk vp/vs")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.figure()
plt.hold(True)
plt.plot(Vs,Z,linewidth=4,label="S wave velocity from random vp/vs")
plt.plot(lineS,Z,linewidth=4,label="First guess")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# Save profiles
np.savetxt("dataExample/realProfileP.txt",np.dstack((Z,Vp))[0])
np.savetxt("dataExample/realProfileS.txt",np.dstack((Z,Vs))[0])
np.savetxt("dataExample/firstGuessP.txt",np.dstack((Z,lineP))[0])
np.savetxt("dataExample/firstGuessS.txt",np.dstack((Z,lineS))[0])
#####################################################################
coordShotsX=[300,500]
coordShotsY=[400]
coordShotsZ=[650]
coordStatsX=[200,300,400,500,600]
coordStatsY=[200,300,400,500,600]
coordStatsZ=[200,300,400,500,600]
Xshots=[]
Yshots=[]
Zshots=[]
Xstats=[]
Ystats=[]
Zstats=[]
#Open a file in write mode:
fo = open("dataExample/coordShots.txt", "w+")
for coordX in coordShotsX:
for coordY in coordShotsY:
for coordZ in coordShotsZ:
Xshots.append(coordX)
Yshots.append(coordY)
Zshots.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
#Open a file in write mode:
fo = open("dataExample/coordStats.txt", "w+")
for coordX in coordStatsX:
for coordY in coordStatsY:
for coordZ in coordStatsZ:
Xstats.append(coordX)
Ystats.append(coordY)
Zstats.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(Xstats,Ystats,Zstats,zdir='z',s=20,c='b')
if (len(coordShotsX) > 3):
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(min(min(Xshots),min(Xstats))-100,max(max(Xshots),max(Xstats))+100)
ax.set_ylim3d(min(min(Yshots),min(Ystats))-100,max(max(Yshots),max(Ystats))+100)
ax.set_zlim3d(min(min(Zshots),min(Zstats))-100,max(max(Zshots),max(Zstats))+100)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()
|
mfherbst/spack
|
lib/spack/spack/repo.py
|
Python
|
lgpl-2.1
| 42,050
| 0.000143
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
import os
import stat
import shutil
import errno
import sys
import inspect
import imp
import re
import traceback
import tempfile
import json
from contextlib import contextmanager
from six import string_types
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from types import ModuleType
import yaml
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, install
import spack.config
import spack.caches
import spack.error
import spack.spec
from spack.provider_index import ProviderIndex
from spack.util.path import canonicalize_path
from spack.util.naming import NamespaceTrie, valid_module_name
from spack.util.naming import mod_to_class, possible_spack_module_names
#: Super-namespace for all packages.
#: Package modules are imported as spack.pkg.<namespace>.<pkg-name>.
repo_namespace = 'spack.pkg'
#
# These names describe how repos should be laid out in the filesystem.
#
repo_config_name = 'repo.yaml' # Top-level filename for repo config.
repo_index_name = 'index.yaml' # Top-level filename for repository index.
packages_dir_name = 'packages' # Top-level repo directory containing pkgs.
package_file_name = 'package.py' # Filename for packages in a repository.
#: Guaranteed unused default value for some functions.
NOT_PROVIDED = object()
#: Code in ``_package_prepend`` is prepended to imported packages.
#:
#: Spack packages were originally expected to call `from spack import *`
#: themselves, but it became difficult to manage and imports in the Spack
#: core the top-level namespace polluted by package symbols this way. To
#: solve this, the top-level ``spack`` package contains very few symbols
#: of its own, and importing ``*`` is essentially a no-op. The common
#: routines and directives that packages need are now in ``spack.pkgkit``,
#: and the import system forces packages to automatically include
#: this. This way, old packages that call ``from spack import *`` will
#: continue to work without modification, but it's no longer required.
#:
#: TODO: At some point in the future, consider removing ``from spack import *``
#: TODO: from packages and shifting to from ``spack.pkgkit import *``
_package_prepend = 'from spack.pkgkit import *'
def _autospec(function):
"""Decorator that automatically converts the argument of a single-arg
function to a Spec."""
def converter(self, spec_like, *args, **kwargs):
if not isinstance(spec_like, spack.spec.Spec):
spec_like = spack.spec.Spec(spec_like)
return function(self, spec_like, *args, **kwargs)
return converter
class SpackNamespace(ModuleType):
""" Allow lazy loading of modules."""
def __init__(self, namespace):
super(SpackNamespace, self).__init__(namespace)
self.__file__ = "(spack namespace)"
self.__path__ = []
self.__name__ = namespace
self.__package__ = namespace
self.__modules = {}
def __getattr__(self, name):
"""Getattr lazily loads modules if they're not already loaded."""
submodule = self.__package__ + '.' + name
setattr(self, name, __import__(submodule))
return getattr(self, name)
class FastPackageChecker(Mapping):
"""Cache that maps package names to the stats obtained on the
'package.py' files associated with them.
For each repository a cache is maintained at class level, and shared among
all instances referring to it. Update of the global cache is done lazily
during instance initialization.
"""
#: Global cache, reused by every instance
_paths_cache = {}
def __init__(self, packages_path):
# The path of the repository managed by this instance
self.packages_path = packages_path
# If the cache we need is not there yet, then build it appropriately
if packages_path not in self._paths_cache:
self._paths_cache[packages_path] = self._create_new_cache()
#: Reference to the appropriate entry in the global cache
self._packages_to_stats = self._paths_cache[packages_path]
def _create_new_cache(self):
"""Create a new cache for packages in a repo.
The implementation here should try to minimize filesystem
calls. At the moment, it is O(number of packages) and makes
about one stat call per package. This is reasonably fast, and
avoids actually importing packages in Spack, which is slow.
"""
# Create a dictionary that will store the mapping between a
# package name and its stat info
cache = {}
for pkg_name in os.listdir(self.packages_path):
# Skip non-directories in the package root.
pkg_dir = os.path.join(self.packages_path, pkg_name)
# Warn about invalid names that look like packages.
if not valid_module_name(pkg_name):
msg = 'Skipping package at {0}. '
msg += '"{1}" is not a valid Spack module name.'
tty.warn(msg.format(pkg_dir, pkg_name))
continue
# Construct the file name from the directory
pkg_file = os.path.join(
self.packages_path, pkg_name, package_file_name
)
# Use stat here to avoid lots of calls to the filesystem.
try:
sinfo = os.stat(pkg_file)
except OSError as e:
if e.errno == errno.ENOENT:
# No package.py file here.
continue
elif e.errno == errno.EACCES:
tty.warn("Can't read package file %s." % pkg_file)
continue
raise e
# If it's not a file, skip it.
if stat.S_ISDIR(sinfo.st_mode):
continue
# If it is a file, then save the stats under the
# appropriate key
cache[pkg_name] = sinfo
return cache
def __getitem__(self, item):
return self._packages_to_stats[item]
def __iter__(self):
return iter(self._packages_to_stats)
def __len__(self):
return len(self._packages_to_stats)
class TagIndex(Mapping):
"""Maps tags to list of packages."""
def __init__(self):
self._tag_dict = collections.defaultdict(list)
def to_json(self, stream):
json.dump({'tags': self._tag_dict}, stream)
@staticmethod
def from_json(stream):
d = json.load(stream)
r = TagIndex()
for tag, list in d['tags'].items():
r[tag].extend(list)
return r
def __getitem__(self, item):
return self._tag_dict[item]
def __iter__(self):
return iter(self._tag_dict)
def __len__(self):
return len(self._tag_dict)
def update_package(self, pkg_name):
"""Updates a package in the tag i
|
ytsapras/robonet_site
|
scripts/reception_data.py
|
Python
|
gpl-2.0
| 34,418
| 0.014614
|
import glob
import operational_instruments
from astropy.io import fits
from numpy.fft import fft2, ifft2
import sewpy
from astropy import wcs
from astropy.table import Table
from astropy.io import ascii
from astropy.time import Time
import pytz
import numpy as np
import os
import time
import log_utilities
import datetime
import rome_telescopes_dict
import rome_filters_dict
import shutil
import api_tools
import socket
import config_parser
import pwd
# Django modules had to be removed to make API compatible and run outside
# the docker container. Timezone applied at the API endpoint.
#from django.utils import timezone
class QuantityLimits(object):
def __init__(self):
self.sky_background_median_limit = 10000.0
self.sky_background_std_limit = 200
self.sky_background_minimum = 100
self.sky_background_maximum = 5000
self.minimum_moon_sep = 10
self.minimum_number_of_stars = {'gp': 1000, 'rp': 2000, 'ip' : 4000}
self.maximum_ellipticity = 0.4
self.maximum_seeing = 2.0
class Image(object):
def __init__(self, image_directory, image_output_origin_directory,
image_name, logger ):
self.image_directory = image_directory
self.image_name = image_name
self.origin_directory = image_output_origin_directory
self.logger = logger
self.banzai_bpm = None
self.banzai_catalog = None
try:
images = fits.open(os.path.join(self.image_directory,self.image_name))
for image in images:
try :
if image.header['EXTNAME'] == 'BPM':
self.banzai_bpm = image
logger.info('Loaded the bad pixel mask')
if image.header['EXTNAME'] == 'SCI':
science_image = image
self.data = science_image.data
self.header = science_image.header
self.oldheader = science_image.header.copy()
logger.info('Loaded the science data')
if image.header['EXTNAME'] == 'CAT':
self.banzai_catalog = image
logger.info('Loaded the BANZAI catalogue')
except :
pass
except:
logger.error('I cannot load the image!')
# self.data = science_image.data
# self.header = science_image.header
# self.oldheader = science_image.header.copy()
self.camera = None
self.sky_level = None
self.sky_level_std = None
self.sky_minimum_level = None
self.sky_maximum_level = None
self.number_of_stars = None
self.ellipticity = None
self.seeing = None
self.quality_flags = []
self.thumbnail_box_size = 60
self.field_name = None
self.x_shift = 0
self.y_shift = 0
self.header_date_obs = '1986-04-04T00:00:00.00' #dummy value
self.header_telescope_site = None
self.header_dome_id = None
self.header_group_id = ''
self.header_track_id = ''
self.header_request_id = ''
self.header_object_name = None
self.header_moon_distance = None
self.header_moon_status = False
self.header_moon_fraction = None
self.header_airmass = None
self.header_seeing = None
self.header_ccd_temp = None
self.header_ellipticity = None
self.header_sky_level = None
self.header_sky_temperature = None
self.header_sky_measured_mag = None
self.find_camera()
self.find_object_and_field_name()
self.quantity_limits = QuantityLimits()
def process_the_image(self):
#self.extract_header_statistics()
#self.find_wcs_template()
#self.generate_sextractor_catalog()
#self.
#self.update_image_wcs()
#self.move_frame()
pass
def update_image_wcs(self):
try:
hdutemplate = fits.open(os.path.join(self.template_directory,self.template_name))
templateheader=hdutemplate[0].header
hdutemplate.close()
imageheader=self.header
#STORE OLD FITSHEADER AND ADJUST BASED ON TEMPLATE
imageheader['DPXCORR'] = self.x_shift
imageheader['DPYCORR'] = self.y_shift
imageheader['WCSRFCAT'] = templateheader['WCSRFCAT']
imageheader['RA'] = templateheader['RA']
imageheader['DEC'] = templateheader['DEC']
imageheader['CRPIX1'] = templateheader['CRPIX1']
imageheader['CRPIX2'] = templateheader['CRPIX2']
imageheader['CRVAL1'] = templateheader['CRVAL1']
imageheader['CRVAL2'] = templateheader['CRVAL2']
imageheader['CD1_1'] = templateheader['CD1_1']
imageheader['CD1_2'] = templateheader['CD1_2']
imageheader['CD2_1'] = templateheader['CD2_1']
imageheader['CD2_2'] = templateheader['CD2_2']
imageheader['CRPIX1'] = self.x_new_center
imageheader['CRPIX2'] = self.y_new_center
imageheader['CDELT1'] = templateheader['CDELT1']
imageheader['CDELT2'] = templateheader['CDELT2']
imageheader['CROTA1'] = templateheader['CROTA1']
imageheader['CROTA2'] = templateheader['CROTA2']
imageheader['SECPIX1'] = templateheader['SECPIX1']
imageheader['SECPIX2'] = templateheader['SECPIX2']
imageheader['WCSSEP'] = templateheader['WCSSEP']
self.logger.info('WCS header successfully updated')
except:
self.logger.error('Failed to update the WCS header')
def find_wcs_template(self):
field_name = self.field_name.replace('ROME-','')
template_name = 'WCS_template_' + field_name + '.fits'
thumbnail_name = 'WCS_template_' + field_name + '.thumbnail'
origin_directory = self.origin_directory
template_directory = origin_directory + 'wcs_templates/'
self.template_name = template_name
self.template_directory = template_directory
try:
coord=np.loadtxt(os.path.join(self.template_directory,thumbnail_name))
self.x_center_thumbnail_world=coord[0]
self.y_center_thumbnail_world=coord[1]
except:
self.x_center_thumbnail_world=self.header['CRVAL1']
self.y_center_thumbnail_world=self.header['CRVAL2']
self.logger.info('Extracted WCS information')
def find_camera(self):
try:
self.camera_name = self.image_name[9:13]
self.camera = operational_instruments.define_instrument(self.camera_name)
self.filter = self.header[self.camera.header_dictionnary['filter']]
self.logger.info('Successfully identified the associated camera, '+str(self.camera_name))
except:
self.logger.error('I do not recognise camera '+str(self.camera_name))
def find_object_and_field_name(self):
try:
self.object_name = self.header[self.camera.header_dictionnary['object']]
self.field_name = self.object_name
self.logger.info('Object name is : '+self.object_name)
self.logger.info('And so the assiocated field : '+self.field_name)
except:
self.logger.error('I cannot recognize the object name or/and field name!')
def de
|
wandsdn/RheaFlow
|
RheaFlow/NetlinkProcessor.py
|
Python
|
apache-2.0
| 6,275
| 0.000478
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright (C) 2016 Oladimeji Fayomi, University of Waikato.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Name: NetlinkProcessor.py
# Author: Oladimeji Fayomi
# Created: 25 May 2016
# Last Modified: 17 August 2016
# Version: 1.0
# Description: Listens to netlink messages and notifies the RheaFlow
# application of important netlink messages.
import socket
from pyroute2 import IPDB
import eventlet
from datetime import datetime
from log import log
try:
import cPickle as pickle
except:
import pickle
server_addr = ('127.0.0.1', 55651)
class NetlinkClient(object):
def __init__(self):
self.neighbours = []
self.unresolvedneighbours = []
self.ip = IPDB(ignore_rtables=[254])
self.ip_uuid = self.ip.register_callback(self.callback)
self.server = eventlet.listen(('127.0.0.1', 55652))
self.socket = None
self.serve = True
self.pool = eventlet.GreenPool()
self.not_connect = True
def callback(self, ipdb, msg, action):
if action is 'RTM_NEWNEIGH':
self.add_neighbour(msg)
if action is 'RTM_DELNEIGH':
self.remove_neighbour(msg)
if action is 'RTM_NEWLINK':
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action is 'RTM_DELLINK':
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action is 'RTM_NEWADDR':
log.info("RTM_NEWADDR happened at %s", str(datetime.now()))
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action is 'RTM_DELADDR':
log.info("RTM_DELADDR happened at %s", str(datetime.now()))
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
def add_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
if attributes[1][0] is 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
if host not in self.neighbours:
self.notify(['add_neigh', host])
self.neighbours.append(host)
if ip_addr in self.unresolvedneighbours:
self.unresolvedneighbours = list(filter(lambda x: x !=
ip_addr,
self.unresolvedneighbours)
)
else:
if ip_addr not in self.unresolvedneighbours:
self.unresolvedneighbours.append(ip_addr)
self.notify(['unresolved', self.unresolvedneighbours])
def remove_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
if attributes[1][0] is 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
self.notify(['remove_neigh', host])
self.neighbours = list(filter(
lambda x: x != host, self.neighbours))
def notify(self, rheamsg):
notification = pickle.dumps(rheamsg)
if self.socket is not None:
self.socket.send(notification)
recv = self.socket.recv(8192)
def ifaceTable(self, ipdb):
ifaces = ipdb.by_name.keys()
table = []
for iface in ifaces:
mac_addr = ipdb.interfaces[iface]['address']
ip_addresses = ipdb.interfaces[iface]['ipaddr']
ifindex = ipdb.interfaces[iface]['index']
state = ipdb.interfaces[iface]['operstate']
table.append({'ifname': iface, 'mac-address': mac_addr,
'IP-Addresses': [x for x in ip_addresses],
'ifindex': ifindex,
'state': state})
return table
def neighbourtable(self):
return self.neighbours
def returnunresolvedhost(self):
return self.unresolvedneighbours
def process_requests(self, ipdb, request):
if request[0] == 'ifaceTable':
res = self.ifaceTable(ipdb)
result = ['ifaceTable', res]
return pickle.dumps(result)
if request[0] == 'neighbourtable':
res = self.neighbourtable()
result = ['neighbourtable', res]
return pickle.dumps(result)
if request[0] == 'get_unresolved':
res = self.returnunresolvedhost()
result = ['unresolved', res]
return pickle.dumps(result)
def handle_request(self, sock):
is_active = True
while is_active:
received = sock.recv(8192)
if len(received) != 0:
request = pickle.loads(received)
response = self.process_requests(self.ip, request)
sock.send(response)
if len(received) == 0:
is_active = False
sock.close()
sock.close()
def try_connect(self):
while self.not_connect:
try:
self.socket = eventlet.connect(('127.0.0.1', 55651))
except socket.error as e:
pass
else:
self.not_connect = False
def serve_forever(self):
while self.serve:
nl_sock, address = self.server.accept()
self.pool.spawn_n(self.handle_request, nl_sock)
log.info("Rhea has contacted us")
self.try_connect()
if __name__ == "__main__":
nlclient = NetlinkClient()
nlclient.serve_forever()
|
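A minimal client sketch (not part of the original module) showing how a peer process could query the request socket that handle_request() serves; the address, port and the 'ifaceTable' key come from the code above, everything else is illustrative.
import pickle
import eventlet
# Connect to the request socket opened by NetlinkClient.serve_forever().
sock = eventlet.connect(('127.0.0.1', 55652))
sock.send(pickle.dumps(['ifaceTable']))     # any key handled by process_requests()
reply = pickle.loads(sock.recv(8192))       # e.g. ['ifaceTable', [...interface dicts...]]
print(reply[0], len(reply[1]))
sock.close()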
erudit/eruditorg
|
eruditorg/apps/public/auth/urls.py
|
Python
|
gpl-3.0
| 2,483
| 0.003222
|
# -*- coding: utf-8 -*-
from django.urls import re_path
from django.contrib.auth import views as auth_views
from django.utils.translation import gettext_lazy as _
from django.urls import reverse_lazy
from . import forms
from . import views
app_name = "auth"
urlpatterns = [
# Sign in / sign out
re_path(
_(r"^connexion/$"),
views.LoginView.as_view(
template_name="public/auth/login.html", authentication_form=forms.Authent
|
icationForm
),
name="login",
),
re_path(_(r"^deconnexion/$"), auth_views.LogoutView.as_view(next_page="/"), name="logout"),
re_path(_(r"^bienvenue/$"), views.UserLoginLandingRedirectView.as_view(), name="landing"),
# Parameters & personal data
re_path(
_(r"^donnees-personnelles/$"),
views.UserPersonalDataUpdateView.as_view(),
name="personal_data",
),
re_path(_(r"^parametres
|
/$"), views.UserParametersUpdateView.as_view(), name="parameters"),
# Password change
re_path(_(r"^mot-de-passe/$"), views.UserPasswordChangeView.as_view(), name="password_change"),
# Password reset
re_path(
_(r"^mot-de-passe/reinitialisation/$"),
auth_views.PasswordResetView.as_view(
template_name="public/auth/password_reset_form.html",
email_template_name="emails/auth/password_reset_registered_email.html",
subject_template_name="emails/auth/password_reset_registered_email_subject.txt",
form_class=forms.PasswordResetForm,
success_url=reverse_lazy("public:auth:password_reset_done"),
),
name="password_reset",
),
re_path(
_(r"^mot-de-passe/reinitialisation/termine/$"),
auth_views.PasswordResetDoneView.as_view(
template_name="public/auth/password_reset_done.html"
),
name="password_reset_done",
),
re_path(
_(r"^reinitialisation/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]+-[0-9A-Za-z]+)/$"),
auth_views.PasswordResetConfirmView.as_view(
template_name="public/auth/password_reset_confirm.html",
success_url=reverse_lazy("public:auth:password_reset_complete"),
),
name="password_reset_confirm",
),
re_path(
_(r"^reinitialisation/termine/$"),
auth_views.PasswordResetCompleteView.as_view(
template_name="public/auth/password_reset_complete.html"
),
name="password_reset_complete",
),
]
|
mjdarby/RogueDetective
|
dialogue.py
|
Python
|
gpl-2.0
| 2,401
| 0.016243
|
# This stores all the dialogue related stuff
import screen
class Dialogue(object):
"""Stores the dialogue tree for an individual NPC"""
def __init__(self, npc):
super(Dialogue, self).__init__()
self.npc = npc
self.game = npc.game
self.root = None
self.currentNode = None
def setRootNode(self, node):
self.root = node
def resetCurrentNode(self):
self.currentNode = self.root
def beginConversation(self):
self.resetCurrentNode()
self.runNextNode()
def runNextNode(self):
if self.currentNode is None:
return
# Grab all the DialogueChoices that should be shown
availableChoices = []
for (choice, predicate, child) in self.currentNode.choices:
            if predicate is not None:
                if predicate():
                    availableChoices.append((choice, child))
else:
availableChoices.append((choice, child))
npcName = None
        if self.game.player.notebook.isNpcKnown(self.npc):
npcName = self.npc.firstName + " " + self.npc.lastName
choiceTexts = [choice.choiceText for (choice, child) in availableChoices]
screen.printDialogueChoices(self.game.screen, self.game.player,
choiceTexts, npcName)
choiceIdx = self.game.getDialogueChoice(len(choiceTexts)) - 1
self.game.draw()
(choice, nextNode) = availableChoices[choiceIdx]
response = ""
response += choice.response
if choice.responseFunction is not None:
response = choice.responseFunction(self.npc, response)
self.game.printDescription(response, npcName)
self.currentNode = nextNode
self.runNextNode()
class DialogueNode(object):
"""A single node of the dialogue tree"""
def __init__(self):
super(DialogueNode, self).__init__()
self.choices = []
def addChoice(self, choice, choicePredicate=None, childNode=None):
self.choices.append((choice, choicePredicate, childNode))
class DialogueChoice(object):
"""Stores the choice/function pair"""
def __init__(self, choiceText, response, responseFunction=None):
super(DialogueChoice, self).__init__()
self.choiceText = choiceText
self.response = response
self.responseFunction = responseFunction
def callResponseFunction(self, npcArgument, response):
        if self.responseFunction is not None:
self.responseFunction(npcArgument, response)
|
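A hedged sketch of wiring a small tree with the classes above; the npc object (and the game/screen plumbing it carries) is assumed to exist elsewhere in the game, so the names below are placeholders.
root = DialogueNode()
follow_up = DialogueNode()
root.addChoice(DialogueChoice("Who are you?", "Just a traveller passing through."),
               None, follow_up)
follow_up.addChoice(DialogueChoice("Goodbye.", "Safe travels."))
dialogue = Dialogue(npc)        # npc: placeholder for an NPC instance from the game
dialogue.setRootNode(root)
dialogue.beginConversation()    # walks the tree, prompting the player at each node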
uniflex/uniflex
|
uniflex/core/timer.py
|
Python
|
mit
| 1,583
| 0
|
import threading
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "[email protected]"
class Timer(object):
def __init__(self, handler_):
assert callable(handler_)
super().__init__()
self._handler = handler_
self._event = threading.Event()
self._thread = None
def start(self, interval):
"""interval is in seconds"""
if self._thread:
self.cancel()
self._event.clear()
self._thread = threading.Thread(target=self._timer, args=[interval])
self._thread.setDaemon(True)
self._thread.start()
def cancel(self):
if (not self._thread) or (not self._thread.is_alive()):
return
self._event.set()
# self._thread.join()
self._thread = None
def is_running(self):
return self._thread is not None
    def _timer(self, interval):
# Avoid cancellation during execution of self._callable()
cancel = self._event.wait(interval)
if cancel:
return
self._handler()
class TimerEventSender(Timer):
# timeout handler is called by timer thread context.
    # So, to move execution to the application's event thread,
# post the event to the application
def __init__(self, app, ev_cls):
super(TimerEventSender, self).__init__(self._timeout)
self._app = app
self._ev_cls = ev_cls
def _timeout(self):
self._app.send_event(self._ev_cls())
|
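A brief usage sketch for the Timer class above; the handler and the delays are arbitrary examples.
import time
def on_timeout():
    print("timer fired")
t = Timer(on_timeout)
t.start(0.5)       # arm: fire once after 0.5 s unless cancelled
time.sleep(1.0)    # let it expire -> prints "timer fired"
t.start(5.0)       # re-arm with a longer interval
t.cancel()         # cancelling before expiry suppresses the handler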
tommyp1ckles/hippocrates
|
server.py
|
Python
|
mit
| 742
| 0.006739
|
from flask import Flask
from flask import render_template
import euclid
import queue_constants
import ast
import redis
import threading
REDIS_ADDRESS = "localhost"
REDIS_PORT = 6379
REDIS_DB = 0
app = Flask(__name__)
@app.route("/")
def monitor():
queue = redis.StrictRedis(host=REDIS_ADDRESS, port=REDIS_PORT, db=REDIS_DB)
nstatus = []
status = ast.literal_eval(queue.get(queue_constants.NODE_KEY).decode())
for s in status:
nstatus.append({"name":s, "status":status[s]["status"]})
return render_template('monitor.html', status=nstatus)
if __name__ == "__main__":
euclidThread = threading.Thread(target=euclid.main)
euclidThread.setDaemon(True)
euclidThread.start()
app.run(host='0.0.0.0')
|
interactiveaudiolab/nussl
|
nussl/core/__init__.py
|
Python
|
mit
| 1,293
| 0.000773
|
"""
Core
====
AudioSignals
------------
.. autoclass:: nussl.core.AudioSignal
:members:
:autosummary:
Masks
-----
.. automodule:: nussl.core.masks
:members:
:autosummary:
Constants
------------
.. automodule:: nussl.core.constants
:members:
:autosummary:
External File Zoo
-----------------
.. automodule:: nussl.core.efz_utils
:members:
:autosummary:
General utilities
-----------------
.. automodule:: nussl.core.utils
:members:
:autosummary:
Audio effects
-------------
.. automodule:: nussl.core.effects
:members:
:autosummary:
Mixing
------
.. automodule:: nussl.core.mixing
:members:
:autosummary:
Playing and embedding audio
---------------------------
.. automodule:: nussl.core.play_utils
:members:
:autosummary:
Checkpoint migration (backwards compatability)
----------------------------------------------
.. automodule:: nussl.core.migration
:members:
:autosummary:
"""
from .audio_signal import AudioSignal, STFTParams
from . import constants
from . import efz_utils
from . import play_utils
from . import utils
from . import mixing
from . import masks
__all__ = [
    'AudioSignal',
'STFTParams',
'constants',
'efz_utils',
'play_utils',
'utils',
    'mixing',
'masks',
]
|
KyoHS/Python
|
SingleThreadPortScan.py
|
Python
|
gpl-2.0
| 1,536
| 0.009115
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: S.H.
Version: 0.1
Date: 2015-01-17
Description:
Scan ip:
74.125.131.0/24
74.125.131.99-125
74.125.131.201
Only three format above.
    Read IPs from ip.txt, and scan all ports (or a list of ports).
"""
import os
import io
import socket
fileOpen = open("ip.txt", 'r')
fileTemp = open("temp.txt", 'a')
for line in fileOpen.readlines():
if line.find("-") != -1:
list = line[:line.index("-")]
ip = [int(a) for a in list.split(".")]
b = int(line[line.index("-")+1:])
for i in range(ip[3], b+1):
fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
elif line.find("/") != -1:
list = line[:line.index("/")]
ip = [int(a) for a in list.split(".")]
for i in range(256):
fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
else:
fileTemp.write(line)
fileTemp.close()
fileOpen.close()
# print("process is here.")
f = open("temp.txt", 'r')
print("===Scan Staring===")
for line in f.readlines():
    hostIP = socket.gethostbyname(line.strip())
# print(hostIP)
# for port in range(65535):
portList = [80, 8080]
for port in portList:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((hostIP, port))
if result == 0:
print("Port {} is OPEN on:\t\t\t {}".format(port, hostIP))
else:
print("Port {} is NOT open on {}".format(port, hostIP))
sock.close()
f.close()
os.remove("temp.txt")
print("===Scan Complement===")
|
chrsrds/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 9,848
| 0
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : array-like, shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
        covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
        self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y
            not used, present for API consistency purposes.
Returns
-------
self : object
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y
            not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean
|
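A short usage sketch (toy data, not taken from the scikit-learn docs) for the module-level helpers defined above.
X = np.random.RandomState(0).randn(200, 3)           # 200 samples, 3 features
emp_cov = empirical_covariance(X, assume_centered=False)
precision = linalg.pinvh(emp_cov)                     # invert the MLE covariance
print(log_likelihood(emp_cov, precision))             # sample mean of the log-likelihood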
RedHatInsights/insights-core
|
insights/parsers/ls_var_run.py
|
Python
|
apache-2.0
| 1,120
| 0
|
"""
LsVarRun - command ``ls -lnL /var/run``
=======================================
The ``ls -lnL /var/run`` command provides information for the listing of the
``/var/run`` directory.
Sample input is shown in the Examples. See ``FileListing`` class for
additional information.
Sample directory list::
total 20
drwx--x---. 2 0 984 40 May 15 09:29 openvpn
drwxr-xr-x. 2 0 0 40 May 15 09:30 plymouth
drwxr-xr-x. 2 0 0 40 May 15 09:29 ppp
drwxr-xr-x. 2 75 75 40 May 15 09:29 radvd
-rw-r--r--. 1 0 0 5 May 15 09:30 rhnsd.pid
drwxr-xr-x. 2 0 0 60 May 30 09:31 rhsm
drwx------. 2 32 32 40 May 15 09:29 rpcbind
-r--r--r--. 1 0 0 0 May 17 16:26 rpcbind.lock
Examples:
>>> "rhnsd.pid" in ls_var_run
False
>>> "/var/run" in ls_var_run
True
>>> ls_var_run.dir_entry('/var/run', 'openvpn')['type']
'd'
"""
from insights.specs import Specs
from .. import FileListing
from .. import parser
@parser(Specs.ls_var_run)
class LsVarRun(FileListing):
"""Parses output of ``ls -lnL /var/run`` command."""
pass
|
cryptobanana/ansible
|
docs/bin/plugin_formatter.py
|
Python
|
gpl-3.0
| 26,677
| 0.002774
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <[email protected]> and others
# (c) 2017 Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import glob
import optparse
import os
import re
import sys
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from pprint import PrettyPrinter
try:
from html import escape as html_escape
except ImportError:
# Python-3.2 or later
import cgi
def html_escape(text, quote=True):
return cgi.escape(text, quote)
import jinja2
import yaml
from jinja2 import Environment, FileSystemLoader
from six import iteritems, string_types
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.utils import plugin_docs
from ansible.utils.display import Display
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TOO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = b" (D)"
pp = PrettyPrinter()
display = Display()
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`module_docs/' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = html_escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
def write_data(text, output_dir, outputname, module=None):
''' dumps module output to a file or the screen, as requested '''
if output_dir is not None:
if module:
outputname = outputname % module
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fname = os.path.join(output_dir, outputname)
fname = fname.replace(".py", ""
|
)
with open(fname, 'wb') as f:
f.write(to_bytes(text))
else:
print(text)
def get_plugin_info(module_dir, limit_to=None, verbose=False):
'''
Returns information about plugins and the categories that they belong to
:arg module_dir: file system path to the top of the plugin directory
:kwarg limit_to: If given, this is a list of plugin names to
generate information for. All other plugins will be ignored.
:returns: Tuple of two dicts containing module_info, categories, and
aliases and a set listing deprecated modules:
:module_info: mapping of module names to information about them. The fields of the dict are:
:path: filesystem path to the module
:deprecated: boolean. True means the module is deprecated otherwise not.
:aliases: set of aliases to this module name
:metadata: The modules metadata (as recorded in the module)
:doc: The documentation structure for the module
:examples: The module's examples
:returndocs: The module's returndocs
:categories: maps category names to a dict. The dict contains at
least one key, '_modules' which contains a list of module names in
that category. Any other keys in the dict are subcategories with
the same structure.
'''
categories = dict()
module_info = defaultdict(dict)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
# * One glob level for every module level that we're going to traverse
files = (
glob.glob("%s/*.py" % module_dir) +
glob.glob("%s/*/*.py" % module_dir) +
glob.glob("%s/*/*/*.py" % module_dir) +
glob.glob("%s/*/*/*/*.py" % module_dir)
)
for module_path in files:
# Do not list __init__.py files
if module_path.endswith('__init__.py'):
continue
# Do not list blacklisted modules
module = os.path.splitext(os.path.basename(module_path))[0]
if module in plugin_docs.BLACKLIST['MODULE'] or module == 'base':
continue
# If requested, limit module documentation building only to passed-in
# modules.
if limit_to is not None and module.lower() not in limit_to:
continue
deprecated = False
if module.startswith("_"):
if os.path.islink(module_path):
# Handle aliases
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_", "", 1)
aliases = module_info[source].get('aliases', set())
aliases.add(module)
# In case we just created this via get()'s fallback
module_info[source]['aliases'] = aliases
continue
else:
# Handle deprecations
module = module.replace("_", "", 1)
deprecated = True
#
# Regular module to process
#
category = categories
# Start at the second directory because we don't want the "vendor"
mod_path_only = os.path.dirname(module_path[len(module_dir):])
module_categories = []
# build up the categories that this module belongs to
for new_cat in mod_path_only.split('/')[1:]:
if new_cat not in category:
category[new_cat] = dict()
category[new_cat]['_modules'] = []
module_categories.append(new_cat)
category = category[new_cat]
category['_modules'].append(module)
# the category we will use in links (so list_of_all_plugins can point to plugins/action_plugins/*'
if module_categories:
primary_category = module_categories[0]
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs, metadata = plugin_docs.get_docstring(module_path
|
menpo/vrml97
|
vrml/weaktuple.py
|
Python
|
bsd-3-clause
| 4,708
| 0.015718
|
"""tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
Because WeakTuple's are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
def __init__( self, sequence=() ):
"""Initialize the tuple
The WeakTuple will store weak references to objects
within the sequence.
"""
super( WeakTuple, self).__init__( map( self.wrap, sequence))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
    def __ge__( self, sequence ):
        """Compare the tuple to another (>=)"""
        return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
|
igor-toga/local-snat
|
neutron/api/v2/base.py
|
Python
|
apache-2.0
| 33,385
| 0.00012
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import six
import webob.exc
from neutron._i18n import _, _LE, _LI
from neutron.api import api_common
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
@property
def plugin(self):
return self._plugin
@property
def resource(self):
return self._resource
@property
def attr_info(self):
return self._attr_info
@property
def member_actions(self):
return self._member_actions
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._notifier = n_rpc.get_notifier('network')
        self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
# Native pagination need native sorting support
if not self._native_sorting:
raise exceptions.Invalid(
_("Native pagination depend on native sorting")
)
            if not self._allow_sorting:
LOG.info(_LI("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
self.parent = parent
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in six.iteritems(self._attr_info):
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
return api_common.is_native_pagination_supported(self._plugin)
def _is_native_sorting_supported(self):
return api_common.is_native_sorting_supported(self._plugin)
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in six.iteritems(data)
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
@db_api.retry_db_errors
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
parent_id = kwargs.get(self._parent_id_name)
resource = self._item(request,
|
mazvv/travelcrm
|
travelcrm/forms/leads_offers.py
|
Python
|
gpl-3.0
| 1,762
| 0
|
# -*-coding: utf-8 -*-
import colander
from . import (
SelectInteger,
ResourceSchema,
BaseForm,
BaseSearchForm,
)
from ..resources.leads_offers import LeadsOffersResource
from ..models.lead_offer import LeadOffer
from ..models.currency import Currency
from ..models.supplier import Supplier
from ..models.service import Service
from ..lib.qb.leads_offers import LeadsOffersQueryBuilder
from ..lib.utils.security_utils import get_auth_employee
class _LeadOfferSchema(ResourceSchema):
service_id = colander.SchemaNode(
SelectInteger(Service),
)
supplier_id = colander.SchemaNode(
SelectInteger(Supplier),
)
currency_id = colander.SchemaNode(
SelectInteger(Currency),
)
price = colander.SchemaNode(
colander.Money(),
)
status = colander.SchemaNode(
colander.String(),
)
descr = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
)
class LeadOfferForm(BaseForm):
_schema = _LeadOfferSchema
def submit(self, lead_offer=None):
if not lead_offer:
lead_offer = LeadOffer(
resource=LeadsOffersResource.create_resource(
get_auth_employee(self.request)
)
)
lead_offer.service_id = self._controls.get('service_id')
lead_offer.currency_id = self._controls.get('currency_id')
lead_offer.supplier_id = self._controls.get('supplier_id')
lead_offer.price = self._controls.get('price')
lead_offer.status = self._controls.get('status')
lead_offer.descr = self._controls.get('descr')
return lead_offer
class LeadOfferSearchForm(BaseSearchForm):
_qb = LeadsOffersQueryBuilder
|
Yelp/kafka-utils
|
tests/acceptance/steps/common.py
|
Python
|
apache-2.0
| 2,265
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from behave import given
from behave import when
from steps.util import create_consumer_group
from steps.util import create_random_group_id
from steps.util import create_random_topic
from steps.util import initialize_kafka_offsets_topic
from steps.util import produce_example_msg
PRODUCED_MSG_COUNT = 82
CONSUMED_MSG_COUNT = 39
@given(u'we have an existing kafka cluster with a topic')
def step_impl1(context):
context.topic = create_random_topic(1, 1)
@given(u'we have a kafka consumer group')
def step_impl2(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
)
@when(u'we produce some number of messages into the topic')
def step_impl3(context):
produce_example_msg(context.topic, num_messages=PRODUCED_MSG_COUNT)
context.msgs_produced = PRODUCED_MSG_COUNT
@when(u'we consume some number of messages from the topic')
def step_impl4(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
num_messages=CONSUMED_MSG_COUNT,
)
context.msgs_consumed = CONSUMED_MSG_COUNT
@given(u'we have initialized kafka offsets storage')
def step_impl5(context):
initialize_kafka_offsets_topic()
@given(u'we have an existing kafka cluster')
def step_impl6(context):
pass
@given(u'we have an existing kafka cluster with multiple topics')
def step_impl7(context):
context.topic = []
context.topic.append(create_random_topic(1, 1, 'abcde'))
context.topic.append(create_random_topic(1, 1, 'abcd'))
|
MrYevral/YevBot
|
Alternate/MasterBot.py
|
Python
|
gpl-3.0
| 586
| 0.020478
|
'''This file will take arguments from the command line, if none are found it
will look for a .bot file, if that isn't found it will prompt the user for auth
tokens :- with this information the masterbot will connect to its own twitch channel and await a !connect command'''
#Author MrYevral
#check for .bot file in current directory
import os
import sys
def getBotInfo():
if len(sys.argv) > 2:
if sys.argv[2] == '-c':
newBotFile()
else:
print "incorrect use of flags please use -c for creating a b
|
ot"
'''for file in os.listdir("."):
if file.endswith(".bot"):
print file'''
|
caioserra/apiAdwords
|
examples/adspygoogle/dfp/v201306/deactivate_placements.py
|
Python
|
apache-2.0
| 2,253
| 0.003107
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example deactivates all active placements. To determine which
placements exist, run get_all_placements.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201306')
# Create query.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
query = 'WHERE status = :status'
# Get placements by statement.
placements = DfpUtils.GetAllEntitiesByStatementWithService(
placement_service, query=query, bind_vars=values)
for placement in placements:
print ('Placement with id \'%s\', name \'%s\', and status \'%s\' will be '
'deactivated.' % (placement['id'], placement['name'],
                           placement['status']))
print 'Number of placements to be deactivated: %s' % len(placements)
# Perform action.
result = placement_service.PerformPlacementAction(
{'type': 'DeactivatePlacements'}, {'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of placements deactivated: %s' % result['numChanges']
else:
print 'No placements were deactivated.'
|
mammadori/asteroids
|
game/ship.py
|
Python
|
bsd-3-clause
| 3,293
| 0.004555
|
# -*- coding: utf-8 *-*
from pyglet.window import key
from pyglet import clock
from . import util, physicalobject
from . import resources
class Ship(physicalobject.PhysicalObject):
"""A class for the player"""
def __init__(self, thrust_image=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set some easy-to-tweak constants
# play values
self.rotate_speed = 170.0
self.bullet_speed = 500.0
self.thrust_acc = 500
self.friction = 0.95
self.bullet_duration = 0.6
self.thrust = False
self.thrust_image = thrust_image
self.normal_image = self.image
self.bullets = set() # FIXME: bullet by OOT
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.shoot()
elif symbol == key.LEFT:
self.turn(-1)
elif symbol == key.RIGHT:
self.turn(1)
elif symbol == key.UP:
self.set_thrust(True)
def on_key_release(self, symbol, modifiers):
if symbol in (key.LEFT, key.RIGHT):
self.turn(0)
elif symbol == key.UP:
self.set_thrust(False)
def update(self, dt):
super().update(dt)
if self.thrust and self.thrust_image:
self.image = self.thrust_image
else:
self.image = self.normal_image
# update velocity
if self.thrust:
acc = util.angle_to_vector(self.rotation)
for i in (0,1):
self.vel[i] += acc[i] * self.thrust_acc * dt
# add friction
for i in (0,1):
self.vel[i] *= (1 - self.friction * dt)
for bullet in set(self.bullets):
if bullet.update(dt):
self.bullets.remove(bullet)
return False
def set_thrust(self, on):
self.thrust = on
if on:
resources.thrust_sound.seek(0)
resources.thrust_sound.play()
else:
resources.thrust_sound.pause()
def turn(self, clockwise):
self.rotation_speed = clockwise * self.rotate_speed
def shoot(self):
resources.bullet_sound.play()
forward = util.angle_to_vector(self.rotation)
bullet_pos = [self.x + self.radius * forward[0], self.y + self.radius * forward[1]]
        bullet_vel = [self.vel[0] + self.bullet_speed * forward[0], self.vel[1] + self.bullet_speed * forward[1]]
bullet = physicalobject.PhysicalObject(lifespan=self.bullet_duration, vel=bullet_vel, x=bullet_pos[0], y=bullet_pos[1],
img=resources.shot_image, batch=self.batch, group=self.group, screensize=self.screensize)
self.bullets.add(bullet)
def destroy(self):
# check invulnerability
if self.opacity != 255:
return
explosion = super().destroy()
self.rotation = -90
self.x = self.screensize[0] / 2
self.y = self.screensize[1] / 2
self.vel = [0, 0]
self.set_thrust(False)
self.visible = True
return explosion
def normal_mode(self, dt):
self.opacity = 255
def invulnerable(self, time):
# be invulnerable for a brief time
self.opacity = 128
clock.schedule_once(self.normal_mode, time)
|
zepheira/freemix
|
freemix/exhibit/admin.py
|
Python
|
apache-2.0
| 632
| 0.011076
|
from django.contrib import admin
from freemix.exhibit import models
class CanvasAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
    search_fields = ('title', 'description',)
admin.site.register(models.Canvas, CanvasAdmin)
class ExhibitAdmin(admin.ModelAdmin):
list_display = ('slug', 'owner',)
    search_fields = ('slug', 'title', 'description', 'owner__username')
admin.site.register(models.Exhibit, ExhibitAdmin)
class ThemeAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
search_fields = ('title', 'description',)
admin.site.register(models.Theme, ThemeAdmin)
|
laserkelvin/FTSpecViewer
|
setup.py
|
Python
|
gpl-3.0
| 214
| 0
|
import os
requirements = ["numpy", "scipy", "pandas",
"matplotlib", "peakutils", "uncertainties",
"pyqtgraph"]
for package in requirements:
os.system("pip install " + packag
|
e)
|
NetworkAutomation/jaide
|
jaide/core.py
|
Python
|
gpl-2.0
| 43,240
| 0.000046
|
"""
This core.py module is part of the Jaide (Junos Aide) package.
It is free software for use in manipulating junos devices. To immediately get
started, take a look at the example files for implementation
guidelines. More information can be found at the github page found here:
https://github.com/NetworkAutomation/jaide
"""
# This is for modifying printed output (used for --scp to rewrite the same line
# multiple times.) It is required to be at the top of the file.
from __future__ import print_function
# standard modules.
from os import path
import time
import difflib
# from lxml import etree, objectify
# needed to parse strings into xml for cases when ncclient doesn't handle
# it (commit, validate, etc)
import xml.etree.ElementTree as ET
import logging # logging needed for disabling paramiko logging output
# intra-Jaide imports
from errors import InvalidCommandError
from utils import clean_lines, xpath
# network modules for device connections
try:
from ncclient import manager
from scp import SCPClient
import paramiko
except ImportError as e:
print("FAILED TO IMPORT ONE OR MORE PACKAGES.\n"
"NCCLIENT\thttps://github.com/leopoul/ncclient/\n"
"PARAMIKO\thttps://github.com/paramiko/paramiko\n"
"SCP\t\thttps://pypi.python.org/pypi/scp/0.8.0")
print('\nImport Error:\n')
raise e
class Jaide():
""" Purpose: An object for manipulating a Junos device.
Methods include copying files, running show commands,
shell commands, commit configuration changes, finding
interface errors, and getting device status/information.
All of the methods listed below that touch Junos are wrapped by a
decorator function @check_instance, which handles ensuring the correct
connection is used to perform the requested operation.
"""
def __init__(self, host, username, password, connect_timeout=5,
session_timeout=300, connect="paramiko", port=22):
""" Initialize the Jaide object.
Purpose: This is the initialization function for the Jaide class,
| which creates a connection to a junos device. It will
| return a Jaide object, which can then be used to actually
| send commands to the device. This function establishes the
                | connection to the device via a NCClient manager object.
                | > **NOTE:** The connect parameter should be ignored under most
| > circumstances. Changing it only affects how Jaide first
| > connects to the device. The decorator function
| > @check_instance will handle moving between session
| > types for you.
@param host: The IP or hostname of the device to connect to.
@type host: str
@param username: The username for the connection
@type username: str
@param password: The password for the connection
@type password: str
@param connect_timeout: The timeout value, in seconds, for attempting
| to connect to the device.
@type connect_timeout: int
@param session_timeout: The timeout value, in seconds, for the
| session. If a command is sent and nothing
| is heard back from the device in this
| timeframe, the session is declared dead,
| and times out.
@type session_timeout: int
@param connect: **NOTE: We default to 'paramiko', but this
| parameter can be set to False to prevent connecting
| on object instantiation. The @check_instance
| decorator function will handle sliding between
| session types depending on what function is being
| called, meaning generally self.conn_type and this
| connect parameter should be ignored.**
|
| The connection type that should be made. Several
| options are available: 'ncclient', 'scp', and
| 'paramiko', 'shell' and 'root'.
|
| 'paramiko' : is used for operational commands
| (couldn't use ncclient because of lack of pipes `|`
| support.
|
| 'scp' : is used for copying files to/from
| the device, and uses an SCP connection.
|
| 'shell' : is for sending shell commands.
|
| 'root' : is when the user is doing operational
| commands, but is logged in as root, (requires
| handling separately, since this puts the session
| into a shell prompt)
|
| 'ncclient' : is used for all other commands.
@type connect: str
@param port: The destination port on the device to attempt the
| connection.
@type port: int
@returns: an instance of the Jaide class
@rtype: jaide.Jaide object
"""
# store object properties and set initial values.
self.host = host.strip()
self.port = port
self.username = username
self.password = password
self.session_timeout = session_timeout
self.connect_timeout = connect_timeout
self._shell = ""
self._scp = ""
self.conn_type = connect
self._in_cli = False
self._filename = None
# make the connection to the device
if connect:
self.connect()
def check_instance(function):
""" Wrapper that tests the type of _session.
Purpose: This decorator function is used by all functions within
| the Jaide class that interact with a device to ensure the
| proper session type is in use. If it is not, it will
| attempt to migrate _session to that type before moving
| to the originally requested function.
| > **NOTE:** This function is a decorator, and should not be
| > used directly. All other methods in this class that touch
| > the Junos device are wrapped by this function to ensure the
| > proper connection type is used.
@param function: the function that is being wrapped around
@type function: function
@returns: the originally requested function
@rtype: function
"""
def wrapper(self, *args, **kwargs):
func_trans = {
"commit": manager.Manager,
"compare_config": manager.Manager,
"commit_check": manager.Manager,
"device_info": manager.Manager,
"diff_config": manager.Manager,
"health_check": manager.Manager,
"interface_errors": manager.Manager,
"op_cmd": paramiko.client.SSHClient,
"shell_cmd": paramiko.client.SSHClient,
"scp_pull": paramiko.client.SSHClient,
"scp_push": paramiko.client.SSHClient
}
# when doing an operational command, logging in as root
# brings you to shell, so we need to enter the device as a shell
# connection, and move to cli to perform the command
# this is a one-off because the isinstance() check will be bypassed
if self.username == "root" and function.__name__ == "op_cmd":
if not self._session:
self.conn_type = "paramiko"
self.connect()
if not self._shell:
self.conn_type = "root"
self.connect()
self.shell_to_cli() # check if we're in the cli
# Have to call shell command separately, since we are using _shell
# for comparison, not _sessio
|
simonsmiley/iliasCorrector
|
iliasCorrector/models.py
|
Python
|
mit
| 2,030
| 0.002463
|
from iliasCorrector import db
def _split_ident(ident):
data = ident.split('_')
matr = int(data[-1])
last = data[0]
first = ' '.join(data[1:-2])
return first, last, matr
class Exercise(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
path = db.Column(db.String(256), unique=True)
submissions = db.relationship('Submission', backref='exercise', lazy='dynamic')
@property
def num_corrected(self):
return len(self.submissions.filter(Submission.grade != None).all())
@property
def num_submissions(self):
return len(self.submissions.all())
@property
def num_to_correct(self):
return len(self.submissions.filter_by(grade=None).all())
def __repr__(self):
return '<Exercise {}>'.format(self.name)
class Submission(db.Model):
id = db.Column(db.Integer, primary_key=True)
    grade = db.Column(db.Float)
exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'))
student_ident = db.Column(db.String(256))
files = db.relationship('File', backref='submission', lazy='dynamic')
remarks = db.Column(db.Text)
def __repr__(self):
return '<Submission of {} for exercise {}>'.format(self.student_ident,
self.exercise)
@property
def first_name(self):
return _split_ident(self.student_ident)[0]
@property
def last_name(self):
return _split_ident(self.student_ident)[1]
@property
def student(self):
return '{}, {}'.format(self.last_name, self.first_name)
@property
def matriculation_number(self):
return _split_ident(self.student_ident)[2]
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
path = db.Column(db.String(256))
submission_id = db.Column(db.Integer, db.ForeignKey('submission.id'))
def __repr__(self):
return '<File {}>'.format(self.name)
|
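A hedged example of the ident convention _split_ident() assumes; the sample string below is made up, not taken from the project.
first, last, matr = _split_ident('Mustermann_Max_mmustermann_1234567')
print(first, last, matr)    # -> Max Mustermann 1234567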
alexvlis/shape
|
nnmath.py
|
Python
|
gpl-3.0
| 518
| 0.021236
|
import numpy as np
tansig = lambda n: 2 / (1 + np.exp(-2 * n)) - 1
sigmoid = lambda n: 1 / (1 + np.exp(-n))
hardlim = lambda n: 1 if n >= 0 else 0
purelin = lambda n: n
relu = lambda n: np.fmax(0, n)
square_error = lambda x, y: np.sum(0.5 * (x - y)**2)
sig_prime = lambda z: sigmoid(z) * (1 - sigmoid(z))
relu_prime = lambda z: np.where(z > 0, 1.0, 0.0)  # ReLU derivative: 1 for positive inputs, else 0
softmax = lambda n: np.exp(n)/np.sum(np.exp(n))
softmax_prime = lambda n: softmax(n) * (1 - softmax(n))
cross_entropy = lambda x, y: -np.dot(x, np.log(y))
|
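A short usage sketch for the helpers above; the input vector is an arbitrary example.
x = np.array([-1.0, 0.0, 2.0])
print(sigmoid(x))                          # element-wise logistic
print(relu(x))                             # [0. 0. 2.]
print(softmax(x), softmax(x).sum())        # probabilities that sum to 1.0
target = np.array([0.0, 0.0, 1.0])
print(cross_entropy(target, softmax(x)))   # scalar loss against a one-hot target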
jskksj/cv2stuff
|
cv2stuff/tests/test_exception.py
|
Python
|
isc
| 307
| 0.006515
|
import pytest
def test_zero_division():
with pytest.raises(ZeroDivisionError):
1 / 0
def test_recursion_depth():
with pytest.raises(RuntimeError) as excinfo:
def f():
f()
f()
    assert 'maximum recursion depth exceeded' in str(excinfo.value)
|
bath-hacker/binny
|
binny/collector/models.py
|
Python
|
mit
| 297
| 0.006734
|
from __future__ import unicode_literals
from django.db import models
from db.models import Bin
class CollectionEntry(models.Model):
bin_obj = models.ForeignKey(Bin, related_name='requested_bins')
fullness = models.IntegerField()
    date_added = models.DateTimeField(auto_now_add=True)
|
weso/CWR-DataApi
|
tests/grammar/config/test_options.py
|
Python
|
mit
| 919
| 0
|
# -*- coding: utf-8 -*-
import unittest
from cwr.grammar.factory.config import rule_options
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestConfigOptions(unittest.TestCase):
def setUp(self):
self._rule = rule_options
def test_zero_options(self):
line = '()'
result = self._rule.parseString(line)
self.assertEqual(1, len(result))
self.assertEqual('', result[0])
def test_one_options(self):
line = '(option2)'
result = self._rule.parseString(line)
        self.assertEqual(1, len(result))
        self.assertEqual('option2', result[0])
def test_two_options(self):
line = '(option1, option2)'
result = self._rule.parseString(line)
self.assertEqual(2, len(result))
self.assertEqual('option1', result[0])
self.assertEqual('option2', result[1])
|
dagnelies/restfs
|
old/restfs.py
|
Python
|
mit
| 4,068
| 0.004916
|
"""
Operations:
P - login
G - list - list the dir's content
G - read - reads a file
G - info - infos about a file
P - write - writes a file
P - mkdir - makes a dir
P - copy - to=...
P - move - to=...
P - delete - DELETE
P - logout
"""
import canister
import bottle
import os.path
import json
import sys
import shutil
import time
# a callback that can be replaced to determine whether a resource is allowed access or not
# by default, read access is authorized and write access forbidden
def authorized(write, path):
#return not write
return True
root = os.getcwd()
app = bottle.Bottle()
app.install(canister.Canister())
def fullpath(path):
global root
fp = os.path.join(root, path.strip('/'))
fp = os.path.abspath(fp)
if not fp.startswith(root):
raise Exception('Path forbidden: ' + path)
else:
return fp
@app.get('<path:path>')
def get(path='', hidden=False, jstree=False):
path = path.strip('/')
if not authorized(False, path):
return bottle.Response(status=401) # TODO
raise Exception('Unauthorized path: ' + path)
global root
fpath = fullpath(path)
print(fpath)
if os.path.isfile(fpath):
return bottle.static_file(path, root=root)
elif os.path.isdir(fpath):
files = os.listdir(fpath)
listing = []
for name in files:
if not hidden and name[0] == '.':
continue
p = os.path.join(fpath, name)
item = {
'name': name,
'type': 'dir' if os.path.isdir(p) else 'file',
'is_directory': os.path.isdir(p),
'size': os.path.getsize(p),
'last_modified': time.ctime(os.path.getmtime(p))
}
listing.append( item )
listing.sort(key=lambda x: x['name'])
bottle.response.content_type = 'application/json'
return json.dumps(listing)
else:
raise Exception('No such path: ' + path)
@app.post('<path:path>')
def post(path, cmd, to=None):
if not authorized(True, path):
return bottle.Response(status=401)
raise Exception('Unauthorized path: ' + path)
# TODO: exceptions might reveal real paths
fpath = fullpath(path)
app.log.debug('Full path: %s' % fpath)
cmd = cmd.lower()
if cmd == 'set':
content = bottle.request.body.readall()
file = open(fpath, mode='w')
file.write(content)
file.close()
elif cmd == 'upload':
for name, up in bottle.request.files.items():
            file = open(os.path.join(fpath, name), mode='w')
            file.write(up)
            file.close()
elif cmd == 'mkdir':
# os.mkdir
# build dirs recursively
os.makedirs(fpath, exist_ok=True)
elif cmd == 'move':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.move(fpath, fto)
elif cmd == 'rename':
if not to:
raise Exception('Missing destination ("to=...")')
os.rename(fpath, to)
elif cmd == 'copy':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.copy(fpath, fto)
else:
raise Exception('Unknown command: %s' % cmd)
@app.delete('<path:path>')
def delete(path):
if not authorized(True, path):
raise Exception('Unauthorized path: ' + path) # TODO: return response instead
fpath = fullpath(path)
shutil.rmtree(fpath)
if __name__ == '__main__':
print(sys.argv)
args = sys.argv
#if len(args) != 2:
# print('Usage: %s <path-to-serve>' % os.path.basename(args[0]))
#root = os.path.abspath(args[1])
#root = os.getcwd()
import webfs
app.mount('@admin', webfs.app)
print('Serving: ' + root)
app.run(debug=True, host='0.0.0.0')
|
ducandu/aiopening
|
aiopening/misc/special.py
|
Python
|
mit
| 1,167
| 0.003428
|
"""
-------------------------------------------------------------------------
AIOpening - special.py
useful functions
created: 2017/09/01 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
import numpy as np
import tensorflow as tf
def weighted_sample(weights, objects):
"""
    Return a random item from objects, with the weighting defined by weights
    (which must sum to 1).
"""
# An array of the weights, cumulatively summed.
cs = np.cumsum(weights)
# Find the index of the first weight over a random value.
idx = sum(cs < np.random.rand())
return objects[min(idx, len(objects) - 1)]
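# Usage sketch (an assumed example, not from the original file): with weights summing
# to 1, weighted_sample([0.1, 0.9], ['a', 'b']) returns 'b' roughly 90% of the time.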
def to_one_hot(ind, dim):
ret = np.zeros(dim)
ret[ind] = 1
return ret
def to_one_hot_batch(inds, dim):
ret = np.zeros((len(inds), dim))
    ret[np.arange(len(inds)), inds] = 1
return ret
def from_one_hot(v):
return np.nonzero(v)[0][0]
def from_one_hot_batch(v):
if len(v) == 0:
return []
return np.nonzero(v)[1]
def new_tensor(name, n_dim, dtype):
return tf.placeholder(dtype=dtype, shape=[None] * n_dim, name=name)
|
rbtcollins/lmirror
|
l_mirror/tests/commands/test_commands.py
|
Python
|
gpl-3.0
| 1,692
| 0.004728
|
#
# LMirror is Copyright (C) 2010 Robert Collins <[email protected]>
#
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
#
"""Tests for the commands command."""
from l_mirror.commands import commands
from l_mirror.ui.model import UI
from l_mirror.tests import ResourcedTestCase
class TestCommandCommands(ResourcedTestCase):
def get_test_ui_and_cmd(self):
ui = UI()
cmd = commands.commands(ui)
ui.set_command(cmd)
return ui, cmd
def test_shows_a_table_of_commands(self):
ui, cmd = self.get_test_ui_and_cmd()
cmd.execute()
self.assertEqual(1, len(ui.outputs))
self.assertEqual('table', ui.outputs[0][0])
self.assertEqual(('command', 'description'), ui.outputs[0][1][0])
command_names = [row[0] for row in ui.outputs[0][1]]
summaries = [row[1] for row in ui.outputs[0][1]]
self.assertTrue('help' in command_names)
self.assertTrue(
'Get help on a command.' in summaries)
|
Incogniiito/Chrononaut
|
my/tensorflow/rnn_cell.py
|
Python
|
apache-2.0
| 9,075
| 0.003857
|
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper, RNNCell, LSTMStateTuple
from my.tensorflow import exp_mask, flatten
from my.tensorflow.nn import linear, softsel, double_linear_logits
class SwitchableDropoutWrapper(DropoutWrapper):
def __init__(self, cell, is_train, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
super(SwitchableDropoutWrapper, self).__init__(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob,
seed=seed)
self.is_train = is_train
def __call__(self, inputs, state, scope=None):
outputs_do, new_state_do = super(SwitchableDropoutWrapper, self).__call__(inputs, state, scope=scope)
tf.get_variable_scope().reuse_variables()
outputs, new_state = self._cell(inputs, state, scope)
outputs = tf.cond(self.is_train, lambda: outputs_do, lambda: outputs)
if isinstance(state, tuple):
new_state = state.__class__(*[tf.cond(self.is_train, lambda: new_state_do_i, lambda: new_state_i)
for new_state_do_i, new_state_i in zip(new_state_do, new_state)])
else:
new_state = tf.cond(self.is_train, lambda: new_state_do, lambda: new_state)
return outputs, new_state
class TreeRNNCell(RNNCell):
def __init__(self, cell, input_size, reduce_func):
self._cell = cell
self._input_size = input_size
self._reduce_func = reduce_func
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N*B, I + B]
:param state: [N*B, d]
:param scope:
:return: [N*B, d]
"""
with tf.variable_scope(scope or self.__class__.__name__):
d = self.state_size
x = tf.slice(inputs, [0, 0], [-1, self._input_size]) # [N*B, I]
mask = tf.slice(inputs, [0, self._input_size], [-1, -1]) # [N*B, B]
B = tf.shape(mask)[1]
prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1) # [N, B, d] -> [N, 1, B, d]
mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d]) # [N, B, B, d]
# prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
prev_state = self._reduce_func(exp_mask(prev_state, mask), 2) # [N, B, d]
prev_state = tf.reshape(prev_state, [-1, d]) # [N*B, d]
return self._cell(x, prev_state)
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
class NoOpCell(RNNCell):
def __init__(self, num_units):
self._num_units = num_units
def __call__(self, inputs, state, scope=None):
return state, state
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
class MatchCell(RNNCell):
def __init__(self, cell, input_size, q_len):
self._cell = cell
self._input_size = input_size
# FIXME : This won't be needed with good shape guessing
self._q_len = q_len
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N, d + JQ + JQ * d]
:param state: [N, d]
:param scope:
:return:
"""
with tf.variable_scope(scope or self.__class__.__name__):
c_prev, h_prev = state
x = tf.slice(inputs, [0, 0], [-1, self._input_size])
q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len]) # [N, JQ]
qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
qs = tf.reshape(qs, [-1, self._q_len, self._input_size]) # [N, JQ, d]
x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1]) # [N, JQ, d]
h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1]) # [N, JQ, d]
f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f')) # [N, JQ, d]
a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask)) # [N, JQ]
q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
z = tf.concat(1, [x, q]) # [N, 2d]
return self._cell(z, state)
class AttentionCell(RNNCell):
def __init__(self, cell, memory, mask=None, controller=None, mapper=None, input_keep_prob=1.0, is_train=None):
"""
Early fusion attention cell: uses the (inputs, state) to control the current attention.
:param cell:
:param memory: [N, M, m]
:param mask:
:param controller: (inputs, prev_state, memory) -> memory_logits
"""
self._cell = cell
self._memory = memory
self._mask = mask
self._flat_memory = flatten(memory, 2)
self._flat_mask = flatten(mask, 1)
if controller is None:
controller = AttentionCell.get_linear_controller(True, is_train=is_train)
self._controller = controller
if mapper is None:
mapper = AttentionCell.get_concat_mapper()
elif mapper == 'sim':
mapper = AttentionCell.get_sim_mapper()
self._mapper = mapper
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or "AttentionCell"):
memory_logits = self._controller(inputs, state, self._flat_memory)
sel_mem = softsel(self._flat_memory, memory_logits, mask=self._flat_mask) # [N, m]
new_inputs, new_state = self._mapper(inputs, state, sel_mem)
return self._cell(new_inputs, state)
@staticmethod
def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
def double_linear_controller(inputs, state, memory):
"""
:param inputs: [N, i]
:param state: [N, d]
:param memory: [N, M, m]
:return: [N, M]
"""
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
is_train=is_train)
return out
return double_linear_controller
@staticmethod
def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
def linear_controller(inputs, state, memory):
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
                            for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob, is_train=is_train)
return out
|
rcbops/nova-buildpackage
|
nova/image/fake.py
|
Python
|
apache-2.0
| 9,011
| 0.008323
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
# NOTE(bcwaldon): was image '123456'
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64'}}
# NOTE(bcwaldon): was image 'fake'
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '2'
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '1'
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '3'
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
# NOTE(sirp): was image '6'
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
# NOTE(sirp): was image '7'
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def index(self, context, **kwargs):
"""Returns list of images."""
retval = []
for img in self.images.values():
retval += [dict([(k, v) for k, v in img.iteritems()
if k in ['id', 'name']])]
return retval
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def get(self, context, image_id, data):
metadata = self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
return metadata
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing
|
image data for the given name."""
images = copy.deepcopy(self.imag
|
es.values())
for image in images:
if name == image.get('name'):
return image
raise exception.ImageNotFound(image_id=name)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', utils.gen_uuid()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image
|
holys/ledis-py
|
ledis/_compat.py
|
Python
|
mit
| 2,327
| 0.006446
|
"""Internal module for Python 2 backwards compatibility."""
import sys
if sys.version_info[0] < 3:
from urlparse import parse_qs, urlparse
from itertools import imap, izip
from string import letters as ascii_letters
from Queue import Queue
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
iteritems = lambda x: x.iteritems()
iterkeys = lambda x: x.iterkeys()
itervalues = lambda x: x.itervalues()
nativestr = lambda x: \
x if isinstance(x, str) else x.encode('utf-8', 'replace')
u = lambda x: x.decode()
b = lambda x: x
next = lambda x: x.next()
byte_to_chr = lambda x: x
unichr = unichr
xrange = xrange
basestring = basestring
unicode = unicode
bytes = str
long = long
else:
from urllib.parse import parse_qs, urlparse
from io import BytesIO
from string import ascii_letters
from queue import Queue
iteritems = lambda x: iter(x.items())
iterkeys = lambda x: iter(x.keys())
itervalues = lambda x: iter(x.values())
byte_to_chr = lambda x: chr(x)
nativestr = lambda x: \
x if isinstance(x, str) else x.decode('utf-8', 'replace')
u = lambda x: x
b = lambda x: x.encode('iso-8859-1') if not isinstance(x, bytes) else x
next = next
unichr = chr
imap = map
izip = zip
xrange = range
basestring = str
unicode = str
bytes = bytes
long = int
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import Empty, Full
try: # Python 2.6 - 2.7
from Queue import LifoQueue
except ImportError: # Python 2.5
from Queue import Queue
        # From the Python 2.7 lib. Python 2.5 already extracted the core
        # methods to aid implementing different queue organisations.
class LifoQueue(Queue):
"Override queue methods to implement a last-in first-out queue."
def _init(self, maxsize):
                self.maxsize = maxsize
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
|
cbrentharris/bricklayer
|
bricklayer/utils/downloader.py
|
Python
|
mit
| 951
| 0.005258
|
import platform
import urllib
import subprocess
from progressbar import ProgressBar
class Downloader(object):
WINDOWS_DOWNLOAD_URL = "http://cache.lego.com/downloads/ldd2.0/installer/setupLDD-PC-4_3_8.exe"
MAC_DOWNLOAD_URL = "http://cache.lego.com/
|
downloads/ldd2.0/installer/setupLDD-MAC-4_3_8.zip"
PB = None
@classmethod
def download_ldd(cls):
if platform.system() == "D
|
arwin":
urllib.urlretrieve(cls.MAC_DOWNLOAD_URL, "ldd.zip", reporthook=cls.download_progress)
elif platform.system() == "Windows":
urllib.urlretrieve(cls.WINDOWS_DOWNLOAD_URL, "ldd.exe")
# subprocess.Popen("ldd.exe")
@classmethod
def download_progress(cls, count, block_size, total_size):
if not cls.PB:
cls.PB = ProgressBar(maxval=total_size).start()
cls.PB.update(count * block_size)
class Installer(object):
@classmethod
def install(cls):
pass
|
m8ttyB/socorro
|
socorro/unittest/cron/jobs/test_upload_crash_report_json_schema.py
|
Python
|
mpl-2.0
| 1,523
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import ok_
from crontabber.app import CronTabber
from socorro.unittest.cron.jobs.base import IntegrationTestBase
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from socorro.schemas import CRASH_REPORT_JSON_SCHEMA_AS_STRING
class TestUploadCrashReportJSONSchemaCronApp(IntegrationTestBase):
def _setup_config_manager(self):
return get_config_manager_for_crontabber(
            jobs='socorro.cron.jobs.upload_crash_report_json_schema.'
'UploadCrashReportJSONSchemaCronApp|30d',
)
@mock.patch('boto.connect_s3')
def test_run(self, connect_s3):
key = mock.MagicMock()
connect_s3().get_bucket().get_key.return_value = None
connect_s3().get_bucket().new_key.return_value = key
with self._setup_config_manager().context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
app_name = 'upload-crash-report-json-schema'
ok_(information[app_name])
ok_(not information[app_name]['last_error'])
ok_(information[app_name]['last_success'])
key.set_contents_from_string.assert_called_with(
CRASH_REPORT_JSON_SCHEMA_AS_STRING
)
|
Gnurou/glmark2
|
waflib/Logs.py
|
Python
|
gpl-3.0
| 5,584
| 0.068947
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
 global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
|
atmark-techno/atmark-dist
|
user/bind/contrib/queryperf/utils/gen-data-queryperf.py
|
Python
|
gpl-2.0
| 2,783
| 0.004671
|
#!/usr/bin/python
#
# $Id: gen-data-queryperf.py,v 1.1.10.1 2003/05/15 05:07:21 marka Exp $
#
# Contributed by Stephane Bortzmeyer <[email protected]>
#
# "A small tool which may be useful with contrib/queryperf. This script
# can generate files of queries, both with random names (to test the
# behaviour with NXdomain) and with domains from a real zone file."
#
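# Example invocation (illustrative; the zone file name is an assumption):
#   gen-data-queryperf.py -n 1000 -t org -p 0.3 -m 10 -f example.org.zone > queries.txt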
import sys
import getopt
import random
import re
ldh = []
# Letters
for i in range(97, 122):
ldh.append(chr(i))
# Digits
for i in range(48, 57):
ldh.append(chr(i))
# Hyphen
ldh.append('-')
maxsize=10
tld='org'
num=4
percent_random = 0.3
gen = None
zone_file = None
domains = {}
domain_ns = "^([a-z0-9-]+)(\.([a-z0-9-\.]+|)|)( +IN|) +NS"
domain_ns_re = re.compile(domain_ns, re.IGNORECASE)
def gen_random_label():
label = ""
for i in range(gen.randint(1, maxsize)):
label = label + gen.choice(ldh)
return label
def make_domain(label):
return "www." + label + "." + tld + " A"
def usage():
sys.stdout.write("Usage: " + sys.argv[0] + " [-n number] " + \
"[-p percent-random] [-t TLD]\n")
sys.stdout.write(" [-m MAXSIZE] [-f zone-file]\n")
try:
optlist, args = getopt.getopt(sys.argv[1:], "hp:f:n:t:m:",
["help", "percentrandom=", "zonefile=",
"num=", "tld=",
"maxsize="])
for option, value in optlist:
if option == "--help" or option == "-h":
usage()
sys.exit(0)
elif option == "--number" or option == "-n":
num = int(value)
elif option == "--maxsize" or option == "-m":
maxsize = int(value)
elif option == "--percentrandom" or option == "-p":
percent_random = float(value)
elif option == "--tld" or option == "-t":
tld = str(value)
elif option == "--zonefile" or option == "-f":
zone_file = str(value)
else:
error("Unknown option " + option)
except getopt.error, reason:
sys.stderr.write(sys.argv[0] + ": " + str(reason) + "\n")
usage()
sys.exit(1)
if len(args) <> 0:
usage()
sys.exit(1)
gen = random.Random()
if zone_file:
file = open(zone_file)
line = file.readline()
while line:
domain_line = domain_ns_re.match(line)
if domain_line:
domain = domain_line.group(1)
domains[domain] = 1
line = file.readline()
file.close()
for i in range(num):
if zone_file:
if gen.random() < percent_random:
print make_domain(gen_random_label())
else:
print make_domain(gen.choice(domains.keys()))
else:
print make_domain(gen_random_label())
|
qsnake/gpaw
|
gpaw/scf.py
|
Python
|
gpl-3.0
| 2,689
| 0.002975
|
import numpy as np
from gpaw import KohnShamConvergenceError
class SCFLoop:
"""Self-consistent field loop.
converged: Do we have a self-consistent solution?
"""
def __init__(self, eigenstates=0.1, energy=0.1, density=0.1, maxiter=100,
fixdensity=False, niter_fixdensity=None):
self.max_eigenstates_error = max(eigenstates, 1e-20)
self.max_energy_error = energy
self.max_density_error = max(density, 1e-20)
self.maxiter = maxiter
self.fixdensity = fixdensity
if niter_fixdensity is None:
niter_fixdensity = 2
self.niter_fixdensity = niter_fixdensity
if fixdensity:
self.fix_density()
self.reset()
def fix_density(self):
self.fixdensity = True
self.niter_fixdensity = 10000000
self.max_density_error = np.inf
def reset(self):
self.energies = []
self.eigenstates_error = None
self.energy_error = None
self.density_error = None
self.converged = False
def run(self, wfs, hamiltonian, density, occupations):
if self.converged:
return
for iter in range(1, self.maxiter + 1):
wfs.eigensolver.iterate(hamiltonian, wfs)
occupations.calculate(wfs)
# XXX ortho, dens, wfs?
energy = hamiltonian.get_energy(occupations)
self.energies.append(energy)
self.check_convergence(density, wfs.eigensolver)
yield iter
if self.converged:
break
if iter > self.niter_fixdensity:
density.update(wfs)
hamiltonian.update(density)
else:
hamiltonian.npoisson = 0
# Don't fix the density in the next step:
self.niter_fixdensity = 0
def check_convergence(self, density, eigensolver):
"""Check convergence of eigenstates, energy and density."""
if self.converged:
return True
self.eigenstates_error = eigensolver.error
if len(self.energies) < 3:
self.energy_error = self.max_energy_error
else:
self.energy_error = np.ptp(self.energies[-3:])
self.density_error = density.mixer.get_charge_sloshing()
if self.density_error is None:
self.density_error = 1000000.0
self.converged = (
self.eigenstates_error < self.max_eigenstates_error and
self.energy_error < self.max_energy_error and
self.density_error < self.max_density_error)
return self.converged
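# Note added for clarity (not in the original source): the energy criterion above uses
# the peak-to-peak spread of the last three total energies, so at least three SCF
# iterations are needed before the energy check can pass.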
|
rafaelsilvag/pyNFRouter
|
test/teste.py
|
Python
|
gpl-2.0
| 798
| 0.012531
|
from netfilterqueue import NetfilterQueue
from dpkt import ip, icmp, tcp, udp
from scapy.all import *
import socket
def print_and_accept(pkt):
data=pkt.get_payload()
res = ip.IP(data)
res2 = IP(data)
i = ICMP(data)
t = TCP(data)
u = UDP(data)
print "SOURCE IP: %s\tDESTINATION IP: %s" % (socket.inet_ntoa(res.src),socket.inet_ntoa(res.dst))
print res2.show2()
resp=srp1(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst='192.168.0.34'),iface="eth0",timeout=2)
print resp.dst
eth_dst = resp.src
    eth_src = resp.dst
eth = Ether(src=eth_src, dst=eth_dst)
eth.type = 2048
sendp(eth/res2/res2,iface="eth0")
pkt.accept()
nfqueue = NetfilterQueue()
nfqueue.bind(6, print_and_accept)
try:
nfqueue.run()
except KeyboardInterrupt, ex:
    print ex
|
unmrds/cc-python
|
.ipynb_checkpoints/eggs-checkpoint.py
|
Python
|
apache-2.0
| 1,783
| 0.00673
|
#!/usr/bin/env python
import csv
# create an empty list that will be filled with the rows of data from the CSV as dictionaries
csv_content = []
# open and loop through each line of the csv file to populate our data file
with open('aaj1945_DataS1_Egg_shape_by_species_v2.csv') as csv_file:
csv_reader = csv.DictReader(csv_file)
lineNo = 0
for row in csv_reader: # process each row of the csv file
csv_content.append(row)
        if lineNo < 3: # print out a few lines of data for our inspection
print(row)
lineNo += 1
# create some empty lists that we will fill with values for each column of data
order = []
family = []
species = []
asymmetry = []
ellipticity = []
avglength = []
# for each row of data in our dataset write a set of values into the lists of column values
for item in csv_content:
order.append(item['\ufeffOrder'])
family.append(item['Family'])
species.append(item['Species'])
# deal with issues
try:
asymmetry.append(float(item['Asymmetry']))
except:
asymmetry.append(-9999)
try:
ellipticity.append(float(item['Ellipticity']))
except:
ellipticity.append(-9999)
try:
avglength.append(float(item['AvgLength (cm)']))
except:
avglength.append(-9999)
print()
print()
# Calculate and print some statistics
mean_asymmetry = sum(asymmetry)/len(asymmetry)
print("Mean Asymmetry: ", str(mean_asymmetry))
mean_ellipticity = sum(ellipticity)/len(ellipticity)
print("Mean Ellipticity: ", str(mean_ellipticity))
mean_avglength = sum(avglength)/len(avglength)
print("Mean Average Length: ", str(mean_avglength))
# What's wrong with these results? What would you do next to fix the problem?
|
satnet-project/propagators
|
output_predict.py
|
Python
|
apache-2.0
| 2,281
| 0.003069
|
################################################################################
# Copyright 2015 Samuel Gongora Garcia ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
# Author: s.gongoragarcia[at]gmail.com
################################################################################
class Read_predict_data:
def __init__(self, index_satellite):
from os import getcwd, chdir
index_satellite = index_satellite + 1
directorio_script = getcwd()
# predict routine
self.open_predict(directorio_script)
self.open_files_predict(index_satellite)
chdir(directorio_script)
def open_predict(self, directorio_script):
from os import chdir, listdir, getcwd
chdir(directorio_script + '/results/predict')
self.files_predict = listdir(getcwd())
self.files_predict.sort()
def open_files_predict(self, index_satellite):
for i in range(index_satellite):
self.open_file_predict(self.files_predict[i])
def open_file_predict(self, name):
self.predict_simulation_time = []
self.predict_alt_satellite = []
self.predict_az_satellite = []
import csv
with open(name) as tsv:
for line in csv.reader(tsv, delimiter = "\t"):
if float(line[1]) >= 0:
linea0 = float(line[0])
self.predict_simulation_time.append(linea0)
self.predict_alt_satellite.append(float(line[1]))
self.predict_az_satellite.append(float(line[2]))
|
SequencingDOTcom/App-Market-API-integration
|
python/bootstrap/urls.py
|
Python
|
mit
| 231
| 0
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^external/', include('external.urls')),
url(r'^dev/', include('dev.urls')),
]
|
mongodb/motor
|
test/test_environment.py
|
Python
|
apache-2.0
| 11,605
| 0.001034
|
# Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discover environment and server configuration, initialize PyMongo client."""
import os
import socket
import sys
from functools import wraps
from test.utils import create_user
from test.version import Version
from unittest import SkipTest
import pymongo.errors
HAVE_SSL = True
try:
import ssl
except ImportError:
HAVE_SSL = False
ssl = None
HAVE_TORNADO = True
try:
import tornado
except ImportError:
HAVE_TORNADO = False
tornado = None
HAVE_ASYNCIO = True
try:
import asyncio
except ImportError:
HAVE_ASYNCIO = False
asyncio = None
HAVE_AIOHTTP = True
try:
import aiohttp
except ImportError:
HAVE_AIOHTTP = False
aiohttp = None
# Copied from PyMongo.
def partition_node(node):
"""Split a host:port string into (host, int(port)) pair."""
host = node
port = 27017
idx = node.rfind(":")
if idx != -1:
host, port = node[:idx], int(node[idx + 1 :])
if host.startswith("["):
host = host[1:-1]
return host, port
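# For illustration (added, not part of the upstream helper):
#   partition_node("localhost:27018") -> ("localhost", 27018)
#   partition_node("example.com")     -> ("example.com", 27017)
#   partition_node("[::1]:27017")     -> ("::1", 27017)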
def connected(client):
"""Convenience, wait for a new PyMongo MongoClient to connect."""
client.admin.command("ping") # Force connection.
return client
# If these are set to the empty string, substitute None.
db_user = os.environ.get("DB_USER") or None
db_password = os.environ.get("DB_PASSWORD") or None
CERT_PATH = os.environ.get(
"CERT_DIR", os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates")
)
CLIENT_PEM = os.path.join(CERT_PATH, "client.pem")
CA_PEM = os.path.join(CERT_PATH, "ca.pem")
MONGODB_X509_USERNAME = "CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US"
def is_server_resolvable():
"""Returns True if 'server' is resolvable."""
socket_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1)
try:
socket.gethostbyname("server")
return True
except socket.error:
return False
finally:
socket.setdefaulttimeout(socket_timeout)
class TestEnvironment(object):
def __init__(self):
self.initialized = False
self.host = None
self.port = None
self.mongod_started_with_ssl = False
self.mongod_validates_client_cert = False
self.server_is_resolvable = is_server_resolvable()
self.sync_cx = None
self.is_standalone = False
self.is_mongos = False
self.is_replica_set = False
self.rs_name = None
self.w = 1
self.hosts = None
self.arbiters = None
self.primary = None
self.secondaries = None
self.v8 = False
self.auth = False
self.uri = None
self.rs_uri = None
self.version = None
self.sessions_enabled = False
self.fake_hostname_uri = None
self.server_status = None
def setup(self):
assert not self.initialized
self.setup_sync_cx()
self.setup_auth_and_uri()
self.setup_version()
self.setup_v8()
self.server_status = self.sync_cx.admin.command("serverStatus")
self.initialized = True
def setup_sync_cx(self):
"""Get a synchronous PyMongo MongoClient and determine SSL config."""
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
connectTimeoutMS = 100
serverSelectionTimeoutMS = 100
socketTimeoutMS = 10000
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
ssl=True,
)
)
self.mongod_started_with_ssl = True
except pymongo.errors.ServerSelectionTimeoutError:
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
self.mongod_started_with_ssl = True
self.mongod_validates_client_cert = True
except pymongo.errors.ServerSelectionTimeoutError:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
)
)
response = client.admin.command("ismaster")
self.sessions_enabled = "logicalSessionTimeoutMinutes" in response
self.is_mongos = response.get("msg") == "isdbgrid"
if "setName" in response:
self.is_replica_set = True
self.rs_name = str(response["setName"])
self.w = len(response["hosts"])
self.hosts = set([partition_node(h) for h in response["hosts"]])
host, port = self.primary = partition_node(response["primary"])
self.arbiters = set([partition_node(h) for h in response.get("arbiters", [])])
self.secondaries = [
partition_node(m)
for m in response["hosts"]
if m != self.primary and m not in self.arbiters
]
elif not self.is_mongos:
self.is_standalone = True
# Reconnect to found primary, without short timeouts.
if self.mongod_started_with_ssl:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
else:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
ssl=False,
)
)
self.sync_cx = client
self.host = host
self.port = port
def setup_auth_and_uri(self):
"""Set self.auth and self.uri."""
if db_user or db_password:
if not (db_user and db_password):
sys.stderr.write("You must set both DB_USER and DB_PASSWORD, or neither\n")
sys.exit(1)
self.auth = True
uri_template = "mongodb://%s:%s@%s:%s/admin"
self.uri = uri_template % (db_user, db_password, self.host, self.port)
# If the hostname 'server' is resolvable, this URI lets us use it
|
SamR1/FitTrackee
|
e2e/utils.py
|
Python
|
agpl-3.0
| 2,863
| 0
|
import os
import random
import string
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
TEST_APP_URL = os.getenv('TEST_APP_URL')
TEST_CLIENT_URL = os.getenv('TEST_CLIENT_URL')
E2E_ARGS = os.getenv('E2E_ARGS')
TEST_URL = TEST_CLIENT_URL if E2E_ARGS == 'client' else TEST_APP_URL
def random_string(length=8):
return ''.join(random.choice(string.ascii_letters) for x in range(length))
def register(selenium, user):
selenium.get(f'{TEST_URL}/register')
selenium.implicitly_wait(1)
username = selenium.find_element_by_id('username')
username.send_keys(user.get('username'))
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
password_conf = selenium.find_element_by_id('confirm-password')
password_conf.send_keys(user.get('password_conf'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def login(selenium, user):
selenium.get(f'{TEST_URL}/login')
selenium.implicitly_wait(1)
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def register_valid_user(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
return user
def register_valid_user_and_logout(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
user_menu = selenium.find_element_by_class_name('nav-items-user-menu')
logout_link = user_menu.find_elements_by_class_name('nav-item')[2]
logout_link.click()
selenium.implicitly_wait(1)
return user
def login_valid_user(selenium, user):
login(selenium, user)
WebDriverWait(selenium, 10).until(EC.url_changes(f"{TEST_URL}/login"))
return user
def assert_navbar(selenium, user):
nav = selenium.find_element_by_id('nav').text
assert 'Register' not in nav
assert 'Login' not in nav
assert 'Dashboard' in nav
assert 'Workouts' in nav
assert 'Statistics' in nav
assert 'Add a workout' in nav
assert user['username'] in nav
assert 'Logout' in nav
|
RagtagOpen/bidwire
|
bidwire/scrapers/massgov/url_scraper_dict.py
|
Python
|
mit
| 1,717
| 0.008154
|
UL_CATEGORY_LI = '//ul[@class="category"]/li'
H2_A_TITLELINK = './h2/a[@class="titlelink"]'
SPAN_A_TITLELINK = './span/a[@class="titlelink"]'
DIV_BODYFIELD_P = '//div[contains(@class,"bodyfield")]/p'
CATEGORY_H2_XPATH = [ UL_CATEGORY_LI, H2_A_TITLELINK ]
BODYFIELD_SPAN_XPATH = [ DIV_BODYFIELD_P, SPAN_A_TITLELINK ]
"""Mapping of relative URL (for EOPSS pages) to the xpath needed
to extract documents (1st xpath for section, 2nd xpath for document link)
"""
MASSGOV_DICT = {
'homeland-sec/grants/docs/':
[
UL_CATEGORY_LI,
'./h2/span/a[@class="titlelink"]'
],
'homeland-sec/grants/hs-grant-guidance-and-policies.html':
BODYFIELD_SPAN_XPATH,
'homeland-sec/grants/standard-documents.html':
[
'//div[contains(@class,"bodyfield")]/ul/li',
SPAN_A_TITLELINK
],
'law-enforce/grants/': CATEGORY_H2_XPATH,
'law-enforce/grants/2017-muni-public-safety-staffing-grant.html':
BODYFIELD_SPAN_XPATH,
'law-enforce/grants/le-grants-public-records.html':
BODYFIELD_SPAN_XPATH,
'justice-and-prev/grants/': CATEGORY_H2_XPATH,
'justice-and-prev/grants/bgp/': CATEGORY_H2_XPATH,
'hwy-safety/grants/': CATEGORY_H2_XPATH,
'hwy-safety/grants/ffy-2017-traffic-enforcement-grant-program.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy2017-hsd-grant-opportunities.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy-2017-step.html': BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/highway-safety-grants-public-records.html':
BODYFIELD_SPAN_XPATH
}
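# Clarifying note (added, not in the original file): each value is a two-element list
# of [section_xpath, link_xpath]; for example, 'law-enforce/grants/' maps to
# CATEGORY_H2_XPATH, i.e. [UL_CATEGORY_LI, H2_A_TITLELINK].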
|
graingert/sqlalchemy
|
lib/sqlalchemy/orm/loading.py
|
Python
|
mit
| 36,424
| 0
|
# orm/loading.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
import collections
from . import attributes
from . import exc as orm_exc
from . import path_registry
from . import strategy_options
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .util import _none_set
from .util import aliased
from .util import state_str
from .. import exc as sa_exc
from .. import util
from ..engine import result_tuple
from ..sql import util as sql_util
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
context.post_load_paths = {}
filtered = query._has_mapper_entities
single_entity = query.is_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item) if ent.use_id_for_hash else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels, extra) = list(
zip(
*[
query_entity.row_processor(query, context, cursor)
for query_entity in query._entities
]
)
)
if not single_entity:
keyed_tuple = result_tuple(labels, extra)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [
keyed_tuple([proc(row) for proc in process])
for row in fetch
]
for path, post_load in context.post_load_paths.items():
post_load.invoke(context, path)
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception:
with util.safe_reraise():
cursor.close()
@util.preload_module("sqlalchemy.orm.query")
def merge_result(query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
querylib = util.preloaded.orm_query
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [
session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive={},
_resolve_conflict_map={},
)
for instance in iterator
]
else:
result = list(iterator)
else:
mapped_entities = [
i
for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)
            ]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = result_tuple(
keys, [ent.entities for ent in query._entities]
)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
                        newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, mapper, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
if mapper.inherits and not state.mapper.isa(mapper):
return attributes.PASSIVE_CLASS_MISMATCH
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(
query,
key,
refresh_state=None,
with_for_update=None,
only_load_props=None,
no_autoflush=False,
):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
identity_token = key[2]
else:
ident = identity_token = None
if no_autoflush:
query = query.autoflush(False)
return load_on_pk_identity(
query,
ident,
refresh_state=refresh_state,
with_for_update=with_for_update,
only_load_props=only_load_props,
identity_token=identity_token,
)
def load_on_pk_identity(
query,
primary_key_identity,
refresh_state=None,
with_for_update=None,
only_load_props=None,
identity_token=None,
):
"""Load the given primary key identity from the database."""
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if primary_key_identity is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
if len(nones) == len(primary_key_identity):
util.warn(
"fully NULL primary key identity cannot load any "
"object. This condition may raise an error in a future "
"release."
)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
|
pkimber/booking
|
booking/tests/scenario.py
|
Python
|
apache-2.0
| 2,769
| 0.001083
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
from dateutil.relativedelta import relativedelta
from base.tests.model_maker import clean_and_save
from booking.models import (
Booking,
Category,
Location,
)
def get_alpe_d_huez():
return Booking.objects.get(title='Alpe D Huez')
def make_booking(start_date, end_date, title, **kwargs):
defaults = dict(
start_date=start_date,
end_date=end_date,
title=title,
)
defaults.update(kwargs)
return clean_and_save(Booking(**defaults))
def make_booking_in_past(start_date, end_date, title):
"""Save a booking without cleaning (validating) the data."""
b = Booking(**dict(
start_date=start_date,
end_date=end_date,
title=title,
))
b.save()
return b
def next_weekday(d, weekday):
"""Find the date for the next weekday.
Copied from:
http://stackoverflow.com/questions/6558535/python-find-the-date-for-the-first-monday-after-a-given-a-date
"""
    days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return d + timedelta(days_ahead)
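# Worked example (assumed, not in the source): if d is a Wednesday (weekday 2) and
# weekday=5 (Saturday), days_ahead is 3 and the coming Saturday is returned; if d is
# already a Saturday, days_ahead becomes 7 and the following Saturday is used.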
def demo_data():
# set-up some dates
today = datetime.today().date()
# 1st week last month starting Saturday
first_prev_month = today + relativedelta(months=-1, day=1)
start_date = next_weekday(first_prev_month, 5)
end_date = start_date + timedelta(days=7)
    make_booking_in_past(start_date, end_date, 'Tignes')
# 2nd week last month
make_booking_in_past(end_date, end_date + timedelta(days=7), 'Meribel')
# 1st week this month starting Saturday
first_this_month = today + relativedelta(day=1)
start_date = next_weekday(first_this_month, 5)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Whistler')
# later this month starting Tuesday
start_date = next_weekday(first_this_month + timedelta(days=10), 1)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Dorset')
# span this and next month
start_date = datetime(today.year, today.month, 27).date()
first_next_month = today + relativedelta(months=+1, day=1)
end_date = datetime(first_next_month.year, first_next_month.month, 2).date()
make_booking_in_past(start_date, end_date, 'Devon')
# next month
start_date = next_weekday(first_next_month + timedelta(days=3), 2)
end_date = next_weekday(start_date, 5)
make_booking(start_date, end_date, 'Alpe D Huez')
make_booking(end_date, end_date + timedelta(days=4), 'Cornwall')
# misc
Category.objects.create_category('Meeting')
Location.objects.create_location('Community Centre')
|
alzeih/ava
|
ava_core/integration/integration_ldap/test_data.py
|
Python
|
gpl-3.0
| 6,603
| 0.001212
|
# Rest Imports
from rest_framework import status
# Local Imports
from ava_core.abstract.test_data import AvaCoreTestData
from ava_core.integration.integration_ldap.models import LDAPIntegrationAdapter
# Implementation
class LDAPIntegrationAdapterTestData(AvaCoreTestData):
"""
Test data for LDAPIntegrationAdapter
"""
@staticmethod
def init_requirements():
# Import the required model and data
from ava_core.gather.gather_ldap.models import LDAPGatherHistory
from ava_core.gather.gather_ldap.test_data import LDAPGatherHistoryTestData
# Check that requirements haven't already been created.
# True - Create necessary requirements.
if LDAPGatherHistory.objects.count() == 0:
LDAPGatherHistoryTestData.init_requirements()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('standard'))
model.save()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('unique'))
model.save()
# Import the required model and data
from ava_core.integration.integration_abstract.models import IntegrationAdapter
from ava_core.integration.integration_abstract.test_data import IntegrationAdapterTestData
# Check that requirements haven't already been created.
# True - Create necessary requirements.
if IntegrationAdapter.objects.count() == 0:
IntegrationAdapterTestData.init_requirements()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('standard'))
model.save()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('unique'))
model.save()
# Store self information
model = LDAPIntegrationAdapter
url = '/example'
standard = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
unique = {
        'ldap_password': 'unique_char',
        'salt': 'unique_char',
        'dump_dn': 'unique_char',
'ldap_user': 'unique_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'unique_char',
}
missing_ldap_password = {
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_password = {
'ldap_password': 'modified_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_salt = {
'ldap_password': 'standard_char',
'salt': 'modified_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_salt = {
'ldap_password': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'modified_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'modified_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_integration_history = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_integration_history = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'server': 'standard_char',
}
missing_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
}
modified_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'modified_char',
}
|
aiden0z/snippets
|
leetcode/049_group_anagrams.py
|
Python
|
mit
| 1,315
| 0.00076
|
"""Group Anagrams
Given an array of strings, group anagrams together.
Example:
Input:
["eat", "tea", "tan", "ate", "nat", "bat"]
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
from typing import List
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
store = {}
for item in strs:
key = ''.join(sorted(item))
            if key in store:
store[key].append(item)
else:
store[key] = [item]
        return list(store.values())
if __name__ == '__main__':
cases = [
(
["eat", "tea", "tan", "ate", "nat", "bat"],
[
["ate", "eat", "tea"],
["nat", "tan"],
["bat"]
]
),
] # yapf: disable
for case in cases:
for S in [Solution]:
            result = S().groupAnagrams(case[0])
for l in case[1]:
for item in l:
found = False
for ll in result:
if item in ll:
found = True
assert found
|
galihmelon/sendgrid-python
|
sendgrid/helpers/mail/content.py
|
Python
|
mit
| 735
| 0
|
class Content(object):
def __init__(self, type_=None, value=None):
self._type = None
|
self._value = None
if type_ is not None:
self.type = type_
if value is not None:
self.value = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
|
self._type = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def get(self):
content = {}
if self.type is not None:
content["type"] = self.type
if self.value is not None:
content["value"] = self.value
return content
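# Usage sketch (illustrative values, not part of the original file):
# Content("text/plain", "Hello, Email!").get()
# -> {"type": "text/plain", "value": "Hello, Email!"}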
|
hugovk/diff-cover
|
diff_cover/tool.py
|
Python
|
agpl-3.0
| 6,230
| 0.000482
|
"""
Implement the command-line tool interface.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import diff_cover
from diff_cover.diff_reporter import GitDiffReporter
from diff_cover.git_diff import GitDiffTool
from diff_cover.git_path import GitPathTool
from diff_cover.violations_reporter import (
XmlCoverageReporter, Pep8QualityReporter,
PyflakesQualityReporter, PylintQualityReporter
)
from diff_cover.report_generator import (
HtmlReportGenerator, StringReportGenerator,
HtmlQualityReportGenerator, StringQualityReportGenerator
)
from lxml import etree
import six
COVERAGE_XML_HELP = "XML coverage report"
HTML_REPORT_HELP = "Diff coverage HTML output"
COMPARE_BRANCH_HELP = "Branch to compare"
VIOLATION_CMD_HELP = "Which code quality tool to use"
INPUT_REPORTS_HELP = "Pep8, pyflakes or pylint reports to use"
OPTIONS_HELP = "Options to be passed to the violations tool"
QUALITY_REPORTERS = {
'pep8': Pep8QualityReporter,
'pyflakes': PyflakesQualityReporter,
'pylint': PylintQualityReporter
}
import logging
LOGGER = logging.getLogger(__name__)
def parse_coverage_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'coverage_xml': COVERAGE_XML,
'html_report': None | HTML_REPORT
}
where `COVERAGE_XML` is a path, and `HTML_REPORT` is a path.
The path strings may or may not exist.
"""
parser = argparse.ArgumentParser(description=diff_cover.DESCRIPTION)
parser.add_argument(
'coverage_xml',
type=str,
help=COVERAGE_XML_HELP,
nargs='+'
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
return vars(parser.parse_args(argv))
def parse_quality_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'violations': pep8 | pyflakes | pylint
'html_report': None | HTML_REPORT
}
where `HTML_REPORT` is a path.
"""
parser = argparse.ArgumentParser(
description=diff_cover.QUALITY_DESCRIPTION
)
parser.add_argument(
'--violations',
type=str,
help=VIOLATION_CMD_HELP,
required=True
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
parser.add_argument(
'input_reports',
type=str,
nargs="*",
default=[],
help=INPUT_REPORTS_HELP
)
parser.add_argument(
'--options',
type=str,
nargs='?',
default=None,
help=OPTIONS_HELP
)
return vars(parser.parse_args(argv))
def generate_coverage_report(coverage_xml, compare_branch, html_report=None):
"""
Generate the diff coverage report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
xml_roots = [etree.parse(xml_root) for xml_root in coverage_xml]
coverage = XmlCoverageReporter(xml_roots)
# Build a report generator
if html_report is not None:
reporter = HtmlReportGenerator(coverage, diff)
with open(html_report, "wb") as output_file:
reporter.generate_report(output_file)
reporter = StringReportGenerator(coverage, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
# Generate the report
reporter.generate_report(output_file)
def generate_quality_report(tool, compare_branch, html_report=None):
"""
Generate the quality report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
if html_report is not None:
reporter = HtmlQualityReportGenerator(tool, diff)
output_file = open(html_report, "wb")
else:
reporter = StringQualityReportGenerator(tool, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
reporter.generate_report(output_file)
def main():
"""
Main entry point for the tool, used by setup.py
"""
progname = sys.argv[0]
# Init the path tool to work with the current directory
try:
cwd = os.getcwdu()
except AttributeError:
cwd = os.getcwd()
GitPathTool.set_cwd(cwd)
if progname.endswith('diff-cover'):
arg_dict = parse_coverage_args(sys.argv[1:])
generate_coverage_report(
arg_dict['coverage_xml'],
arg_dict['compare_branch'],
html_report=arg_dict['html_report'],
)
elif progname.endswith('diff-quality'):
arg_dict = parse_quality_args(sys.argv[1:])
tool = arg_dict['violations']
user_options = arg_dict.get('options')
if user_options:
user_options = user_options[1:-1] # Strip quotes
reporter_class = QUALITY_REPORTERS.get(tool)
if reporter_class is not None:
# If we've been given pre-generated reports,
# try to open the files
input_reports = []
for path in arg_dict['input_reports']:
try:
input_reports.append(open(path, 'rb'))
except IOError:
LOGGER.warning("Could no
|
t load '{0}'".format(path))
try:
reporter = reporter_class(tool, input_reports, user_options=user_options)
generate_quality_report(
reporter,
arg_dict['compare_branch'],
arg_dict['html_report']
)
|
# Close any reports we opened
finally:
for file_handle in input_reports:
file_handle.close()
else:
LOGGER.error("Quality tool not recognized: '{0}'".format(tool))
exit(1)
if __name__ == "__main__":
main()
|
mikaelboman/home-assistant
|
homeassistant/components/device_tracker/owntracks.py
|
Python
|
mit
| 7,427
| 0
|
"""
Support the OwnTracks platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import threading
from collections import defaultdict
import homeassistant.components.mqtt as mqtt
from homeassistant.const import STATE_HOME
from homeassistant.util import convert, slugify
DEPENDENCIES = ['mqtt']
REGIONS_ENTERED = defaultdict(list)
MOBILE_BEACONS_ACTIVE = defaultdict(list)
BEACON_DEV_ID = 'beacon'
LOCATION_TOPIC = 'owntracks/+/+'
EVENT_TOPIC = 'owntracks/+/+/event'
_LOGGER = logging.getLogger(__name__)
LOCK = threading.Lock()
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
def setup_scanner(hass, config, see):
"""Setup an OwnTracks tracker."""
max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
def validate_payload(payload, data_type):
"""Validate OwnTracks payload."""
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
_LOGGER.error('Unable to parse payload as JSON: %s', payload)
return None
if not isinstance(data, dict) or data.get('_type') != data_type:
_LOGGER.debug('Skipping %s update for following data '
'because of missing or malformatted data: %s',
data_type, data)
return None
if max_gps_accuracy is not None and \
convert(data.get('acc'), float, 0.0) > max_gps_accuracy:
_LOGGER.debug('Skipping %s update because expected GPS '
'accuracy %s is not met: %s',
data_type, max_gps_accuracy, data)
return None
if convert(data.get('acc'), float, 1.0) == 0.0:
_LOGGER.debug('Skipping %s update because GPS accuracy'
'is zero',
data_type)
return None
return data
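    # For reference (based on the checks above; values are illustrative): a payload
    # that passes validation as 'location' looks like
    # '{"_type": "location", "lat": 2.0, "lon": 1.0, "acc": 10, "batt": 99}'.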
def owntracks_location_update(topic, payload, qos):
"""MQTT message received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
data = validate_payload(payload, 'location')
if not data:
return
dev_id, kwargs = _parse_see_args(topic, data)
# Block updates if we're in a region
with LOCK:
if REGIONS_ENTERED[dev_id]:
_LOGGER.debug(
"location update ignored - inside region %s",
                    REGIONS_ENTERED[dev_id][-1])
return
see(**kwargs)
see_beacons(dev_id, kwargs)
def owntracks_event_update(topic, payload, qos):
"""MQTT event (geofences) received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typetransition
data = validate_payload(payload, 'transition')
if not data:
return
if data.get('desc') is None:
_LOGGER.error(
"Location missing from `Entering/Leaving` message - "
"please turn `Share` on in OwnTracks app")
return
# OwnTracks uses - at the start of a beacon zone
# to switch on 'hold mode' - ignore this
location = slugify(data['desc'].lstrip("-"))
if location.lower() == 'home':
location = STATE_HOME
dev_id, kwargs = _parse_see_args(topic, data)
def enter_event():
"""Execute enter event."""
zone = hass.states.get("zone.{}".format(location))
with LOCK:
if zone is None and data.get('t') == 'b':
# Not a HA zone, and a beacon so assume mobile
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
|
if location not in beacons:
beacons.append(location)
_LOGGER.info("Added beacon %s", location)
else:
# Normal region
regions = REGIONS_ENTERED[dev_id]
if location not in regions:
                        regions.append(location)
_LOGGER.info("Enter region %s", location)
_set_gps_from_zone(kwargs, location, zone)
see(**kwargs)
see_beacons(dev_id, kwargs)
def leave_event():
"""Execute leave event."""
with LOCK:
regions = REGIONS_ENTERED[dev_id]
if location in regions:
regions.remove(location)
new_region = regions[-1] if regions else None
if new_region:
# Exit to previous region
zone = hass.states.get("zone.{}".format(new_region))
_set_gps_from_zone(kwargs, new_region, zone)
_LOGGER.info("Exit to %s", new_region)
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Exit to GPS")
# Check for GPS accuracy
if not ('acc' in data and
max_gps_accuracy is not None and
data['acc'] > max_gps_accuracy):
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Inaccurate GPS reported")
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
if location in beacons:
beacons.remove(location)
_LOGGER.info("Remove beacon %s", location)
if data['event'] == 'enter':
enter_event()
elif data['event'] == 'leave':
leave_event()
else:
_LOGGER.error(
'Misformatted mqtt msgs, _type=transition, event=%s',
data['event'])
return
def see_beacons(dev_id, kwargs_param):
"""Set active beacons to the current location."""
kwargs = kwargs_param.copy()
# the battery state applies to the tracking device, not the beacon
kwargs.pop('battery', None)
for beacon in MOBILE_BEACONS_ACTIVE[dev_id]:
kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
kwargs['host_name'] = beacon
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
mqtt.subscribe(hass, EVENT_TOPIC, owntracks_event_update, 1)
return True
def _parse_see_args(topic, data):
"""Parse the OwnTracks location parameters, into the format see expects."""
parts = topic.split('/')
dev_id = slugify('{}_{}'.format(parts[1], parts[2]))
host_name = parts[1]
kwargs = {
'dev_id': dev_id,
'host_name': host_name,
'gps': (data['lat'], data['lon'])
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
return dev_id, kwargs
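# Example of the mapping performed above (hypothetical topic and payload,
# added for illustration):
# _parse_see_args('owntracks/paulus/phone', {'lat': 2.0, 'lon': 1.0, 'batt': 99})
# -> ('paulus_phone', {'dev_id': 'paulus_phone', 'host_name': 'paulus',
#                      'gps': (2.0, 1.0), 'battery': 99})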
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters."""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
|
bkpathak/Algorithms-collections
|
src/DP/coin_play.py
|
Python
|
apache-2.0
| 2,084
| 0.024952
|
# Consider a row of n coins of values v1 . . . vn, where n is even.
# We play a game against an opponent by alternating turns. In each turn,
# a player selects either the first or last coin from the row, removes it
# from the row permanently, and receives the value of the coin. Determine the
# maximum possible amount of money we can definitely win if we move first.
# Note: The opponent is as clever as the user.
# http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
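# Worked example (hand-checked, not part of the original source): for the row
# [8, 15, 3, 7] the first player can guarantee 22 -- one optimal line of play
# is: take 7, the opponent takes 8, take 15, the opponent takes 3.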
def find_max_val_recur(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
if l == r:
        return coins[l]
left_choose = coins[l] + min(find_max_val_recur(coins,l+1,r - 1),find_max_val_recur(coins,l+2,r))
|
right_choose = coins[r] + min(find_max_val_recur(coins,l + 1,r-1),find_max_val_recur(coins,l,r-2))
return max(left_choose,right_choose)
coin_map = {}
def find_max_val_memo(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
if l == r:
        return coins[l]
if (l,r) in coin_map:
return coin_map[(l,r)]
left_choose = coins[l] + min(find_max_val_memo(coins,l+1,r - 1),find_max_val_memo(coins,l+2,r))
right_choose = coins[r] + min(find_max_val_memo(coins,l + 1,r-1),find_max_val_memo(coins,l,r-2))
max_val = max(left_choose,right_choose)
coin_map[(l,r)] = max_val
return max_val
def find_max_val_bottom_up(coins):
coins_len = len(coins)
table = [[0] * coins_len for i in range(coins_len + 1)]
for gap in range(coins_len):
i = 0
for j in range(gap,coins_len):
# Here x is value of F(i+2, j), y is F(i+1, j-1) and
# z is F(i, j-2) in above recursive formula
x = table[i+2][j] if (i+2) <= j else 0
y = table[i+1][j-1] if (i+1) <= (j-1) else 0
z = table[i][j-2] if i <= (j-2) else 0
table[i][j] = max(coins[i] + min(x,y),coins[j] + min(y,z))
i += 1
return table[0][coins_len - 1]
if __name__=="__main__":
coins = [8,15,3,7]
print(find_max_val_bottom_up(coins))
|
lorenzogil/yith-library-server
|
yithlibraryserver/views.py
|
Python
|
agpl-3.0
| 4,160
| 0
|
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <[email protected]>
# Copyright (C) 2012-2015 Lorenzo Gil Sanchez <[email protected]>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import logging
from deform import Button, Form, ValidationFailure
from pyramid.i18n import get_locale_name
from pyramid.httpexceptions import HTTPFound
from pyramid.renderers import render_to_response
from pyramid.view import view_config
from yithlibraryserver.email import send_email_to_admins
from yithlibraryserver.i18n import TranslationString as _
from yithlibraryserver.schemas import ContactSchema
log = logging.getLogger(__name__)
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
return {}
@view_config(route_name='contact', renderer='templates/contact.pt')
def contact(request):
button1 = Button('submit', _('Send message'))
button1.css_class = 'btn-primary'
button2 = Button('cancel', _('Cancel'))
button2.css_class = 'btn-default'
form = Form(ContactSchema(),
|
buttons=(button1, button2))
if 'submit' in request.POST:
controls = request.POST.items()
try:
appstruct = form.validate(controls)
except ValidationFailure as e:
return {'form': e.render()}
context = {'link': request.route_url('contact')}
context.update(appstruct)
subject = ("%s sent a message from Yith's contact form"
% appstruct['name'])
result = send_email_to_admins(
request,
'yithlibraryserver:templates/email_contact',
context,
subject,
extra_headers={'Reply-To': appstruct['email']},
)
if result is None:
log.error(
'%s <%s> tried to send a message from the contact form but no '
'admin emails were configured. Message: %s' % (
appstruct['name'],
appstruct['email'],
appstruct['message'],
)
)
request.session.flash(
_('Thank you very much for sharing your opinion'),
'info',
)
return HTTPFound(location=request.route_path('home'))
elif 'cancel' in request.POST:
return HTTPFound(location=request.route_path('home'))
initial = {}
if request.user is not None:
initial['name'] = request.user.first_name
if request.user.email_verified:
initial['email'] = request.user.email
return {'form': form.render(initial)}
@view_config(route_name='tos', renderer='templates/tos.pt')
def tos(request):
return {}
@view_config(route_name='faq', renderer='string')
def faq(request):
# We don't want to mess up the gettext .po file
# with a lot of strings which don't belong to the
# application interface.
#
# We consider the FAQ as application content
    # so we simply use a different template for each
# language. When a new locale is added to the
# application it needs to translate the .po files
# as well as this template
locale_name = get_locale_name(request)
template = 'yithlibraryserver:templates/faq-%s.pt' % locale_name
return render_to_response(template, {}, request=request)
@view_config(route_name='credits', renderer='templates/credits.pt')
def credits(request):
return {}
|
manterd/myPhyloDB
|
database/migrations/0021_auto_20190305_1458.py
|
Python
|
gpl-3.0
| 1,211
| 0.001652
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-03-05 14:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0020_daymetdata'),
]
operations = [
migrations.CreateModel(
name='PublicProjects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('List', models.TextField(blank=True)),
],
),
migrations.RemoveField(
model_name='daymetdata',
name='id',
),
migrations.AddField(
model_name='userprofile',
name='privateProjectList',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='daymetdata',
name='user',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
wooga/airflow
|
airflow/providers/papermill/operators/papermill.py
|
Python
|
apache-2.0
| 2,587
| 0.000773
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compl
|
iance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
import attr
import papermill as pm
from airflow.lineage.entities import File
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
@attr.s(auto_attribs=True)
class NoteBook(File):
"""
Jupyter notebook
"""
type_hint: Optional[str] = "jupyter_notebook"
parameters: Optional[Dict] = {}
meta_schema: str = __name__ + '.NoteBook'
class PapermillOperator(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters
:param input_nb: input notebook (can also be a NoteBook or a File inlet)
:type input_nb: str
:param output_nb: output notebook (can also be a NoteBook or File outlet)
:type output_nb: str
:param parameters: the notebook parameters to set
:type parameters: dict
"""
supports_lineage = True
@apply_defaults
def __init__(self,
input_nb: Optional[str] = None,
output_nb: Optional[str] = None,
parameters: Optional[Dict] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if input_nb:
self.inlets.append(NoteBook(url=input_nb,
parameters=parameters))
if output_nb:
self.outlets.append(NoteBook(url=output_nb))
def execute(self, context):
if not self.inlets or not self.outlets:
raise ValueError("Input notebook or output notebook is not specified")
for i in range(len(self.inlets)):
pm.execute_notebook(self.inlets[i].url, self.outlets[i].url,
parameters=self.inlets[i].parameters,
progress_bar=False, report_mode=True)
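# A minimal usage sketch (hypothetical task id and notebook paths, assuming the
# operator is instantiated inside a DAG definition; not part of the original file):
#
# run_notebook = PapermillOperator(
#     task_id="run_example_notebook",
#     input_nb="/tmp/hello_world.ipynb",
#     output_nb="/tmp/hello_world_out.ipynb",
#     parameters={"msgs": "Ran from Airflow!"},
# )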
|
ity/pants
|
src/python/pants/pantsd/pants_daemon.py
|
Python
|
apache-2.0
| 6,887
| 0.009438
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from setproctitle import setproctitle as set_process_title
from pants.goal.run_tracker import RunTracker
from pants.logging.setup import setup_logging
from pants.pantsd.process_manager import ProcessManager
from pants.pantsd.util import clean_global_runtime_state
class _StreamLogger(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
"""
self._logger = logger
self._log_level = log_level
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
class PantsDaemon(ProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception): pass
class RuntimeFailure(Exception): pass
def __init__(self, build_root, work_dir, log_level, log_dir=None, services=None,
metadata_base_dir=None):
"""
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param int log_level: The log level to use for daemon logging.
:param string log_dir: The directory to use for file-based logging via the daemon. (Optional)
:param tuple services: A tuple of PantsService instances to launch/manage. (Optional)
"""
super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._logger = logging.getLogger(__name__)
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._log_dir = log_dir or os.path.join(work_dir, self.name)
self._services = services or ()
self._socket_map = {}
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@property
def is_killed(self):
return self._kill_switch.is_set()
def set_services(self, services):
self._services = services
def set_socket_map(self, socket_map):
self._socket_map = socket_map
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join()
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_fds():
"""Close pre-fork stdio streams to avoid output in the pants process that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
def _setup_logging(self, log_level):
"""Reinitialize logging post-fork to clear all handlers, file descriptors, locks etc.
This must happen first thing post-fork, before any further logging is emitted.
"""
    # Re-initialize the child's logging locks post-fork to avoid potential deadlocks if pre-fork
# threads have any locks acquired at the time of fork.
logging._lock = threading.RLock() if logging.thread else None
    for handler in logging.getLogger().handlers:
handler.createLock()
# Invoke a global teardown for all logging handlers created before now.
logging.shutdown()
# Reinitialize logging for the daemon context.
setup_logging(log_level, console_stream=None, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Close out pre-fork file descriptors.
self._close_fds()
# Redirect stdio to the root logger.
sys.stdout = _StreamLogger(logging.getLogger(), logging.INFO)
sys.stderr = _StreamLogger(logging.getLogger(), logging.WARN)
self._logger.debug('logging initialized')
def _setup_services(self, services):
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup()
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: threading.Thread(target=service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def _run(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
self._setup_logging(self._log_level)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
# Purge as much state as possible from the pants run that launched us.
clean_global_runtime_state()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def pre_fork(self):
"""Pre-fork() callback for ProcessManager.daemonize()."""
# Teardown the RunTracker's SubprocPool pre-fork.
RunTracker.global_instance().shutdown_worker_pool()
# TODO(kwlzn): This currently aborts tracking of the remainder of the pants run that launched
# pantsd.
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemonize()."""
self._run()
|
nirenzang/Serpent-Pyethereum-Tutorial
|
pyethereum/ethereum/meta.py
|
Python
|
gpl-3.0
| 2,865
| 0.00349
|
from ethereum.slogging import get_logger
log = get_logger('eth.block_creation')
from ethereum.block import Block, BlockHeader
from ethereum.common import mk_block_from_prevstate, validate_header, \
verify_execution_results, validate_transaction_tree, \
set_execution_results, add_transactions, post_finalize
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.messages import apply_transaction
from ethereum.state import State
from ethereum.utils import sha3, encode_hex
import rlp
# Applies the block-level state transition function
def apply_block(state, block):
# Pre-processing and verification
snapshot = state.snapshot()
cs = get_consensus_strategy(state.config)
try:
# Start a new block context
cs.initialize(state, block)
# Basic validation
assert validate_header(state, block.header)
assert cs.check_seal(state, block.header)
assert cs.validate_uncles(state, block)
assert validate_transaction_tree(state, block)
# Process transactions
for tx in block.transactions:
apply_transaction(state, tx)
# Finalize (incl paying block rewards)
cs.finalize(state, block)
# Verify state root, tx list root, receipt root
assert verify_execution_results(state, block)
# Post-finalize (ie. add the block header to the state for now)
post_finalize(state, block)
except (ValueError, AssertionError) as e:
state.revert(snapshot)
raise e
return state
# Creates a candidate block on top of the given chain
def make_head_candidate(chain, txqueue=None,
parent=None,
|
timestamp=None,
coinbase='\x35'*20,
extra_data='moo ha ha says the laughing cow.',
min_gasprice=0):
    log.info('Creating head candidate')
if parent is None:
temp_state = State.from_snapshot(chain.state.to_snapshot(root_only=True), chain.env)
else:
temp_state = chain.mk_poststate_of_blockhash(parent.hash)
cs = get_consensus_strategy(chain.env.config)
# Initialize a block with the given parent and variables
blk = mk_block_from_prevstate(chain, temp_state, timestamp, coinbase, extra_data)
# Find and set the uncles
blk.uncles = cs.get_uncles(chain, temp_state)
blk.header.uncles_hash = sha3(rlp.encode(blk.uncles))
# Call the initialize state transition function
cs.initialize(temp_state, blk)
# Add transactions
add_transactions(temp_state, blk, txqueue, min_gasprice)
# Call the finalize state transition function
cs.finalize(temp_state, blk)
# Set state root, receipt root, etc
set_execution_results(temp_state, blk)
log.info('Created head candidate successfully')
return blk, temp_state
|
smokeyfeet/smokeyfeet-registration
|
src/smokeyfeet/registration/forms.py
|
Python
|
mit
| 2,149
| 0
|
from django import forms
from .models import PassType, Registration
class SignupForm(forms.ModelForm):
pass_type = forms.ModelChoiceField(
queryset=PassType.objects.filter(active=True),
widget=forms.widgets.RadioSelect(),
)
class Meta:
model = Registration
fields = (
"first_name",
"last_name",
"email",
"residing_country",
"dance_role",
"pass_type",
"workshop_partner_name",
"workshop_partner_email",
"lunch",
)
widgets = {
"dance_role": forms.widgets.RadioSelect(),
"lunch": forms.widgets.RadioSelect(),
}
class Media:
css = {"all": ("css/forms.css",)}
email_repeat = forms.EmailField()
agree_to_terms = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["pass_type"].empty_label = None
self.fields["lunch"].empty_lab
|
el = None
def clean_workshop_partner_email(self):
"""
Take care of uniqueness constraint ourselves
"""
email = self.cleaned_data.get("workshop_partner_email")
        qs = Registration.objects.filter(workshop_partner_email=email).exists()
if email and qs:
raise forms.ValidationError("Workshop parter already taken.")
return email
def clean_agree_to_terms(self):
data = self.cleaned_data["agree_to_terms"]
if data is False:
raise forms.ValidationError("You must agree to the terms.")
return data
def clean(self):
cleaned_data = super().clean()
email = cleaned_data.get("email")
email_repeat = cleaned_data.get("email_repeat")
ws_partner_email = cleaned_data.get("workshop_partner_email")
if email != email_repeat:
raise forms.ValidationError("Ensure email verfication matches.")
if email and ws_partner_email and email == ws_partner_email:
raise forms.ValidationError("You can't partner with yourself.")
|
jmankiewicz/odooAddons
|
hr_attendance_new_check/__openerp__.py
|
Python
|
agpl-3.0
| 958
| 0.003135
|
# -*- coding: utf-8 -*-
{
'name': "Better validation for Attendance",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
    'author': "Jörn Mankiewicz",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '8.0.0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr_attendance','hr_timesheet_improvement'],
|
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/hr_attendance.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
|
sebwink/deregnet
|
graphs/kegg/keggtranslator/bin/make_graphml_igraph_readable.py
|
Python
|
bsd-3-clause
| 169
| 0.011834
|
import sys
import networkx as nx
def main(graphml):
g = nx.read_graphml(graphml)
nx.write_graphml(g, graphml)
if __name__ == '__main__':
    main(sys.argv[1])
|
kmova/bootstrap
|
docker/py2docker/numpy-sum.py
|
Python
|
apache-2.0
| 323
| 0.009288
|
import numpy as np
# Example taken from : http://cs231n.github.io/python-numpy-tutorial/#numpy
x = np.array([[1,2],[3,4]])
print np.sum(x) # Compute sum of all elements; prints "10"
print np.sum(x, axis=0)  # Compute sum of each column; prints "[4 6]"
print np.sum(x, axis=1) # Compute sum of each row; prints "[3 7]"
|
dpaleino/bootchart2
|
pybootchartgui/batch.py
|
Python
|
gpl-3.0
| 1,604
| 0.006858
|
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
|
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
import draw
def render(writer, res, options, filename):
handlers = {
"png": (lambda w, h: cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h), \
lambda sfc: sfc.write_to_png(filename)),
"pdf": (lambda w, h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
"svg": (lambda w, h: cairo.SVGSurface(filename, w, h), lambda sfc: 0)
}
if options.format is None:
fmt = filename.rsplit('.', 1)[1]
else:
fmt = options.format
if not (fmt in handlers):
writer.error ("Unknown format '%s'." % fmt)
return 10
make_surface, write_surface = handlers[fmt]
(w, h) = draw.extents (1.0, *res)
w = max (w, draw.MIN_IMG_W)
surface = make_surface (w, h)
ctx = cairo.Context (surface)
draw.render (ctx, options, 1.0, *res)
write_surface (surface)
writer.status ("bootchart written to '%s'" % filename)
|
lmEshoo/st2contrib
|
packs/sensu/etc/st2_handler.py
|
Python
|
apache-2.0
| 4,880
| 0.002459
|
#!/usr/bin/env python
import httplib
try:
import simplejson as json
except ImportError:
import json
import os
import sys
from urlparse import urljoin
try:
import requests
except ImportError:
raise ImportError('Missing dependency "requests". Do ``pip install requests``.')
try:
import yaml
except ImportError:
raise ImportError('Missing dependency "pyyaml". Do ``pip install pyyaml``.')
# ST2 configuration
ST2_CONFIG_FILE = './config.yaml'
ST2_API_BASE_URL = 'http://localhost:9101/v1'
ST2_AUTH_BASE_URL = 'http://localhost:9100'
ST2_USERNAME = None
ST2_PASSWORD = None
ST2_AUTH_TOKEN = None
ST2_AUTH_PATH = 'tokens'
ST2_WEBHOOKS_PATH = 'webhooks/st2/'
ST2_TRIGGERS_PATH = 'triggertypes/'
ST2_TRIGGERTYPE_PACK = 'sensu'
ST2_TRIGGERTYPE_NAME = 'event_handler'
ST2_TRIGGERTYPE_REF = '.'.join([ST2_TRIGGERTYPE_PACK, ST2_TRIGGERTYPE_NAME])
REGISTERED_WITH_ST2 = False
OK_CODES = [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]
def _create_trigger_type():
try:
url = _get_st2_triggers_url()
payload = {
'name': ST2_TRIGGERTYPE_NAME,
'pack': ST2_TRIGGERTYPE_PACK,
'description': 'Trigger type for sensu event handler.'
}
# sys.stdout.write('POST: %s: Body: %s\n' % (url, payload))
headers = {}
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
post_resp = requests.post(url, data=json.dumps(payload), headers=headers)
except:
sys.stderr.write('Unable to register trigger type with st2.')
raise
else:
status = post_resp.status_code
if status not in OK_CODES:
sys.stderr.write('Failed to register trigger type with st2. HTTP_CODE: %d\n' %
status)
raise
else:
sys.stdout.write('Registered trigger type with st2.\n')
def _get_auth_url():
return urljoin(ST2_AUTH_BASE_URL, ST2_AUTH_PATH)
def _get_auth_token():
global ST2_AUTH_TOKEN
auth_url = _get_auth_url()
try:
resp = requests.post(auth_url, json.dumps({'ttl': 5 * 60}),
auth=(ST2_USERNAME, ST2_PASSWORD))
except:
raise Exception('Cannot get auth token from st2. Will try unauthed.')
else:
ST2_AUTH_TOKEN = resp.json()['token']
def _register_with_st2():
global REGISTERED_WITH_ST2
try:
url = urljoin(_get_st2_triggers_url(), ST2_TRIGGERTYPE_REF)
# sys.stdout.write('GET: %s\n' % url)
if not ST2_AUTH_TOKEN:
_get_auth_token()
if ST2_AUTH_TOKEN:
get_resp = requests.get(url, headers={'X-Auth-Token': ST2_AUTH_TOKEN})
else:
get_resp = requests.get(url)
if get_resp.status_code != httplib.OK:
_create_trigger_type()
else:
body = json.loads(get_resp.text)
if len(body) == 0:
_create_trigger_type()
except:
raise
else:
REGISTERED_WITH_ST2 = True
def _get_st2_triggers_url():
url = urljoin(ST2_API_BASE_URL, ST2_TRIGGERS_PATH)
return url
def _get_st2_webhooks_url():
url = urljoin(ST2_API_BASE_URL, ST2_WEBHOOKS_PATH)
return url
def _post_event_to_st2(url, body):
headers = {}
headers['X-ST2-Integration'] = 'sensu.'
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
try:
sys.stdout.write('POST: url: %s, body: %s\n' % (url, body))
r = requests.post(url, data=json.dumps(body), headers=headers)
except:
sys.stderr.write('Cannot connect to st2 endpoint.')
else:
status = r.status_code
if status not in OK_CODES:
sys.stderr.write('Failed posting sensu event to st2. HTTP_CODE: %d\n' % status)
else:
sys.stdout.write('Sent sensu event to st2. HTTP_CODE: %d\n' % status)
def main(args):
body = {}
body['trigger'] = ST2_TRIGGERTYPE_REF
|
body['payload'] = json.loads(sys.stdin.read().strip())
_post_event_to_st2(_get_st2_webhooks_url(), body)
if __name__ == '__main__':
try:
if not os.path.exists(ST2_CONFIG_FILE):
sys.stderr.write('Configuration file not found. Exiting.\n')
            sys.exit(1)
with open(ST2_CONFIG_FILE) as f:
config = yaml.safe_load(f)
ST2_USERNAME = config['st2_username']
ST2_PASSWORD = config['st2_password']
ST2_API_BASE_URL = config['st2_api_base_url']
ST2_AUTH_BASE_URL = config['st2_auth_base_url']
if not REGISTERED_WITH_ST2:
_register_with_st2()
except:
sys.stderr.write('Failed registering with st2. Won\'t post event.\n')
else:
main(sys.argv)
|
ykaneko/ryu
|
ryu/tests/unit/ofproto/test_ofproto_common.py
|
Python
|
apache-2.0
| 1,111
| 0
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
from ryu.ofproto.ofproto_common import *
LOG = logging.getLogger('test_ofproto_common')
class TestOfprotCommon(unittest.TestCase):
""" Test case for ofprotp_common
"""
def test_struct_ofp_header(self):
eq_(OFP_HEADER_PACK_STR, '!BBHI')
eq_(OFP_HEADER_SIZE, 8)
def test_define_constants(self):
eq_(OFP_TCP_PORT, 6633)
eq_(OFP_SSL_PORT, 6633)
|
ErickMurillo/ciat_plataforma
|
ficha_granos_basicos/views.py
|
Python
|
mit
| 17,583
| 0.05029
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from .models import *
from .forms import *
from comunicacion.lugar.models import *
from mapeo.models import *
from django.http import HttpResponse
from django.db.models import Sum, Count, Avg
import collections
import numpy as np
# Create your views here.
def _queryset_filtrado(request):
params = {}
if request.session['year']:
params['annio'] = request.session['year']
if request.session['municipio']:
params['productor__comunidad__municipio__in'] = request.session['municipio']
else:
if request.session['comunidad']:
params['productor__comunidad__in'] = request.session['comunidad']
if request.session['ciclo']:
params['ciclo_productivo'] = request.session['ciclo']
if request.session['rubro']:
params['cultivo'] = request.session['rubro']
if request.session['organizacion']:
params['productor__productor__organizacion'] = request.session['organizacion']
unvalid_keys = []
for key in params:
if not params[key]:
unvalid_keys.append(key)
for key in unvalid_keys:
del params[key]
return Monitoreo.objects.filter(**params)
def consulta(request,template="granos_basicos/consulta.html"):
if request.method == 'POST':
mensaje = None
form = Consulta(request.POST)
if form.is_valid():
request.session['year'] = form.cleaned_data['year']
request.session['municipio'] = form.cleaned_data['municipio']
request.session['comunidad'] = form.cleaned_data['comunidad']
request.session['ciclo'] = form.cleaned_data['ciclo']
request.session['rubro'] = form.cleaned_data['rubro']
request.session['organizacion'] = form.cleaned_data['organizacion']
mensaje = "Todas las variables estan correctamente :)"
request.session['activo'] = True
centinela = 1
else:
centinela = 0
else:
form = Consulta()
mensaje = "Existen alguno errores"
centinela = 0
try:
del request.session['year']
del request.session['municipio']
del request.session['comunidad']
del request.session['ciclo']
del request.session['rubro']
del request.session['organizacion']
except:
pass
return render(request, template, locals())
def genero_produccion(request,template="granos_basicos/productores/genero_produccion.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
CHOICE_SEXO = ((1,'Hombre'),(2,'Mujer'))
choice = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida'))
sexo_productor = {}
for obj in choice:
conteo = filtro.filter(productor__productor__jefe = obj[0]).distinct('productor').count()
sexo_productor[obj[1]] = conteo
if request.GET.get('jefe'):
jefe = request.GET['jefe']
if jefe == '1':
CHOICE_SEXO_JEFE = ((1,'Hombre'),)
elif jefe == '2':
CHOICE_SEXO_JEFE = ((2,'Mujer'),)
elif jefe == '3':
CHOICE_SEXO_JEFE = ((3,'Compartida'),)
else:
CHOICE_SEXO_JEFE = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida'))
RELACION_CHOICES = ((1,'Jefe/Jefa de familia'),(2,'Cónyuge'),
(3,'Hijo/Hija'),(4,'Otro familiar'),
(5,'Administrador'),)
prod_gb = {}
prod = {}
dic_relacion = {}
for obj in CHOICE_SEXO_JEFE:
for x in CHOICE_SEXO:
            # relationship between the heads of household
jefe_familia = filtro.filter(productor__sexo = x[0],productor__productor__jefe = obj[0]).distinct('productor').count()
prod[x[1]] = jefe_familia
for relacion in RELACION_CHOICES:
conteo = filtro.filter(productor__productorgranosbasicos__relacion = relacion[0],productor__productor__jefe = obj[0]).distinct('productor').count()
dic_relacion[relacion[1]] = conteo
for x in CHOICE_SEXO:
conteo = filtro.filter(productor__sexo = x[0]).distinct('productor').count()
prod_gb[x[1]] = conteo
return render(request, template, locals())
def composicion_familiar(request,template="granos_basicos/productores/composicion_familiar.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
    # new outputs
lista_hijos = []
lista_hijas = []
lista_sumatoria = []
for obj in filtro:
hijos = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '3').count()
lista_hijos.append(hijos)
hijas = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '4').count()
lista_hijas.append(hijas)
sumatoria = hijos + hijas
lista_sumatoria.append(sumatoria)
result = []
    # mean, median, standard deviation, minimum and maximum
promedios = [np.mean(lista_hijos),np.mean(lista_hijas),np.mean(lista_sumatoria)]
mediana = [np.median(lista_hijos),np.median(lista_hijas),np.median(lista_sumatoria)]
desviacion = [np.std(lista_hijos),np.std(lista_hijas),np.std(lista_sumatoria)]
minimo = [min(lista_hijos),min(lista_hijas),min(lista_sumatoria)]
maximo = [max(lista_hijos),max(lista_hijas),max(lista_sumatoria)]
    # appending to the list
result.append(promedios)
result.append(mediana)
result.append(desviacion)
result.append(minimo)
result.append(maximo)
    # chart: parents' educational level in the families
ESCOLARIDAD_CHOICES = (
(1,'Ninguno'),(2,'Primaria Incompleta'),(3,'Primaria'),
(4,'Secundaria Incompleta'),(5,'Secundaria'),(6,'Técnico'),
(7,'Universitario'),(8,'Profesional'))
escolaridad = collections.OrderedDict()
    for obj in ESCOLARIDAD_CHOICES:
madre = filtro.filter(productor__composicionfamiliar__familia = '2',
            productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
padre = filtro.filter(productor__composicionfamiliar__familia = '1',
productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
        # hijos (sons) --------------------
hijos_5_12 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijos_13_18 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijos_19 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
        # hijas (daughters) --------------------
hijas_5_12 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijas_13_18 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijas_19 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
escolaridad[obj[1]] = (madre,padre,
hijos_5_12,hijos_13_18,hijos_19,
hijas_5_12,hijas_13_18,hijas_19)
#--------------------------------------------------------------------------------
SI_NO_CHOICES = ((1,'Si'),(2,'No'))
FAMILIA_CHOICES = ((1,'Padre'),(2,'Madre'),(3,'Hijo'),(4,'Hija'),(5,'Hermano'),
(6,'Hermana'),(7,'Sobrino'),(8,'Sobrina'),(9,'Abuelo'),
(10,'Abuela'),(11,'Cuñado'),(12,'Cuñada'),(13,'Yerno'),
(14,'Nuera'),(15,'Otro'),)
list_participacion = []
for obj in FAMILIA_CHOICES:
total = filtro.filter(productor__composicionfamiliar__familia = obj[0]).distinct(
'productor__composicionfamiliar').count()
si_participa = filtro.filter(productor__composicionfamiliar__familia = obj[0],
productor__composicionfamiliar__participacion = '1').distinct(
'productor__composicionfamiliar').count()
promedio = total / float(productores)
promedio = round(p
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/embargo/tests/test_models.py
|
Python
|
agpl-3.0
| 12,890
| 0.001164
|
"""Tes
|
t of models for embargo app"""
import json
import pytest
import six
from django.db.utils import IntegrityError
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from ..models import (
Country,
CountryAccessRule,
CourseAccessRuleHistory,
|
EmbargoedCourse,
EmbargoedState,
IPFilter,
RestrictedCourse
)
class EmbargoModelsTest(CacheIsolationTestCase):
"""Test each of the 3 models in embargo.models"""
ENABLED_CACHES = ['default']
def test_course_embargo(self):
course_id = CourseLocator('abc', '123', 'doremi')
# Test that course is not authorized by default
assert not EmbargoedCourse.is_embargoed(course_id)
# Authorize
cauth = EmbargoedCourse(course_id=course_id, embargoed=True)
cauth.save()
# Now, course should be embargoed
assert EmbargoedCourse.is_embargoed(course_id)
assert six.text_type(cauth) == u"Course '{course_id}' is Embargoed".format(course_id=course_id)
# Unauthorize by explicitly setting email_enabled to False
cauth.embargoed = False
cauth.save()
# Test that course is now unauthorized
assert not EmbargoedCourse.is_embargoed(course_id)
assert six.text_type(cauth) == u"Course '{course_id}' is Not Embargoed".format(course_id=course_id)
def test_state_embargo(self):
# Azerbaijan and France should not be blocked
good_states = ['AZ', 'FR']
    # Gah block USA and Antarctica
blocked_states = ['US', 'AQ']
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in blocked_states + good_states:
assert state not in currently_blocked
# Block
cauth = EmbargoedState(embargoed_countries='US, AQ')
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
assert state not in currently_blocked
for state in blocked_states:
assert state in currently_blocked
# Change embargo - block Isle of Man too
blocked_states.append('IM')
cauth.embargoed_countries = 'US, AQ, IM'
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
assert state not in currently_blocked
for state in blocked_states:
assert state in currently_blocked
def test_ip_blocking(self):
whitelist = u'127.0.0.1'
blacklist = u'18.244.51.3'
cwhitelist = IPFilter.current().whitelist_ips
assert whitelist not in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert blacklist not in cblacklist
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
assert whitelist in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert blacklist in cblacklist
def test_ip_network_blocking(self):
whitelist = u'1.0.0.0/24'
blacklist = u'1.1.0.0/16'
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
assert u'1.0.0.100' in cwhitelist
assert u'1.0.0.10' in cwhitelist
assert u'1.0.1.0' not in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert u'1.1.0.0' in cblacklist
assert u'1.1.0.1' in cblacklist
assert u'1.1.1.0' in cblacklist
assert u'1.2.0.0' not in cblacklist
class RestrictedCourseTest(CacheIsolationTestCase):
"""Test RestrictedCourse model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
restricted_course = RestrictedCourse.objects.create(course_key=course_id)
assert six.text_type(restricted_course) == six.text_type(course_id)
def test_restricted_course_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
RestrictedCourse.objects.create(course_key=course_id)
# Warm the cache
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
assert not RestrictedCourse.is_disabled_access_check(course_id)
# add a new course so the cache gets invalidated and the db is hit again
new_course_id = CourseLocator('def', '123', 'doremi')
RestrictedCourse.objects.create(course_key=new_course_id, disable_access_check=True)
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
assert RestrictedCourse.is_disabled_access_check(new_course_id)
# deleting an object also invalidates the cache, so the next
# is_restricted_course lookup hits the db
abc = RestrictedCourse.objects.get(course_key=new_course_id)
abc.delete()
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
class CountryTest(TestCase):
"""Test Country model. """
def test_unicode_values(self):
country = Country.objects.create(country='NZ')
assert six.text_type(country) == 'New Zealand (NZ)'
class CountryAccessRuleTest(CacheIsolationTestCase):
"""Test CountryAccessRule model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Whitelist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
course_id = CourseLocator('def', '123', 'doremi')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Blacklist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
def test_unique_together_constraint(self):
"""
A course with a specific country can be added either as whitelist or blacklist;
trying to add it with both rule types will raise an IntegrityError
"""
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
with pytest.raises(IntegrityError):
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
def test_country_access_list_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_7/ar_/test_artificial_32_Anscombe_Lag1Trend_7__100.py
|
Python
|
bsd-3-clause
| 263
| 0.087452
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
|
theopak/storytellingbot
|
Extrapolate/Extrapolate.py
|
Python
|
mit
| 7,576
| 0.005148
|
#!/usr/bin/python3
#import nltk
#import pattern.en
from nltk import word_tokenize
from nltk import pos_tag
from nltk.corpus import wordnet
#import nltk.fuf.linearizer
from nltk.stem.wordnet import WordNetLemmatizer as wnl
from re import sub
import string
import random
from .genderPredictor import genderPredictor
#nltk.download()
# nltk downloads: maxent_ne_chunker, maxent_treebank_pos_tagger, punkt, wordnet
# install numpy
# WordNetLemmatizer().lemmatize(word,'v')
class Extrapolate:
def __init__(self):
self.sent_syns = []
print("Setting up Gender Predictor: ")
self.gp = genderPredictor.genderPredictor()
accuracy = self.gp.trainAndTest()
# print("Accuracy:", accuracy)
# print ('Most Informative Features')
feats = self.gp.getMostInformativeFeatures(10)
# for feat in feats:
# print (feat)
def change_gender(self, pnoun, gender):
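# Swap a gendered pronoun (her/him, she/he, ...) to the form matching the predicted gender; other words pass through unchanged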
pnlist = [(("her", "F"), ("him", "M")),
(("she", "F"), ("he", "M")),
(("hers", "F"), ("his", "M")),
(("herself", "F"), ("himself", "M"))]
for pair in pnlist:
for pi, p in enumerate(pair):
if p[0] == pnoun and p[1] != gender:
return pair[(pi-1)%2][0]
else:
return pnoun
def find_synonyms(self, w, wpos):
syn_words = []
synsets = wordnet.synsets(w, pos=wpos)
for s in synsets:
for l in s.lemmas():
syn_words.append(l.name())
return syn_words
def replace_synonyms(self, o_sent, n_sent):
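# Map synonyms in the found sentence back to the corresponding words of the original sentence, using the per-index lists built in extrapolate()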
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
for n in n_tagged:
for sdx, syn_list in enumerate(self.sent_syns):
for syn in syn_list:
if (n[0] == syn):
n_sent = sub(r"\b%s\b" %n[0], o_tagged[sdx][0], n_sent)
return n_sent
def replace_proper_nouns(self, o_sent, n_sent):
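# Substitute the input sentence's proper noun for pronouns/proper nouns in the found sentence and adjust pronoun gender with the name classifier (only the single-proper-noun case is handled so far)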
proper_nouns = []
p_pnouns = []
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
# print("\nTransforming the output:")
# print("Input sentence:", o_sent)
# print("Found sentence:", n_sent)
# print("Input sentence tagged:", o_tagged)
# print("Found sentence tagged:", n_tagged)
for o in o_tagged:
if o[1] == 'NNP' and o not in proper_nouns:
proper_nouns.append(o)
for n in n_tagged:
if (n[1] == 'PRP' or n[1] == 'PRP$' or n[1] == 'NNP') and n not in p_pnouns:
p_pnouns.append(n)
# print("")
if (len(proper_nouns) == 1) and (len(p_pnouns) > 0):
n_sent = sub(r"\b%s\b" %p_pnouns[0][0] , proper_nouns[0][0], n_sent, 1)
gender = self.gp.classify(proper_nouns[0][0])
# print(proper_nouns[0][0], "is classified as", gender)
for pnoun in p_pnouns:
n_pnoun = self.change_gender(pnoun[0], gender)
n_sent = sub(r"\b%s\b" %pnoun[0] , n_pnoun, n_sent)
elif len(proper_nouns) < 1:
print("No proper nouns to replace")
else:
print("Not yet implemented, :P")
return n_sent
def transform(self, o_sent, n_sent):
n_sent = self.replace_proper_nouns(o_sent, n_sent)
n_sent = self.replace_synonyms(o_sent, n_sent)
return(n_sent)
def strip_pos_copy(self, tag):
new_tag = []
for item in tag:
new_tag.append(item[0])
return new_tag
def extrapolate(self, sent):
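# Build a list of candidate search sentences by swapping each noun/verb/adjective for its WordNet synonyms, one substitution at a time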
# tags the part of speech in each word
tagged = pos_tag(word_tokenize(sent))
tag_list = []
for item in tagged:
tag_list.append(list(item))
# puts nouns and verbs in their base form
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
tag_list[idx][0] = wnl().lemmatize(item[0],'v')
elif item[1] == 'NN' or item[1] == 'NNS':
tag_list[idx][0] = wnl().lemmatize(item[0],'n')
synonyms = [[] for i in range(len(tag_list))]
# finds synonyms for each noun, verb, adj in tag_list -> puts them at the corresponding index in synonyms
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
synonyms[idx] = self.find_synonyms(item[0], wordnet.VERB)
#for v in synonyms[idx]:
# v = en.verb.past(v)
elif item[1] == 'NN' or item[1] == 'NNS':
synonyms[idx] = self.find_synonyms(item[0], wordnet.NOUN)
elif item[1][0] == 'J':
synonyms[idx] = self.find_synonyms(item[0], wordnet.ADJ)
# gets rid of duplicates
for si, s in enumerate(synonyms):
s = list(set(s))
# print(tag_list[si][0], ": ", s)
self.sent_syns = synonyms
search_sent = []
# creates a list of similar sentences to search for
for idx, item in enumerate(tag_list):
# looks for synonyms at the corresponding index
for s in synonyms[idx]:
temp = sub(r"\b%s\b" %item[0], s, sent)
search_sent.append(temp)
# will get rid of duplicates once i make it hashable
search_sent = list(set(search_sent))
# print("\nSample list of synonymous sentences:")
# for i in range(min(len(search_sent), 20)):
# print(search_sent[i])
return search_sent
if __name__ == '__main__':
#list of pretend sentences to search through
sent_list = []
sent_list.append("She danced with the prince and they fall in love.")
sent_list.append("The emperor realized he was swindled but continues the parade anyway.")
sent_list.append("He and his wife were very poor.")
sent_list.append("She promised anything if he would get it for her. ")
sent_list.append("The bears came home and frightened her and she ran away.")
sent_list.append("They came upon a house made of sweets and they ate some. ")
sent_list.append("He climbed the beanstalk and found a giant there who had gold coins that he wanted. ")
sent_list.append("The rats follow him and he led them into the harbor and they die.")
sent_list.append("He begged to be spared and told him about his poor father.")
sent_list.append("The two were married and live happily everafter.")
sent_list.append("The good fairies made another spell so that she would only sleep for 100 years and a prince would awaken her. ")
sent_list.append("The stepmother ordered her to be killed but the huntsman spared her life.")
sent_list.append("The wolf fell into it and died.")
sent_list.append("A fairy granted her wish and gave her a seed to plant. ")
sent_list.append("He decided to run away and came across a cottage. ")
#instantiating extrapolate class, TAKES NOTHING
e = Extrapolate()
#expected input from storytellingbot
o_sent = "Elsa took a sharp sword to slay the monster"
print("\nInput:" + o_sent)
#o_sent = input("Enter a sentence: ")
search_sent = e.extrapolate(o_sent)
index = 6
#index = random.randint(0, len(sent_list)-1)
print("\nTest index: "+ str(index+1))
#index = int(input("Enter a number between 1 and "+str(len(sent_list))+": "))-1
#print(sent_list[index])
output = e.transform(o_sent, sent_list[index])
print(output)
#this would be the post
|
martyone/sailfish-qtcreator
|
tests/system/suite_SCOM/tst_SCOM01/test.py
|
Python
|
lgpl-2.1
| 3,047
| 0.009846
|
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
# entry of test
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
# create qt quick application
checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp")
# build it - on all build configurations
availableConfigs = iterateBuildConfigs(len(checkedTargets))
if not availableConfigs:
test.fatal("Haven't found a suitable Qt version - leaving without building.")
for kit, config in availableConfigs:
selectBuildConfig(len(checkedTargets), kit, config)
# try to compile
test.log("Testing build configuration: " + config)
clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton"))
waitForCompile()
# check output if build successful
ensureChecked(waitForObject(":Qt Creator_CompileOutput_Core::Internal::OutputPaneToggleButton"))
waitFor("object.exists(':*Qt Creator.Cancel Build_QToolButton')", 20000)
cancelBuildButton = findObject(':*Qt Creator.Cancel Build_QToolButton')
waitFor("not cancelBuildButton.enabled", 30000)
compileOutput = waitForObject(":Qt Creator.Compile Output_Core::OutputWindow")
if not test.verify(compileSucceeded(compileOutput.plainText),
"Verifying building of simple qt quick application."):
test.log(compileOutput.plainText)
# exit qt creator
invokeMenuItem("File", "Exit")
|
vphill/eot-cdx-analysis
|
code/cdx_extract_date.py
|
Python
|
cc0-1.0
| 524
| 0
|
from __future__ import print_function
import fileinput
from datetime import datetime
for line in fileinput.input():
line = line.rstrip()
# This condition removes CDX header lines
if line[0] == ' ':
continue
# Extract just the timestamp from line
timestamp = line.split(' ', 2)[1]
# Datetime format in CDX is 20121125005312
date_object = datetime.strptime(timestamp, '%Y%m%d%H%M%S')
# print(date_object.strftime('%Y-%m-%d'))
print(date_object.strftime('%Y-%m-%d %H:%M:%S'))
|
uliwebext/uliweb-redbreast
|
test/test_core_utils.py
|
Python
|
bsd-2-clause
| 2,255
| 0.010643
|
from redbreast.core.utils import *
import pytest
from uliweb import manage, functions
import os
def test_import():
a = CommonUtils.get_class('redbreast.core.spec.TaskSpec')
assert str(a) == "<class 'redbreast.core.spec.task.TaskSpec'>"
def test_import_not_exist():
a = CommonUtils.get_class('redbreast.core.spec.NotExistSpec')
assert str(a) == 'None'
def test_import_error():
with pytest.raises(ImportError):
a = CommonUtils.get_class('not.exist.module.NotExistSpec')
class TestUtilInProject(object):
def setup(self):
locate_dir = os.path.dirname(__file__)
os.chdir(locate_dir)
os.chdir('test_project')
import shutil
shutil.rmtree('database.db', ignore_errors=True)
manage.call('uliweb syncdb')
manage.call('uliweb syncspec')
from uliweb.manage import make_simple_application
app = make_simple_application(apps_dir='./apps')
def teardown(self):
import shutil
shutil.rmtree('database.db', ignore_errors=True)
def test_import(self):
maps = {
'simple_task' : 'redbreast.core.spec.task.SimpleTask',
'join_task' : 'redbreast.core.spec.task.JoinTask',
'choice_task' : 'redbreast.core.spec.task.ChoiceTask',
'split_task' : 'redbreast.core.spec.task.SplitTask',
'multichocie_task' : 'redbreast.core.spec.task.MultiChoiceTask',
'auto_simple_task' : 'redbreast.core.spec.task.AutoSimpleTask',
'auto_join_task' : 'redbreast.core.spec.task.AutoJoinTask',
'auto_choice_task' : 'redbreast.core.spec.task.AutoChoiceTask',
'auto_split_task' : 'redbreast.core.spec.task.AutoSplitTask',
'auto_multichoice_task' :'redbreast.core.spec.task.AutoMultiChoiceTask',
}
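# get_spec should resolve both the registered task alias and the bare class name to the same spec class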
for spec in maps:
a = CommonUtils.get_spec(spec)
assert str(a) == "<class '%s'>" % maps[spec]
spec1 = maps[spec].replace("redbreast.core.spec.task.", "")
b = CommonUtils.get_spec(spec1)
assert str(b) == "<class '%s'>" % maps[spec]
|
jniediek/mne-python
|
mne/viz/tests/test_evoked.py
|
Python
|
bsd-3-clause
| 7,380
| 0.000136
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import read_events, Epochs, pick_types, read_cov
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import slow_test, run_tests_if_main
from mne.viz.evoked import _butterfly_onselect, plot_compare_evokeds
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
"""Get epochs."""
raw = _get_raw()
raw.add_proj([], remove_existing=True)
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
picks[0] = 2 # make sure we have a magnetometer
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), add_eeg_ref=False)
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject,
add_eeg_ref=False)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
"""Test plotting of evoked."""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors & zorder
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,
zorder='std')
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
plt.close('all')
# test GFP only
evoked.plot(gfp='only')
assert_raises(ValueError, evoked.plot, gfp='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads', cmap='interactive')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
evoked.plot_topo() # should auto-find layout
_butterfly_onselect(0, 200, ['mag', 'grad'], evoked)
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
evoked.plot_white(cov)
evoked.plot_white([cov, cov])
# plot_compare_evokeds: test condition contrast, CI, color assignment
plot_compare_evokeds(evoked.copy().pick_types(meg='mag'))
evoked.rename_channels({'MEG 2142': "MEG 1642"})
assert len(plot_compare_evokeds(evoked)) == 2
colors = dict(red='r', blue='b')
linestyles = dict(red='--', blue='-')
red, blue = evoked.copy(), evoked.copy()
red.data *= 1.1
blue.data *= 0.9
plot_compare_evokeds([red, blue], picks=3) # list of evokeds
plot_compare_evokeds([[red, evoked], [blue, evoked]],
picks=3) # list of lists
# test picking & plotting grads
contrast = dict()
contrast["red/stim"] = list((evoked.copy(), red))
contrast["blue/stim"] = list((evoked.copy(), blue))
# test a bunch of params at once
plot_compare_evokeds(contrast, colors=colors, linestyles=linestyles,
picks=[0, 2], vlines=[.01, -.04], invert_y=True,
truncate_yaxis=False, ylim=dict(mag=(-10, 10)),
styles={"red/stim": {"linewidth": 1}})
assert_raises(ValueError, plot_compare_evokeds,
contrast, picks='str') # bad picks: not int
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
colors=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, [[1, 2], [3, 4]],
picks=3) # evoked must contain Evokeds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(err=1)) # bad styles dict
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
gfp=True) # no single-channel GFP
assert_raises(TypeError, plot_compare_evokeds, evoked, picks=3,
ci='fake') # ci must be float or None
contrast["red/stim"] = red
contrast["blue/stim"] = blue
plot_compare_evokeds(contrast, picks=[0], colors=['r', 'b'],
ylim=dict(mag=(1, 10)))
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
evoked_sss.info['proc_history'] = [dict(max_info=None)]
evoked_sss.plot_white(cov)
evoked_sss.plot_white(cov_fname)
plt.close('all')
evoked.plot_sensors() # Test plot_sensors
plt.close('all')
run_tests_if_main()
|
hippke/TTV-TDV-exomoons
|
create_figures/system_22.py
|
Python
|
mit
| 7,952
| 0.005659
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.59293316 * 10**9
fourthmoon = Body()
fourthmoon.mass = M_gan
fourthmoon.px = 0.77696224 * 10**9
fithmoon = Body()
fithmoon.mass = M_gan
fithmoon.px = 1.23335068 * 10**9
# Calculate start velocities
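# Vis-viva for a circular orbit: v = sqrt(G*M*(2/r - 1/a)) with r = a, so the bracket below reduces to 1/r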
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
fourthmoon.vy = math.sqrt(G * planet.mass * (2 / fourthmoon.px - 1 / fourthmoon.px))
fithmoon.vy = math.sqrt(G * planet.mass * (2 / fithmoon.px - 1 / fithmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
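# Each moon offsets the planet from the local barycenter by (m_moon / m_planet) * a_moon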
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
gravity_fourthmoon = (fourthmoon.mass / planet.mass) * fourthmoon.px
gravity_fithmoon = (fithmoon.mass / planet.mass) * fithmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon + gravity_fourthmoon + gravity_fithmoon)
# Use the outermost moon to calculate the length of one full orbit duration
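# Kepler's third law: T = 2*pi*sqrt(a^3 / (G*(m_moon + m_planet)))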
orbit_duration = math.sqrt((4 * math.pi**2 *fithmoon.px ** 3) / (G * (fithmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.002
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon, fourthmoon, fithmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.11, +0.11)
plt.ylim(-0.8, +0.8)
plt.annotate(r"5:4:3:2:1", xy=(-0.105, +0.7), size=16)
plt.savefig("fig_system_22.eps", bbox_inches = 'tight')
|
schreiberx/sweet
|
mule/platforms/50_ppeixoto_usp_gnu/JobPlatform.py
|
Python
|
mit
| 4,663
| 0.016299
|
import platform
import socket
import sys
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
import multiprocessing
# Underscore defines symbols to be private
_job_id = None
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
def p_gen_script_info(jg : JobGeneration):
global _job_id
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+_job_id+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "ppeixoto_usp_gnu"
def get_platform_resources():
"""
Return information about hardware
"""
h = JobPlatformResources()
h.num_cores_per_node = multiprocessing.cpu_count()
h.num_nodes = 1
# TODO: So far, we only assume a single socket system as a fallback
h.num_cores_per_socket = h.num_cores_per_node
return h
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
global _job_id
_job_id = jg.runtime.getUniqueID(jg.compile)
return
def jobscript_get_header(jg : JobGeneration):
"""
These headers typically contain information on e.g. job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
content = """#! /bin/bash
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
export OMP_DISPLAY_ENV=VERBOSE
"""
if p.core_oversubscription:
raise Exception("Not supported with this script!")
else:
if p.core_affinity != None:
content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
if p.core_affinity == 'compact':
content += "source $MULE_ROOT/platforms/bin/setup_omp_places.sh nooversubscription close\n"
#content += "\nexport OMP_PROC_BIND=close\n"
elif p.core_affinity == 'scatter':
raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
content += "\nexport OMP_PROC_BIND=spread\n"
else:
raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
content += "\n"
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
Job execution command for the executable
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
content = """
"""+p_gen_script_info(jg)+"""
# mpiexec ... would be here without a line break
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
echo \"${EXEC} ${PARAMS}\"
"""
if jg.compile.sweet_mpi == 'enable':
content += 'mpiexec -n '+str(p.num_ranks)+' '
content += "$EXEC $PARAMS || exit 1"
content += "\n"
content += "\n"
return content
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
Suffix after executable
Returns
-------
string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = """
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
content = """
SCONS="scons """+jg.compile.getSConsParams()+' -j 4"'+"""
echo "$SCONS"
$SCONS || exit 1
"""
return content
|