| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| garciparedes/python-examples | web/django/graphene/graphene-quickstart/lesson-02-enums.py | Python | mpl-2.0 | 332 | 0 |
#!/usr/bin/env python3
"""
URL: http://docs.graphene-python.org/en/latest/types/enums/
"""
import graphene
class Episode(graphene.Enum):
NEWHOPE = 4
EMPIRE = 5
JEDI = 6
@property
def description(self):
if self == Episode.NEWHOPE:
return 'New Hope Episode'
return 'Other episode'
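For context, a minimal sketch of how an enum like Episode can be exposed through a graphene schema; the Query type, field name, and resolver below are illustrative assumptions, not part of the sampled file:

```python
import graphene

class Query(graphene.ObjectType):
    # hypothetical field that returns the Episode enum defined above
    best_episode = graphene.Field(Episode)

    def resolve_best_episode(root, info):
        return Episode.EMPIRE

schema = graphene.Schema(query=Query)
result = schema.execute("{ bestEpisode }")
print(result.data)  # expected: {'bestEpisode': 'EMPIRE'}
```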
| KaiSzuttor/espresso | samples/p3m.py | Python | gpl-3.0 | 5,654 | 0.000531 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate a Lennard-Jones liquid with charges. The P3M method is used to
calculate electrostatic interactions.
"""
import numpy as np
import espressomd
required_features = ["P3M", "WCA"]
espressomd.assert_features(required_features)
from espressomd import electrostatics
import argparse
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--cpu", action="store_const", dest="mode",
const="cpu", help="P3M on CPU", default="cpu")
group.add_argument("--gpu", action="store_const", dest="mode",
const="gpu", help="P3M on GPU")
args = parser.parse_args()
print("""
=======================================================
= p3m.py =
=======================================================
""")
# System parameters
#############################################################
box_l = 10
density = 0.3
# Interaction parameters (repulsive Lennard-Jones)
#############################################################
wca_eps = 10.0
wca_sig = 1.0
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.01
system.cell_system.skin = 0.4
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 30
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * wca_sig
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps, sigma=wca_sig)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].wca.get_params())
# Particle setup
#############################################################
volume = box_l**3
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
# Assign charges to particles
for i in range(n_part // 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# P3M setup after charge assignment
#############################################################
print("\nSCRIPT--->Create p3m\n")
if args.mode == "gpu":
p3m = electrostatics.P3MGPU(prefactor=2.0, accuracy=1e-2)
else:
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
print("\nSCRIPT--->Add actor\n")
system.actors.add(p3m)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print("\nSCRIPT--->Explicit tune call\n")
p3m.tune(accuracy=1e-3)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print(system.actors)
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=wca_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(warm_steps)
i += 1
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# Just to see what else we may get from the C++ core
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
#############################################################
# Integration #
#############################################################
print("\nStart integration: run {} times {} steps"
.format(int_n_times, int_steps))
for i in range(int_n_times):
print("run {} at time={:.2f}".format(i, system.time))
system.integrator.run(int_steps)
energies = system.analysis.energy()
print(energies['total'])
# terminate program
print("\nFinished.")
| HailStorm32/Q.bo_stacks | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/msg/_Pose3DOF.py | Python | lgpl-2.1 | 5,992 | 0.019526 |
"""autogenerated by genpy from hrl_lib/Pose3DOF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Pose3DOF(genpy.Message):
_md5sum = "646ead44a0e6fecf4e14ca116f12b08b"
_type = "hrl_lib/Pose3DOF"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
float64 x
float64 y
float64 theta
float64 dt
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','x','y','theta','dt']
_slot_types = ['std_msgs/Header','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,x,y,theta,dt
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Pose3DOF, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
if self.dt is None:
self.dt = 0.
else:
self.header = std_msgs.msg.Header()
self.x = 0.
self.y = 0.
self.theta = 0.
self.dt = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_4d = struct.Struct("<4d")
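A minimal serialization round trip for a generated genpy message like Pose3DOF, assuming a ROS Python environment where genpy and std_msgs are importable; the field values are arbitrary:

```python
from io import BytesIO

# construct with keyword arguments, as the constructor docstring recommends
msg = Pose3DOF(x=1.0, y=2.0, theta=0.5, dt=0.1)

# serialize into a byte buffer
buff = BytesIO()
msg.serialize(buff)

# deserialize the bytes back into a fresh message instance
copy = Pose3DOF()
copy.deserialize(buff.getvalue())
assert (copy.x, copy.y, copy.theta, copy.dt) == (1.0, 2.0, 0.5, 0.1)
```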
| RedHatInsights/insights-core | insights/parsers/tests/test_parsers_module.py | Python | apache-2.0 | 24,552 | 0.002403 |
import pytest
from collections import OrderedDict
from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table,
split_kv_pairs, unsplit_lines, ParseException, SkipException)
SPLIT_TEST_1 = """
# Comment line
keyword1 = value1 # Inline comments
# Comment indented
keyword3 # Key with no separator
keyword2 = value2a=True, value2b=100M
""".strip()
SPLIT_TEST_1_OD = OrderedDict([
('keyword1', 'value1'),
('keyword3', ''),
('keyword2', 'value2a=True, value2b=100M')
])
SPLIT_TEST_2 = """
@ Comment line
keyword1: value1 @ Inline comments
keyword2 : value2a=True, value2b=100M
@ Comment indented
keyword3 @ Key with no separator
""".strip()
OFFSET_CONTENT_1 = """
data 1 line
data 2 line
""".strip()
OFFSET_CONTENT_2 = """
#
Warning line
Error line
data 1 line
data 2 line
Trailing line
Blank line above
Another trailing line
Yet another trailing line
Yet yet another trailing line
""".strip()
def test_split_kv_pairs():
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines())
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True, ordered=True)
assert len(kv_pairs) == 3
assert kv_pairs == SPLIT_TEST_1_OD
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':')
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
SPLIT_LINES = """
Line one
Line two part 1 \\
line two part 2\\
line two part 3
Line three
""".strip()
SPLIT_LINES_2 = """
Line one
Line two part 1 ^
line two part 2^
line two part 3
Line three^
""".strip()
SPLIT_LINES_3 = """
web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue,
RHN::Task::ErrataEngine,
RHN::Task::DailySummary, RHN::Task::SummaryPopulation,
RHN::Task::RHNProc,
RHN::Task::PackageCleanup
db_host ="""
def test_unsplit_lines():
lines = list(unsplit_lines(SPLIT_LINES.splitlines()))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three'
lines = list(unsplit_lines(SPLIT_LINES_2.splitlines(), cont_char='^'))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three' # test continuation on last line
# Test keeping continuation character on line
lines = list(unsplit_lines(
SPLIT_LINES_3.splitlines(), cont_char=',', keep_cont_char=True
))
assert len(lines) == 4
assert lines[0] == ''
assert lines[1] == 'web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup'
assert lines[2] == ''
assert lines[3] == 'db_host ='
def test_calc_offset():
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[None]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data ']) == 0
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(), target=['xdata '])
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['data '],
invert_search=True)
assert calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['Trailing', 'Blank', 'Another '],
invert_search=True) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data ']) == 3
assert calc_offset(reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2']) == 3
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2'],
require_all=True) == 4
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True,
require_all=True) == 6
FIXED_CONTENT_1 = """
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1A = """
WARNING
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1B = """
Column1 Column2 Column3
data1 data 2
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_2 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_3 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_4 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column 2 Column 3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
data10
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_5 = """
Column1 Column 2 Column 3
data1 data 2 data 3
data 7 data 9
data10
""".strip()
FIXED_CONTENT_DUP_HEADER_PREFIXES = """
NAMESPACE NAME LABELS
default foo app=superawesome
""".strip()
def test_parse_fixed_table():
data = parse_fixed_table(FIXED_CONTENT_1.splitlines())
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1A.splitlines(), heading_ignore=['Column1 '])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1B.splitlines())
assert len(data) == 3
assert data[0]
| prophile/compd | src/screen_db.py | Python | mit | 2,580 | 0.005039 |
"""Screen database."""
import redis_client
import control
import re
from twisted.internet import defer
class ScreenDB(object):
"""A screen database."""
def __init__(self):
"""Default constructor."""
pass
def set_mode(self, screen, mode):
redis_client.connection.set('screen:{0}:mode'.format(screen),
mode)
redis_client.connection.publish('screen:update', 'update')
def set_override(self, screen, override):
if override is not None:
redis_client.connection.set('screen:{0}:override'.format(screen),
override)
else:
redis_client.connection.delete('screen:{0}:override'.format(screen))
redis_client.connection.publish('screen:update', 'update')
@defer.inlineCallbacks
def list(self):
screens = yield redis_client.connection.keys('screen:*:mode')
entries = {}
for screen in screens:
screenID = screen.split(':')[1]
mode = yield redis_client.connection.get('screen:{0}:mode'.format(screenID))
host = yield redis_client.connection.get('screen:{0}:host'.format(screenID))
entries[screenID] = {'mode': mode,
'host': host}
defer.returnValue(entries)
screens = ScreenDB()
@control.handler('screen-list')
@defer.inlineCallbacks
def perform_screen_list(responder, options):
screen_list = yield screens.list()
for screen, settings in screen_list.iteritems():
if settings['host'] is None:
online_string = 'offline'
else:
online_string = 'online from {0} port {1}'.format(*settings['host'].split(' '))
responder('{0} - {1} ({2})'.format(screen,
settings['mode'],
online_string))
@control.handler('screen-set-mode')
def perform_screen_set_mode(responder, options):
screens.set_mode(options['<id>'], options['<mode>'])
responder('Mode set.')
@control.handler('screen-override')
def perform_screen_override(responder, options):
screens.set_override(options['<id>'], options['<message>'])
responder('Override set.')
@control.handler('screen-clear-override')
def perform_screen_clear_override(responder, options):
screens.set_override(options['<id>'], None)
responder('Override cleared.')
def got_screen(name):
control.broadcast('Screen connected: {0}'.format(name))
redis_client.add_subscribe('screen:connect', got_screen)
| saechtner/turn-events | Turnauswertung-py3/common/views.py | Python | mit | 2,597 | 0.004621 |
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render, redirect
from django.views import generic
from common.models import Discipline, Performance
# renders index / home page
def index(request):
return redirect(reverse('tournaments.main'))
# renders todo page
def process(request):
return render(request, 'gymnastics/process.html', None)
def performances_index(request):
context = { 'performances': Performance.objects.all() \
.select_related('athlete').select_related('discipline') }
return render(request, 'gymnastics/performances/index.html', context)
class PerformanceCreateView(generic.CreateView):
model = Performance
fields = ['athlete', 'discipline', 'value', 'value_final']
template_name = 'gymnastics/performances/new.html'
success_url = reverse_lazy('performances.index')
class PerformanceDetailView(generic.DetailView):
model = Performance
template_name = 'gymnastics/performances/detail.html'
class PerformanceUpdateView(generic.UpdateView):
model = Performance
fields = ['athlete', 'discipline', 'value', 'value_final']
template_name = 'gymnastics/performances/edit.html'
def get_success_url(self):
some_kwargs = self.kwargs
return reverse('performances.detail', kwargs = { 'pk' : self.kwargs['pk'] })
class PerformanceDeleteView(generic.DeleteView):
model = Performance
template_name = 'gymnastics/performances/delete.html'
success_url = reverse_lazy('performances.index')
def disciplines_index(request):
context = { 'disciplines': Discipline.objects.all() }
return render(request, 'gymnastics/disciplines/index.html', context)
def discipline_detail(request, id, slug):
discipline = Discipline.objects.get(id=id)
streams = discipline.stream_set.all()
performances = discipline.performance_set.all().select_related('athlete')
context = {
'discipline': discipline,
'streams': streams,
'performances': performances
}
return render(request, 'gymnastics/disciplines/detail.html', context)
class DisciplineCreateView(generic.CreateView):
model = Discipline
fields = ['name']
template_name = 'gymnastics/disciplines/new.html'
class DisciplineUpdateView(generic.UpdateView):
model = Discipline
fields = ['name']
template_name = 'gymnastics/disciplines/edit.html'
class DisciplineDeleteView(generic.DeleteView):
model = Discipline
template_name = 'gymnastics/disciplines/delete.html'
success_url = reverse_lazy('disciplines.index')
| stev-0/bustimes.org.uk | busstops/management/commands/import_ie_naptan_xml.py | Python | mpl-2.0 | 4,087 | 0.003181 |
"""Import an Irish NaPTAN XML file, obtainable from
https://data.dublinked.ie/dataset/national-public-transport-nodes/resource/6d997756-4dba-40d8-8526-7385735dc345
"""
import warnings
import zipfile
import xml.etree.cElementTree as ET
from django.contrib.gis.geos import Point
from django.core.management.base import BaseCommand
from ...models import Locality, AdminArea, StopPoint
class Command(BaseCommand):
ns = {'naptan': 'http://www.naptan.org.uk/'}
@staticmethod
def add_arguments(parser):
parser.add_argument('filenames', nargs='+', type=str)
def handle_stop(self, element):
stop = StopPoint(
atco_code=element.find('naptan:AtcoCode', self.ns).text,
locality_centre=element.find('naptan:Place/naptan:LocalityCentre', self.ns).text == 'true',
active=element.get('Status') == 'active',
)
for subelement in element.find('naptan:Descriptor', self.ns):
tag = subelement.tag[27:]
if tag == 'CommonName':
stop.common_name = subelement.text
elif tag == 'Street':
stop.street = subelement.text
elif tag == 'Indicator':
stop.indicator = subelement.text.lower()
else:
warnings.warn('Stop {} has an unexpected property: {}'.format(stop.atco_code, tag))
stop_classification_element = element.find('naptan:StopClassification', self.ns)
stop_type = stop_classification_element.find('naptan:StopType', self.ns).text
if stop_type != 'class_undefined':
stop.stop_type = stop_type
bus_element = stop_classification_element.find('naptan:OnStreet/naptan:Bus', self.ns)
if bus_element is not None:
stop.bus_stop_type = bus_element.find('naptan:BusStopType', self.ns).text
stop.timing_status = bus_element.find('naptan:TimingStatus', self.ns).text
compass_point_element = bus_element.find(
'naptan:MarkedPoint/naptan:Bearing/naptan:CompassPoint', self.ns
)
if compass_point_element is not None:
stop.bearing = compass_point_element.text
if stop.bus_stop_type == 'type_undefined':
stop.bus_stop_type = ''
place_element = element.find('naptan:Place', self.ns)
location_element = place_element.find('naptan:Location', self.ns)
longitude_element = location_element.find('naptan:Longitude', self.ns)
latitude_element = location_element.find('naptan:Latitude', self.ns)
if longitude_element is None:
warnings.warn('Stop {} has no location'.format(stop.atco_code))
else:
stop.latlong = Point(float(longitude_element.text), float(latitude_element.text))
admin_area_id = element.find('naptan:AdministrativeAreaRef', self.ns).text
if not AdminArea.objects.filter(atco_code=admin_area_id).exists():
AdminArea.objects.create(id=admin_area_id, atco_code=admin_area_id, region_id='NI')
stop.admin_area_id = admin_area_id
locality_element = place_element.find('naptan:NptgLocalityRef', self.ns)
if locality_element is not None:
if not Locality.objects.filter(id=locality_element.text).exists():
Locality.objects.create(id=locality_element.text, admin_area_id=admin_area_id)
stop.locality_id = locality_element.text
stop.save()
def handle_file(self, archive, filename):
with archive.open(filename) as open_file:
iterator = ET.iterparse(open_file)
for _, element in iterator:
tag = element.tag[27:]
if tag == 'StopPoint':
self.handle_stop(element)
element.clear()
def handle(self, *args, **options):
for filename in options['filenames']:
with zipfile.ZipFile(filename) as archive:
for filename in archive.namelist():
self.handle_file(archive, filename)
| pferreir/indico | indico/modules/events/abstracts/models/fields.py | Python | mit | 1,096 | 0.002737 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.modules.events.contributions.models.fields import ContributionFieldValueBase
from indico.util.string import format_repr, text_to_repr
class AbstractFieldValue(ContributionFieldValueBase):
"""Store a field values related to abstracts."""
__tablename__ = 'abstract_field_values'
__table_args__ = {'schema': 'event_abstracts'}
contribution_field_backref_name = 'abstract_values'
abstract_id = db.Column(
db.Integer,
db.ForeignKey('event_abstracts.abstracts.id'),
index=True,
nullable=False,
primary_key=True
)
# relationship backrefs:
# - abstract (Abstract.field_values)
def __repr__(self):
text = text_to_repr(self.data) if isinstance(self.data, str) else self.data
return format_repr(self, 'abstract_id', 'contribution_field_id', _text=text)
| infobip/infobip-api-python-client | infobip_api_client/model/sms_destination.py | Python | apache-2.0 | 6,770 | 0.000295 |
"""
Infobip Client API Libraries OpenAPI Specification
OpenAPI specification containing public endpoints supported in client API libraries. # noqa: E501
The version of the OpenAPI document: 1.0.172
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from infobip_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class SmsDestination(ModelNormal):
"""
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"to": (str,), # noqa: E501
"message_id": (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"to": "to", # noqa: E501
"message_id": "messageId", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, to, *args, **kwargs): # noqa: E501
"""SmsDestination - a model defined in OpenAPI
Args:
to (str): Message destination address. Addresses must be in international format (Example: `41793026727`).
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
message_id (str): The ID that uniquely identifies the message sent.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.to = to
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
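A short construction sketch for the model above, reusing the destination address format quoted in the constructor docstring; the message_id value is an arbitrary example:

```python
# `to` is the only required argument; message_id is optional
destination = SmsDestination(to="41793026727", message_id="example-message-1")
print(destination.to)          # 41793026727
print(destination.message_id)  # example-message-1
```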
| singingwolfboy/flask-dance | tests/consumer/storage/test_sqla.py | Python | mit | 24,167 | 0.001407 |
import pytest
sa = pytest.importorskip("sqlalchemy")
import os
import responses
import flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import event
from flask_caching import Cache
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user
from flask_dance.consumer import OAuth2ConsumerBlueprint, oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin, SQLAlchemyStorage
try:
import blinker
except ImportError:
blinker = None
requires_blinker = pytest.mark.skipif(not blinker, reason="requires blinker")
pytestmark = [pytest.mark.usefixtures("responses")]
@pytest.fixture
def blueprint():
"Make a OAuth2 blueprint for a fictional OAuth provider"
bp = OAuth2ConsumerBlueprint(
"test-service",
__name__,
client_id="client_id",
client_secret="client_secret",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_url="/oauth_done",
)
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":""}',
)
return bp
@pytest.fixture
def db():
"Make a Flask-SQLAlchemy instance"
return SQLAlchemy()
@pytest.fixture
def app(blueprint, db, request):
"Make a Flask app, attach Flask-SQLAlchemy, and establish an app context"
app = flask.Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URI", "sqlite://")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["CACHE_TYPE"] = "simple"
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
db.init_app(app)
# establish app context
ctx = app.app_context()
ctx.push()
request.addfinalizer(ctx.pop)
return app
class record_queries:
"""
A context manager for recording the SQLAlchemy queries that were executed
in a given context block.
"""
def __init__(self, target, identifier="before_cursor_execute"):
self.target = target
self.identifier = identifier
def record_query(self, conn, cursor, statement, parameters, context, executemany):
self.queries.append(statement)
def __enter__(self):
self.queries = []
event.listen(self.target, self.identifier, self.record_query)
return self.queries
def __exit__(self, exc_type, exc_value, traceback):
event.remove(self.target, self.identifier, self.record_query)
def test_sqla_storage_without_user(app, db, blueprint, request):
class OAuth(OAuthConsumerMixin, db.Model):
pass
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 2
# check the database
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
def test_sqla_model_repr(app, db, request):
class MyAwesomeOAuth(OAuthConsumerMixin, db.Model):
pass
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
o = MyAwesomeOAuth()
assert "MyAwesomeOAuth" in repr(o)
o.provider = "supercool"
assert 'provider="supercool"' in repr(o)
o.token = {"access_token": "secret"}
assert "secret" not in repr(o)
db.session.add(o)
db.session.commit()
assert "id=" in repr(o)
assert "secret" not in repr(o)
def test_sqla_storage(app, db, blueprint, request):
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# for now, we'll assume that Alice is the only user
alice = User(name="Alice")
db.session.add(alice)
db.session.commit()
# load alice's ID -- this issues a database query
alice.id
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=alice)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 3
# check the database
alice = User.query.first()
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.user_id == alice.id
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
def test_sqla_load_token_for_user(app, db, blueprint, request):
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# set token storage
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
# make users and OAuth tokens for several people
alice = User(name="Alice")
alice_token = {"access_token": "alice123", "token_type": "bearer"}
alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service")
bob = User(name="Bob")
bob_token = {"access_token": "bob456", "token_type": "bearer"}
bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service")
sue = User(name="Sue")
sue_token = {"access_token": "sue789", "token_type": "bearer"}
sue_oauth = OAuth(user=sue, token=sue_token, provider="test-service")
db.session.add_all([alice, bob, sue, alice_oauth, bob_oauth, sue_oauth])
db.session.commit()
# by default, we should not have a token for anyone
sess = blueprint.session
assert not sess.token
assert not blueprint.token
# load token for various users
blueprint.config["user"] = alice
assert sess.token == alice_token
assert blueprint.token == alice_token
blueprint.config["user"] = bob
assert sess.token == bob_token
assert blueprint.token == bob_token
blueprint.config["user"] = alice
assert sess.token == alice_token
assert blueprint.token == alice_token
blueprint.config["user"] = sue
assert sess.token == sue_token
assert blueprint.token == sue_token
| abid-mujtaba/fetchheaders | fetchheaders.py | Python | apache-2.0 | 20,367 | 0.022193 |
#!/usr/bin/python2
#
# Copyright 2012 Abid Hasan Mujtaba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: Abid H. Mujtaba
# Email: [email protected]
#
# Start Date: Aug. 9, 2012
# Last Revised: sep. 24, 2012
#
#
# This script is intended as a program that reads a configuration file and uses the information stored there-in to connect to a variety of IMAP servers and display header information about the emails in various folders (INBOX by default). It also has the capability of deleting selected emails. The advantage is that minimal information needs to be downloaded (i.e. only certain header fields) without needing to download the entire email and one can choose to delete unnecessary emails judging by the sender and/or subject only.
# Enable Python 3.x style print function:
from __future__ import print_function
import re
# Create global variables that implement global settings which are used by the following functions.
maxThreads = 5 # This value will be over-written by the global default and possibly a command-line argument
colorTitle = None
colorFlag = None
colorFrom = None
colorDate = None
colorSubjectSeen = None
colorSubjectUnseen = None
showFlags = None
def setOptions( configFile, configSpecFile ) :
'''
This function reads in the options from the configuration file and validates them using the configuration specification file passed to it. It creates a dictionary of options for each account which are used by the pollAccount() function to carry out its tasks. Additionally this function reads the 'global' section in the configuration file and creates and the globalSettings dictionary that contains the global settings for the program.
'''
from configobj import ConfigObj, ConfigObjError, flatten_errors
from validate import Validator
# Note the following code segment concerned with using ConfigObj and validating the entries has been inspired and in part copied from http://www.voidspace.org.uk/python/articles/configobj.shtml (an excellent tutorial on using ConfigObj by its author(s))
try:
config = ConfigObj( configFile, configspec = configSpecFile, file_error = True )
except (ConfigObjError, IOError), e:
print( 'Could not read "%s": %s' % (configFile, e) )
validator = Validator()
results = config.validate( validator )
if results != True : # Validation failed. Inform user of offending entries.
for (section_list, key, _) in flatten_errors( config, results ) :
if key is not None :
print( 'The "%s" key in the section "%s" failed validation' % (key, ','.join( section_list ) ) )
else :
print( 'The following section was missing: %s' % ','.join( section_list ) )
import sys
sys.exit(1)
# Validation successful so we move on to creating the 'servers' dictionary. We are implementing a default account paradigm which is not natively supported by ConfigObj. We want the ConfigParser ability where any option not provided in a subsection but contained in the 'DEFAULT' subsection are copied in to it. To achieve this we will need to know which entries are missing in each subsection without having them filled in using the default values from the config.spec file. To that end we read in the config file without reading the spec file (hence no spec defaults are read in).
configNoSpec = ConfigObj( configFile ) # Note since config passed validation we automatically know that configNoSpec is also valid.
# The first step is to copy out the default account section dictionary and use it as the basic dictionary for all accounts. We will over-write the options that are provided in each account sub-section as we read them.
listDefaultOptions = configNoSpec[ 'accounts' ][ 'DEFAULT' ].keys() # List of Default options as EXPLICITLY provided in the configuration file (hence the use of configNoSpec as compared to just config)
listAccounts = [ x for x in config[ 'accounts' ].keys() if x != 'DEFAULT' ] # List of Accounts that does NOT contain 'DEFAULT'. We are basically carrying out list subtraction here: completely removing certain elements from the list by using list comprehension along with a predicate
# Note: Everywhere a value needs to be read in we must use 'config' and NOT 'configNoSpec' since 'config' by virtue of knowing the required type of each option reads in the values as the correct type rather than as a string which is what we want.
servers = {} # Empty dictionary which we will populate with account configuration information
for account in listAccounts :
servers[ account ] = {} # Create sub-dictionary for account
servers[ account ][ 'name' ] = account # Saving account name for identification and laster use when the sub-dictionary is passed to pollAccount
for key, value in config[ 'accounts' ][ account ].items() :
servers[ account ][ key ] = value # Copy configuration information
# So far we have stored in the dictionary (for this account) the values specified explicitly and the global defaults from config.spec that are automatically loaded for missing options. Now we must over-write with the options that are not explicitly given but ARE explicitly defined in the 'DEFAULT' section since they carry precedence over the global defaults defined in the config.spec file (which should not ideally be edited by the user but rather represents the creator's fall-back default values in case an option is completely deleted by the user in the config file)
# Now we create a list of the options that are explicitly in DEFAULT but NOT in the specific account (Note the use of configNoSpec rather than config) :
listMissingDefaults = [ x for x in listDefaultOptions if x not in configNoSpec[ 'accounts' ][ account ].keys() ]
for key in listMissingDefaults :
servers[ account ][ key ] = config[ 'accounts' ][ 'DEFAULT' ][ key ]
# Now we read in the global settings:
globalSettings = {} # Create empty dictionary to populate
for key in config[ 'global' ].keys() :
globalSettings[ key ] = config[ 'global' ][ key ]
return servers, globalSettings
def argParse() :
'''
This function reads in the arguments passed to the program, validates them and if validated returns a parser.parse_args() returned object which contains the various arguments passed and which can then be used by the program as it sees fit.
'''
import argparse # This module gives powerful argument parsing abilities along with auto-generation of --help output.
# Specify the various arguments that the program expects and validate them. Additional arguments can be added as required.
parser = argparse.ArgumentParser( description = "A python script which simultaneously polls multiple IMAP accounts to display the subjects of all or only unseen messages in the specified folder (INBOX by default) without downloading complete messages.\n For further details please read the man page." )
parser.add_argument( "-c", "--config", help = "Specify the name and path to the configuration file. If not specified the program will use the default configuration file in $HOME/.fetchheaders/fetchheaders.conf. Note: The configuration specification file (fetchheaders.conf.spec) should not be altered casually and the program will only look for it in $HOME/.fetchheaders/" )
# For --accounts and --exclude which we wish to be mutually exclusive optional arguments we create a mutually exclusive group within the parser to hold them.
group = parser.add_mutually_exclusive_g
| piotrmaslanka/bellum | space/views/planetview.py | Python | agpl-3.0 | 4,242 | 0.009194 |
# coding=UTF-8
from django.shortcuts import redirect
from bellum.common.alliance import isAllied
from bellum.common.session.login import must_be_logged
from bellum.common.session import getAccount, getRace
from bellum.common.session.mother import getCurrentMother
from djangomako.shortcuts import render_to_response, render_to_string
from bellum.space.models import Planet
from bellum.common.gui import PrimaryGUIObject
from bellum.common.fixtures.province_build import getCosts
from bellum.common.session import getRace, getAccount, getResourceIndex
from bellum.meta import MPBI
from bellum.province.models import Province, Reinforcement
from django.core.exceptions import ObjectDoesNotExist
from bellum.orders.models import LandarmyProvintionalStrikeOrder
from bellum.orders.models import LandarmyPlanetaryStrikeOrder, LandarmyProvintionalStrikeOrder, LandarmyMotherPickupOrder
from bellum.space.ajax.pinfo import dx_html
from bellum.common.fixtures.relocation import getRelocationTime
@must_be_logged
def process_onlyprovince(request, province_id):
try: # check planet
province_id = int(province_id)
province = Province.objects.get(id=province_id)
except:
return redirect('/')
return process(request, province.planet.id, province_id=province_id)
@must_be_logged
def process(request, planet_id, province_id=None):
try: # check planet
planet = Planet.objects.get(id=planet_id)
except:
return redirect('/')
provinces = Province.objects.filter(planet=planet)
provinces_postprocessed = {}
prov = None
try: # faciliates GET getting province to zoom onto
if province_id != None:
provgrabber = province_id
else:
provgrabber = int(request.GET['province'])
except:
provgrabber = None
for province in provinces:
# 0 - MINE, 1 - ENEMY, 2 - ALLIED, 3 - NOBODYS
try:
province.provintionalpresence
except:
pv = 'gray'
else:
if province.provintionalpresence.owner == getAccount(request):
pv = 'green'
elif isAllied(getAccount(request), province.provintionalpresence.owner):
pv = 'blue'
else:
pv = 'red'
provinces_postprocessed[province.id] = [pv, False]
if province.id == provgrabber:
prov = province
try:
if province.provintionalpresence.owner == getAccount(request):
if prov == None:
prov = province
except:
pass
if prov == None:
prov = provinces[0]
provinces_postprocessed[prov.id][1] = True
mum = getCurrentMother(request)
sfx = dx_html(request, prov, mum).decode('utf8')
# can relocate?
can_relocate = False
relocation_time = None
if (planet != mum.duePosition()): # Different position than current
can_relocate = mum.canRelocate()
if can_relocate:
relocation_time = getRelocationTime(mum, getRace(request), mum.orbiting, planet)
# can scan?
can_scan = False
if getRace(request) == 1:
if mum.isRelocating() == False:
if mum.orbiting == planet:
can_scan = True
return render_to_response('space/planetview/planetview.html', {'htmldata':sfx,
'race':getRace(request),
'planet':planet,
'postprocessed':provinces_postprocessed,
'can_scan':can_scan,
'firstprovince':prov,
'can_relocate':can_relocate,
'relocation_time':relocation_time,
'wctg':lambda x: int((x+100.0)*(345.0/200.0)),
'pgo':PrimaryGUIObject(request)})
| sam-m888/gprime | gprime/plug/_docgenplugin.py | Python | gpl-2.0 | 4,499 | 0.000222 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the Plugin class for document generator plugins.
"""
from . import Plugin
from .docgen import TextDoc, DrawDoc
class DocGenPlugin(Plugin):
"""
This class represents a plugin for generating documents from Gramps
"""
def __init__(self, name, description, basedoc,
paper, style, extension, docoptclass, basedocname):
"""
:param name: A friendly name to call this plugin.
Example: "Plain Text"
:type name: string
:param description: A short description of the plugin.
Example: "This plugin will generate text documents in plain text."
:type description: string
:param basedoc: A class that implements the BaseDoc
interface.
:type basedoc: BaseDoc
:param paper: Indicates whether the plugin uses paper or not.
True = use paper; False = do not use paper
:type paper: bool
:param style: Indicates whether the plugin uses styles or not.
True = use styles; False = do not use styles
:type style: bool
:param extension: The extension for the output file.
Example: "txt"
:type extension: str
:param docoptclass: either None or a subclass of DocOptions
:type docoptclass: either None or a DocOptions subclass
:param basedocname: The BaseDoc name of this plugin.
Example: "AsciiDoc"
:type basedocname: string
:return: nothing
"""
Plugin.__init__(self, name, description, basedoc.__module__)
self.__basedoc = basedoc
self.__paper = paper
self.__style = style
self.__extension = extension
self.__docoptclass = docoptclass
self.__basedocname = basedocname
def get_basedoc(self):
"""
Get the :class:`.BaseDoc` class for this plugin.
:return: the :class:`.BaseDoc` class passed into :meth:`__init__`
"""
return self.__basedoc
def get_paper_used(self):
"""
Get the paper flag for this plugin.
:return: bool - True = use paper; False = do not use paper
"""
return self.__paper
def get_style_support(self):
"""
Get the style flag for this plugin.
:return: bool - True = use styles; False = do not use styles
"""
return self.__style
def get_extension(self):
"""
Get the file extension for the output file.
:return: str
"""
return self.__extension
def get_doc_option_class(self):
"""
Get the :class:`.DocOptions` subclass for this plugin, if any
:return: the :class:`.DocOptions` subclass passed into :meth:`__init__`
"""
return self.__docoptclass
def get_basedocname(self):
"""
Get the :class:`.BaseDoc` name for this plugin.
:return: the :class:`.BaseDoc` name passed into :meth:`__init__`
"""
return self.__basedocname
def get_text_support(self):
"""
Check if the plugin supports the :class:`.TextDoc` interface.
:return: bool: True if :class:`.TextDoc` is supported; False if
:class:`.TextDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, TextDoc))
def get_draw_support(self):
"""
Check if the plugin supports the :class:`.DrawDoc` interface.
:return: bool: True if :class:`.DrawDoc` is supported; False if
:class:`.DrawDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, DrawDoc))
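A hypothetical registration sketch for DocGenPlugin, reusing the example values quoted in the constructor docstring; it assumes the imports at the top of this module, and AsciiDocDoc is a stub stand-in for a concrete text backend, not a real gprime class:

```python
class AsciiDocDoc(TextDoc):
    """Stub subclass standing in for a real plain-text document backend."""

plugin = DocGenPlugin(
    name="Plain Text",
    description="This plugin will generate text documents in plain text.",
    basedoc=AsciiDocDoc,
    paper=True,
    style=True,
    extension="txt",
    docoptclass=None,
    basedocname="AsciiDoc",
)
print(plugin.get_extension())     # txt
print(plugin.get_text_support())  # True, because AsciiDocDoc subclasses TextDoc
```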
| shuxin/androguard | androguard/gui/fileloading.py | Python | apache-2.0 | 1,625 | 0 |
import traceback
from PyQt5 import QtCore
import androguard.session as session
from androguard.core import androconf
import logging
log = logging.getLogger("androguard.gui")
class FileLoadingThread(QtCore.QThread):
file_loaded = QtCore.pyqtSignal(bool)
def __init__(self, parent=None):
QtCore.QThread.__init__(self, parent)
self.parent = parent
self.file_path = None
self.incoming_file = ()
def load(self, file_path):
self.file_path = file_path
if file_path.endswith(".ag"):
self.incoming_file = (file_path, 'SESSION')
else:
file_type = androconf.is_android(file_path)
self.incoming_file = (file_path, file_type)
self.start(QtCore.QThread.LowestPriority)
def run(self):
if self.incoming_file:
try:
file_path, file_type = self.incoming_file
if file_type in ["APK", "DEX", "DEY"]:
ret = self.parent.session.add(file_path,
open(file_path, 'rb').read())
self.file_loaded.emit(ret)
elif file_type == "SESSION":
self.parent.session = session.Load(file_path)
self.file_loaded.emit(True)
else:
self.file_loaded.emit(False)
except Exception as e:
log.debug(e)
log.debug(traceback.format_exc())
self.file_loaded.emit(False)
self.incoming_file = []
else:
self.file_loaded.emit(False)
| MegaMark16/django-puzzle-captcha | puzzle_captcha/admin.py | Python | bsd-3-clause | 505 | 0.011881 |
from django.contrib import admin
from puzzle_captcha.models import Puzzle, PuzzlePiece
class PuzzlePieceInline(admin.StackedInline):
model = PuzzlePiece
readonly_fields = ('key', 'image', 'order')
can_delete = False
extra = 0
class PuzzleAdmin(admin.ModelAdmin):
list_display = ('key', 'rows', 'cols')
readonly_fields = ('key', 'rows', 'cols')
class Meta:
model = Puzzle
inlines = [
PuzzlePieceInline,
]
admin.site.register(Puzzle, PuzzleAdmin)
| belangeo/pyo | pyo/examples/04-soundfiles/02-read-from-disk-2.py | Python | lgpl-3.0 | 1,308 | 0.002294 |
"""
02-read-from-disk-2.py - Catching the `end-of-file` signal from the SfPlayer object.
This example demonstrates how to use the `end-of-file` signal
of the SfPlayer object to trigger another playback (possibly
with another sound, another speed, etc.).
When a SfPlayer reaches the end of the file, it sends a trigger
(more on trigger later) that the user can retrieve with the
syntax :
variable_name["trig"]
"""
from pyo import *
import random
s = Server().boot()
# Sound bank
folder = "../snds/"
sounds = ["alum1.wav", "alum2.wav", "alum3.wav", "alum4.wav"]
# Creates the left and right players
sfL = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out()
sfR = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out(1)
# Function to choose a new sound and a new speed for the left player
def newL():
sfL.path = folder + sounds[random.randint(0, 3)]
sfL.speed = random.uniform(0.75, 1.5)
sfL.out()
# The "end-of-file" signal triggers the function "newL"
tfL = TrigFunc(sfL["trig"], newL)
# Function to choose a new sound and a new speed for the right player
def newR():
sfR.path = folder + sounds[random.randint(0, 3)]
sfR.speed = random.uniform(0.75, 1.5)
sfR.out(1)
# The "end-of-file" signal triggers the function "newR"
tfR = TrigFunc(sfR["trig"], newR)
s.gui(locals())
|
thisisshi/cloud-custodian
|
tools/c7n_openstack/tests/test_server.py
|
Python
|
apache-2.0
| 2,461
| 0
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from common_openstack import OpenStackTest
class ServerTest(OpenStackTest):
def test_server_query(self):
factory = self.replay_flight_data()
p = self.load_policy({
'name': 'all-servers',
'resource': 'openstack.server'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_server_filter_name(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "value",
"key": "name",
"value": "c7n-test-1",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_flavor(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "flavor",
"flavor_name": "m1.tiny",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_tags(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "tags",
"tags": [
{
"key": "a",
"value": "a",
},
{
"key": "b",
"value": "b",
},
],
"op": "all",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-2")
|
finaldie/final_httpd_mock
|
.ycm_extra_conf.py
|
Python
|
mit
| 6,141
| 0.021821
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./flibs/final_libraries/include',
'-I',
'./src',
'-I',
'./http-parser',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
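# For illustration, a minimal compile_commands.json entry (with made-up example
# paths) has this shape:
#
#   [ { "directory": "/home/user/project/build",
#       "command": "clang -Iinclude -c ../src/main.c",
#       "file": "../src/main.c" } ]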
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
Knln/COMP2041
|
examples/4/filetest2.py
|
Python
|
mit
| 135
| 0
|
#!/usr/bin/python2.7 -u
import os.path
if os.path.isdir('/dev/null'):
print '/dev/null'
if os.path.isdir('/dev'):
print '/dev'
|
dls-controls/pymalcolm
|
tests/test_profiler.py
|
Python
|
apache-2.0
| 2,778
| 0.00108
|
import ast
import logging
import time
import unittest
from malcolm.profiler import Profiler
# https://github.com/bdarnell/plop/blob/master/plop/test/collector_test.py
class ProfilerTest(unittest.TestCase):
def filter_stacks(self, results):
# Kind of hacky, but this is the simplest way to keep the tests
# working after the internals of the collector changed to support
# multiple formatters.
stack_counts = ast.literal_eval(results)
counts = {}
for stack, count in stack_counts.items():
filtered_stack = [
frame[2] for frame in stack if frame[0].endswith("test_profiler.py")
]
if filtered_stack:
counts[tuple(filtered_stack)] = count
return counts
def check_counts(self, counts, expected):
failed = False
output = []
for stack, count in expected.items():
# every expected frame should appear in the data, but
# the inverse is not true if the signal catches us between
# calls.
self.assertTrue(stack in counts)
ratio = float(counts[stack]) / float(count)
output.append(
"%s: expected %s, got %s (%s)" % (stack, count, counts[stack], ratio)
)
if not (0.70 <= ratio <= 1.25):
failed = True
if failed:
for line in output:
logging.warning(line)
for key in set(counts.keys()) - set(expected.keys()):
logging.warning("unexpected key: %s: got %s" % (key, counts[key]))
self.fail("collected data did not meet expectations")
def test_collector(self):
start = time.time()
def a(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def b(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def c(end):
while time.time() < end:
pass
profiler = Profiler("/tmp")
profiler.start(interval=0.01)
a(time.time() + 0.1)
b(time.time() + 0.2)
c(time.time() + 0.3)
end = time.time()
profiler.stop("profiler_test.plop")
elapsed = end - start
self.assertTrue(0.8 < elapsed < 0.9, elapsed)
with open("/tmp/profiler_test.plop") as f:
results = f.read()
counts = self.filter_stacks(results)
expected = {
("a", "test_collector"): 10,
("c", "a", "test_collector"): 10,
("b", "test_collector"): 20,
("c", "b", "test_collector"): 10,
("c", "test_collector"): 30,
}
self.check_counts(counts, expected)
|
fuxiang90/speed-prediction
|
script/loc_train.py
|
Python
|
bsd-3-clause
| 1,847
| 0.02653
|
# coding=utf-8
#! /usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import mlpy
import sys
import math
from config import loc_store_path, times_day
class LocTrain(object):
"""
"""
def __init__(self, file_name , flag):
if flag == "ridge":
self.lrs = [ mlpy.Ridge() for i in range(7) ]
else :
self.lrs = [ mlpy.OLS() for i in range(7) ]
self.file_name = file_name
self.x_weekday = dict()
self.y_weekday = dict()
#self.x ,self.y =
def train(self):
self.get_input(self.file_name)
for weekday in range(7):
self.lrs[weekday].learn(self.x_weekday[weekday], self.y_weekday[weekday])
def predict(self,weekday,speeds):
pre_speed = self.lrs[weekday].pred(speeds)
return pre_speed
def test(self):
pass
def get_input(self, filename):
fin = open(filename)
x = []
y = []
for each in fin:
each = each[:each.find('\n')]
l = each.split(' ')
each_x = []
each_x.append(1)
each_x.append(float(l[0]))
each_x.append(float(l[1]))
each_x.append(float(l[2]))
each_x.append(float(l[3]))
weekday = int(l[5])
if weekday not in self.x_weekday:
self.x_weekday[weekday] = []
self.y_weekday[weekday] = []
self.x_weekday[weekday].append(each_x)
self.y_weekday[weekday].append(float(l[4]))
def main():
tmp = LocTrain('../data/train/3860_data',"ridge")
tmp.train()
print tmp.predict(1,[1,10,10,20,10])
pass
if __name__ == '__main__':
main()
|
Katello/katello-cli
|
src/katello/client/api/package.py
|
Python
|
gpl-2.0
| 1,296
| 0.000772
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from katello.client.api.base import KatelloAPI
class PackageAPI(KatelloAPI):
"""
Connection class to access package calls
"""
def package(self, packageId, repoId):
path = "/api/repositories/%s/packages/%s" % (repoId, packageId)
pack = self.server.GET(path)[1]
return pack
def packages_by_repo(self, repoId):
path = "/api/repositories/%s/packages" % repoId
pack_list = self.server.GET(path)[1]
return pack_list
def search(self, query, repoId):
path = "/api/repositories/%s/packages/search" % repoId
pack_list = self.server.GET(path, {"search": query})[1]
return pack_list
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/btp_expression_operator244.py
|
Python
|
mit
| 12,168
| 0.000164
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_expression9
except ImportError:
btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"]
try:
from onshape_client.oas.models import btp_expression_operator244_all_of
except ImportError:
btp_expression_operator244_all_of = sys.modules[
"onshape_client.oas.models.btp_expression_operator244_all_of"
]
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPExpressionOperator244(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("operator",): {
"NONE": "NONE",
"PLUS": "PLUS",
"MINUS": "MINUS",
"TIMES": "TIMES",
"DIVIDE": "DIVIDE",
"MODULUS": "MODULUS",
"POWER": "POWER",
"NEGATE": "NEGATE",
"OR": "OR",
"AND": "AND",
"NOT": "NOT",
"EQUAL_TO": "EQUAL_TO",
"NOT_EQUAL_TO": "NOT_EQUAL_TO",
"GREATER": "GREATER",
"LESS": "LESS",
"GREATER_OR_EQUAL": "GREATER_OR_EQUAL",
"LESS_OR_EQUAL": "LESS_OR_EQUAL",
"CONCATENATE": "CONCATENATE",
"CONDITIONAL": "CONDITIONAL",
},
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"
|
USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEF
|
INITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"for_export": (bool,), # noqa: E501
"global_namespace": (bool,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"namespace": ([btp_identifier8.BTPIdentifier8],), # noqa: E501
"operand1": (btp_expression9.BTPExpression9,), # noqa: E501
"operand2": (btp_expression9.BTPExpression9,), # noqa: E501
"operand3": (btp_expression9.BTPExpression9,), # noqa: E501
"operator": (str,), # noqa: E501
"space_after_namespace": (btp_space10.BTPSpace10,), # noqa: E501
"space_after_operator": (btp_space10.BTPSpace10,), # noqa: E501
"space_before_operator": (btp_space10.BTPSpace10,), # noqa: E501
"written_as_function_call": (bool,), # noqa: E501
"atomic": (bool,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"for_export": "forExport", # noqa: E501
"global_namespace": "globalNamespace", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"namespace": "namespace", # noqa: E501
"operand1": "operand1", # noqa: E501
"operand2": "operand2", # noqa: E501
"operand3": "operand3", # noqa: E501
"operator": "operator", # noqa: E501
"space_after_namespace": "spaceAfterNamespace", # noqa: E501
"space_after_operator": "spaceAfterOperator", # noqa: E501
"space_before_operator": "spaceBeforeOperator", # noqa: E501
"written_as_function_call": "writtenAsFunctionCall", # noqa: E501
"atomic": "atomic", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_expression_operator244.BTPExpressionOperator244 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conv
|
psd-tools/psd-tools
|
src/psd_tools/__init__.py
|
Python
|
mit
| 158
| 0
|
from __future__ import absolute_import, unicode_literals
from .api.psd_image import PSDImage
from .composer import compose
__all__ = ['PSDImage', 'compose']
| |
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/code/model/branchrevision.py
|
Python
|
agpl-3.0
| 984
| 0
|
# Copyright 2009-2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
__all__ = [
'BranchRevision',
]
from storm.locals import (
Int,
Reference,
Storm,
)
from zope.interface import implements
from lp.code.interfaces.branchrevision import IBranchRevision
class BranchRevision(Storm):
"""See `IBranchRevision`."""
__storm_table__ = 'BranchRevision'
__storm_primary__ = ("branch_id", "revision_id")
implements(IBranchRevision)
branch_id = Int(name='branch', allow_none=False)
branch = Reference(branch_id, 'Branch.id')
revision_id = Int(name='revision', allow_none=False)
revision = Reference(revision_id, 'Revision.id')
sequence = Int(name='sequence', allow_none=True)
def __init__(self, branch, revision, sequence=None):
self.branch = branch
self.revision = revision
self.sequence = sequence
|
luckasfb/OT_903D-kernel-2.6.35.7
|
kernel/scripts/rt-tester/rt-tester.py
|
Python
|
gpl-2.0
| 5,104
| 0.021552
|
#!/usr/bin/python
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"lockbkl" : "9",
"unlockbkl" : "10",
"signal"
|
: "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "e
|
q" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
coll-gate/collgate
|
server/printer/urls.py
|
Python
|
mit
| 317
| 0.003175
|
# -*- coding: utf-8; -*-
#
# @file urls.py
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-09-20
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details coll-gate printer module url entry point.
from django.conf.urls import include, url
urlpatterns = [
]
|
tcwang817/django-choices-enum
|
django_choices_enum/__init__.py
|
Python
|
mit
| 62
| 0
|
from base import ChoicesEnum
from _version import __version__
|
towerjoo/mindsbook
|
django/contrib/auth/tests/auth_backends.py
|
Python
|
bsd-3-clause
| 9,952
| 0.003014
|
import warnings
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
class BackendTest(TestCase):
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
User.objects.create_user('test', '[email protected]', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
user = User.objects.get(username='test')
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions() == set([u'auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = User.objects.get(username='test')
exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set([u'auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
supports_object_permissions = True
# This class also supports tests for anonymous user permissions,
# via subclasses which just set the 'supports_anonymous_user' attribute.
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
# not reached due to supports_anonymous_user = False
return True
return False
def has_module_perms(self, user, app_label):
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user2 = User.objects.create_user('test2', '[email protected]', 'test')
self.user3 = User.objects.create_user('test3', '[email protected]', 'test')
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
self.restore_warnings_state()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
content_type=ContentType.objects.get_for_model(Group)
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = True
class NoAnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True
"""
backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = s
|
jawilson/home-assistant
|
homeassistant/components/ialarm/alarm_control_panel.py
|
Python
|
apache-2.0
| 1,923
| 0.00052
|
"""Interfaces with iAlarm control panels."""
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DATA_COORDINATOR, DOMAIN
async def async_setup_entry(hass, entry, async_add_entities) -> None:
"""Set up a iAlarm alarm control panel based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]
async_add_entities([IAlarmPanel(coordinator)], False)
class IAlarmPanel(CoordinatorEntity, AlarmControlPanelEntity):
"""Representation of an iAlarm device."""
@property
def device_info(self) -> DeviceInfo:
"""Return device info for this device."""
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Antifurto365 - Meian",
name=self.name,
)
@property
def unique_id(self):
"""Return a unique id."""
return self.coordinator.mac
@property
def name(self):
"""Return the name."""
return "iAlarm"
@property
def state(self):
"""Ret
|
urn the state of the device."""
return self.coordinator.state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.coordinator.ialarm.disarm()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self.coordinator.ialarm.arm_stay()
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self.coordinator.ialarm.arm_away()
|
dgoldin/snakebite
|
snakebite/channel.py
|
Python
|
apache-2.0
| 26,513
| 0.002565
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
# Copyright (c) 2010 Jan Dittberner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
channel.py - Socket implementation of Google's Protocol Buffers RPC
service interface.
This package contains classes providing a socket implementation of the
RPCChannel abstract class.
Original Authors: Martin Norbury ([email protected])
Eric Saunders ([email protected])
Jan Dittberner ([email protected])
May 2009, Nov 2010
Modified for snakebite: Wouter de Bie ([email protected])
May 2012
'''
# Standard library imports
import socket
import os
import pwd
import math
# Third party imports
from google.protobuf.service import RpcChannel
# Protobuf imports
from snakebite.protobuf.RpcHeader_pb2 import RpcRequestHeaderProto, RpcResponseHeaderProto
from snakebite.protobuf.IpcConnectionContext_pb2 import IpcConnectionContextProto
from snakebite.protobuf.ProtobufRpcEngine_pb2 import RequestHeaderProto
from snakebite.protobuf.datatransfer_pb2 import OpReadBlockProto, BlockOpResponseProto, PacketHeaderProto, ClientReadStatusProto
from snakebite.formatter import format_bytes
from snakebite.errors import RequestError
from snakebite.crc32c import crc
import google.protobuf.internal.encoder as encoder
import google.protobuf.internal.decoder as decoder
# Module imports
import logger
import logging
import struct
import uuid
# Configure package logging
log = logger.getLogger(__name__)
def log_protobuf_message(header, message):
log.debug("%s:\n\n\033[92m%s\033[0m" % (header, message))
def get_delimited_message_bytes(byte_stream, nr=4):
''' Parse a delimited protobuf message. This is done by first getting a protobuf varint from
the stream that represents the length of the message, then reading that amount of
bytes from the message and then parsing it.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after.
'''
(length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message length (pos %d): %d" % (pos, length))
delimiter_bytes = nr - pos
byte_stream.rewind(delimiter_bytes)
message_bytes = byte_stream.read(length)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
total_len = length + pos
return (total_len, message_bytes)
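# A purely illustrative sketch of the framing described above, using the
# protobuf internal helpers already imported in this module (the payload value
# is a made-up example):
#
# payload = "hello"
# framed = encoder._VarintBytes(len(payload)) + payload
# (length, pos) = decoder._DecodeVarint32(framed[:4], 0)  # length == 5, pos == 1
# assert framed[pos:pos + length] == payload  # bytes after the varint are the message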
class RpcBufferedReader(object):
'''Class that wraps a socket and provides some utility methods for reading
and rewinding of the buffer. This comes in handy when reading protobuf varints.
'''
MAX_READ_ATTEMPTS = 100
def __init__(self, socket):
self.socket = socket
self.reset()
def read(self, n):
'''Reads n bytes into the internal buffer'''
bytes_wanted = n - self.buffer_length + self.pos + 1
if bytes_wanted > 0:
self._buffer_bytes(bytes_wanted)
end_pos = self.pos + n
ret = self.buffer[self.pos + 1:end_pos + 1]
self.pos = end_pos
return ret
def _buffer_bytes(self, n):
to_read = n
for _ in xrange(self.MAX_READ_ATTEMPTS):
bytes_read = self.socket.recv(to_read)
self.buffer += bytes_read
to_read -= len(bytes_read)
if to_read == 0:
log.debug("Bytes read: %d, total: %d" % (len(bytes_read), self.buffer_length))
return n
if len(bytes_read) < n:
raise Exception("RpcBufferedReader only managed to read %s out of %s bytes" % (len(bytes_read), n))
def rewind(self, places):
'''Rewinds the current buffer to a position. Needed for reading varints,
because we might read bytes that belong to the stream after the varint.
'''
log.debug("Rewinding pos %d with %d places" % (sel
|
f.pos, places))
self.pos -= places
log.debug("Reset buffer to pos %d" % self.pos)
def reset(self):
self.buffer = ""
self.pos = -1 # position of last byte read
@property
def buffer_length(self):
'''Returns the length of the current buffer.'''
return len(self.buffer)
class SocketRpcChannel(RpcChannel):
ERROR_BYTES = 18446744073709551615L
RPC_HEADER = "hrpc"
RPC_SERVICE_CLASS = 0x00
AUTH_PROTOCOL_NONE = 0x00
RPC_PROTOCOL_BUFFFER = 0x02
'''Socket implementation of an RpcChannel.
'''
def __init__(self, host, port, version, effective_user=None):
'''SocketRpcChannel to connect to a socket server on a user defined port.
It is possible to define version and effective user for the communication.'''
self.host = host
self.port = port
self.sock = None
self.call_id = -3 # First time (when the connection context is sent, the call_id should be -3, otherwise start with 0 and increment)
self.version = version
self.client_id = str(uuid.uuid4())
self.effective_user = effective_user or pwd.getpwuid(os.getuid())[0]
def validate_request(self, request):
'''Validate the client request against the protocol file.'''
# Check the request is correctly initialized
if not request.IsInitialized():
raise Exception("Client request (%s) is missing mandatory fields" % type(request))
def get_connection(self, host, port):
'''Open a socket connection to a given host and port and writes the Hadoop header
The Hadoop RPC protocol looks like this when creating a connection:
+---------------------------------------------------------------------+
| Header, 4 bytes ("hrpc") |
+---------------------------------------------------------------------+
| Version, 1 byte (default verion 9) |
+---------------------------------------------------------------------+
| RPC service class, 1 byte (0x00) |
+---------------------------------------------------------------------+
| Auth protocol, 1 byte (Auth method None = 0) |
+---------------------------------------------------------------------+
| Length of the RpcRequestHeaderProto + length of the |
| of the IpcConnectionContextProto (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Serialized delimited RpcRequestHeaderProto |
+---------------------------------------------------------------------+
| Serialized delimited IpcConnectionContextProto |
+---------------------------------------------------------------------+
'''
log.debug("##############
|
google-research/text-to-text-transfer-transformer
|
t5/models/mesh_transformer.py
|
Python
|
apache-2.0
| 12,873
| 0.00536
|
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for providing data to Mesh TF transformer."""
import functools
from absl import logging
import gin
import mesh_tensorflow.transformer.dataset as transformer_dataset
import t5.data
from t5.models import utils as model_utils
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
DEPRECATED_GIN_REFERENCES = (
"configurable_vocabulary",
"get_sentencepiece_model_path",
"maybe_print_dataset",
"num_parallel_calls",
"SentencePieceVocabulary",
"t5.data.sentencepiece_vocabulary.SentencePieceVocabulary",
"t5.models.mesh_transformer.get_sentencepiece_model_path",
"train_model",
"vocabularies.Vocabulary",
"Vocabulary",
)
@gin.configurable()
def mesh_train_dataset_fn(
mixture_or_task_name,
sequence_length,
vocabulary=None,
dataset_split=tfds.Split.TRAIN,
shuffle=True,
seed=None,
use_cached=False,
pack=True):
"""Returns the tf.data.Dataset for training on a given mixture.
This uses the format required for utils.run's `train_dataset_fn` argument in
the Mesh TF transformer standalone.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
sequence_length: dict mapping feature key to the int max sequence length for
that feature.
vocabulary: unused argument, maintains compatibility with other dataset_fns.
dataset_split: string, which split of the dataset to load. In most cases
this should be "train".
shuffle: Whether or not to shuffle dataset.
seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
shuffle seed for tf.data
use_cached: bool, whether to load the cached version of this dataset.
pack: bool, whether to pack the dataset.
Returns:
A tf.data.Dataset of preprocessed, tokenized, and batched examples.
"""
del vocabulary
mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
ds = mixture_or_task.get_dataset(
sequence_length, split=dataset_split, use_cached=use_cached,
shuffle=shuffle, num_epochs=None, seed=seed)
# Select just the output features which are present in the dataset.
feature_keys = tuple(k for k in mixture_or_task.output_features
if k in tf.data.get_output_shapes(ds))
# Filtering feature keys is done in pack_or_pad function. However, when
# packing is turned off, input_features aren't filtered leading to training
# problems due to strings showing up in the input example. Filtering features
# ensures that we don't rely on pack_or_pad to filter features for training.
def _filter_features(ex):
return {k: ex[k] for k in feature_keys}
ds = ds.map(
_filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
ds = transformer_dataset.pack_or_pad(
ds, sequence_length, pack=pack,
feature_keys=feature_keys, ensure_eos=eos_keys)
return ds
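# Illustrative only: since `mixture_or_task_name` must be supplied via gin, a
# typical binding in a gin config file would look like the following, with a
# hypothetical registered Mixture/Task name:
#
#   mesh_train_dataset_fn.mixture_or_task_name = 'my_mixture_v001'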
@gin.configurable()
def mesh_inference_dataset_fn(
mixture_or_task_name,
sequence_length,
dataset_split,
shuffle=False,
seed=None,
vocabulary=None,
num_inference_examples=-1,
use_cached=False,
priming_sequence_length=None):
"""Returns all tf.data.Datasets for LM inference on a given mixture.
For Tasks without inputs (such as language modeling), the first
`priming_sequence_length` tokens in the target are used as the "inputs" for
inference.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
sequence_length: dict mapping feature key to the int max sequence length for
that feature. If set to None, packing and padding will be disabled.
dataset_split: string, which split of the dataset to load. NOTE, this
function does NOT receive the split specified in utils.run. It needs to be
specified separately.
shuffle: Whether or not to shuffle dataset.
seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data.
vocabulary: unused argument, maintains compatibility with other dataset_fns.
num_inference_examples: maximum number of examples per task to do inference
on. If None or less than 0, use all examples.
use_cached: bool, whether to load the cached version of this dataset.
evals but should not be used for iterative decoding.
priming_sequence_length: If the Task only has "targets", select the first
this many tokens from each target sequence to use as "inputs". This is
useful for decoder-only language models where you would like to use a
portion of the targets as a priming sequence for generation.
Returns:
A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
"""
del vocabulary
mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
def _split_targets_for_primed_inference(ex):
ex["inputs"] = ex["targets"][:priming_sequence_length]
ex["targets"] = ex["targets"][priming_sequence_length:]
ex["inputs"] = tf.pad(
ex["inputs"],
[[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]], "CONSTANT")
ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,))
return ex
def _prepare_for_unprimed_inference(ex):
ex["inputs"] = tf.constant([], dtype=tf.int64)
return ex
def _get_dataset_for_single_task(task, sequence_length):
"""Get a tensorflow.data.Dataset for the provided task."""
ds = task.get_dataset(
sequence_length, split=dataset_split, use_cached=use_cached,
shuffle=shuffle, seed=seed)
if "inputs" not in ds.element_spec:
if not priming_sequence_length or priming_sequence_length <= 0:
logging.warning("Priming sequence length not specified so priming "
"with the empty string.")
ds = ds.map(_prepare_for_unprimed_inference)
else:
logging.info("Using the first %d tokens of each target as input.",
priming_sequence_length)
ds = ds.map(_split_targets_for_primed_inference)
elif priming_sequence_length is not None:
raise ValueError(
"Setting a priming sequence length only makes sense for decoder-only "
"Tasks, which have `targets` but no `inputs`.")
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
logging.info(
"Padding '%s' with sequence lengths: %s", task.name, sequence_length)
ds = transformer_dataset.pack_or_pad(
ds,
sequence_length,
pack=False,
feature_keys=tuple(task.output_features),
ensure_eos=eos_keys)
if num_inference_examples is not None and num_inference_examples >= 0:
ds = ds.take(num_inference_examples)
return ds
outputs = []
for task in t5.data.get_subtasks(mixture_or_task):
if dataset_split not in task.splits:
logging.info("Task %s has no '%s' split, skipping inference.",
task.name, dataset_split)
continue
outputs.append(
transformer_dataset.EvalDataset(
task.name,
functools.partial(
_get_dataset_for_single_task,
task=task,
sequence_length=sequence_length),
task.postprocess_fn,
task.metric_fns,
)
)
if not outputs:
logging.warning("No %s data found for %s.",
dataset_split, mi
|
palankai/pyrs
|
pyrs/conf.py
|
Python
|
mit
| 200
| 0
|
"""
This module contains the global configurations of the framework
"""
#: This name suppose to be used for general meta decoration for functions,
#: methods or even classes
meta_field = '_pyrsmeta'
|
andre487/news487
|
collector/rss/reader.py
|
Python
|
mit
| 2,757
| 0.001451
|
import feedparser
import logging
from rss import sources
from util import date, dict_tool, tags
log = logging.getLogger('app')
def parse_feed_by_name(name):
feed_params = sources.get_source(name)
if not feed_params:
raise ValueError('There is no feed with name %s' % name)
source_name = feed_params['name']
feed = feedparser.parse(feed_params['url'])
data = []
for entry in feed['entries']:
data.append(
create_doc(
source_name, feed, entry,
feed_params.get('tags', ()),
feed_params.get('author_name'),
feed_params.get('author_link'),
feed_params.get('dressing_params'),
)
)
log.info('%s: got %d documents', source_name, len(data))
return data
def create_doc(source_name, feed, entry, additional_tags, default_author_name, default_author_link, dressing_params):
link = dict_tool.get_alternative(entry, 'feedburner_origlink', 'link', assert_val=True)
published = date.utc_format(
dict_tool.get_alternative(entry, 'published', 'updated', assert_val=True)
)
description = dict_tool.get_alternative(entry, 'summary', 'description', 'title', assert_val=True)
picture = dict_tool.get_deep(entry, 'gd_image', 'src')
text = dict_tool.get_deep(entry, 'content', 0, 'value')
author_name = handle_default_param(
entry,
dict_tool.get_deep(entry, 'authors', 0, 'name'),
default_author_name
)
author_link = handle_default_param(
entry,
dict_tool.get_deep(entry, 'authors', 0, 'href'),
default_author_link
)
entry_tags = []
for tag in entry.get('tags', []):
tag_text = dict_tool.get_alternative(tag, 'term', 'label')
if tag_text:
entry_tags.append(tag_text.lower())
additional_tags += tuple(entry_tags)
comments_count = entry.get('slash_comments')
if comments_count is not None:
comments_count = int(comments_count)
return {
'link': link,
'title': entry['title'],
'published': published,
'picture': picture,
'author_name': author_name,
'author_link': author_link,
'description': description,
'text': text,
'source_name': source_name,
'source_type': 'rss',
'source_title': feed['feed']['title'],
'source_link': feed['feed']['link'],
'comments_count': comments_count,
'tags': tags.create_tags_list(*additional_tags),
'__dressing_params': dressing_params,
}
def handle_default_param(entry, val, default_val):
if callable(default_val):
return default_val(entry, val)
return val or default_val
|
PeWu/python-geneteka
|
merge.py
|
Python
|
apache-2.0
| 4,704
| 0.013818
|
#!/usr/bin/python3
"""
Merges raw data from geneteka into larger json files.
"""
from collections import defaultdict
import html
import json
import os
import re
INPUT_DIR = 'data_raw'
OUTPUT_DIR = 'data'
def extractNotes(value):
match = re.search(r'i.png" title="([^"]*)"', value)
if match:
return (value.split('<', 1)[0].strip(), match.group(1))
return (value.strip(), None)
def convertPersonRecord(record):
# Unparsed column with various information.
stuff = record[9]
lastName, lastNameNotes = extractNotes(record[3])
motherLastName, motherLastNameNotes = extractNotes(record[6])
output = {
'year': record[0].strip(),
'record_number': record[1].strip(),
'first_name': record[2].strip(),
'last_name': lastName,
'father_first_name': record[4].strip(),
'mother_first_name': record[5].strip(),
'mother_last_name': motherLastName,
'parish': record[7].strip(),
'place': record[8].strip(),
'stuff': stuff,
}
# Last name notes.
if lastNameNotes:
output['last_name_notes'] = lastNameNotes
if motherLastNameNotes:
output['mother_last_name_notes'] = motherLastNameNotes
# List of notes.
match = re.search(r'i.png" title="([^"]*)"', stuff)
if match:
output['notes'] = html.unescape(match.group(1)).strip().split('\r')
# Where archives are kept.
match = re.search(r'z.png" title="([^"]*)"', stuff)
if match:
output['archives'] = html.unescape(match.group(1)).strip()
# URL to the place the archives are kept.
match = re.search(r'href="([^"]*)" target', stuff)
if match:
output['archives_url'] = match.group(1)
# URL to metryki.genealodzy.pl where scans can be found.
match = re.search(r'href="([^"]*)">[^>]*s.png', stuff)
if match:
output['metryki_url'] = html.unescape(match.group(1))
# User that entered this record to the database.
match = re.search(r'uname=([^"]*)"', stuff)
if match:
output['user_entered'] = match.group(1)
return output
def convertMarriageRecord(record):
# Unparsed column with various information.
stuff = record[9]
husbandLastName, husbandLastNameNotes = extractNotes(record[3])
wifeLastName, wifeLastNameNotes = extractNotes(record[6])
output = {
'year': record[0].strip(),
'record_number': record[1].strip(),
'husband_first_name': record[2].strip(),
'husband_last_name': husbandLastName,
'husband_parents': record[4].strip(),
'wife_first_name': record[5].strip(),
'wife_last_name': wifeLastName,
'wife_parents': record[7].strip(),
'parish': record[8].strip(),
'stuff': stuff,
}
# Last name notes.
if husbandLastNameNotes:
output['nazwisko_meza_uwagi'] = husbandLastNameNotes
if wifeLastNameNotes:
output['nazwisko_zony_uwagi'] = wifeLastNameNotes
# List of notes.
match = re.search(r'i.png" title="([^"]*)"', stuff)
if match:
output['notes'] = html.unescape(match.group(1)).strip().split('\r')
# Where archives are kept.
match = re.search(r'z.png" title="([^"]*)"', stuff)
if match:
output['archives'] = html.unescape(match.group(1)).strip()
# URL to the place the archives are kept.
match = re.search(r'href="([^"]*)" target', stuff)
if match:
output['archives_url'] = match.group(1)
# URL to metryki.genealodzy.pl where scans can be found.
match = re.search(r'href="([^"]*)">[^>]*s.png', stuff)
if match:
output['metryki_url'] = match.group(1)
# User that entered this record to the database.
match = re.search(r'uname=([^"]*)"', stuff)
if match:
output['user_entered'] = match.group(1)
return output
def main():
# Map from prefix to list of records.
data = defaultdict(list)
# Read all files from INPUT_DIR.
for fileName in os.listdir(INPUT_DIR):
prefix = re.search('[^_]+_._[^_]+', fileName).group(0)
with open(os.path.join(INPUT_DIR, fileName)) as file:
content = json.load(file)
data[prefix] += content['data']
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# Parse records and write one parish per file.
for key, value in data.items():
voivodeship, recordType, parishId = key.split('_')
if recordType == 'S':
converter = convertMarriageRecord
else:
converter = convertPersonRecord
value[:] = [converter(x) for x in value]
print("Writing %s" % key)
metadata = {
'voivodeship': voivodeship,
'record_type': recordType,
'parish_id': parishId,
}
outputFile = os.path.join(OUTPUT_DIR, key + '.json')
with open(outputFile, 'w') as file:
outputData = {
'data': value,
'metadata': metadata,
}
json.dump(outputData, file)
if __name__ == "__main__":
main()
|
Carreau/gistpynb
|
nbconvert/lexers.py
|
Python
|
apache-2.0
| 904
| 0.006637
|
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from pygments.lexers import PythonLexer, BashLexer
from pygments.lexer import bygroups, using
from pygments.token import Keyword, Operator, Name, Text
#-----------------------------------------------------------------------------
# Classe
|
s
#-----------------------------------------------------------------------------
class IPythonLexer(PythonLexer):
name = 'IPython'
aliases = ['ip', 'ipython']
filenames = ['*.ipy']
tokens = PythonLexer.tokens.copy()
tokens['root'] = [
(r'(\%+)(\w+)\s+(\.*)(\n)', bygroups(Operator, Keyword, using(BashLexer), Text)),
(r'(\%+)(\w+)\b', bygroups(Operat
|
or, Keyword)),
(r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
] + tokens['root']
|
lightengine/lightstream
|
lightstream/oldlib/dac.py
|
Python
|
bsd-2-clause
| 5,679
| 0.035217
|
# j4cDAC test code
#
# Copyright 2011 Jacob Potter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import time
import struct
def pack_point(x, y, r, g, b, i = -1, u1 = 0, u2 = 0, flags = 0):
"""Pack some color values into a struct dac_point.
Values must be specified for x, y, r, g, and b. If a value is not
passed in for the other fields, i will default to max(r, g, b); the
rest default to zero.
"""
if i < 0:
i = max(r, g, b)
return struct.pack("<HhhHHHHHH", flags, x, y, r, g, b, i, u1, u2)
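# Illustrative note (not part of the original file): "<HhhHHHHHH" packs nine
# 2-byte fields, so every point is 18 bytes on the wire, and the intensity i
# falls back to the brightest colour channel. A hypothetical call:
#   pkt = pack_point(x=0, y=0, r=65535, g=0, b=0)   # i defaults to 65535
#   assert len(pkt) == 18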
class ProtocolError(Exception):
"""Exception used when a protocol error is detected."""
pass
class Status(object):
"""Represents a status response from the DAC."""
def __init__(self, data):
"""Initialize from a chunk of data."""
self.protocol_version, self.le_state, self.playback_state, \
self.source, self.le_flags, self.playback_flags, \
self.source_flags, self.fullness, self.point_rate, \
self.point_count = \
struct.unpack("<BBBBHHHHII", data)
def dump(self, prefix = " - "):
"""Dump to a string."""
lines = [
"Light engine: state %d, flags 0x%x" %
(self.le_state, self.le_flags),
"Playback: state %d, flags 0x%x" %
(self.playback_state, self.playback_flags),
"Buffer: %d points" %
(self.fullness, ),
"Playback: %d kpps, %d points played" %
(self.point_rate, self.point_count),
"Source: %d, flags 0x%x" %
(self.source, self.source_flags)
]
for l in lines:
print prefix + l
class BroadcastPacket(object):
"""Represents a broadcast packet from the DAC."""
def __init__(self, st):
"""Initialize from a chunk of data."""
self.mac = st[:6]
self.hw_rev, self.sw_rev, self.buffer_capacity, \
self.max_point_rate = struct.unpack("<HHHI", st[6:16])
self.status = Status(st[16:36])
def dump(self, prefix = " - "):
"""Dump to a string."""
lines = [
"MAC: " + ":".join(
"%02x" % (ord(o), ) for o in self.mac),
"HW %d, SW %d" %
(self.hw_rev, self.sw_rev),
"Capabilities: max %d points, %d kpps" %
(self.buffer_capacity, self.max_point_rate)
]
for l in lines:
print prefix + l
#self.status.dump(prefix)
class DAC(object):
"""A connection to a DAC."""
def read(self, l):
"""Read exactly length bytes from the connection."""
while l > len(self.buf):
self.buf += self.conn.recv(4096)
obuf = self.buf
self.buf = obuf[l:]
return obuf[:l]
def readresp(self, cmd):
"""Read a response from the DAC."""
data = self.read(22)
response = data[0]
cmdR = data[1]
status = Status(data[2:])
# status.dump()
if cmdR != cmd:
raise ProtocolError("expected resp for %r, got %r"
% (cmd, cmdR))
if response != "a":
raise ProtocolError("expected ACK, got %r"
% (response, ))
self.last_status = status
return status
def __init__(self, host, port = 7765):
"""Connect to the DAC over TCP."""
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((host, port))
#print "Connected to %s:%s" % (host, port)
self.conn = conn
self.buf = ""
# Read the "hello" message
first_status = self.readresp("?")
#first_status.dump()
def begin(self, lwm, rate):
cmd = struct.pack("<cHI", "b", lwm, rate)
self.conn.sendall(cmd)
return self.readresp("b")
def update(self, lwm, rate):
cmd = struct.pack("<cHI", "u", lwm, rate)
self.conn.sendall(cmd)
return self.readresp("u")
def encode_point(self, point):
try:
return pack_point(*point)
except Exception as e:
##print "Exception"
#print point
raise e
def write(self, points):
epoints = map(self.encode_point, points)
cmd = struct.pack("<cH", "d", len(epoints))
self.conn.sendall(cmd + "".join(epoints))
return self.readresp("d")
def prepare(self):
se
|
lf.conn.sendall("p")
return self.readresp("p")
def stop(self):
self.c
|
onn.sendall("s")
return self.readresp("s")
def estop(self):
self.conn.sendall("\xFF")
return self.readresp("\xFF")
def clear_estop(self):
self.conn.sendall("c")
return self.readresp("c")
def ping(self):
self.conn.sendall("?")
return self.readresp("?")
def play_stream(self, stream):
# First, prepare the stream
if self.last_status.playback_state == 2:
raise Exception("already playing?!")
elif self.last_status.playback_state == 0:
self.prepare()
started = 0
while True:
# How much room?
cap = 1799 - self.last_status.fullness
points = stream.read(cap)
if cap < 100:
time.sleep(0.005)
cap += 150
# print "Writing %d points" % (cap, )
t0 = time.time()
self.write(points)
t1 = time.time()
# print "Took %f" % (t1 - t0, )
if not started:
self.begin(0, 30000)
started = 1
def find_dac():
"""Listen for broadcast packets."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", 7654))
while True:
data, addr = s.recvfrom(1024)
bp = BroadcastPacket(data)
#print "Packet from %s: " % (addr, )
#bp.dump()
def find_first_dac():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", 7654))
data, addr = s.recvfrom(1024)
bp = BroadcastPacket(data)
#print "Packet from %s: " % (addr, )
return addr[0]
|
eeshangarg/zulip
|
zerver/views/realm_icon.py
|
Python
|
apache-2.0
| 2,456
| 0.00285
|
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_change_icon_source
from zerver.lib.exceptions import JsonableError
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.response import json_success
from zerver.lib.upload import upload_icon_image
from zerver.lib.url_encoding import append_url_query_string
from zerver.models import UserProfile
@require_realm_admin
def upload_icon(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
raise JsonableError(_("You must upload exactly one icon."))
icon_file = list(request.FILES.values())[0]
if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size:
raise JsonableError(
_("Uploaded file is larger than the allowed limit of {} MiB").format(
settings.MAX_ICON_FILE_SIZE_MIB,
)
)
upload_icon_image(icon_file, user_profile)
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile
)
icon_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=icon_url,
)
return json_success(json_result)
@require_realm_admin
def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    # We don't actually delete the icon because it might still
    # be needed if the URL was cached; it is rewritten
    # in any case after the next update.
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile
)
gravatar_url = realm_icon_url(
|
user_profile.realm)
json_result = dict(
icon_url=gravatar_url,
)
return json_success(json_result)
def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
url = realm_icon_url(user_profile.realm)
# We can rely on the URL already having query parameters. Because
# our temp
|
lates depend on being able to use the ampersand to
# add query parameters to our url, get_icon_url does '?version=version_number'
# hacks to prevent us from having to jump through decode/encode hoops.
url = append_url_query_string(url, request.META["QUERY_STRING"])
return redirect(url)
|
AusDTO/dto-digitalmarketplace-buyer-frontend
|
tests/app/views/test_login.py
|
Python
|
mit
| 14,240
| 0.001545
|
# coding: utf-8
from app.api_client.error import HTTPError
from app.helpers.login_helpers import generate_buyer_creation_token
from dmapiclient.audit import AuditTypes
from dmutils.email import generate_token, EmailError
from dmutils.forms import FakeCsrf
from ...helpers import BaseApplicationTest
from lxml import html
import mock
import pytest
from flask import session
import flask_featureflags as feature
EMAIL_SENT_MESSAGE = "send a link"
USER_CREATION_EMAIL_ERROR = "Failed to send user creation email."
PASSWORD_RESET_EMAIL_ERROR = "Failed to send password reset."
TOKEN_CREATED_BEFORE_PASSWORD_LAST_CHANGED_ERROR = "This password reset link is invalid."
USER_LINK_EXPIRED_ERROR = "The link you used to create an account may have expired."
def has_validation_errors(data, field_name):
document = html.fromstring(data)
form_field = document.xpath('//input[@name="{}"]'.format(field_name))
return 'invalid' in form_field[0].classes or 'invalid' in form_field[0].getparent().classes
class TestLogin(BaseApplicationTest):
def setup(self):
super(TestLogin, self).setup()
data_api_client_config = {'authenticate_user.return_value': self.user(
123, "[email protected]", 1234, 'name', 'name'
)}
self._data_api_client = mock.patch(
'app.main.views.login.data_api_client', **data_api_client_config
)
self.data_api_client_mock = self._data_api_client.start()
def teardown(self):
self._data_api_client.stop()
def test_should_show_login_page(self):
res = self.client.get(self.expand_path('/login'))
assert res.status_code == 200
assert 'private' in res.headers['Cache-Control']
assert "Sign in to the Marketplace" in res.get_data(as_text=True)
@mock.patch('app.main.views.login.data_api_client')
def test_redirect_on_buyer_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
res = self.client.post(self.url_for('main.process_login'), data={
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
assert 'Secure;' in res.headers['Set-Cookie']
@mock.patch('app.main.views.login.data_api_client')
def test_redirect_on_supplier_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(
123,
'[email protected]',
None,
None,
'Name',
role='supplier'
)
data_api_client.get_user.return_value = self.user(
123,
'[email protected]',
None,
None,
'Name',
role='supplier'
)
res = self.client.post(self.url_for('main.process_login'), data={
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert res.status_code == 302
assert res.location == 'http://localhost' + \
self.expand_path('/2/seller-dashboard')
assert 'Secure;' in res.headers['Set-Cookie']
def test_should_redirect_logged_in_buyer(self):
self.login_as_buyer()
res = self.client.get(self.url_for('main.render_login'))
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
def test_should_strip_whitespace_surrounding_login_email_address_field(self):
self.client.post(self.expand_path('/login'), data={
'email_address': ' [email protected] ',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
self.data_api_client_mock.authenticate_user.assert_called
|
_with('[email protected]', '1234567890')
def test_should_not_strip_whitespace_surrounding_login_password_field(self):
self.client.post(self.expand_path('/login'), data={
'email_address': '[email protected]',
'password': ' 1234567890 ',
'csrf_token': FakeCsrf.valid_token,
})
self.data_api_client_mock.authenticate_user.assert_called_with(
'valid@
|
email.com', ' 1234567890 ')
@mock.patch('app.main.views.login.data_api_client')
def test_ok_next_url_redirects_buyer_on_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
data = {
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
}
res = self.client.post(self.expand_path('/login?next={}'.format(self.expand_path('/bar-foo'))), data=data)
assert res.status_code == 302
assert res.location == 'http://localhost' + self.expand_path('/bar-foo')
@mock.patch('app.main.views.login.data_api_client')
def test_bad_next_url_redirects_user(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "[email protected]", None, None, 'Name')
data = {
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
}
res = self.client.post(self.expand_path('/login?next=http://badness.com'), data=data)
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
def test_should_have_cookie_on_redirect(self):
with self.app.app_context():
self.app.config['SESSION_COOKIE_DOMAIN'] = '127.0.0.1'
self.app.config['SESSION_COOKIE_SECURE'] = True
res = self.client.post(self.expand_path('/login'), data={
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
cookie_value = self.get_cookie_by_name(res, 'dm_session')
assert cookie_value['dm_session'] is not None
assert cookie_value["Domain"] == "127.0.0.1"
def test_should_redirect_to_login_on_logout(self):
res = self.client.get(self.expand_path('/logout'))
assert res.status_code == 302
assert res.location == 'http://localhost/2/login'
@mock.patch('app.main.views.login.data_api_client')
def test_should_return_a_403_for_invalid_login(self, data_api_client):
data_api_client.authenticate_user.return_value = None
data_api_client.get_user.return_value = None
res = self.client.post(self.expand_path('/login'), data={
'email_address': '[email protected]',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert self.strip_all_whitespace("Make sure you've entered the right email address and password") \
in self.strip_all_whitespace(res.get_data(as_text=True))
assert res.status_code == 403
def test_should_be_validation_error_if_no_email_or_password(self):
res = self.client.post(self.expand_path('/login'), data={'csrf_token': FakeCsrf.valid_token})
data = res.get_data(as_text=True)
assert res.status_code == 400
assert has_validation_errors(data, 'email_address')
assert has_validat
|
JamesLinEngineer/RKMC
|
addons/script.module.axel.downloader/lib/standalone_server.py
|
Python
|
gpl-2.0
| 1,470
| 0.010204
|
'''
Axe
|
lProxy XBMC Addon
Copyright (C) 2013 Eldorado
This
|
program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
import axelcommon
import axelproxy
# This is the XBMC-linked class. TODO: read the settings here and send them to the proxy (port, etc.)
#TODO: check if start at launch setting is configured!
#Address and port for the proxy to listen on
HOST_NAME = '127.0.0.1'
#HOST_NAME = 'localhost'
PORT_NUMBER = 45550 ## move this somewhere that can be configured from the UI
if __name__ == '__main__':
file_dest = axelcommon.profile_path #replace this line if you want to be specific about the download folder
print file_dest
axelproxy.ProxyManager().start_proxy(port=PORT_NUMBER, host_name=HOST_NAME,download_folder=file_dest), #more param to come
|
patilsangram/erpnext
|
erpnext/stock/doctype/stock_reconciliation/stock_reconciliation.py
|
Python
|
gpl-3.0
| 11,573
| 0.02575
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe import msgprint, _
from frappe.utils import cstr, flt, cint
from erpnext.stock.stock_ledger import update_entries_after
from erpnext.controllers.stock_controller import StockController
from erpnext.accounts.utils import get_company_default
from erpnext.stock.utils import get_stock_balance
class OpeningEntryAccountError(frappe.ValidationError): pass
class EmptyStockReconciliationItemsError(frappe.ValidationError): pass
class StockReconciliation(StockController):
def __init__(self, *args, **kwargs):
super(StockReconciliation, self).__init__(*args, **kwargs)
self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]
def validate(self):
if not self.expense_account:
self.expense_account = frappe.get_cached_value('Company', self.company, "stock_adjustment_account")
if not self.cost_center:
self.cost_center = frappe.get_cached_value('Company', self.company, "cost_center")
self.validate_posting_time()
self.remove_items_with_no_change()
self.validate_data()
self.validate_expense_account()
self.set_total_qty_and_amount()
def on_submit(self):
self.update_stock_ledger()
self.make_gl_entries()
def on_cancel(self):
self.delete_and_repost_sle()
self.make_gl_entries_on_cancel()
def remove_items_with_no_change(self):
"""Remove items if qty or rate is not changed"""
self.difference_amount = 0.0
def _changed(item):
qty, rate = get_stock_balance(item.item_code, item.warehouse,
self.posting_date, self.posting_time, with_valuation_rate=True)
if (item.qty==None or item.qty==qty) and (item.valuation_rate==None or item.valuation_rate==rate):
return False
else:
# set default as current rates
if item.qty==None:
item.qty = qty
if item.valuation_rate==None:
item.valuation_rate = rate
item.current_qty = qty
item.current_valuation_rate = rate
self.difference_amount += (flt(item.qty, item.precision("qty")) * \
flt(item.valuation_rate or rate, item.precision("valuation_rate")) \
- flt(qty, item.precision("qty")) * flt(rate, item.precision("valuation_rate")))
return True
items = list(filter(lambda d: _changed(d), self.items))
if not items:
frappe.throw(_("None of the items have any change in quantity or value."),
EmptyStockReconciliationItemsError)
elif len(items) != len(self.items):
self.items = items
for i, item in enumerate(self.items):
item.idx = i + 1
frappe.msgprint(_("Removed items with no change in quantity or value."))
def validate_data(self):
def _get_msg(row_num, msg):
return _("Row # {0}: ").format(row_num+1) + msg
self.validation_messages = []
item_warehouse_combinations = []
default_currency = frappe.db.get_default("currency")
for row_num, row in enumerate(self.items):
# find duplicates
if [row.item_code, row.warehouse] in item_warehouse_combinations:
self.validation_messages.append(_get_msg(row_num, _("Duplicate entry")))
else:
item_warehouse_combinations.append([row.item_code, row.warehouse])
self.validate_item(row.item_code, row_num+1)
# validate warehouse
if not frappe.db.get_value("Warehouse", row.warehouse):
self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system")))
# if both not specified
if row.qty in ["", None] and row.valuation_rate in ["", None]:
self.validation_messages.append(_get_msg(row_num,
_("Please specify either Quantity or Valuation Rate or both")))
# do not allow negative quantity
if flt(row.qty) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Quantity is not allowed")))
# do not allow negative valuation
if flt(row.valuation_rate) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Valuation Rate is not allowed")))
|
if row.qty and row.valuation_rate in ["", None]:
row.valuation_rate = get_stock_balance(row.item_code, row.warehouse,
self.posting_date, self.posting_time, with
|
_valuation_rate=True)[1]
if not row.valuation_rate:
# try if there is a buying price list in default currency
buying_rate = frappe.db.get_value("Item Price", {"item_code": row.item_code,
"buying": 1, "currency": default_currency}, "price_list_rate")
if buying_rate:
row.valuation_rate = buying_rate
else:
# get valuation rate from Item
row.valuation_rate = frappe.get_value('Item', row.item_code, 'valuation_rate')
# throw all validation messages
if self.validation_messages:
for msg in self.validation_messages:
msgprint(msg)
raise frappe.ValidationError(self.validation_messages)
def validate_item(self, item_code, row_num):
from erpnext.stock.doctype.item.item import validate_end_of_life, \
validate_is_stock_item, validate_cancelled_item
# using try except to catch all validation msgs and display together
try:
item = frappe.get_doc("Item", item_code)
# end of life and stock item
validate_end_of_life(item_code, item.end_of_life, item.disabled, verbose=0)
validate_is_stock_item(item_code, item.is_stock_item, verbose=0)
# item should not be serialized
if item.has_serial_no == 1:
raise frappe.ValidationError(_("Serialized Item {0} cannot be updated using Stock Reconciliation, please use Stock Entry").format(item_code))
# item managed batch-wise not allowed
if item.has_batch_no == 1:
raise frappe.ValidationError(_("Batched Item {0} cannot be updated using Stock Reconciliation, instead use Stock Entry").format(item_code))
# docstatus should be < 2
validate_cancelled_item(item_code, item.docstatus, verbose=0)
except Exception as e:
self.validation_messages.append(_("Row # ") + ("%d: " % (row_num)) + cstr(e))
def update_stock_ledger(self):
""" find difference between current and expected entries
and create stock ledger entries based on the difference"""
from erpnext.stock.stock_ledger import get_previous_sle
for row in self.items:
previous_sle = get_previous_sle({
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
if previous_sle:
if row.qty in ("", None):
row.qty = previous_sle.get("qty_after_transaction", 0)
if row.valuation_rate in ("", None):
row.valuation_rate = previous_sle.get("valuation_rate", 0)
if row.qty and not row.valuation_rate:
frappe.throw(_("Valuation Rate required for Item in row {0}").format(row.idx))
if ((previous_sle and row.qty == previous_sle.get("qty_after_transaction")
and row.valuation_rate == previous_sle.get("valuation_rate"))
or (not previous_sle and not row.qty)):
continue
self.insert_entries(row)
def insert_entries(self, row):
"""Insert Stock Ledger Entries"""
args = frappe._dict({
"doctype": "Stock Ledger Entry",
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"voucher_type": self.doctype,
"voucher_no": self.name,
"company": self.company,
"stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"),
"is_cancelled": "No",
"qty_after_transaction": flt(row.qty, row.precision("qty")),
"valuation_rate": flt(row.valuation_rate, row.precision("valuation_rate"))
})
self.make_sl_entries([args])
def delete_and_repost_sle(self):
""" Delete Stock Ledger Entries related to this voucher
and repost future Stock Ledger Entries"""
existing_entries = frappe.db.sql("""select distinct item_code, warehouse
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doctype, self.name), as_dict=1)
# delete entries
frappe.db.sql("""delete from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name))
# repost future entries for selected item_code, warehouse
for entries in existing_entries:
update_entries
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/preprocessing/__init__.py
|
Python
|
mit
| 371
| 0.002695
|
"""Imports for Python API.
This file is M
|
ACHI
|
NE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.tools.api.generator.api.keras.preprocessing import image
from tensorflow.tools.api.generator.api.keras.preprocessing import sequence
from tensorflow.tools.api.generator.api.keras.preprocessing import text
|
ivanlyon/exercises
|
kattis/k_sequences.py
|
Python
|
mit
| 969
| 0.003096
|
'''
Swap counting
Status: Accepted
'''
###############################################################################
def inversions(constants, variables):
"""Number of swaps"""
|
if variables:
pow2 = pow(2, variables - 1, 1_000_000_007)
return pow2 * (constants * 2 + variables)
return constants
###############################################################################
def main():
"""Read input and print output"""
zeroes, qmarks, swaps = 0, 0, 0
for glyph in reversed(input()):
if glyph == '
|
0':
zeroes += 1
else:
if glyph == '1':
swaps += inversions(zeroes, qmarks)
if glyph == '?':
swaps += inversions(zeroes, qmarks) + swaps
qmarks += 1
swaps %= 1_000_000_007
print(swaps)
###############################################################################
if __name__ == '__main__':
main()
|
hackaugusto/raiden
|
raiden/network/rpc/client.py
|
Python
|
mit
| 34,000
| 0.002441
|
import copy
import json
import os
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple
import gevent
import structlog
from eth_utils import (
decode_hex,
encode_hex,
is_checksum_address,
remove_0x_prefix,
to_canonical_address,
to_checksum_address,
)
from gevent.lock import Semaphore
from hexbytes import HexBytes
from requests.exceptions import ConnectTimeout
from web3 import Web3
from web3.contract import ContractFunction
from web3.eth import Eth
from web3.gas_strategies.rpc import rpc_gas_price_strategy
from web3.middleware import geth_poa_middleware
from web3.utils.contracts import prepare_transaction
from web3.utils.empty import empty
from web3.utils.toolz import assoc
from raiden import constants
from raiden.exceptions import (
AddressWithoutCode,
EthNodeCommunicationError,
EthNodeInterfaceError,
InsufficientFunds,
)
from raiden.network.rpc.middleware import (
block_hash_cache_middleware,
connection_test_middleware,
http_retry_with_backoff_middleware,
)
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.utils import pex, privatekey_to_address
from raiden.utils.ethereum_clients import is_supported_client
from raiden.utils.filters import StatelessFilter
from raiden.utils.solc import (
solidity_library_symbol,
solidity_resolve_symbols,
solidity_unresolved_symbols,
)
from raiden.utils.typing import (
ABI,
Address,
AddressHex,
BlockHash,
BlockSpecification,
Nonce,
TransactionHash,
)
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
def logs_blocks_sanity_check(from_block: BlockSpecification, to_block: BlockSpecification) -> None:
"""Checks that the from/to blocks passed onto log calls contain only appropriate types"""
is_valid_from = isinstance(from_block, int) or isinstance(from_block, str)
assert is_valid_from, "event log from block can be integer or latest,pending, earliest"
is_valid_to = isinstance(to_block, int) or isinstance(to_block, str)
assert is_valid_to, "event log to block can be integer or latest,pending, earliest"
def geth_assert_rpc_interfaces(web3: Web3):
try:
web3.version.node
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the web3 rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.eth.blockNumber
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the eth rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.net.version
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the net rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.txpool.inspect
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the txpool rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
def parity_assert_rpc_interfaces(web3: Web3):
try:
web3.version.node
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the web3 rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.eth.blockNumber
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the eth rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.net.version
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the net rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.manager.request_blocking(
"parity_nextNonce", ["0x0000000000000000000000000000000000000000"]
)
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the parity rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
def parity_discover_next_available_nonce(web3: Web3, address: AddressHex) -> Nonce:
"""Returns the next available nonce for `address`."""
next_nonce_encoded = web3.manager.request_blocking("parity_nextNonce", [address])
return Nonce(int(next_nonce_encoded, 16))
def geth_discover_next_available_nonce(web3: Web3, address: AddressHex) -> Nonce:
"""Returns the next available nonce for `address`."""
# The nonces of the mempool transactions are considered used, and it's
# assumed these transactions are different from the ones currently pending
# in the client. This is a simplification, otherwise it would be necessary
# to filter the local pending transactions based on the mempool.
pool = web3.txpool.inspect or {}
# pool is roughly:
#
# {'queued': {'account1': {nonce1: ... nonce2: ...}, 'account2': ...}, 'pending': ...}
#
# Pending refers to the current block and if it contains transactions from
# the user, these will be the younger transactions. Because this needs the
# largest nonce, queued is checked first.
address = to_checksum_address(address)
queued = pool.get("queued", {}).get(address)
if queued:
return Nonce(max(int(k) for k in queued.keys()) + 1)
pending = pool.get("pending", {}).get(address)
if pending:
return Nonce(max(int(k) for k in pending.keys()) + 1)
# The first valid nonce is 0, therefore the count is already the next
# available nonce
return web3.eth.getTransactionCount(address, "latest")
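# Illustrative sketch (not part of the original module): given a txpool
# inspect result such as
#   {"pending": {"0xAbc...": {"7": "..."}},
#    "queued":  {"0xAbc...": {"9": "...", "10": "..."}}}
# the queued section wins and the next available nonce is max(9, 10) + 1 = 11;
# only when neither section lists the address does the function fall back to
# web3.eth.getTransactionCount(address, "latest").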
def check_address_has_code(client: "JSONRPCClient", address: Address, contract_name: str = ""):
""" Checks that the given address contains code. """
result = client.web3.eth.getCode(to_checksum_address(address), "latest")
if not result:
if contract_name:
formated_contract_name = "[{}]: ".format(contract_name)
else:
formated_contract_name = ""
raise AddressWithoutCode(
"{}Address {} does not contain code".format(
formated_contract_name, to_checksum_address(address)
)
)
def deploy_dependencies_symbols(all_contract):
dependencies = {}
symbols_to_contract = dict()
for contract_name in all_contract:
symbol = solidity_library_symbol(contract_name)
if symbol in symbols_to_contract:
raise ValueError("Conflicting library names.")
symbols_to_contract[symbol] = contract_name
for contract_name, contract in all_contract.items():
unresolved_symbols = solidity_unresolved_symbols(contract["bin"])
dependencies[contract_name] = [
|
symbols_to_contract[unresolved] for unresolved in unresolved_symbols
]
return dependencies
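# Illustrative sketch (not part of the original module): if contract "B"'s
# bytecode still contains the unresolved library symbol of contract "A" while
# "A" itself has no unresolved symbols, the returned map would look like
#   {"A": [], "B": ["A"]}
# (the exact symbol strings come from the solidity_* helpers imported above).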
def dependencies_order_of_build(target_contract, dependencies_map):
""" Return an ordered list of contracts that is sufficient to successfully
deploy the target contract.
Note:
This function assumes that the `dependencies_map` is an acyclic graph.
"""
if not dependencies_map:
return [target_contract]
|
if target_contract not in dependencies_map:
raise ValueError("no dependencies defined for {}".format(target_contract))
order = [target_contract]
todo = list(dependencies_map[target_contract])
while todo:
target_contract = todo.pop(0)
target_pos = len(order)
for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all its dependencies
if dependency in order:
target_pos = order.index(dependency)
else:
todo.append(dependency)
order.insert(target_pos, target_con
|
npdoty/pywikibot
|
setup.py
|
Python
|
mit
| 8,699
| 0.000345
|
# -*- coding: utf-8 -*-
"""Installer script for Pywikibot 3.0 framework."""
#
# (C) Pywikibot team, 2009-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import os
import sys
try:
# Work around a traceback on Python < 2.7.4 and < 3.3.1
# http://bugs.python.org/issue15881#msg170215
import multiprocessing
except ImportError:
pass
# pyflakes workaround
__unused__ = (multiprocessing, )
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
PY26 = (PYTHON_VERSION < (2, 7))
versions_required_message = """
Pywikibot not available on:
%s
Pywikibot is only supported under Python 2.6.5+, 2.7.2+ or 3.3+
"""
def python_is_supported():
"""Check that Python is supported."""
# Any change to this must be copied to pwb.py
return (PYTHON_VERSION >= (3, 3, 0) or
(PY2 and PYTHON_VERSION >= (2, 7, 2)) or
(PY26 and PYTHON_VERSION >= (2, 6, 5)))
if not python_is_supported():
raise RuntimeError(versions_required_message % sys.version)
test_deps = ['bz2file', 'mock']
dependencies = ['requests!=2.18.2']
# the irc module has no Python 2.6 support since 10.0
irc_dep = 'irc==8.9' if sys.version_info < (2, 7) else 'irc'
csv_dep = 'unicodecsv!=0.14.0' if PYTHON_VERSION < (2, 7) else 'unicodecsv'
extra_deps = {
# Core library dependencies
'eventstreams': ['sseclient'],
'isbn': ['python-stdnum'],
'Graphviz': ['pydot>=1.0.28'],
'Google': ['google>=1.7'],
'IRC': [irc_dep],
'mwparserfromhell': ['mwparserfromhell>=0.3.3'],
'Tkinter': ['Pillow<3.5.0' if PY26 else 'Pillow'],
# 0.6.1 supports socket.io 1.0, but WMF is using 0.9 (T91393 and T85716)
'rcstream': ['socketIO-client<0.6.1'],
'security': ['requests[security]', 'pycparser!=2.14'],
'mwoauth': ['mwoauth>=0.2.4,!=0.3.1'],
'html': ['BeautifulSoup4'],
}
if PY2:
# Additional core library dependencies which are only available on Python 2
extra_deps.update({
'csv': [csv_dep],
'MySQL': ['oursql'],
'unicode7': ['unicodedata2>=7.0.0-2'],
})
script_deps = {
'flickrripper.py': ['Pillow<3.5.0' if PY26 else 'Pillow'],
'states_redirect.py': ['pycountry'],
'weblinkchecker.py': ['memento_client>=0.5.1,!=0.6.0'],
'patrol.py': ['mwparserfromhell>=0.3.3'],
}
# flickrapi 1.4.4 installs a root logger in verbose mode; 1.4.5 fixes this.
# The problem doesn't exist in flickrapi 2.x.
# pywikibot accepts flickrapi 1.4.5+ on Python 2, as it has been stable for a
# long time, and only depends on python-requests 1.x, whereas flickrapi 2.x
# depends on python-requests 2.x, which is first packaged in Ubuntu 14.04
# and will be first packaged for Fedora Core 21.
# flickrapi 1.4.x does not run on Python 3, and setuptools can only
# select flickrapi 2.x for Python 3 installs.
script_deps['flickrripper.py'].append(
'flickrapi>=1.4.5,<2' if PY26 else 'flickrapi')
# lunatic-python is only available for Linux
if sys.platform.startswith('linux'):
script_deps['script_wui.py'] = [irc_dep, 'lunatic-python', 'crontab']
# The main pywin32 repository contains a Python 2 only setup.py with a small
# wrapper setup3.py for Python 3.
# http://pywin32.hg.sourceforge.net:8000/hgroot/pywin32/pywin32
# The main pywinauto repository doesn't support Python 3.
# The repositories used below have a Python 3 compliant setup.py
dependency_links = [
'git+https://github.com/AlereDevices/lunatic-python.git#egg=lunatic-python',
'hg+https://bitbucket.org/TJG/pywin32#egg=pywin32',
'git+https://github.com/vasily-v-ryabov/pywinauto-64#egg=pywinauto',
'git+https://github.com/nlhepler/pydot#egg=pydot-1.0.29',
]
if PYTHON_VERSION < (2, 7, 3):
# work around distutils hardcoded unittest dependency
# work around T106512
import unittest
__unused__ += (unittest, )
if 'test' in sys.argv:
import unittest2
sys.modules['unittest'] = unittest2
if sys.version_info[0] == 2:
if PY26:
script_deps['replicate_wiki.py'] = ['argparse']
dependencies.append('future>=0.15.0') # provides collections backports
dependencies += extra_deps['unicode7'] # T102461 workaround
# tools.ip does not have a hard dependency on an IP address module,
# as it falls back to using regexes if one is not available.
# The functional backport of py3 ipaddress is acceptable:
# https://pypi.python.org/pypi/ipaddress
# However the Debian package python-ipaddr is also supported:
# https://pypi.python.org/pypi/ipaddr
# Other backports are likely broken.
# ipaddr 2.1.10+ is distributed with Debian and Fedora. See T105443.
dependencies.append('ipaddr>=2.1.10')
if sys.version_info < (2, 7, 9):
# Python versions before 2.7.9 will cause urllib3 to trigger
# InsecurePlatformWarning warnings for all HTTPS requests. By
# installing with security extras, requests will automatically set
# them up and the warnings will stop. See
# <https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning>
# for more details.
dependencies += extra_deps['security']
script_deps['data_ingestion.py'] = extra_deps['csv']
try:
import bz2
__unused__ += (bz2, )
except ImportError:
# Use bz2file if the python is not compiled with bz2 support.
dependencies.append('bz2file')
# Some of the ui_tests depend on accessing the console window
|
's menu
# to set the console font and copy and paste, achieved using pywinauto
# which depends on pywin32.
# These tests may be disabled because pywin32 depends on VC++, is time
# consuming to build, and the console window can't be accessed du
|
ring appveyor
# builds.
# Microsoft makes available a compiler for Python 2.7
# http://www.microsoft.com/en-au/download/details.aspx?id=44266
# If you set up your own compiler for Python 3, on 3.3 two demo files
# packaged with pywin32 may fail. Remove com/win32com/demos/ie*.py
if os.name == 'nt' and os.environ.get('PYSETUP_TEST_NO_UI', '0') != '1':
# FIXME: tests/ui_tests.py suggests pywinauto 0.4.2
    # which isn't provided on pypi.
test_deps += ['pywin32', 'pywinauto>=0.4.0']
extra_deps.update(script_deps)
# Add all dependencies as test dependencies,
# so all scripts can be compiled for script_tests, etc.
if 'PYSETUP_TEST_EXTRAS' in os.environ:
test_deps += list(itertools.chain(*(extra_deps.values())))
if 'oursql' in test_deps and os.name == 'nt':
test_deps.remove('oursql') # depends on Cython
if 'requests[security]' in test_deps:
# Bug T105767 on Python 2.7 release 9+
if sys.version_info[:2] == (2, 7) and sys.version_info[2] >= 9:
test_deps.remove('requests[security]')
# These extra dependencies are needed, otherwise unittest fails to load tests.
if sys.version_info[0] == 2:
test_deps += extra_deps['csv'] + ['mock']
else:
test_deps += ['six']
from setuptools import setup, find_packages
name = 'pywikibot'
version = '3.0'
try:
import subprocess
date = subprocess.check_output(['git', 'log', '-1', '--format=%ci']).strip()
date = date.decode().split(' ')[0].replace('-', '')
version = version + "." + date
except Exception as e:
print(e)
version = version + "-dev"
github_url = 'https://github.com/wikimedia/pywikibot-core'
setup(
name=name,
version=version,
description='Python MediaWiki Bot Framework',
long_description=open('pypi_description.rst').read(),
keywords=('pywikibot', 'python', 'mediawiki', 'bot', 'wiki', 'framework',
'wikimedia', 'wikipedia', 'pwb', 'pywikipedia', 'API'),
maintainer='The Pywikibot team',
maintainer_email='[email protected]',
license='MIT License',
packages=[str(name)] + [package
for package in find_packages()
if package.startswith('pywikibot.')],
install_requires=dependencies,
dependency_links=dependency_links,
extras_require=extra_deps,
url='https://www.mediawiki.org/wiki/Pywikibot',
test_suite="tests.collector",
tests_requir
|
MarketShareData/Internal
|
code/test1/inheritanceTest.py
|
Python
|
mit
| 600
| 0.006667
|
__author__ = 'http://www.python-course.eu/python3_inheritance
|
.php'
class Person:
def __init__(self, first, last):
self.firstname = first
self.lastname = last
def Name(self):
return self.firstname + " " + self.lastname
class Employee(Person):
def __init__(self, first, last, staffnum):
Person.__init__(self,first, last)
self.staffnumber = staffnum
def GetEmployee(self):
return self.Name() + ", " + self.staffnumber
x = Person("Marge", "Simpson")
y = Em
|
ployee("Homer", "Simpson", "1007")
print(x.Name())
print(y.GetEmployee())
|
rht/zulip
|
zerver/webhooks/clubhouse/view.py
|
Python
|
apache-2.0
| 27,498
| 0.002655
|
from functools import partial
from typing import Any, Callable, Dict, Generator, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
EPIC_NAME_TEMPLATE = "**{name}**"
STORY_NAME_TEMPLATE = "[{name}]({app_url})"
COMMENT_ADDED_TEMPLATE = (
"New comment added to the {entity} {name_template}:\n``` quote\n{text}\n```"
)
NEW_DESC_ADDED_TEMPLATE = (
"New description added to the {entity} {name_template}:\n``` quote\n{new}\n```"
)
DESC_CHANGED_TEMPLATE = (
"Description for the {entity} {name_template} was changed from:\n"
"``` quote\n{old}\n```\nto\n``` quote\n{new}\n```"
)
DESC_REMOVED_TEMPLATE = "Description for the {entity} {name_template} was removed."
STATE_CHANGED_TEMPLATE = (
"State of the {entity} {name_template} was changed from **{old}** to **{new}**."
)
NAME_CHANGED_TEMPLATE = (
"The name of the {entity} {name_template} was changed from:\n"
"``` quote\n{old}\n```\nto\n``` quote\n{new}\n```"
)
ARCHIVED_TEMPLATE = "The {entity} {name_template} was {operation}."
STORY_TASK_TEMPLATE = "Task **{task_description}** was {operation} the story {name_template}."
STORY_TASK_COMPLETED_TEMPLATE = (
"Task **{task_description}** ({name_template}) was completed. :tada:"
)
STORY_ADDED_REMOVED_EPIC_TEMPLATE = (
"The story {story_name_template} was {operation} the epic {epic_name_template}."
)
STORY_EPIC_CHANGED_TEMPLATE = "The story {story_name_template} was moved from {old_epic_name_template} to {new_epic_name_template}."
STORY_ESTIMATE_TEMPLATE = "The estimate for the story {story_name_template} was set to {estimate}."
FILE_ATTACHMENT_TEMPLATE = (
"A {type} attachment `{file_name}` was added to the story {name_template}."
)
LABEL_TEMPLATE = "**{name}**"
STORY_LABEL_TEMPLATE = "The label {labels} was added to the story {name_template}."
STORY_LABEL_PLURAL_TEMPLATE = "The labels {labels} were added to the story {name_template}."
STORY_UPDATE_PROJECT_TEMPLATE = (
"The story {name_template} was moved from the **{old}** project to **{new}**."
)
STORY_UPDATE_TYPE_TEMPLATE = (
"The type of the story {name_template} was changed from **{old_type}** to **{new_type}**."
)
DELETE_TEMPLATE = "The {entity_type} **{name}** was deleted."
STORY_UPDATE_OWNER_TEMPLATE = "New owner added to the story {name_template}."
TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE = " ({old} -> {new})"
STORY_GITHUB_PR_TEMPLATE = (
"New GitHub PR [#{name}]({url}) opened for story {name_template}{workflow_state_template}."
)
STORY_GITHUB_COMMENT_PR_TEMPLATE = "Existing GitHub PR [#{name}]({url}) associated with story {name_template}{workflow_state_template}."
STORY_GITHUB_BRANCH_TEMPLATE = "New GitHub branch [{name}]({url}) associated with story {name_template}{workflow_state_template}."
STORY_UPDATE_BATCH_TEMPLATE = "The story {name_template} {templates}{workflow_state_template}."
STORY_UPDATE_BATCH_CHANGED_TEMPLATE = "{operation} from {sub_templates}"
STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE = "{entity_type} **{old}** to **{new}**"
STORY_UPDATE_BATCH_ADD_REMOVE_TEMPLATE = "{operation} with {entity}"
def get_action_with_primary_id(payload: Dict[str, Any]) -> Dict[str, Any]:
for action in payload["actions"]:
if payload["primary_id"] == action["id"]:
action_with_primary_id = action
return action_with_primary_id
def get_event(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]:
event = "{}_{}".format(action["entity_type"], action["action"])
    # We only consider the change to be a batch update if there are multiple stories (thus there is no primary_id)
if event == "story_update" and payload.get("primary_id") is None:
return "{}_{}".format(event, "batch")
if event in IGNORED_EVENTS:
return None
changes = action.get("changes")
if changes is not None:
if changes.get("description") is not None:
event = "{}_{}".format(event, "description")
elif changes.get("state") is not None:
eve
|
nt = "{}_{}".format(event, "state")
elif changes.get("workflow_state_id") is not None:
event = "{}_{}".format(event, "state")
elif changes.get("name") is not None:
event = "{}_{}".format(event, "name")
elif changes.get("archived") is not None:
event = "{}_{}".format(event, "archived")
elif changes.get("complete") is not None:
event = "{
|
}_{}".format(event, "complete")
elif changes.get("epic_id") is not None:
event = "{}_{}".format(event, "epic")
elif changes.get("estimate") is not None:
event = "{}_{}".format(event, "estimate")
elif changes.get("file_ids") is not None:
event = "{}_{}".format(event, "attachment")
elif changes.get("label_ids") is not None:
event = "{}_{}".format(event, "label")
elif changes.get("project_id") is not None:
event = "{}_{}".format(event, "project")
elif changes.get("story_type") is not None:
event = "{}_{}".format(event, "type")
elif changes.get("owner_ids") is not None:
event = "{}_{}".format(event, "owner")
return event
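# Illustrative note (not part of the original module): a primary action such as
#   {"id": 1, "entity_type": "story", "action": "update",
#    "changes": {"estimate": {"new": 3, "old": None}}}
# in a payload whose primary_id matches maps to the event name
# "story_update_estimate"; a story_update payload with no primary_id at all
# would instead become "story_update_batch".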
def get_topic_function_based_on_type(payload: Dict[str, Any], action: Dict[str, Any]) -> Any:
entity_type = action["entity_type"]
return EVENT_TOPIC_FUNCTION_MAPPER.get(entity_type)
def get_delete_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
return DELETE_TEMPLATE.format(**action)
def get_story_create_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
if action.get("epic_id") is None:
message = "New story [{name}]({app_url}) of type **{story_type}** was created."
kwargs = action
else:
message = "New story [{name}]({app_url}) was created and added to the epic **{epic_name}**."
kwargs = {
"name": action["name"],
"app_url": action["app_url"],
}
epic_id = action["epic_id"]
refs = payload["references"]
for ref in refs:
if ref["id"] == epic_id:
kwargs["epic_name"] = ref["name"]
return message.format(**kwargs)
def get_epic_create_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
message = "New epic **{name}**({state}) was created."
return message.format(**action)
def get_comment_added_body(payload: Dict[str, Any], action: Dict[str, Any], entity: str) -> str:
actions = payload["actions"]
kwargs = {"entity": entity}
for action in actions:
if action["id"] == payload["primary_id"]:
kwargs["text"] = action["text"]
elif action["entity_type"] == entity:
name_template = get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
)
kwargs["name_template"] = name_template
return COMMENT_ADDED_TEMPLATE.format(**kwargs)
def get_update_description_body(
payload: Dict[str, Any], action: Dict[str, Any], entity: str
) -> str:
desc = action["changes"]["description"]
kwargs = {
"entity": entity,
"new": desc["new"],
"old": desc["old"],
"name_template": get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
),
}
if kwargs["new"] and kwargs["old"]:
body = DESC_CHANGED_TEMPLATE.format(**kwargs)
elif kwargs["new"]:
body = NEW_DESC_ADDED_TEMPLATE.format(**kwargs)
else:
body = DESC_REMOVED_TEMPLATE.format(**kwargs)
return body
def get_epic_update_state_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
state = action["changes"]["state"]
kwargs = {
"entity": "epic",
"new": state["new"],
"old": state["old"],
"name_template": EPIC_NAME_TEMPLATE.f
|
cactusbin/nyt
|
matplotlib/examples/axes_grid/demo_colorbar_with_inset_locator.py
|
Python
|
unlicense
| 1,052
| 0.013308
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
axins1 = inset_axes(ax1,
width="50%", # width = 10% of parent_bbox width
height="5%", # height : 50%
loc=1)
im1=ax1.imshow([[1,2],[2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1,2,3])
axins1.xaxis.set_ticks_position("bottom")
axins = inset_axes(ax2,
width="5%", # width = 10% of parent_bbox width
height="50%", # height : 50%
loc=3,
bbox_to_anchor=(1.05, 0., 1, 1),
|
bbox_transform=ax2.transAxes,
borderpad=0,
)
# Controlling the placement of the
|
inset axes is basically the same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im=ax2.imshow([[1,2],[2, 3]])
plt.colorbar(im, cax=axins, ticks=[1,2,3])
plt.draw()
plt.show()
|
buffer/thug
|
thug/ActiveX/modules/ScriptingFileSystemObject.py
|
Python
|
gpl-2.0
| 5,185
| 0.010222
|
import os
import string
import random
import logging
from thug.ActiveX.modules import WScriptShell
from thug.ActiveX.modules import TextStream
from thug.ActiveX.modules import File
from thug.ActiveX.modules import Folder
from thug.OS.Windows import win32_files
from thug.OS.Windows import win32_folders
log = logging.getLogger("Thug")
def BuildPath(self, arg0, arg1): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] BuildPath("{arg0}", "{arg1}")')
return f"{arg0}\\{arg1}"
def CopyFile(self, source, destination, overwritefiles = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CopyFile("{source}", "{destination}")')
log.TextFiles[destination] = log.TextFiles[source]
def DeleteFile(self, filespec, force = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] DeleteFile("{filespec}", {force})')
def CreateTextFile(self, filename, overwrite = False, _unicode = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateTextFile("{filename}", '
f'"{overwrite}", '
f'"{_unicode}")')
stream = TextStream.TextStream()
stream._filename = filename
return stream
def CreateFolder(self, path): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateFolder("{path}")')
return Folder.Folder(path)
def FileExists(self, filespec): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FileExists("{filespec}")')
if not filespec:
return True
if filespec.lower() in win32_files:
return True
if getattr(log, "TextFiles", None) and filespec in log.TextFiles:
return True
return False
def FolderExists(self, folder): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FolderExists("{folder}")')
return str(folder).lower() in win32_folders
def GetExtensionName(self, path): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetExtensionName("{path}")')
ext = os.path.splitext(path)[1]
return ext if ext else ""
def GetFile(self, filespec): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetFile("{filespec}")')
return File.File(filespec)
def GetSpecialFolder(self, arg):
log.T
|
hugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetSpecialFolder("{arg}")')
arg = int(arg)
f
|
older = ''
if arg == 0:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%windir%")
elif arg == 1:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%SystemRoot%\\system32")
elif arg == 2:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%TEMP%")
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] Returning {folder} for GetSpecialFolder("{arg}")')
return folder
def GetTempName(self): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn('[Scripting.FileSystemObject ActiveX] GetTempName()')
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
def MoveFile(self, source, destination): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] MoveFile("{source}", "{destination}")')
log.TextFiles[destination] = log.TextFiles[source]
del log.TextFiles[source]
def OpenTextFile(self, sFilePathAndName, ForWriting = True, flag = True):
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] OpenTextFile("{sFilePathAndName}", '
f'"{ForWriting}" ,'
f'"{flag}")')
log.ThugLogging.log_exploit_event(self._window.url,
"Scripting.FileSystemObject ActiveX",
"OpenTextFile",
data = {
"filename" : sFilePathAndName,
"ForWriting": ForWriting,
"flag" : flag
},
forward = False)
if getattr(log, 'TextFiles', None) is None:
log.TextFiles = {}
if sFilePathAndName in log.TextFiles:
return log.TextFiles[sFilePathAndName]
stream = TextStream.TextStream()
stream._filename = sFilePathAndName
if log.ThugOpts.local and sFilePathAndName in (log.ThugLogging.url, ): # pragma: no cover
with open(sFilePathAndName, encoding = 'utf-8', mode = 'r') as fd:
data = fd.read()
stream.Write(data)
log.TextFiles[sFilePathAndName] = stream
return stream
|
meowlab/shadowsocks-comment
|
shadowsocks/lru_cache.py
|
Python
|
apache-2.0
| 4,886
| 0.003684
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement # use the features of python 3
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrenc
|
y, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes a long pause
class LRUCache(collections.
|
MutableMapping): # ABCs for read-only and mutable mappings.
"""This class is not thread safe"""
def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
self.timeout = timeout # the cache expire time
self.close_callback = close_callback # called when value will be swept from cache
self._store = {} # dict<key, value>: store cache data key value
self._time_to_keys = collections.defaultdict(list) # defaultdict<time, list<key>>
# defaultdict: dict subclass that calls a factory function to supply missing values
self._keys_to_last_time = {} # dict<key, time> stores the last time of one key visited.
self._last_visits = collections.deque() # deque<time> store all the time once key is visited.
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._time_to_keys[t].append(key)
self._last_visits.append(t)
return self._store[key]
def __setitem__(self, key, value):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._store[key] = value
self._time_to_keys[t].append(key)
self._last_visits.append(t)
def __delitem__(self, key):
# O(1)
del self._store[key]
del self._keys_to_last_time[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def sweep(self):
# O(m)
now = time.time()
c = 0 # use to log how many keys has been swept.
while len(self._last_visits) > 0:
least = self._last_visits[0] # fetch the oldest time point
            if now - least <= self.timeout:  # the oldest time point hasn't expired yet
break
if self.close_callback is not None: # callback function has been set
for key in self._time_to_keys[least]: # fetch each key visited on the oldest time
                    if key in self._store:  # found the cached key
if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]  # last visit of this key is older than timeout, so it expired
self.close_callback(value) # call callback
for key in self._time_to_keys[least]:
                self._last_visits.popleft()  # drop the oldest time point from the visits queue
if key in self._store:
if now - self._keys_to_last_time[key] > self.timeout:
del self._store[key]
del self._keys_to_last_time[key]
c += 1
del self._time_to_keys[least]
if c:
logging.debug('%d keys swept' % c)
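# Minimal usage sketch (added for illustration; not part of the original
# module): the owner is expected to call sweep() periodically, and
# close_callback fires for every value that has expired.
def _demo_lru_usage():
    def on_expire(value):
        logging.debug('expired: %s', value)
    cache = LRUCache(timeout=0.1, close_callback=on_expire)
    cache['conn'] = object()  # O(1) insert
    _ = cache['conn']         # O(1) lookup; refreshes the last-visit time
    time.sleep(0.2)
    cache.sweep()             # O(m) sweep: expired entries removed, callback fired
    assert 'conn' not in cache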
def test():
c = LRUCache(timeout=0.3)
c['a'] = 1
assert c['a'] == 1
time.sleep(0.5)
c.sweep()
assert 'a' not in c
c['a'] = 2
c['b'] = 3
time.sleep(0.2)
c.sweep()
assert c['a'] == 2
assert c['b'] == 3
time.sleep(0.2)
c.sweep()
c['b']
time.sleep(0.2)
c.sweep()
assert 'a' not in c
assert c['b'] == 3
time.sleep(0.5)
c.sweep()
assert 'a' not in c
assert 'b' not in c
if __name__ == '__main__':
test()
|
jphnoel/udata
|
udata/core/dataset/signals.py
|
Python
|
agpl-3.0
| 226
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from blinker import Namespace
namespace = Namespace()
#: Trigerred when a dataset is published
on_dataset_published = namespace.signal('on-dataset-published')
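# Illustrative usage sketch (added; not part of the original module). Blinker
# receivers take the sender as their first positional argument; the receiver
# below is a hypothetical example.
@on_dataset_published.connect
def _log_publication(dataset, **extra):
    print('dataset published: {0!r}'.format(dataset))
# Application code would emit the signal with: on_dataset_published.send(dataset)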
|
ktan2020/legacy-automation
|
win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/lib_setup/fontselect.py
|
Python
|
mit
| 1,713
| 0.005838
|
# AnalogClock's font selector for setup dialog
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
import wx
from wx.lib.newevent import NewEvent
from wx.lib.buttons import GenButton
#----------------------------------------------------------------------------
(FontSelectEvent, EVT_FONTSELECT) = NewEvent()
#----------------------------------------------------------------------------
class FontSelect(GenButton):
def __init__(self, parent, size=(75, 21), value=None):
GenButton.__init__(self, parent, wx.ID_ANY, label="Select...",
size=size)
self.SetBezelWidth(1)
self.parent = parent
self.SetValue(value)
self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self)
    def GetValue(self):
return self.value
def SetValue(self, value):
if value is None:
value = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.value = value
def OnClick(self, event):
data = wx.FontData()
data.EnableEffects(False)
font = self.value; font.SetPointSize(10)
data.SetInitialFont(font)
dlg = wx.FontDialog(self, data)
changed = dlg.ShowModal() == wx.ID_OK
        if changed:
            data = dlg.GetFontData()
self.value = data.GetChosenFont()
self.Refresh()
dlg.Destroy()
if changed:
nevt = FontSelectEvent(id=self.GetId(), obj=self, val=self.value)
wx.PostEvent(self.parent, nevt)
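#----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module): a hypothetical parent panel creates the selector and listens for
# the FontSelectEvent posted once the font dialog is accepted.
class _DemoPanel(wx.Panel):
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.selector = FontSelect(self, value=None)
        self.Bind(EVT_FONTSELECT, self.OnFontSelected)
    def OnFontSelected(self, event):
        font = event.val  # the wx.Font chosen in the dialog
        print(font.GetNativeFontInfoDesc())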
#
##
### eof
|
weargoggles/jwt-oauth-test
|
project/hooks/views.py
|
Python
|
gpl-3.0
| 995
| 0.00201
|
import binascii
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.constant_time import bytes_eq
import cryptography.hazmat.backends
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
crypto_backend = cryptography.hazmat.backends.default_backend()
def verify_signature(request, request_body):
hmac_hmac = hmac.HMAC(settings.GITHUB_WEBHOOK_SECRET, SHA1(), crypto_backend)
hmac_hmac.update(request_body)
signature = b'sha1=' + binascii.hexlify(hmac_hmac.finalize())
return bytes_eq(signature, request.META['HTTP_X_HUB_SIGNATURE'])
@csrf_exempt
def receive_hook(request):
verify_signature(request, request.body)
send_mail('Hook from github', request.body, settings.SERVER_EMAIL,
map(lambda t: t[-1], settings.ADMINS))
return HttpResponse(status=200)
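# Sketch (added for illustration; not part of the original project): the view
# above computes the signature comparison but does not act on the result. A
# hypothetical stricter variant could reject requests that fail verification:
@csrf_exempt
def receive_hook_strict(request):
    if not verify_signature(request, request.body):
        return HttpResponse(status=403)
    send_mail('Hook from github', request.body, settings.SERVER_EMAIL,
              map(lambda t: t[-1], settings.ADMINS))
    return HttpResponse(status=200)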
|
arskom/spyne
|
spyne/util/tdict.py
|
Python
|
lgpl-2.1
| 2,861
| 0.00035
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The typed dict module"""
from itertools import chain
class tdict(dict):
def __init__(self, kt=None, vt=None, data=None):
"""This is a typed dict implementation that optionally enforces given
types on contained values on assignment."""
self._kt = kt
self._vt = vt
if kt is None and vt is None:
self.check = self._check_noop
elif kt is None:
self.check = self._check_v
elif vt is None:
self.check = self._check_k
else:
self.check = self._check_kv
if data is not None:
self.update(data)
def _check_noop(self, *_):
pass
def _check_k(self, key, _):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
def _check_v(self, _, value):
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def _check_kv(self, key, value):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def __setitem__(self, key, value):
self.check(key, value)
super(tdict, self).__setitem__(key, value)
def update(self, E=None, **F):
try:
it = chain(E.items(), F.items())
except AttributeError:
            it = chain(E, F.items())  # E is a sequence of key/value pairs here
for k, v in it:
self[k] = v
    def setdefault(self, k, d=None):
        if self._kt is not None:
            self._check_k(k, d)
        if self._vt is not None:
            self._check_v(k, d)
        super(tdict, self).setdefault(k, d)
@classmethod
def fromkeys(cls, S, v=None):
kt = vt = None
if len(S) > 0:
kt, = set((type(s) for s in S))
            if v is not None:
vt = type(v)
retval = tdict(kt, vt)
        for s in S:
            retval[s] = v
return retval
def repr(self):
return "tdict(kt=%s, vt=%s, data=%s)" % \
(self._kt, self._vt, super(tdict, self).__repr__())
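# Minimal usage sketch (added for illustration; not part of the original
# module): once kt/vt are given, key and value types are enforced on
# assignment, and fromkeys() infers them from its arguments.
if __name__ == '__main__':
    ages = tdict(kt=str, vt=int, data={'alice': 30})
    ages['bob'] = 31  # ok: str key, int value
    try:
        ages[42] = 'x'  # rejected: wrong key and value types
    except TypeError as e:
        print('rejected: %r' % (e,))
    print(tdict.fromkeys(['a', 'b'], 0))  # kt=str, vt=int inferred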
|
kdlucas/pyrering
|
lib/baserunner_test.py
|
Python
|
apache-2.0
| 10,873
| 0.003219
|
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Mingyu Wu)
"""Unittest for baserunner module."""
__author__ = '[email protected] (Mingyu Wu)'
import os
import shutil
import sys
import tempfile
import time
import unittest
from lib import baserunner
from lib import filesystemhandlerextend
from lib import mock_emailmessage
from lib import mock_reporter
from lib import mock_scanscripts
from lib import pyreringconfig
from lib import pyreringutil
global_settings = pyreringconfig.GlobalPyreRingConfig.settings
class BaseRunnerTest(unittest.TestCase):
"""Unit test cases for BaseRunner class."""
def setUp(self):
# I should config global_settings here instead of read it from file system.
self.tempdir = tempfile.mkdtemp()
root_dir = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0],
'../'))
global_settings.update(
{'report_dir': os.path.join(self.tempdir, 'report'),
'email_recipients': os.getenv('LOGNAME'),
'host_name': 'test.host',
'log_file': 'pyrering.log',
'file_errors': False,
'project_name': 'pyrering_unittest',
'root_dir': root_dir,
'sendmail': False,
'runner': 'baserunner',
'source_dir': os.path.join(root_dir, 'test'),
'tester': os.getenv('LOGNAME'),
'FATAL_STRING': 'Fatal:',
'header_file': 'header_info.txt',
'time': time.strftime('%Y%m%d%H%M'),
'skip_setup': False,
})
# get a default config and mocks
self.one_config = pyreringutil.PRConfigParser().Default()
self.scanner = mock_scanscripts.MockScanScripts()
self.emailmessage = mock_emailmessage.MockEmailMessage()
self.reporter = mock_reporter.MockTxtReporter()
self.runner = baserunner.BaseRunner(
name='test',
scanner=self.scanner,
email_message=self.emailmessage,
filesystem=filesystemhandlerextend.FileSystemHandlerExtend(),
reporter=self.reporter)
self.runner.Prepare()
if not os.path.isdir(global_settings['report_dir']):
os.makedirs(global_settings['report_dir'])
# I don't want the unit test to mess with the original log file.
global_settings['log_file'] += '.unittest'
def tearDown(self):
self.runner.CleanUp()
self.runner = ''
pyreringconfig.Reset()
self.scanner.CleanConfig()
shutil.rmtree(self.tempdir)
def testFindHeaderInfoFile(self):
global_settings['header_file'] = os.path.join(self.tempdir, 'header.txt')
fh = open(global_settings['header_file'], 'w')
fh.write('test info')
fh.close()
self.one_config['TEST_SCRIPT'] = 'echo 1'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testFindHeaderInfoFile'], False)
self.assertEqual(self.reporter.header, 'test info')
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
# Positive Test Cases:
def testOneCommand(self):
"""A simple sleep command takes some time to finish."""
# prepare the test script here
self.one_config['TEST_SCRIPT'] = 'sleep 3'
# set the mock scanscript to return this thing.
self.scanner.SetConfig([self.one_config])
# now run the test and return should be expected.
result = self.runner.Run(['testOneCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testEchoCommand(self):
"""A simple command has output on stdout."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoCommand'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
def testEchoToSTDERRCommand(self):
"""A simple command has outpu
|
t redirect to stderr."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoToSTDERRCommand >&2'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoSTDERRCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
  def testRunScript(self):
"""A real script to run."""
self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'],
'test/test1_echo.sh')
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testRunScript'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has the echo output
def testRunScripts(self):
"""2 scripts to be run."""
self.one_config['TEST_SCRIPT'] = 'echo testRunScripts1'
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'echo testRunScripts2'
self.scanner.SetConfig([self.one_config, config2])
result = self.runner.Run(['testRunScripts'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 2)
# TODO(mwu): verify both scripts run fine
def testEmailSend(self):
"""Test Email should be send."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 1'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.assertTrue(True)
else:
self.fail(msg='Send email was not called')
def testEmailNotSendIfTestPass(self):
"""Test email should not go if all tests pass."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 0'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.fail()
# Negative Test Cases
def testTimeoutCommand(self):
"""A command times out."""
self.one_config['TEST_SCRIPT'] = 'echo timeouttest; sleep 8'
self.one_config['TIMEOUT'] = 2
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testTimeoutCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.timeout, 1)
def testNonExistCommand(self):
"""Test a wrong system command."""
self.one_config['TEST_SCRIPT'] = 'nonexist_command'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testNonExistScript(self):
"""Test a nonexist script."""
self.one_config['TEST_SCRIPT'] = '/tmp/nonexist_script.sh'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistScript'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testPermissionDenied(self):
"""Test something without permission."""
self.one_config['TEST_SCRIPT'] = 'touch /pyrering.txt'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testPermissionDenied'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testCatchWarningMessage(self):
"""Test a command has warning output."""
self.one_config['TEST_SCRIPT'] = 'echo warn message'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testCatchWarningMessage'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testCatchFatalMessage(self):
"""Test a command has fatal error message even exit code still 0."""
self.one_config['TEST_SCRI
|
ptrendx/mxnet
|
python/mxnet/optimizer/optimizer.py
|
Python
|
apache-2.0
| 66,906
| 0.002735
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Weight updating functions."""
import logging
import math
import pickle
import warnings
import os
import numpy
from ..base import py_str
from ..ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs, array, multiply)
from ..ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update,
signsgd_update, signum_update,
multi_sgd_update, multi_sgd_mom_update, multi_mp_sgd_update,
multi_mp_sgd_mom_update)
from ..ndarray import sparse
from ..random import normal
__all__ = [
'AdaDelta', 'AdaGrad', 'Adam', 'Adamax', 'DCASGD', 'FTML', 'Ftrl', 'LBSGD',
'NAG', 'NDabs', 'Nadam', 'Optimizer', 'RMSProp', 'SGD', 'SGLD', 'Signum',
'Test', 'Updater', 'ccSGD', 'create', 'get_updater', 'register'
]
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
class Optimizer(object):
"""The base class inherited by all optimizers.
Parameters
----------
rescale_grad : float, optional, default 1.0
Multiply the gradient with `rescale_grad` before updating. Often
choose to be ``1.0/batch_size``.
param_idx2name : dict from int to string, optional, default None
A dictionary that maps int index to string name.
clip_gradient : float, optional, default None
Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
learning_rate : float, optional, default 0.01
The initial learning rate.
lr_scheduler : LRScheduler, optional, default None
The learning rate scheduler.
wd : float, optional, default 0.0
The weight decay (or L2 regularization) coefficient. Modifies objective
by adding a penalty for having large weights.
sym: Symbol, optional, default None
The Symbol this optimizer is applying to.
begin_num_update : int, optional, default 0
The initial number of updates.
multi_precision : bool, optional, default False
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
param_dict : dict of int -> gluon.Parameter, default None
Dictionary of parameter index to gluon.Parameter, used to lookup parameter attributes
such as lr_mult, wd_mult, etc. param_dict shall not be deep copied.
Properties
----------
learning_rate : float
The current learning rate of the optimizer. Given an Optimizer object
optimizer, its learning rate can be accessed as optimizer.learning_rate.
"""
def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
clip_gradient=None, learning_rate=0.01,
lr_scheduler=None, sym=None, begin_num_update=0,
multi_precision=False, param_dict=None):
self.rescale_grad = rescale_grad
self.lr = learning_rate
self.lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self.lr_scheduler.base_lr = learning_rate
self.wd = wd
self.lr_mult = {}
self.wd_mult = {}
self.begin_num_update = begin_num_update
self.num_update = begin_num_update
self._index_update_count = {}
self.clip_gradient = clip_gradient
self.multi_precision = multi_precision
self.aggregate_num = 0
if param_idx2name is None:
param_idx2name = {}
assert isinstance(param_idx2name, dict), \
'param_idx2name should be a dict of param indexes to names.'
self.idx2name = param_idx2name.copy()
self.sym_info = (sym.attr_dict(), sym.list_arguments()) if sym is not None else ()
self.param_dict = param_dict if param_dict else {}
self.set_lr_mult({})
self.set_wd_mult({})
opt_registry = {}
@staticmethod
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
warnings.warn('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s' %
(klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass
@staticmethod
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
@property
def learning_rate(self):
if self.lr_scheduler is not None:
return self.lr_scheduler(self.num_update)
else:
return self.lr
def create_state(self, index, weight):
"""Creates auxiliary state for a given weight.
Some optimizers require additional states, e.g. as momentum, in addition
to gradients in order to update weights. This function creates state
for a given weight which will be used in `update`. This function is
called only once for each weight.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
        precision copy if original weight is FP16.
Th
|
xmikos/qhangups
|
qhangups/ui_qhangupsbrowser.py
|
Python
|
gpl-3.0
| 1,146
| 0.004363
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qhangups/qhangupsbrowser.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_QHangupsBrowser(object):
def setupUi(self, QHangupsBrowser):
QHangupsBrowser.setObjectName("QHangupsBrowser")
QHangupsBrowser.resize(600, 450)
self.verticalLayout = QtWidgets.QVBoxLayout(QHangupsBrowser)
        self.verticalLayout.setObjectName("verticalLayout")
self.browserWebView = QtWebKitWidgets.QWebView(QHangupsBrowser)
self.browserWebView.setUrl(QtCore.QUrl("about:blank"))
self.browserWebView.setObjectName("browserWebView")
self.verticalLayout.addWidget(self.browserWebView)
self.retranslateUi(QHangupsBrowser)
QtCore.QMetaObject.connectSlotsByName(QHangupsBrowser)
def retranslateUi(self, QHangupsBrowser):
_translate = QtCore.QCoreApplication.translate
QHangupsBrowser.setWindowTitle(_translate("QHangupsBrowser", "QHangups - Browser"))
from PyQt5 import QtWebKitWidgets
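# Typical usage sketch (added for illustration; not part of the generated
# file): pyuic output like this is mixed into a widget subclass and wired up
# via setupUi(); the class below is a hypothetical example.
class _DemoBrowserWidget(QtWidgets.QWidget, Ui_QHangupsBrowser):
    def __init__(self, url, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        self.browserWebView.load(QtCore.QUrl(url))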
|
M4rtinK/pyside-android
|
tests/QtGui/qlayout_ref_test.py
|
Python
|
lgpl-2.1
| 4,597
| 0.003481
|
'''Test cases for QLayout handling of child widgets references'''
import unittest
from sys import getrefcount
from PySide.QtGui import QHBoxLayout, QVBoxLayout, QGridLayout, QWidget
from PySide.QtGui import QStackedLayout, QFormLayout
from PySide.QtGui import QApplication, QPushButton, QLabel
from helper import UsesQApplication
class SaveReference(UsesQApplication):
'''Test case to check if QLayout-derived classes increment the refcount
of widgets passed to addWidget()'''
# Adding here as nose can't see the qapplication attrib we inherit
qapplication = True
def setUp(self):
#Acquire resources
super(SaveReference, self).setUp()
self.widget1 = QPushButton('click me')
self.widget2 = QLabel('aaa')
def tearDown(self):
#Release resources
del self.widget2
del self.widget1
super(SaveReference, self).tearDown()
def checkLayoutReference(self, layout):
        #Checks the reference count handling of layout.addWidget
self.assertEqual(getrefcount(self.widget1), 2)
layout.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
self.assertEqual(getrefcount(self.widget2), 2)
layout.addWidget(self.widget2)
self.assertEqual(getrefcount(self.widget2), 3)
# Check if doesn't mess around with previous widget refcount
self.assertEqual(getrefcount(self.widget1), 3)
def testMoveLayout(self):
l = QHBoxLayout()
self.assertEqual(getrefcount(self.widget1), 2)
l.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
w = QWidget()
w.setLayout(l)
self.assertEqual(getrefcount(self.widget1), 3)
def testHBoxReference(self):
#QHBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QHBoxLayout(w))
def testVBoxReference(self):
#QVBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QVBoxLayout(w))
def testGridReference(self):
#QGridLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QGridLayout(w))
def testFormReference(self):
#QFormLayout.addWidget reference count
w = QWidget()
        self.checkLayoutReference(QFormLayout(w))
def testStackedReference(self):
#QStackedLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QStackedLayout(w))
class MultipleAdd(UsesQApplication):
    '''Test case to check if refcount is incremented only once when multiple
    calls to addWidget are made with the same widget'''
qapplication = True
def setUp(self):
#Acquire resources
super(MultipleAdd, self).setUp()
self.widget = QPushButton('click me')
self.win = QWidget()
self.layout = QHBoxLayout(self.win)
def tearDown(self):
#Release resources
del self.widget
del self.layout
del self.win
super(MultipleAdd, self).tearDown()
def testRefCount(self):
#Multiple QLayout.addWidget calls on the same widget
self.assertEqual(getrefcount(self.widget), 2)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
class InternalAdd(UsesQApplication):
def testInternalRef(self):
mw = QWidget()
w = QWidget()
ow = QWidget()
topLayout = QGridLayout()
# unique reference
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
topLayout.addWidget(w, 0, 0)
topLayout.addWidget(ow, 1, 0)
        # layout keeps the reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mainLayout = QGridLayout()
mainLayout.addLayout(topLayout, 1, 0, 1, 4)
# the same reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mw.setLayout(mainLayout)
        # now transfer the ownership to mw
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
del mw
# remove the ref and invalidate the widget
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
if __name__ == '__main__':
unittest.main()
|
FreshXOpenSource/wallaby-base
|
wallaby/pf/peer/searchDocument.py
|
Python
|
bsd-2-clause
| 1,556
| 0.005141
|
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
from viewer import Viewer
from editor import Editor
class SearchDocument(Peer):
#Receiving pillows:
Search = Pillow.In
Sending = [
Viewer.In.Document
]
Routings = [
(Editor.Out.FieldChanged, Viewer.In.Refresh)
]
def __init__(self, searchRoom, resultRoom=None, document=None, identifier=None, appendWildcard=False):
Peer.__init__(self, searchRoom)
if not resultRoom:
            resultRoom = searchRoom
self._resultRoom = resultRoom
self._identifier = identifier
self._appendWildcard = appendWildcard
if document:
self._document = document
else:
from wallaby.common.queryDocument import QueryDocument
self._document = QueryDocument()
self._document.set('identifier', self._identifier)
self._document.set('query', '*')
self._catch(SearchDocument.In.Search, self._search)
def initialize(self):
# set search field editable
self._throw(Viewer.In.Document, self._document)
self._throw(Editor.In.Enable, True)
def _search(self, pillow, feathers):
query = self._document.get('query')
if self._appendWildcard and (len(query) == 0 or query[-1] != "*"):
self._document.set('query', query + "*")
from abstractQuery import AbstractQuery
self._throw(self._resultRoom+":"+AbstractQuery.In.Query, self._document)
|
tonsom1592-cmis/tonsom1592-cmis-cs2
|
recursion.py
|
Python
|
cc0-1.0
| 624
| 0.0625
|
def countup(n):
if n >= 10:
print "Blastoff!"
else:
print n
countup(n+1)
def main():
countup(1)
main()
def countdown_from_to(start,stop):
if start == stop:
print "Blastoff!"
elif start <= stop:
print "Invalid pair"
else:
print start
countdown_from_to(start - 1,stop)
def main():
countdown_from_to(89,53)
main()
def adder(sum_):
number = (raw_input("Next Number"))
if (number) == "":
print "The Sum Is {}".format(sum_)
|
elif number == float:
print number
else:
sum_ += float(number)
print "Running total: {}".format(sum_)
adder(sum_)
def main():
sum_ = 0
adder(sum_)
main()
|
nsubiron/configure-pyz
|
setup.py
|
Python
|
gpl-3.0
| 1,666
| 0.006002
|
#!/usr/bin/env python
"""zip source directory tree"""
import argparse
import fnmatch
import logging
import os
import re
import subprocess
import zipfile
def get_version():
command = ['git', 'describe', '--tags', '--dirty', '--always']
    return subprocess.check_output(command).decode('utf-8')
def source_walk(root):
root = os.path.abspath(root)
regex = re.compile(fnmatch.translate('*.py[co]'))
for path, _, files in os.walk(root):
        files[:] = [f for f in files if regex.match(f) is None]
for filename in files:
fullpath = os.path.join(path, filename)
yield fullpath, os.path.relpath(fullpath, root)
def setup():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-d', '--debug',
action='store_true',
help='print debug information')
argparser.add_argument(
'-o',
metavar='zipfile',
dest='output',
help='output file name')
argparser.add_argument(
'source',
help='source directory')
args = argparser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel)
if not os.path.isdir(args.source):
logging.critical('"%s" is not a directory', args.source)
return
if args.output is None:
args.output = args.source + '.zip'
with zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED) as fzip:
fzip.writestr('version.txt', get_version())
for path, relpath in source_walk(args.source):
fzip.write(path, relpath)
if __name__ == '__main__':
setup()
|
jcnix/abg
|
enemies.py
|
Python
|
gpl-3.0
| 2,843
| 0.008442
|
# -*- coding: utf-8 -*-
# File: enemy.py
# Author: Casey Jones
#
# Created on July 20, 2009, 4:48 PM
#
# This file is part of Alpha Beta Gamma (abg).
#
# ABG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ABG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ABG. If not, see <http://www.gnu.org/licenses/>.
#class to handle all enemies on screen
import sys, pygame, frametime, properties, random
from enemy import Enemy
class Enemies:
enemies = []
    blackSurface = pygame.Surface([Enemy.enemy.get_width(), Enemy.enemy.get_height()])
blackSurface.fill([0,0,0])
screen = None
def set_screen(self, screen):
self.screen = screen
def create(self):
        #random x position across the screen width where the new enemy spawns
where_spawn = random.randint(1, properties.width - Enemy.enemy.get_width())
lenemy = Enemy(where_spawn)
self.enemies.append(lenemy)
def move(self, bullet):
to_update = []
if frametime.can_create_enemy():
self.create()
to_delete = []
to_update += [x.enemyrect for x in self.enemies]
if len(self.enemies) > 0:
for i in range(len(self.enemies)):
self.enemies[i].update(bullet)
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
self.screen.blit(Enemy.enemy, self.enemies[i].enemyrect)
#If enemy goes off the bottom of the screen
if self.enemies[i].enemyrect.top > 800:
to_delete.append(i)
for x in to_delete:
self.remove(x)
to_update += [x.enemyrect for x in self.enemies]
return to_update
def getEnemies(self):
return self.enemies
def remove(self, index):
try:
to_update = self.enemies[index].enemyrect
self.screen.blit(self.blackSurface, self.enemies[index].enemyrect)
del self.enemies[index]
return to_update
except IndexError:
print("IndexError for enemy {0} of {1}".format(index, len(self.enemies)))
def game_over(self):
for i in range(len(self.enemies)):
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
del self.enemies[:]
|
gppezzi/easybuild-framework
|
easybuild/tools/module_naming_scheme/mns.py
|
Python
|
gpl-2.0
| 7,666
| 0.004696
|
##
# Copyright 2011-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Module naming scheme API.
:author: Jens Timmerman (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import re
from easybuild.base import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import Singleton
from easybuild.tools.py2vs3 import create_base_metaclass
DEVEL_MODULE_SUFFIX = '-easybuild-devel'
# singleton metaclass: only one instance is created
BaseModuleNamingScheme = create_base_metaclass('BaseModuleNamingScheme', Singleton, object)
class ModuleNamingScheme(BaseModuleNamingScheme):
"""Abstract class for a module naming scheme implementation."""
REQUIRED_KEYS = None
def __init__(self, *args, **kwargs):
"""Initialize logger."""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
def is_sufficient(self, keys):
"""Determine whether specified list of easyconfig parameters is sufficient for this module naming scheme."""
if self.REQUIRED_KEYS is not None:
return set(keys).issuperset(set(self.REQUIRED_KEYS))
else:
raise EasyBuildError("Constant REQUIRED_KEYS is not defined, "
"should specify required easyconfig parameters.")
def requires_toolchain_details(self):
"""
Determine whether toolchain details are required by this module naming scheme,
e.g. whether one of det_toolchain_* functions are relied upon.
"""
return False
def det_full_module_name(self, ec):
"""
Determine full module name, relative to the top of the module path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with full module name, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
raise NotImplementedError
def det_short_module_name(self, ec):
"""
Determine short module name, i.e. the name under which modules will be exposed to users.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with module name, e.g. '<name>/<version>'
"""
# by default: full module name doesn't include a $MODULEPATH subdir
return self.det_full_module_name(ec)
def det_install_subdir(self, ec):
"""
Determine name of software installation subdirectory of install path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with name of subdirectory, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
# by default: use full module name as name for install subdir
return self.det_full_module_name(ec)
def det_module_subdir(self, ec):
"""
Determine subdirectory for module file in $MODULEPATH.
This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with subdir path (relative to $MODULEPATH), e.g. '<compiler>/<mpi_lib>'
"""
# by default: no subdirectory
return ''
def det_module_symlink_paths(self, ec):
"""
Determine list of paths in which symlinks to module files must be created.
"""
# by default: make a symlink from moduleclass subdirectory of $MODULEPATH
return [ec['moduleclass']]
def det_modpath_extensions(self, ec):
"""
Determine list of subdirectories for which to extend $MODULEPATH with when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: an empty list of subdirectories to extend $MODULEPATH with
return []
def det_user_modpath_extensions(self, ec):
"""
|
        Determine list of subdirectories relative to the user-specific modules directory for which to extend
$MODULEPATH with when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: use "system" module path extensions of naming scheme
return self.det_modpath_extensions(ec)
def det_init_modulepaths(self, ec):
"""
Determine initial module paths, where the modules that are top of the hierarchy (if any) live.
"""
return []
def expand_toolchain_load(self, ec=None):
"""
Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.
This is useful when toolchains are not exposed to users.
"""
# by default: just include a load statement for the toolchain
return False
def is_short_modname_for(self, short_modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
Default implementation checks via a strict regex pattern, and assumes short module names are of the form:
<name>/<version>[-<toolchain>]
"""
modname_regex = re.compile('^%s(/\S+)?$' % re.escape(name))
res = bool(modname_regex.match(short_modname))
self.log.debug("Checking whether '%s' is a module name for software with name '%s' via regex %s: %s",
short_modname, name, modname_regex.pattern, res)
return res
def det_make_devel_module(self):
"""
Determine if a devel module should be generated.
Can be used to create a separate set of modules with a different naming scheme.
Software is already installed beforehand with one naming scheme, including development module.
"""
return True
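# Illustrative sketch (added; not part of EasyBuild): a minimal flat naming
# scheme built on the API above. Only det_full_module_name() must be
# implemented; REQUIRED_KEYS drives is_sufficient().
class ExampleFlatNamingScheme(ModuleNamingScheme):
    """Hypothetical scheme producing '<name>/<version><versionsuffix>' module names."""
    REQUIRED_KEYS = ['name', 'version', 'versionsuffix']
    def det_full_module_name(self, ec):
        return '%s/%s%s' % (ec['name'], ec['version'], ec['versionsuffix'])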
|
grpc/grpc-ios
|
native_src/third_party/googletest/googletest/test/googletest-json-outfiles-test.py
|
Python
|
apache-2.0
| 5,705
| 0.003155
|
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyOne',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
        u'classname': u'PropertyOne',
u'SetUpProp': u'1',
u'TestSomeProperty': u'1',
u'TearDownProp': u'1',
}],
}],
}
EXPECTED_2 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyTwo',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'timestamp': u'*',
u'time': u'*',
u'classname': u'PropertyTwo',
u'SetUpProp': u'2',
u'TestSomeProperty': u'2',
u'TearDownProp': u'2',
}],
}],
}
class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
    # for json output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, '')
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2)
def _TestOutFile(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + '.json'
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
if os.path.isfile(output_file1):
with open(output_file1) as f:
actual = json.load(f)
else:
with open(output_file2) as f:
actual = json.load(f)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
gtest_test_utils.Main()
|
xuru/pyvisdk
|
pyvisdk/enums/profile_numeric_comparator.py
|
Python
|
mit
| 307
| 0
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
ProfileNumericComparator = Enum(
    'equal',
    'greaterThan',
'greaterThanEqual',
'lessThan',
'lessThanEqual',
'notEqual',
)
|
poeticcapybara/pythalesians
|
pythalesians-examples/marketliquidity_examples.py
|
Python
|
apache-2.0
| 758
| 0.006596
|
__author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, eithe
|
r express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
marketliquidity_examples
Shows how to calculate market liquidity using bid/ask data and tick counts.
"""
# TODO
|
pdelsante/thug
|
thug/ActiveX/modules/NamoInstaller.py
|
Python
|
gpl-2.0
| 1,217
| 0.008217
|
# NamoInstaller ActiveX Control 1.x - 3.x
# CVE-NOMATCH
import logging
log = logging.getLogger("Thug")
def Install(self, arg):
if len(arg) > 1024:
        log.ThugLogging.log_exploit_event(self._window.url,
                                          "NamoInstaller ActiveX",
"Overflow in Install method")
log.DFT.check_shellcode(arg)
if str([arg]).find('http') > -1:
log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Insecure download from URL %s' % (arg, ))
log.ThugLogging.log_exploit_event(self._window.url,
"NamoInstaller ActiveX",
"Insecure download from URL",
forward = False,
data = {
"url": arg
}
)
try:
self._window._navigator.fetch(arg, redirect_type = "NamoInstaller Exploit")
except Exception:
log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Fetch failed')
|
chemalot/chemalot
|
bin/gOpt.py
|
Python
|
apache-2.0
| 7,487
| 0.024709
|
#!/usr/bin/env python
#Alberto
from __future__ import print_function, division
import os
import glob
import argparse
from subprocess import Popen, PIPE
from argparse import RawTextHelpFormatter
import sys
import re
from textwrap import dedent
from subprocess import call
def warn(*objs):
print(*objs, file=sys.stderr)
class FileLineWrapper(object):
"""File reader with line numbers and readRequiredLine"""
def __init__(self, f):
self.f = f
self.line = 0
def close(self):
return self.f.close()
def readline(self):
self.line += 1
return self.f.readline()
def readRequiredLine(self):
line = self.readline()
if not line:
raise IOError("Unexpected end of file found: %d" % self.line)
return line
def __enter__(self):
self.f.__enter__()
return self
def __exit__(self, tp, value, traceback):
self.f.__exit__(tp, value, traceback)
class GJob:
"""Class representing a gaussian job"""
COMMANDPat = re.compile("\s*#[PTN]* ")
SKIPGeomPat = re.compile("geom=\S*check", re.IGNORECASE)
LINKPat = re.compile("\s*--Link1--")
OPTPat = re.compile("\sopt([=\(]+([^\s\)]+)?\)?)", re.IGNORECASE)
FREQPat = re.compile("\sfreq[\s=\(]", re.IGNORECASE)
MAXCylPat = re.compile("MaxCycles=\d+", re.IGNORECASE)
CALCFCPat = re.compile("readFC|calcfc|calchffc|rcfc", re.IGNORECASE)
GEOMPat = re.compile("\s*geom(=\S+)", re.IGNORECASE)
GUESSPat = re.compile("\s*guess(=\S+)", re.IGNORECASE)
def __init__(self, start, command, middle, coords, end):
self.start = start # % directives
self.command = command # gaussian command line
self.middle = middle # comment, charge and multiplicity
self.coords = coords
self.end = end # anything after the coordinates
def __str__(self):
return ''.join(
            [self.start, self.command, self.middle, self.coords, self.end])
def isOpt(self):
return GJob.OPTPat.search(self.command)
def isFreq(self):
return GJob.FREQPat.search(self.command)
def execute(self, outName):
com = dedent("""
date>>%s;gaussian.csh >>%s<<'gJOBComs'
%s'gJOBComs'""") % (outName,outName, str(self))
#warn(com)
status = call(["/bin/csh",
|
"-fc", com])
if status > 0:
raise IOError("Gaussian returned error code=%d" % status)
p = Popen("tail -n 10 "+outName, shell=True, bufsize=2048,
stdin=PIPE, stdout=PIPE, close_fds=True)
stdin,stdout= p.stdin, p.stdout
#stdin,stdout = os.popen2("tail -n 10 "+outName)
stdin.close()
lines = stdout.read()
stdout.close()
return b" Normal termination of Gaussian" in lines
def copy(self, chkGeom=False, optSteps='', optCalcFC=False, optReadFC=False):
newCom = self.command
newMiddle = self.middle
newCoords = self.coords
ma = GJob.OPTPat.search(newCom)
if (optSteps or optCalcFC or optReadFC) and not ma:
raise Exception("Not an optimization:" + str(self))
elif optSteps or optCalcFC or optReadFC:
optArgs= ma.group(2)
if optSteps:
optArgs= GJob.MAXCylPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "MaxCycles="+str(optSteps)
if optCalcFC:
optArgs = GJob.CALCFCPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "CalcFC"
if optReadFC:
optArgs = GJob.CALCFCPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "ReadFC"
optArgs = optArgs.replace(",,",",")
if optArgs.startswith(",") : optArgs = optArgs[1:]
newCom = GJob.OPTPat.sub(" opt=(%s)"%optArgs,newCom)
if chkGeom:
newCom = GJob.GEOMPat.sub("",newCom)
newCom = GJob.GUESSPat.sub("",newCom)
newCom = newCom.rstrip() + " Geom=AllCheck Guess=TCheck\n"
newMiddle = ""
newCoords = ""
return GJob(self.start, newCom, newMiddle, newCoords, self.end)
@staticmethod
def readNext(inFile):
start = ""
command = ""
middle = ""
coords = ""
end = ""
line = inFile.readline()
if not line: return None
while not GJob.COMMANDPat.match(line):
start += line
line = inFile.readRequiredLine()
while line.strip():
command += line
line = inFile.readRequiredLine()
if not GJob.SKIPGeomPat.search(command):
middle = "\n"
line = inFile.readRequiredLine()
# read comment lines
while line.strip():
middle += line
line = inFile.readRequiredLine()
middle += line
# read charge and multiplicity
middle += inFile.readRequiredLine()
line = inFile.readRequiredLine()
while line.strip():
coords += line
line = inFile.readRequiredLine()
while line and not GJob.LINKPat.match(line):
end += line
line = inFile.readline()
return GJob(start, command, middle, coords, end)
desc = """Run guassian optimization run.
Your gInFile may contain multiple jobs.
Whenever an optimization job is found it will be executed in multiple
subjobs with MaxCycle=optSteps. If the optimization does not
complete a frequency calculation is done with the final geometry.
   If the previous step was a freq job its parameters will be retained,
if not then the "CalcFC" option will be added to the opt keyword.
Note that gOpt will modify your gaussian options somewhat.
Example: set n=myName.g ; set nCPU=4 ; mysub.py -q medium -jobName $n:r -nCPU $nCPU -totalMem 10 -- gOpt.py -in $n"""
parser = argparse.ArgumentParser(description=desc, formatter_class=RawTextHelpFormatter)
parser.add_argument('-in', dest='gInFileName', required=True,
help='gaussian command file, out will be name.out')
parser.add_argument('-optSteps', dest='optSteps', required=False, default=8,
                    help='Number of optimization steps to execute before recalculating freq (def=%d)'%8)
parser.add_argument('-restartJob', metavar="<n>", type=int, required=False, default=0,
help='restart this computation with job number <n>. Only for opt jobs.')
args = parser.parse_args()
gInFileName = args.gInFileName
gOutFileName, dummy = os.path.splitext(gInFileName)
gOutFileName += ".out"
restartJob = args.restartJob
optSteps=args.__dict__.get('optSteps',8)
gJobs = []
with FileLineWrapper(open(gInFileName)) as gInFile:
gJob = GJob.readNext(gInFile)
while gJob:
gJobs.append(gJob)
gJob = GJob.readNext(gInFile)
lastGJob = None
for gJob in gJobs:
restartJob -= 1
if restartJob > 0:
continue
if gJob.isOpt(): # and lastGJob != None:
newGJob = gJob.copy(optSteps=optSteps)
success = newGJob.execute(gOutFileName)
while not success:
if lastGJob and lastGJob.isFreq():
newGJob = lastGJob.copy(chkGeom=True)
if not newGJob.execute(gOutFileName) :
raise IOError("Freq calculation did not complete!")
newGJob = gJob.copy(optSteps=optSteps,optReadFC=True)
success = newGJob.execute(gOutFileName)
else:
newGJob = gJob.copy(chkGeom=True,optSteps=optSteps,optCalcFC=True)
success = newGJob.execute(gOutFileName)
else:
gJob.execute(gOutFileName)
lastGJob = gJob
|
EternityForest/KaithemAutomation
|
kaithem/src/thirdparty/uncertainties/umath_core.py
|
Python
|
gpl-3.0
| 14,767
| 0.001761
|
# !!!!!!!!!!! Add a header to the documentation, that starts with something
# like "uncertainties.UFloat-compatible version of...", for all functions.
"""
Implementation of umath.py, with internals.
"""
# This module exists so as to define __all__, which in turn defines
# which functions are visible to the user in umath.py through from
# umath import * and Python shell completion.
# Many analytical derivatives depend on this
# Standard modules
import math
import sys
import itertools
# Local modules
import uncertainties.core as uncert_core
from uncertainties.core import (to_affine_scalar, AffineScalarFunc,
LinearCombination)
###############################################################################
# We wrap the functions from the math module so that they keep track of
# uncertainties by returning a AffineScalarFunc object.
# Some functions from the math module cannot be adapted in a standard
# way so to work with AffineScalarFunc objects (either as their result
# or as their arguments):
# (1) Some functions return a result of a type whose value and
# variations (uncertainties) cannot be represented by AffineScalarFunc
# (e.g., math.frexp, which returns a tuple). The exception raised
# when not wrapping them with wrap() is more obvious than the
# one obtained when wrapping them (in fact, the wrapped functions
# attempts operations that are not supported, such as calculation a
# subtraction on a result of type tuple).
# (2) Some functions don't take continuous scalar arguments (which can
# be varied during differentiation): math.fsum, math.factorial...
# Such functions can either be:
# - wrapped in a special way.
# - excluded from standard wrapping by adding their name to
# no_std_wrapping
# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar:
many_scalars_to_scalar_funcs = []
# Some functions require a specific treatment and must therefore be
# excluded from standard wrapping. Functions
# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']
# Functions with numerical derivatives:
#
# !! Python2.7+: {..., ...}
num_deriv_funcs = set(['fmod', 'gamma', 'lgamma'])
# Functions are by definition locally constant (on real
# numbers): their value does not depend on the uncertainty (because
# this uncertainty is supposed to lead to a good linear approximation
# of the function in the uncertainty region). The type of their output
# for floats is preserved, as users should not care about deviations
# in their value: their value is locally constant due to the nature of
# the function (0 derivative). This situation is similar to that of
# comparisons (==, >, etc.).
#
# !! Python 2.7+: {..., ...}
locally_cst_funcs = set(['ceil', 'floor', 'isinf', 'isnan', 'trunc'])
# Functions that do not belong in many_scalars_to_scalar_funcs, but
# that have a version that handles uncertainties. These functions are
# also not in numpy (see unumpy/core.py).
non_std_wrapped_funcs = []
# Function that copies the relevant attributes from generalized
# functions from the math module:
# This is a copy&paste job from the functools module, changing
# the default argument for assigned
def wraps(wrapper,
wrapped,
assigned=('__doc__',),
updated=('__dict__',)):
"""Update a wrapper function to look like the wrapped function.
wrapper -- function to be updated
wrapped -- original function
assigned -- tuple naming the attributes assigned directly
from the wrapped function to the wrapper function
updated -- tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function.
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
########################################
# Wrapping of math functions:
# Fixed formulas for the derivatives of some functions from the math
# module (some functions might not be present in all version of
# Python). Singular points are not taken into account. The user
# should never give "large" uncertainties: problems could only appear
# if this assumption does not hold.
# Functions not mentioned in _fixed_derivatives have their derivatives
# calculated numerically.
# Functions that have singularities (possibly at infinity) benefit
# from analytical calculations (instead of the default numerical
# calculation) because their derivatives generally change very fast.
# Even slowly varying functions (e.g., abs()) yield more precise
# results when differentiated analytically, because of the loss of
# precision in numerical calculations.
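# A generic illustration (not necessarily the scheme used for the numerical
# derivatives in this module) of why analytical formulas are preferred near
# points where the derivative changes quickly: a fixed-step central
# difference for sqrt() close to 0 is far from the exact value 0.5/sqrt(x).
def _example_analytical_vs_numerical_derivative():
    import math
    x, step = 1e-8, 1e-8
    exact = 0.5 / math.sqrt(x)  # 5000.0
    approx = (math.sqrt(x + step) - math.sqrt(x - step)) / (2 * step)  # ~7071
    return exact, approx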
#def log_1arg_der(x):
# """
# Derivative of log(x) (1-argument form).
# """
# return 1/x
def log_der0(*args):
"""
Derivative of math.log() with respect to its first argument.
Works whether 1 or 2 arguments are given.
"""
if len(args) == 1:
return 1/args[0]
else:
return 1/args[0]/math.log(args[1]) # 2-argument form
# The following version goes about as fast:
## A 'try' is used for the most common case because it is fast when no
## exception is raised:
#try:
# return log_1arg_der(*args) # Argument number check
#except TypeError:
# return 1/args[0]/math.log(args[1]) # 2-argument form
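# A quick numeric sanity check of the two call forms handled by log_der0()
# (values chosen arbitrarily):
def _example_log_der0():
    import math
    x, base, step = 8.0, 2.0, 1e-6
    one_arg = log_der0(x)  # 1/8 = 0.125
    two_arg = log_der0(x, base)  # 1/(8*log(2)) ~ 0.1803
    numeric = (math.log(x + step, base) - math.log(x - step, base)) / (2 * step)
    assert abs(two_arg - numeric) < 1e-6
    return one_arg, two_arg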
def _deriv_copysign(x,y):
if x >= 0:
return math.copysign(1, y)
else:
return -math.copysign(1, y)
def _deriv_fabs(x):
if x >= 0:
return 1
else:
return -1
def _deriv_pow_0(x, y):
if y == 0:
return 0.
elif x != 0 or y % 1 == 0:
return y*math.pow(x, y-1)
else:
return float('nan')
def _deriv_pow_1(x, y):
if x == 0 and y > 0:
return 0.
else:
return math.log(x) * math.pow(x, y)
erf_coef = 2/math.sqrt(math.pi) # Optimization for erf()
fixed_derivatives = {
# In alphabetical order, here:
'acos': [lambda x: -1/math.sqrt(1-x**2)],
'acosh': [lambda x: 1/math.sqrt(x**2-1)],
'asin': [lambda x: 1/math.sqrt(1-x**2)],
'asinh': [lambda x: 1/math.sqrt(1+x**2)],
'atan': [lambda x: 1/(1+x**2)],
'atan2': [lambda y, x: x/(x**2+y**2), # Correct for x == 0
lambda y, x: -y/(x**2+y**2)], # Correct for x == 0
'atanh': [lambda x: 1/(1-x**2)],
'copysign': [_deriv_copysign,
lambda x, y: 0],
'cos': [lambda x: -math.sin(x)],
'cosh': [math.sinh],
'degrees': [lambda x: math.degrees(1)],
'erf': [lambda x: math.exp(-x**2)*erf_coef],
'erfc': [lambda x: -math.exp(-x**2)*erf_coef],
'exp': [math.exp],
'expm1': [math.exp],
'fabs': [_deriv_fabs],
'hypot': [lambda x, y: x/math.hypot(x, y),
lambda x, y: y/math.hypot(x, y)],
'log': [log_der0,
lambda x, y: -math.log(x, y)/y/math.log(y)],
'log10': [lambda x: 1/x/math.log(10)],
'log1p': [lambda x: 1/(1+x)],
'pow': [_deriv_pow_0, _deriv_pow_1],
'radians': [lambda x: math.radians(1)],
'sin': [math.cos],
'sinh': [math.cosh],
'sqrt': [lambda x: 0.5/math.sqrt(x)],
'tan': [lambda x: 1+math.tan(x)**2],
'tanh': [lambda x: 1-math.tanh(x)**2]
}
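# A quick self-check sketch comparing one analytical derivative from the
# table above against a plain central difference (point and tolerance are
# illustrative):
def _example_check_fixed_derivative():
    import math
    x, step = 0.3, 1e-6
    analytical = fixed_derivatives['sin'][0](x)  # math.cos(0.3)
    numerical = (math.sin(x + step) - math.sin(x - step)) / (2 * step)
    assert abs(analytical - numerical) < 1e-6
    return analytical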
# Many built-in functions in the math module are wrapped with a
# version which is uncertainty aware:
this_module = sys.modules[__name__]
def wrap_locally_cst_func(func):
'''
    Return a function that returns the same result as func, but
    after converting any AffineScalarFunc argument to its nominal value.
This function is useful for wrapping functions that are locally
constant: the uncertainties should have no role in the result
(since they are supposed to keep the function linear and hence,
here, constant).
'''
def wrapped_func(*args, **kwargs):
args_float = list(map(uncert_core.nominal_value, args))
# !! In Python 2.7+, dictionary comprehension: {argname:...}
kwargs_float = dict(
(arg_name, uncert_cor
|
embotech/forcesnlp-examples
|
robot/acado/export_MPC/forces/interface/forces_build.py
|
Python
|
mit
| 2,126
| 0.031044
|
#forces : A fast customized optimization solver.
#
#Copyright (C) 2013-2016 EMBOTECH GMBH [[email protected]]. All rights reserved.
#
#
#This software is intended for simulation and testing purposes only.
#Use of this software for any commercial purpose is prohibited.
#
#This program is distributed in the hope that it will be useful.
#EMBOTECH makes NO WARRANTIES with respect to the use of the software
#without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
#PARTICULAR PURPOSE.
#
#EMBOTECH shall not have any liability for any damage arising from the use
#of the software.
#
#This Agreement shall exclusively be governed by and interpreted in
#accordance with the laws of Switzerland, excluding its principles
#of conflict of laws. The Courts of Zurich-City shall have exclusive
#jurisdiction in case of any dispute.
#
from distutils.ccompiler import new_compiler
c = new_compiler()
#from numpy.distutils.intelccompiler import IntelCCompiler
#c = IntelCCompiler()
import os
import sys
import distutils
# determine source file
sourcefile = os.path.join(os.getcwd(),"forces","src","forces"+".c")
# determine lib file
if sys.platform.startswith('win'):
libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".lib")
else:
libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".so")
# create lib dir if it does not exist yet
if not os.path.exists(os.path.join(os.getcwd(),"forces","lib")):
os.makedirs(os.path.join(os.getcwd(),"forces","lib"))
# compile into object file
objdir = os.path.join(os.getcwd(),"forces","obj")
if isinstance(c,distutils.unixccompiler.UnixCCompiler):
objects = c.compile([sourcefile], output_dir=objdir, extra
|
_preargs=['-O3','-fPIC','-fopenmp','-mavx'])
if sys.platform.startswith('linux'):
c.set_libraries(['rt','gomp'])
else:
objects = c.compil
|
e([sourcefile], output_dir=objdir)
# create libraries
libdir = os.path.join(os.getcwd(),"forces","lib")
exportsymbols = ["%s_solve" % "forces"]
c.create_static_lib(objects, "forces", output_dir=libdir)
c.link_shared_lib(objects, "forces", output_dir=libdir, export_symbols=exportsymbols)
|
clicumu/epo_utils
|
epo_utils/api.py
|
Python
|
mit
| 20,708
| 0.000145
|
# -*- coding: utf-8 -*-
""" Module for making calls to EPO-OPS REST-API.
This module contain classes and functions to get data from
[EPO-OPS API](http://www.epo.org/searching-for-patents/technical/espacenet/ops.html)
"""
import logging
import re
import time
from base64 import b64encode
from collections import namedtuple
from datetime import datetime, timedelta
import requests
from .constants import AUTH_URL, URL_PREFIX, VALID_ENDPOINTS, \
VALID_IDTYPES
from epo_utils.exceptions import FetchFailed, QuotaPerHourExceeded, \
QuotaPerWeekExceeded
from epo_utils.ops import Services, ReferenceType
try:
import requests_cache
except ImportError:
_HAS_CACHE = False
else:
_HAS_CACHE = True
from .documents import DocumentID
class APIInput:
"""
Encapsulation of API-input.
Provides ID-formatting and interface
to :class:`epo_utils.documents.DocumentID`.
Attributes
----------
id_type : str
ID-format (epodoc, docdb, original).
number : str
Document number.
kind : str
Document kind code.
country : str
Country code.
date : str
Date as YYYYMMDD-string.
"""
def __init__(self, id_type, number, kind=None, country=None, date=None):
if id_type not in VALID_IDTYPES:
raise ValueError('invalid id_type: {}'.format(id_type))
if date is not None:
date = str(date)
try:
datetime.strptime(date, '%Y%m%d')
except ValueError:
raise ValueError('date must be in YYYYMMDD-format')
else:
if len(date) != 8:
raise ValueError('date must be in YYYYMMDD-format')
if country is not None and not country.strip():
            raise ValueError("country can't be empty if provided")
        if kind is not None and not kind.strip():
            raise ValueError("kind can't be empty if provided")
self.id_type = id_type
self.number = str(number)
self.kind = kind
self.country = country
self.date = date
@classmethod
def from_document_id(cls, document_id):
""" Convert instance of :class:`epo_utils.documents.DocumentID`
to `APIInput`.
Parameters
----------
document_id : epo_utils.documents.DocumentID
Document-ID to translate.
Returns
-------
APIInput
"""
if not isinstance(document_id, DocumentID):
raise ValueError('document_id must be DocumentID-instance')
return cls(document_id.id_type, document_id.doc_number,
document_id.kind, document_id.country, document_id.date)
def to_id(self):
""" Format as valid API-input ID.
Returns
-------
str
"""
if (',' in self.number or '.' in self.number or '/' in self.number) \
and self.id_type != 'classification':
number = '({})'.format(self.number)
else:
number = self.number
parts = [part for part in [self.country, number, self.kind, self.date]
if part is not None]
if self.id_type == 'original':
id_ = '.'.join(parts).replace(' ', '%20')
elif s
|
elf.id_type == 'docdb':
id_ =
|
'.'.join(parts)
elif self.id_type == 'epodoc':
if self.date is not None:
id_ = ''.join(parts[:-1])
id_ += '.' + self.date
else:
id_ = ''.join(parts)
elif self.id_type == 'classification':
return number
else:
raise ValueError('invalid id_type: {}'.format(self.id_type))
return id_
def __repr__(self):
module = self.__class__.__module__
class_name = self.__class__.__name__
return '<{0}.{1}: {2}>'.format(module, class_name, self.to_id())
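# A usage sketch of the formatting rules implemented in APIInput.to_id();
# the document number is made up, and 'docdb'/'epodoc' are assumed to be
# accepted id types, as the class docstring indicates.
def _example_api_input_to_id():
    docdb = APIInput('docdb', '1000000', kind='A1', country='EP')
    epodoc = APIInput('epodoc', '1000000', country='EP')
    assert docdb.to_id() == 'EP.1000000.A1'
    assert epodoc.to_id() == 'EP1000000'
    return docdb, epodoc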
class Token(namedtuple('Token', ['token', 'expires'])):
""" Wrapper around access-token. """
class EPOClient:
""" Client to call EPO-OPS REST-API using `requests`.
Features auto-throttling based on OPS throttling headers and
automatic retries on server-side error codes.
Parameters
----------
accept_type : str
Http accept type.
key : str, optional
EPO OPS user key.
secret : str, optional
EPO OPS user secret.
cache : bool
If True, try to use `requests_cache` for caching. Default False.
cache_kwargs : dict, optional.
Passed to :py:func:`requests_cache.install_cache` as keyword
arguments if provided.
max_retries : int
Number of allowed retries at 500-responses.
retry_timeout : float, int
Timeout in seconds between calls when retrying at 500-responses.
Attributes
----------
secret : str
key : str
token : Token or None
quota_per_hour_used : int
quota_per_week_used : int
"""
HAS_FULLTEXT = {'EP'}
def __init__(self, accept_type='xml', key=None, secret=None, cache=False,
cache_kwargs=None, max_retries=1, retry_timeout=10):
try:
_check_epoclient_input(accept_type, key, secret, cache,
cache_kwargs, max_retries, retry_timeout)
except AssertionError as e:
raise ValueError(str(e))
if accept_type.startswith('application/'):
self.accept_type = accept_type
else:
self.accept_type = 'application/{}'.format(accept_type)
if cache and _HAS_CACHE:
logging.info('Installs cache.')
requests_cache.install_cache(**(cache_kwargs or dict()))
elif cache:
raise ValueError('cache is set to True but requests_cache '
'is not available.')
self.secret = secret
self.key = key
self.max_retries = max_retries
self.retry_timeout = retry_timeout
self.quota_per_hour_used = 0
self.quota_per_week_used = 0
if all([secret, key]):
logging.debug('Auth provided.')
self.token = self.authenticate()
else:
logging.debug('Auth not provided')
self.token = None
self._last_call = {
'search': None,
'retrieval': None,
'inpadoc': None,
'images': None,
'other': None
}
self._next_call = self._last_call.copy()
def fetch(self, service, ref_type, api_input, endpoint='',
options=None, extra_headers=None):
""" Generic function to fetch data from the EPO-OPS API.
Parameters
----------
service : epo_utils.ops.Services
OPS-service to fetch from.
ref_type : epo_utils.ops.ReferenceType
OPS-reference type of data to fetch.
api_input : APIInput, list[APIInput]
Input to API-call.
endpoint : str
API-endpoint to call.
options : list, optional
            API-call constituents.
extra_headers : dict, optional
Additional or custom headers to be used.
use_post : bool
If True, POST will be used for request.
Returns
-------
requests.Response
"""
if not isinstance(ref_type, ReferenceType):
raise ValueError('invalid ref_type: {}'.format(ref_type))
if not isinstance(service, Services):
raise ValueError('invalid service: {}'.format(service))
if endpoint not in VALID_ENDPOINTS:
raise ValueError('invalid endpoint: {}'.format(endpoint))
try:
input_text = ','.join(i.to_id() for i in api_input)
except TypeError:
input_text = api_input.to_id()
id_types = {api_input.id_type}
else:
id_types = {i.id_type for i in api_input}
if len(id_types) > 1:
raise ValueError('non-matching id-types')
options = options or list()
url = build_ops_url(service, ref_type, id_types.pop(),
endpoint, options)
headers = self._make_headers
|
dakrauth/snarf
|
snagit/core.py
|
Python
|
mit
| 7,965
| 0.000126
|
import os
import re
import sys
import json
import shlex
import logging
import inspect
import functools
import importlib
from pprint import pformat
from collections import namedtuple
from traceback import format_tb
from requests.exceptions import RequestException
import strutil
from cachely.loader import Loader
from .lib import library, interpreter_library, DataProxy
from . import utils
from . import core
from . import exceptions
logger = logging.getLogger(__name__)
BASE_LIBS = ['snagit.lib.text', 'snagit.lib.lines', 'snagit.lib.soup']
ReType = type(re.compile(''))
class Instruction(namedtuple('Instruction', 'cmd args kws line lineno')):
'''
    ``Instruction``s take the form::
command [arg [arg ...]] [key=arg [key=arg ...]]
Where ``arg`` can be one of: single quoted string, double quoted string,
digit, True, False, None, or a simple, unquoted string.
'''
values_pat = r'''
[rj]?'(?:(\'|[^'])*?)' |
[r]?"(?:(\"|[^"])*?)" |
(\d+) |
(True|False|None) |
([^\s,]+)
'''
args_re = re.compile(
r'''^(
(?P<kwd>\w[\w\d-]*)=(?P<val>{0}) |
(?P<arg>{0}|([\s,]+))
)\s*'''.format(values_pat),
re.VERBOSE
)
value_dict = {'True': True, 'False': False, 'None': None}
def __str__(self):
def _repr(w):
if isinstance(w, ReType):
return 'r"{}"'.format(str(w.pattern))
return repr(w)
return '{}{}{}'.format(
self.cmd.upper(),
' {}'.format(
' '.join([_repr(c) for c in self.args]) if self.args else ''
),
' {}'.format(' '.join(
'{}={}'.format(k, _repr(v)) for k, v in self.kws.items()
) if self.kws else '')
)
@classmethod
def get_value(cls, s):
if s.isdigit():
return int(s)
elif s in cls.value_dict:
return cls.value_dict[s]
elif s.startswith(('r"', "r'")):
return re.compile(utils.escaped(s[2:-1]))
elif s.startswith("j'"):
return json.loads(utils.escaped(s[2:-1]))
elif s.startswith(('"', "'")):
return utils.escaped(s[1:-1])
else:
return s.strip()
@classmethod
def parse(cls, line, lineno):
args = []
kws = {}
cmd, text = strutil.splitter(line, expected=2, strip=True)
cmd = cmd.lower()
while text:
m = cls.args_re.search(text)
if not m:
break
gdict = m.groupdict()
kwd = gdict.get('kwd')
if kwd:
kws[kwd] = cls.get_value(gdict.get('val', ''))
else:
arg = gdict.get('arg', '').strip()
if arg != ',':
args.append(cls.get_value(arg))
text = text[len(m.group()):]
if text:
raise SyntaxError(
'Syntax error: "{}" (line {})'.format(text, lineno)
)
return cls(cmd, args, kws, line, lineno)
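# A parsing sketch based on the grammar in the Instruction docstring; the
# command and values are made up, and the expected results assume that
# strutil.splitter and utils.escaped behave as their names suggest.
def _example_instruction_parse():
    instr = Instruction.parse('LOAD "index.html" retries=3 cache=True', 1)
    # Expected: cmd == 'load', args == ['index.html'],
    # kws == {'retries': 3, 'cache': True}
    return instr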
def lexer(code, lineno=0):
'''
Takes the script source code, scans it, and lexes it into
``Instructions``
'''
for chars in code.splitlines():
lineno += 1
|
line = chars.rstrip()
if not line or line.lstrip().startswith('#'):
continue
logger.debug('Lexed {} byte(s) line {}'.format(len(line), chars))
yield Instruction.parse(line, l
|
ineno)
def load_libraries(extensions=None):
if isinstance(extensions, str):
extensions = [extensions]
libs = BASE_LIBS + (extensions or [])
for lib in libs:
importlib.import_module(lib)
class Interpreter:
def __init__(
self,
contents=None,
loader=None,
use_cache=False,
do_pm=False,
extensions=None
):
self.use_cache = use_cache
self.loader = loader if loader else Loader(use_cache=use_cache)
self.contents = Contents(contents)
self.do_debug = False
self.do_pm = do_pm
self.instructions = []
load_libraries(extensions)
def load_sources(self, sources, use_cache=None):
use_cache = self.use_cache if use_cache is None else bool(use_cache)
contents = self.loader.load_sources(sources)
self.contents.update([
ct.decode() if isinstance(ct, bytes) else ct for ct in contents
])
def listing(self, linenos=False):
items = []
for instr in self.instructions:
items.append('{}{}'.format(
'{} '.format(instr.lineno) if linenos else '',
instr.line
))
return items
def lex(self, code):
lineno = self.instructions[-1].lineno if self.instructions else 0
instructions = list(lexer(code, lineno))
self.instructions.extend(instructions)
return instructions
def execute(self, code):
for instr in self.lex(code):
try:
self._execute_instruction(instr)
except exceptions.ProgramWarning as why:
print(why)
return self.contents
def _load_handler(self, instr):
if instr.cmd in library.registry:
func = library.registry[instr.cmd]
return self.contents, (func, instr.args, instr.kws)
elif instr.cmd in interpreter_library.registry:
func = interpreter_library.registry[instr.cmd]
return func, (self, instr.args, instr.kws)
raise exceptions.ProgramWarning(
'Unknown instruction (line {}): {}'.format(instr.lineno, instr.cmd)
)
def _execute_instruction(self, instr):
logger.debug('Executing {}'.format(instr.cmd))
handler, args = self._load_handler(instr)
do_debug, self.do_debug = self.do_debug, False
if do_debug:
utils.pdb.set_trace()
try:
handler(*args)
except Exception:
exc, value, tb = sys.exc_info()
if self.do_pm:
logger.error(
'Script exception, line {}: {} (Entering post_mortem)'.format( # noqa
instr.lineno,
value
)
)
utils.pdb.post_mortem(tb)
else:
raise
def execute_script(filename, contents=''):
code = utils.read_file(filename)
return execute_code(code, contents)
def execute_code(code, contents=''):
intrep = Interpreter(contents)
return str(intrep.execute(code))
class Contents:
def __init__(self, contents=None):
self.stack = []
self.set_contents(contents)
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len(self.contents)
def __str__(self):
return '\n'.join(str(c) for c in self)
# def __getitem__(self, index):
# return self.contents[index]
def pop(self):
if self.stack:
self.contents = self.stack.pop()
def __call__(self, func, args, kws):
contents = []
for data in self:
result = func(data, args, kws)
contents.append(result)
self.update(contents)
def merge(self):
if self.contents:
first = self.contents[0]
data = first.merge(self.contents)
self.update([data])
def update(self, contents):
if self.contents:
self.stack.append(self.contents)
self.set_contents(contents)
def set_contents(self, contents):
self.contents = []
if isinstance(contents, (str, bytes)):
contents = [contents]
contents = contents or []
for ct in contents:
if isinstance(ct, (str, bytes)):
ct = DataProxy(ct)
self.contents.append(ct)
|
shahin/sqltxt
|
tests/unit/table_test.py
|
Python
|
mit
| 5,179
| 0.013516
|
import unittest
import os
from sqltxt.table import Table
from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError
from sqltxt.expression import Expression
class TableTest(unittest.TestCase):
def setUp(self):
self.data_
|
path = os.path.join(os.path.dirname(__file__), '../data')
table_header = ["col_a", "col_b"]
table_contents = """1,1
2,3
3,2"""
self.table_a = Table.from_cmd(
name = 'table_
|
a',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
table_header = ["col_a", "col_b"]
table_contents = """1,w
2,x
2,y
5,z"""
self.table_b = Table.from_cmd(
name = 'table_b',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
def test_subset_rows(self):
conditions = [
[Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')]
]
self.table_a.subset_rows(conditions)
cmds_actual = self.table_a.cmds
cmds_expected = [
'echo -e "1,1\n2,3\n3,2"',
"awk -F',' 'OFS=\",\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_order_columns(self):
col_name_order = [ColumnName('col_b'), ColumnName('col_a')]
self.table_a.order_columns(col_name_order)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { print $2,$1 }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_sort(self):
sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')]
self.table_a.sort(sort_by_col_names)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "sort -t, -k 1,1 -k 2,2"]
self.assertEqual(cmds_actual, cmds_expected)
sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names]
self.assertEqual(self.table_a.sorted_by, sort_by_cols)
def test_is_sorted_by(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_b'])
table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')]
self.assertTrue(table_from_cmd.is_sorted_by([0]))
self.assertFalse(table_from_cmd.is_sorted_by([1]))
self.assertTrue(table_from_cmd.is_sorted_by([0,1]))
def test_get_column_for_name_raises_on_ambiguity(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['ta.col_a', 'tb.col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
first_column = Column('ta.col_a')
first_column.add_name('col_alpha')
second_column = Column('tb.col_a')
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = [first_column, second_column])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
def test_sample_rows(self):
self.table_a.sample_rows(1)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"',
"""awk -v seed=$RANDOM -v n={0} '
BEGIN {{ srand(seed) }}
NR <= n {{ reservoir[NR] = $0 }}
NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}
END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(1)
]
self.assertEqual(cmds_actual, cmds_expected)
def test_get_cmd_str(self):
table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt'))
# output from a file-backed Table to STDOUT
cmd_actual = table_from_file.get_cmd_str()
cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path)
self.assertEqual(cmd_actual, cmd_expected)
table_from_cmd = Table.from_cmd(
'table_a',
cmd = 'echo -e "1,2,3,4"',
columns = ['col_a', 'col_b', 'col_c', 'col_d'])
# output from a command-backed Table to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4"'
self.assertEqual(cmd_actual, cmd_expected)
# add a command, then output
table_from_cmd.cmds += ['sort']
# to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4" | sort'
self.assertEqual(cmd_actual, cmd_expected)
|
mramire8/active
|
strategy/randomsampling.py
|
Python
|
apache-2.0
| 21,847
| 0.006454
|
__author__ = 'mramire8'
from collections import defaultdict
import copy
import numpy as np
from sklearn.linear_model import LogisticRegression
from baselearner import BaseLearner
from sklearn.feature_extraction.text import CountVectorizer
from datautil.textutils import StemTokenizer
show_utilitly = False
class RandomSamplingLearner(BaseLearner):
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None):
super(RandomSamplingLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget,
seed=seed)
def pick_next(self, pool=None, k=1):
try:
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
return [list_pool[index] for index in indices[:k]]
except Exception, err:
print err
raise Exception("Invalid pool object")
#return random.sample(pool, k)
def __str__(self):
string = self.__class__.__name__ #% "(model=" % self.current_model % ", accuracy_model="% self.accuracy_model #\
#% ", budget=" % \
#str(self.budget) % \
#", seed=)"
##", seed=" % str(self.seed) % ")"
return string
def __repr__(self):
string = self.__class__.__name__ % "(model=" % self.current_model % ", accuracy_model=" % self.accuracy_model \
% ", budget=" % self.budget % ", seed=" % self.seed % ")"
return string
class BootstrapRandom(BaseLearner):
def __init__(self, random_state=None):
#model=None, cost_model=None, accuracy_model=None, budget=None, seed=1234567
super(BootstrapRandom, self).__init__(seed=random_state)
self.bootstrap = self.pick_next
def pick_next(self, pool=None, k=1):
try:
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
return [list_pool[index] for index in indices[:k]]
except Exception, err:
#raise Exception("Invalid pool object")
print "*" * 10
print "Invalid pool object"
print err
class BootstrapFromEach(BaseLearner):
def __init__(self, seed):
super(BootstrapFromEach, self).__init__(seed=seed)
def bootstrap(self, pool, k=2):
k = int(k / 2)
data = defaultdict(lambda: [])
for i in pool.remaining:
data[pool.target[i]].append(i)
chosen = []
for label in data.keys():
candidates = data[label]
indices = self.randgen.permutation(len(candidates))
chosen.extend([candidates[index] for index in indices[:k]])
return chosen
class UncertaintyLearner(BaseLearner):
# def __init__(self, seed=0, subpool=None):
# super(UncertaintyLearner, self).__init__(seed=seed)
# self.subpool = subpool
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, cost_model=None, subpool=None):
super(UncertaintyLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model, subpool=subpool)
self.model = model
def pick_next(self, pool=None, k=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
candidates = [c for c in pool.data]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
data_point = candidates[i]
prob = self.model.predict_proba(data_point)[0]
maxprob = max(prob)
uncertainty.append(1 - maxprob)
sorted_ind = np.argsort(uncertainty)[::-1]
chosen = [remaining[x] for x in sorted_ind][:k]
return chosen
def train(self, train_data=None, train_labels=None):
'''
        Return a new copy of a retrained classifier based on parameters. If no data, return an untrained classifier
:param train_data: training data
:param train_labels: target values
:return: trained classifier
'''
clf = super(UncertaintyLearner, self).train(train_data=train_data, train_labels=train_labels)
self.model = clf
return clf
def __str__(self):
string = "{0}(seed={seed})".format(self.__class__.__name__, seed=self.seed)
return string
def __repr__(self):
string = self.__class__.__name__ % "(model=" % self.current_model % ", accuracy_model=" % self.accuracy_model \
% ", budget=" % self.budget % ", seed=" % self.seed % ")"
return string
class AnytimeLearner(BaseLearner):
# def __init__(self, seed=0, subpool=None):
# super(UncertaintyLearner, self).__init__(seed=seed)
# self.subpool = subpool
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None,
cost_model=None):
super(AnytimeLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model)
self.model = model
self.neutral_model = LogisticRegression(penalty='l1')
self.base_neutral = LogisticRegression(penalty='l1')
self.neutral_train = None
self.vct_neutral = None
self.neutral_labels = np.array([])
self.vcn = vcn
self._kvalues = [10, 25, 50, 75, 100]
self.subpool = subpool
def pick_next(self, pool=None, step_size=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
# data_point = candidates[i]
utility, k, unc = self.x_utility(pool.data[i], pool.text[i])
if show_utilitly:
print "%s\t %s \t %.3f" % (i, k, utility)
uncertainty.append([utility, k, unc])
uncertainty = np.array(uncertainty)
unc_copy = uncertainty[:, 0]
sorted_ind = np.argsort(unc_copy, axis=0)[::-1]
chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]] #index and k of chosen
util = [uncertainty[x] for x in sorted_ind[:int(step_size)]]
# print util
## chosen returns the chosen and the k value associated with it
return chosen, util
def x_utility(self, instance, instance_text):
prob = self.model.predict_proba(instance)
unc = 1 - prob.max()
utility = np.array([[self.obj_fn_p2(xik, k) * unc, k] for k, xik in self.getk(instance_text)])
order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order
utility_sorted = utility[order, :]
# print format_list(utility_sorted)
if show_utilitly:
print "\t{0:.5f}".format(unc),
return utility_sorted[0, 0], utility_sorted[0, 1], unc ## return the max
def obj_fn_p2(self, instance_k, k):
## change the text to use the vectorizer
# xik = self.vct_neutral.transform([instance_k])
xik = self.vcn.transform([instance_k])
# neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1]
neu = 1
if self.neutral_model is not None:
neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral
costk = self.predict_cost(k)
utility = neu / costk ## u(x) * N(xk) / C(xk)
# print utility
if show_utilitly:
print "\t{0:.3f}".fo
|
rmat(neu),
print "\t{0:.3f}".format(costk),
return utility
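    # Illustrative numbers only: if the neutrality model gives neu = 0.8 for a
    # subinstance and the cost model predicts costk = 2.0, obj_fn_p2 returns
    # 0.8 / 2.0 = 0.4; x_utility then scales this by the classifier's
    # uncertainty before ranking the candidate k values.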
def getk(self, doc_text):
'''
Return a set of subinstance of k words in classifier f
|
ormat
:param doc_text:
:return: set of subinstances of doc_tex
|
rolepoint/appengine-mapreduce
|
python/test/mapreduce/control_test.py
|
Python
|
apache-2.0
| 10,366
| 0.00328
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random
import string
import time
import unittest
from google.appengine.ext import db
from mapreduce import control
from mapreduce import hooks
from mapreduce import model
from mapreduce import test_support
from testlib import testutil
from mapreduce.api import map_job
def random_string(length):
"""Generate a random string of given length."""
return "".join(
random.choice(string.letters + string.digits) for _ in range(length))
class TestEntity(db.Model):
"""Test entity class."""
class TestHooks(hooks.Hooks):
"""Test hooks class."""
enqueue_kickoff_task_calls = []
def enqueue_kickoff_task(self, task, queue_name):
TestHooks.enqueue_kickoff_task_calls.append((task, queue_name))
def test_handler(entity):
"""Test handler function."""
pass
class ControlTest(testutil.HandlerTestBase):
"""Tests for control module."""
QUEUE_NAME = "crazy-queue"
def setUp(self):
testutil.HandlerTestBase.setUp(self)
TestHooks.enqueue_kickoff_task_calls = []
def get_mapreduce_spec(self, task):
"""Get mapreduce spec form kickoff task payload."""
payload = test_support.decode_task_payload(task)
return model.MapreduceSpec.from_json_str(payload["mapreduce_spec"])
def validate_map_started(self, mapreduce_id, queue_name=None):
"""Tests that the map has been started."""
queue_name = queue_name or self.QUEUE_NAME
self.assertTrue(mapreduce_id)
# Note: only a kickoff job is pending at this stage, shards come later.
tasks = self.taskqueue.GetTasks(queue_name)
self.assertEquals(1, len(tasks))
# Checks that tasks are scheduled into the future.
task = tasks[0]
self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id,
task["url"])
handler = test_support.execute_task(task)
self.assertEqual(mapreduce_id, handler.request.get("mapreduce_id"))
state = model.MapreduceState.get_by_job_id(mapreduce_id)
params = map_job.JobConfig._get_default_mr_params()
params.update({"foo": "bar",
"base_path": "/mapreduce_base_path",
"queue_name": queue_name})
self.assertEqual(state.mapreduce_spec.params, params)
return task["eta"]
def testStartMap(self):
"""Test start_map function.
Most of start_map functionality is already test
|
ed by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_reader
|
s.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME)
self.validate_map_started(mapreduce_id)
def testStartMap_Countdown(self):
"""Test that MR can be scheduled into the future.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
# MR should be scheduled into the future.
now_sec = long(time.time())
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
countdown=1000)
task_eta = self.validate_map_started(mapreduce_id)
eta_sec = time.mktime(time.strptime(task_eta, "%Y/%m/%d %H:%M:%S"))
self.assertTrue(now_sec + 1000 <= eta_sec)
def testStartMap_Eta(self):
"""Test that MR can be scheduled into the future.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
# MR should be scheduled into the future.
eta = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
eta=eta)
task_eta = self.validate_map_started(mapreduce_id)
self.assertEquals(eta.strftime("%Y/%m/%d %H:%M:%S"), task_eta)
def testStartMap_QueueEnvironment(self):
"""Test that the start_map inherits its queue from the enviornment."""
TestEntity().put()
shard_count = 4
os.environ["HTTP_X_APPENGINE_QUEUENAME"] = self.QUEUE_NAME
try:
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path")
finally:
del os.environ["HTTP_X_APPENGINE_QUEUENAME"]
self.validate_map_started(mapreduce_id)
def testStartMap_Hooks(self):
"""Tests that MR can be scheduled with a hook class installed.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name="crazy-queue",
hooks_class_name=__name__+"."+TestHooks.__name__)
self.assertTrue(mapreduce_id)
task, queue_name = TestHooks.enqueue_kickoff_task_calls[0]
self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id,
task.url)
self.assertEqual("crazy-queue", queue_name)
def testStartMap_RaisingHooks(self):
"""Tests that MR can be scheduled with a dummy hook class installed.
The dummy hook class raises NotImplementedError for all method calls so the
default scheduling logic should be used.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name="crazy-queue",
hooks_class_name=hooks.__name__+"."+hooks.Hooks.__name__)
self.validate_map_started(mapreduce_id)
def testStartMap_HugePayload(self):
"""Test start_map function.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = ""
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"ma
|
FrancescoCeruti/linux-show-player
|
lisp/modules/midi/midi_output.py
|
Python
|
gpl-3.0
| 1,339
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Linux Show Player
#
# Copyright 2012-2016 Francesco Ceruti <[email protected]>
#
# Linux Show Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Linux Show Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Gen
|
eral Public License
# along with Linux Show Player. If not, see <http://www.gnu.org/licenses/>.
import mido
from lisp.modules.midi.midi_common import MIDICommon
from lisp.modules.midi.midi_utils import mido_backend, mido_port_name
class MIDIOutput(MIDICommon):
def __init__(self,
|
port_name='AppDefault'):
super().__init__(port_name=port_name)
def send_from_str(self, str_message):
self.send(mido.parse_string(str_message))
def send(self, message):
self._port.send(message)
def open(self):
port_name = mido_port_name(self._port_name, 'O')
self._port = mido_backend().open_output(port_name)
|
Lucas-Wong/ToolsProject
|
CodeReview/Change_Xml.py
|
Python
|
gpl-3.0
| 1,695
| 0.00767
|
# ! /usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author = lucas.wang
@create_time = 2018-01-12
"""
from xml.etree import ElementTree as ET
import fnmatch
class change_xml(object):
"""
xml main function
"""
names = ['iConn.CreateXmlTools.vshost.exe', 'AutoUpdater.dll', 'NewTonsoft.Json.dll',
'Oracle.ManagedDataAccess.dll', 'Renci.SshNet.dll', 'Renci.SshNet.xml', 'zxing.dll', 'Images/ARX_HK.png']
old_path_name = r"http://172.16.1.81:8081/UpdateClient/"
def __init__(self, path_name, file_path, file_name="AutoupdateService.xml"):
"""
Init file path name
:param fileName:
"""
self.file_path = file_path
self.file_name = file_
|
name
self.tree = ET.parse(file_path + file_name)
self.path_name = path_name
def read_xml(self):
"""
Read xml file
:return:
"""
root = self.tree.getroot()
# print(root)
for item in root.getchildren():
# root.iter("file"):
# print(item.get("url"))
item.set("url", item.get('url').replace(self.old_path_name, self.path_name))
|
if fnmatch.filter(self.names, item.get('path')):
root.remove(item)
self.write_xml()
def write_xml(self):
self.tree.write(self.file_path + self.file_name)
print("Update xml file success. file: " + self.file_path + self.file_name)
if __name__ == '__main__':
"""
Test use
"""
read = change_xml(r'http://172.16.1.81:8081/UpdateClient/',
r'D:\\CodeWorkspace\\iConnAll\\Client-dev\\iConn.CreateXmlTools\\bin\\Release\\')
read.read_xml()
|
fiduswriter/fiduswriter
|
fiduswriter/document/models.py
|
Python
|
agpl-3.0
| 11,689
| 0
|
from builtins import str
from builtins import object
from django.db import models
from django.db.utils import OperationalError, ProgrammingError
from django.core import checks
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# FW_DOCUMENT_VERSION:
# Also defined in frontend
# document/static/js/modules/schema/index.js
FW_DOCUMENT_VERSION = 3.4
class DocumentTemplate(models.Model):
title = models.CharField(max_length=255, default="", blank=True)
import_id = models.CharField(max_length=255, default="", blank=True)
content = models.JSONField(default=dict)
doc_version = models.DecimalField(
max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.deletion.CASCADE,
)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
auto_delete = True
def __str__(self):
return self.title
def is_deletable(self):
reverse_relations = [
f
for f in self._meta.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created
and not f.concrete
and f.name not in ["documentstyle", "exporttemplate"]
]
for r in reverse_relations:
if r.remote_field.model.objects.filter(
**{r.field.name: self}
).exists():
return False
return True
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_doc_versions(**kwargs))
return errors
@classmethod
def _check_doc_versions(cls, **kwargs):
try:
if len(
cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION))
):
return [
checks.Warning(
"Document templates need to be upgraded. Please "
"navigate to /admin/document/document/maintenance/ "
"with a browser as a superuser and upgrade all "
"document templates on this server.",
obj=cls,
)
]
else:
return []
except (ProgrammingError, OperationalError):
# Database has not yet been initialized, so don't throw any error.
return []
class Document(models.Model):
title = models.CharField(max_length=255, default="", blank=True)
path = models.TextField(default="", blank=True)
content = models.JSONField(default=dict)
doc_version = models.DecimalField(
max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
)
# The doc_version is the version of the data format in the content field.
# We upgrade the content field in JavaScript and not migrations so that
# the same code can be used for migrations and for importing old fidus
# files that are being uploaded. This field is only used for upgrading data
# and is therefore not handed to the editor or document overview page.
version = models.PositiveIntegerField(default=0)
diffs = models.JSONField(default=list, blank=True)
# The last few diffs that were received and approved. The number of stored
# diffs should always be equivalent to or more than all the diffs since the
# last full save of the document.
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="owner",
on_delete=models.deletion.CASCADE,
)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
comments = models.JSONField(default=dict, blank=True)
bibliography = models.JSONField(default=dict, blank=True)
# Whether or not document is listed on document overview list page.
# True by default and for all normal documents. Can be set to False when
# documents are added in plugins that list these documents somewhere else.
listed = models.BooleanField(default=True)
template = models.ForeignKey(
DocumentTemplate, on_delete=models.deletion.CASCADE
)
def __str__(self):
if len(self.title) > 0:
return "%(title)s (%(id)s)" % {"title": self.title, "id": self.id}
else:
return str(self.id)
class Meta(object):
ordering = ["-id"]
def clean(self, *args, **kwargs):
if self.comments is None:
self.comments = "{}"
if self.bibliography is None:
self.bibliography = "{}"
def save(self, *args, **kwargs):
self.clean()
super().save(*args, **kwargs)
def get_absolute_url(self):
return "/document/%i/" % self.id
def is_deletable(self):
reverse_relations = [
f
for f in self._meta.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created
and not f.concrete
and f.name
not in [
"accessright",
"accessrightinvite",
"documentrevision",
"documentimage",
]
]
for r in reverse_relations:
if r.remote_field.model.objects.filter(
**{r.field.name: self}
).exists():
|
return False
return True
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_doc_versions(**kwargs))
return errors
@cla
|
ssmethod
def _check_doc_versions(cls, **kwargs):
try:
if len(
cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION))
):
return [
checks.Warning(
"Documents need to be upgraded. Please navigate to "
"/admin/document/document/maintenance/ with a browser "
"as a superuser and upgrade all documents on this "
"server.",
obj=cls,
)
]
else:
return []
except (ProgrammingError, OperationalError):
# Database has not yet been initialized, so don't throw any error.
return []
RIGHTS_CHOICES = (
("write", "Writer"),
# Can write content and can read+write comments.
# Can chat with collaborators.
# Has read access to revisions.
("write-tracked", "Write with tracked changes"),
# Can write tracked content and can read/write comments.
# Cannot turn off tracked changes.
# Can chat with collaborators.
# Has read access to revisions.
("comment", "Commentator"),
# Can read content and can read+write comments.
# Can chat with collaborators.
# Has read access to revisions.
("review-tracked", "Reviewer who can write with tracked changes"),
# Can write tracked content and can read/write his own comments.
# Cannot turn off tracked changes.
# Cannot chat with collaborators.
# Has no access to revisions.
("review", "Reviewer"),
# Can read the content and can read/write his own comments.
# Comments by users with this access right only show the user's
# numeric ID, not their username.
# Cannot chat with collaborators nor see that they are connected.
# Has no access to revisions.
("read", "Reader"),
# Can read content, including comments
# Can chat with collaborators.
# Has read access to revisions.
("read-without-comments", "Reader without comment access"),
# Can read content, but not the comments.
# Cannot chat with collaborators.
# Has no access to revisions.
)
# Editor and Reviewer can only comment and not edit document
COMMENT_ONLY = ("review", "comment")
CAN_UPDATE_DOCUMENT = [
"write",
"write-tracked",
"review",
"review-tracked",
"comment",
]
# Whe
|
CGATOxford/CGATPipelines
|
obsolete/pipeline_genesets.py
|
Python
|
mit
| 12,292
| 0.000163
|
"""===========================
Geneset analysis
===========================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Overview
========
This pipeline performs gene set analysis of one or more
genesets.
Input data are two collections of files, genelists and pathways.
Genelists are tabular data with a gene for each row and associated
attributes in additional columns such as expression level, probability
of being called differentially expressed, etc.
Pathways are tabular data linking genes to the pathways they belong
to.
Generally, it performs the following tasks:
1. The pipeline merges separately prepared gene lists into
a single gene list matrix. There is a continuous scale
version (P-Values, expression values, ...) and a thresholded
version (0 and 1 for genelist membership).
2. The pipeline builds a matrix of gene list annotations to
test against. To this end, it collects:
ENSEMBL GO annotations
KEGG Pathways
User supplied pathways
GSEA database signatures
3. The pipeline performs various gene set enrichment analyses.
These are:
1. Hypergeometric GO analysis
2. Gene set enrichment analysis
4. The pipeline creates various QC metrics. To this end it looks
for biases in any of the gene lists supplied. Biases the pipeline
looks at are:
1. Gene length
2. Nucleotide composition
3. Gene intron/exon structure
4. User supplied table with biases.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.
Input
-----
Optional inputs
+++++++++++++++
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+----------+-----------+---------------------------+
|*Program* |*Version* |*Purpose* |
+----------+-----------+---------------------------+
| | | |
+----------+-----------+---------------------------+
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Glossary
========
.. glossary::
Code
====
"""
from ruffus import *
import sys
import os
import sqlite3
import pandas
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.SetTools as SetTools
import CGATPipelines.PipelineGO as PipelineGO
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py",
prefix="annotations_",
update_interface=True))
# Update the PARAMS dictionary in any PipelineModules
# e.g.:
# import CGATPipelines.PipelineGeneset as PipelineGeneset
# PipelineGeneset.PARAMS = PARAMS
def connect():
'''connect to database.
Use this method to connect to additional databases.
Returns a database connection.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
@transform('genelists.dir/*.tsv.gz',
suffix(".tsv.gz"),
".load")
def loadGeneLists(infile, outfile):
'''load gene list data into database.'''
P.load(infile, outfile,
tablename="genelist_%s" % P.toTable(outfile))
@merge('genelists.dir/*.tsv.gz', 'genelists.tsv.gz')
def buildGeneListMatrix(infiles, outfile):
'''build a gene list matrix for simple pathway analysis
based on hypergeometric test.
A gene list is derived from a gene set by
applying thresholds to the input data set. The
thresholds are defined in the configuration file.
'''
genesets = []
backgrounds = []
headers = []
for infile in infiles:
genelist = pandas.read_csv(
IOTools.openFile(infile),
index_col=0,
sep='\t')
track = P.snip(os.path.basename(infile), ".tsv.gz")
headers.append(track)
field = PARAMS[P.matchParameter("%s_foreground_field" % track)]
min_threshold = PARAMS[P.matchParameter(
"%s_foreground_min_threshold" % track)]
max_threshold = PARAMS[P.matchParameter(
"%s_foreground_max_threshold" % track)]
genesets.append(set(genelist[
(genelist[field] >= min_threshold) &
(genelist[field] <= max_threshold)].index))
E.info('%s: foreground: %f <= %s <= %f' % (track,
min_threshold,
field,
max_threshold))
field = PARAMS[P.matchParameter("%s_background_field" % track)]
min_threshold = PARAMS[P.matchParameter(
"%s_background_min_threshold" % track)]
max_threshold = PARAMS[P.matchParameter(
"%s_background_max_threshold" % track)]
E.info('%s: background: %f <= %s <= %f' % (track,
min_threshold,
field,
max_threshold))
backgrounds.append(set(genelist[
(genelist[field] >= min_threshold) &
(genelist[field] <= max_threshold)].index))
E.info("%s: fg=%i, bg=%i" % (track,
len(genesets[-1]),
len(backgrounds[-1])))
E.info("writing gene list matrix")
with IOTools.openFile(outfile, "w") as outf:
SetTools.writeSets(outf, genesets, labels=headers)
with IOTools.openFile(outfile + ".bg.tsv.gz", "w") as outf:
SetTools.writeSets(outf, backgrounds, labels=headers)
E.info("writing intersection/union matrix")
# build set intersection matr
|
ix
matrix = SetTools.unionIntersectionMatrix(genesets)
with IOTools.openFile(outfile + ".matrix.gz", "w") as outf:
IOTools.writeMatrix(outf, matrix, headers, headers)
matrix = SetTools.unionIntersectionMatrix(backgrounds)
with IOTools.openFile(outfile + ".bg.matrix.gz", "w") as outf:
IOTools.writeMatrix(outf, matr
|
ix, headers, headers)
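# A hypothetical example of the per-track configuration values looked up
# above for a gene list called "genelist1" (field name and thresholds are
# made up; the exact pipeline.ini layout follows the CGAT conventions used
# by this pipeline):
#   genelist1_foreground_field=p_value
#   genelist1_foreground_min_threshold=0
#   genelist1_foreground_max_threshold=0.05
#   genelist1_background_field=p_value
#   genelist1_background_min_threshold=0
#   genelist1_background_max_threshold=1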
@transform(buildGeneListMatrix,
suffix(".tsv.gz"),
".load")
def loadGeneListMatrix(infile, outfile):
    '''load gene list matrix into table.'''
track = P.snip(infile, ".tsv.gz")
P.load(infile, outfile, tablename="%s_foreground" % track)
P.load(infile + ".bg.tsv.gz", outfile, tablename="%s_background" % track)
@transform('pathways.dir/*.tsv.gz',
regex('.*/(.*).tsv.gz'),
r"pathways_\1.load")
def loadPathways(infile, outfile):
'''load pathway information into database.'''
P.load(infile, outfile, "--add-index=gene_id --add-index=go_id")
@follows(mkdir('hypergeometric.dir'))
@transform('pathways.dir/*.tsv.gz',
regex('.*/(.*).tsv.gz'),
add_inputs(buildGeneListMatrix),
r'hypergeometric.dir/\1.tsv')
def runHypergeometricAnalysis(infiles, outfile):
'''run pathway analysis on pathway files in
the directory pathways.dir.
'''
infile_pathways, infile_genelist = infiles
infile_background = infile_genelist + ".bg.tsv.gz"
# TODO:
# gene annotations
# category annotat
|
cyclops1982/pdns
|
regression-tests.recursor-dnssec/test_RootNXTrust.py
|
Python
|
gpl-2.0
| 3,474
| 0.002303
|
import dns
import requests
import socket
from recursortests import RecursorTest
class RootNXTrustRecursorTest(RecursorTest):
def getOutgoingQueriesCount(self):
headers = {'x-api-key': self._apiKey}
url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics'
r = requests.get(url, headers=headers, timeout=self._wsTimeout)
self.assertTrue(r)
self.assertEquals(r.status_code, 200)
self.assertTrue(r.json())
content = r.json()
for entry in content:
if entry['name'] == 'all-outqueries':
return int(entry['value'])
return 0
class testRootNXTrustDisabled(RootNXTrustRecursorTest):
_confdir = 'RootNXTrustDisabled'
_wsPort = 8042
_wsTimeout = 2
_wsPassword = 'secretpassword'
_apiKey = 'secretapikey'
_config_template = """
root-nx-trust=no
qname-minimization=no
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)
def testRootNXTrust(self):
"""
Check that, with root-nx-trust disabled, we still query the root for www2.nx-example.
after receiving a NXD from "." for nx-example. as an answer for www.nx-example.
"""
# first query nx.example.
before = self.getOutgoingQueriesCount()
query = dns.message.make_query('www.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
print(res)
self.assertAuthorityHasSOA(res)
# check that we sent one query to the root
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
# then query nx2.example.
before = after
query = dns.message.make_query('www2.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
self.assertAuthorityHasSOA(res)
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
class testRootNXTrustEnabled(RootNXTrustRecursorTest):
|
_confdir = 'RootNXTrustEnabled'
_wsPort = 8042
_wsTimeout = 2
_wsPassword = 'secretpassword'
_apiKey = 'secretapikey'
_config_template = """
root-nx-trust=yes
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)
def testRootNXTrust(self):
"""
Check that, with root-nx-
|
trust enabled, we don't query the root for www2.nx-example.
after receiving a NXD from "." for nx-example. as an answer for www.nx-example.
"""
# first query nx.example.
before = self.getOutgoingQueriesCount()
query = dns.message.make_query('www.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
print(res)
self.assertAuthorityHasSOA(res)
# check that we sent one query to the root
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
# then query nx2.example.
before = after
query = dns.message.make_query('www2.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
self.assertAuthorityHasSOA(res)
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before)
|
Alexoner/health-care-demo
|
careHealth/earth/action/hooks.py
|
Python
|
gpl-2.0
| 1,374
| 0
|
# -*- coding: utf-8 -*-
import json
def json_pre_process_hook(action, request, *args, **kwargs):
json_data = request.body
if not json_data:
action.ret('002').msg('json_params_required')
return False
try:
param_dict = json.loads(json_data)
except ValueError:
action.ret('003').msg('json_params_invalid')
return False
for key, value in param_dict.items():
setattr(action, key, value)
return True
def query_pre_process_hook(action, request, *args, **kwargs):
params_dict = request.GET
if not params_dict:
return Tr
|
ue
for key, value in params_dict.items():
setattr(action, key, value)
return True
def form_pre_process_hook(action, request, *args, **kwargs):
param_dict = request.POST
if not param_dict:
action.ret('004').msg('form_params_req
|
uired')
return False
for key, value in param_dict.items():
setattr(action, key, value)
return True
def jsonp_post_render_hook(action):
if action.jsonp_callback:
action.resp_data_json(
action.jsonp_callback + '('
+ action.resp_data_json + ')',
)
else:
action.ret('005').msg('jsonp_callback_required')
if action._data:
del action._data
action.render()
return False
return True
|
epam/DLab
|
infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
|
Python
|
apache-2.0
| 15,517
| 0.004705
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
    notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['user_keyname'] = os.environ['edge_user_name']
notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'],
notebook_config['exploratory_name'], args.uuid)
notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
os.environ['application'])
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'])
notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge'
edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
try:
logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} " \
"--keyfile {} " \
"--region {} " \
"--spark_version {} " \
"--hadoop_version {} " \
"--os_user {} " \
"--scala_version {} " \
"--r_mirror {} " \
"--exploratory_name {}".\
format(instance_hostname,
keyfile_name,
os.environ['aws_region'],
os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'],
notebook_config['dlab_ssh_user'],
os.environ['notebook_scala_version'],
os.environ['notebook_r_mirror'],
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure jupyter.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": notebook_config['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, keyfile_name, json.dumps(additional_c
|
imndszy/voluntary
|
app/admin/__init__.py
|
Python
|
mit
| 178
| 0.005618
|
# -*- coding:utf8 -*-
# Author: [email protected]
# github: https://github.com/imndszy
from flask import Blueprint
admin = Blueprint('admin', __name__)
from . import views
|
lightsweeper/lightsweeper-api
|
setup.py
|
Python
|
mit
| 528
| 0.020833
|
#!/usr/bin/env python
import sys
from setuptools import setup
if sys.hexversion < 0x030200a1:
print ("LightSweeper requires python 3.2 or higher.")
print("Exiting...")
sys.exit(1)
setup(name='LightSweeper',
version='0.6b',
description='The LightSweeper API',
author='The LightSweeper Team',
author_email='[email protected]',
url='http://www.lightsweeper.org',
packages=['lightsweeper'],
package_data={"lightsweeper" : ["sounds/*.wav"]},
include_package_data=True
)
|
tantalor/emend
|
app/emend/twitter.py
|
Python
|
mit
| 1,161
| 0.014643
|
from megaera import local, json
from oauth import signed_url
from google.appengine.api import urlfetch
__TWITTER_API__ = "http://api.twitter.com/1"
def tweet(status, **credentials):
  if not credentials:
# shortcut for no-credentials case
credentials = local.config_get('twitter')
if not credentials:
return
update_url = "%s/statuses/update.json" % __TWITTER_API__
fetch_url = signed_url(url=update_url, method='POST', status=status, **credentials)
response = urlfetch.fetch(fetch_url, method=urlfetch.POST)
try:
content = json.read(response.content)
return content.get('id')
except json.ReadException:
pass
def untweet(status_id, **credentials):
if not credentials:
# shortcut for no-credentials case
credentials = local.config_get('twitter')
if not credentials:
return
destroy_url = "%s/statuses/destroy.json" % __TWITTER_API__
fetch_url = signed_url(url=destroy_url, method='POST', id=status_id, **credentials)
response = urlfetch.fetch(fetch_url, method=urlfetch.POST)
try:
content = json.read(response.content)
return content.get('id')
except json.ReadException:
pass
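# Hypothetical usage sketch, not from the original project: both helpers fall
# back to local.config_get('twitter') when called without credentials. The
# keyword names below are assumptions about what signed_url() expects.
def _tweet_example():
  status_id = tweet('Hello from emend',
                    consumer_key='ck', consumer_secret='cs',
                    token='t', token_secret='ts')
  if status_id:
    untweet(status_id,
            consumer_key='ck', consumer_secret='cs',
            token='t', token_secret='ts')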
|
nmunro/azathoth
|
news/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 603
| 0.001658
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-06 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NewsItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('content', models.TextField()),
],
),
]
|
LegionXI/pydarkstar
|
pydarkstar/tests/auction/test_worker.py
|
Python
|
mit
| 350
| 0.014286
|
import unittest
import logging
logging.getLogger().setLevel(logging.DEBUG)
from ...auction.worker import Worker
from ...database import Database
from ...rc import sql
class TestCase(unittest.TestCase):
    def setUp(self):
self.db = Database.pymysql(**sql)
self.ob = Worker(self.db, fail=True)
def test_init(self):
pass
|
Juanvulcano/zulip
|
zerver/tests/test_messages.py
|
Python
|
apache-2.0
| 72,787
| 0.001924
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db.models import Q
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.utils import timezone
from zerver.lib import bugdown
from zerver.decorator import JsonableError
from zerver.lib.test_runner import slow
from zilencer.models import Deployment
from zerver.lib.message import (
MessageDict,
message_to_dict,
)
from zerver.lib.test_helpers import (
get_user_messages,
make_client,
message_ids, message_stream_count,
most_recent_message,
most_recent_usermessage,
queries_captured,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
MAX_MESSAGE_LENGTH, MAX_SUBJECT_LENGTH,
Message, Realm, Recipient, Stream, UserMessage, UserProfile, Attachment, RealmAlias,
get_realm_by_string_id, get_stream, get_user_profile_by_email,
Reaction, sew_messages_and_reactions
)
from zerver.lib.actions import (
check_message, check_send_message,
do_create_user,
get_client,
get_recipient,
)
from zerver.lib.upload import create_attachment
from zerver.views.messages import create_mirrored_message_users
import datetime
import DNS
import mock
import time
import ujson
from six.moves import range
from typing import Any, Optional, Text
class TopicHistoryTest(ZulipTestCase):
def test_topics_history(self):
# type: () -> None
# verified: int(UserMessage.flags.read) == 1
email = '[email protected]'
stream_name = 'Verona'
self.login(email)
user_profile = get_user_profile_by_email(email)
stream = Stream.objects.get(name=stream_name)
recipient = get_recipient(Recipient.STREAM, stream.id)
def create_test_message(topic, read, starred=False):
# type: (str, bool, bool) -> None
hamlet = get_user_profile_by_email('[email protected]')
message = Message.objects.create(
sender=hamlet,
recipient=recipient,
subject=topic,
content='whatever',
pub_date=timezone.now(),
sending_client=get_client('whatever'),
)
flags = 0
if read:
flags |= UserMessage.flags.read
# use this to make sure our query isn't confused
# by other flags
if starred:
flags |= UserMessage.flags.starred
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=flags,
)
create_test_message('topic2', read=False)
create_test_message('toPIc1', read=False, starred=True)
create_test_message('topic2', read=False)
create_test_message('topic2', read=True)
create_test_message('topic2', read=False, starred=True)
create_test_message('Topic2', read=False)
create_test_message('already_read', read=True)
endpoint = '/json/users/me/%d/topics' % (stream.id,)
result = self.client_get(endpoint, dict())
self.assert_json_success(result)
history = ujson.loads(result.content)['topics']
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
self.assertEqual(history[:3], [
[u'already_read', 0],
[u'Topic2', 4],
[u'toPIc1', 1],
])
def test_bad_stream_id(self):
# type: () -> None
email = '[email protected]'
self.login(email)
# non-sensible stream id
endpoint = '/json/users/me/9999999999/topics'
result = self.client_get(endpoint, dict())
self.assert_json_error(result, 'Invalid stream id')
# out of realm
bad_stream = self.make_stream(
'mit_stream',
realm=get_realm_by_string_id('mit')
)
endpoint = '/json/users/me/%s/topics' % (bad_stream.id,)
result = self.client_get(endpoint, dict())
self.assert_json_error(result, 'Invalid stream id')
# private stream to which I am not subscribed
private_stream = self.make_stream(
'private_stream',
invite_only=True
)
endpoint = '/json/users/me/%s/topics' % (private_stream.id,)
result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
class TestCrossRealmPMs(ZulipTestCase):
def make_realm(self, domain):
# type: (Text) -> Realm
realm = Realm.objects.create(string_id=domain, domain=domain, invite_required=False)
RealmAlias.objects.create(realm=realm, domain=domain)
return realm
    def setUp(self):
# type: () -> None
dep = Deployment()
dep.base_api_url = "https://zulip.com/api/"
dep.base_site_url = "https://zulip.com/"
# We need to save the object before we can access
# the many-to-many relationship 'realms'
dep.save()
dep.realms = [get_realm_by_string_id("zulip")]
dep.save()
def create_user(self, email):
# type: (Text) -> UserProfile
username, domain = email.split('@')
self.register(username, 'test', domain=domain)
return get_user_profile_by_email(email)
@override_settings(CROSS_REALM_BOT_EMAILS=['[email protected]',
'[email protected]'])
def test_realm_scenarios(self):
# type: () -> None
r1 = self.make_realm('1.example.com')
r2 = self.make_realm('2.example.com')
r3 = self.make_realm('3.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
deployment.realms.add(r2)
deployment.realms.add(r3)
def assert_message_received(to_user, from_user):
# type: (UserProfile, UserProfile) -> None
messages = get_user_messages(to_user)
self.assertEqual(messages[-1].sender.pk, from_user.pk)
def assert_disallowed():
# type: () -> Any
return self.assertRaisesRegex(
JsonableError,
'You can\'t send private messages outside of your organization.')
random_zulip_email = '[email protected]'
user1_email = '[email protected]'
user1a_email = '[email protected]'
user2_email = '[email protected]'
user3_email = '[email protected]'
feedback_email = '[email protected]'
support_email = '[email protected]' # note: not zulip.com
self.create_user(random_zulip_email)
user1 = self.create_user(user1_email)
user1a = self.create_user(user1a_email)
user2 = self.create_user(user2_email)
self.create_user(user3_email)
feedback_bot = get_user_profile_by_email(feedback_email)
support_bot = self.create_user(support_email)
# Users can PM themselves
self.send_message(user1_email, user1_email, Recipient.PERSONAL)
assert_message_received(user1, user1)
# Users on the same realm can PM each other
self.send_message(user1_email, user1a_email, Recipient.PERSONAL)
assert_message_received(user1a, user1)
# Cross-realm bots in the zulip.com realm can PM any realm
self.send_message(feedback_email, user2_email, Recipient.PERSONAL)
assert_message_received(user2, feedback_bot)
# All users can PM cross-realm bots in the zulip.com realm
self.send_message(user1_email, feedback_email, Recipient.PERSONAL)
assert_message_received(feedback_bot, user1)
# Users can PM cross-realm bots on non-zulip realms.
# (The support bot represents some theoretical bot that we may
# create in the future that does not have zulip.com as its realm.)
self.send_message(user1_email, [support_email], Recipient.PERSONAL)
assert_message_received(support_bot, user1)
# Allow sending PMs to two different cross-realm bots simultaneously.
|
colloquium/spacewalk
|
backend/satellite_tools/rhn_ssl_dbstore.py
|
Python
|
gpl-2.0
| 4,252
| 0.005174
|
#
# Copyright (c) 2009--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
from optparse import Option, OptionParser
from spacewalk.common import rhnTB
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.server import rhnSQL
import satCerts
DEFAULT_TRUSTED_CERT = 'RHN-ORG-TRUSTED-SSL-CERT'
def fetchTraceback(method=None, req=None, extra=None):
    """ a cheat for snagging just the string value of a Traceback
NOTE: this tool may be needed for RHN Satellite 3.2 as well,
which doesn't have a fetchTraceback. So... this if for
compatibility.
"""
from cStringIO import StringIO
exc = StringIO()
rhnTB.Traceback(method=method, req=req, mail=0, ostream=exc,
extra=extra, severity=None)
return exc.getvalue()
def processCommandline():
initCFG('server.satellite')
options = [
Option('--ca-cert', action='store', default=DEFAULT_TRUSTED_CERT, type="string", help='public CA certificate, default is %s' % DEFAULT_TRUSTED_CERT),
Option('--label', action='store', default='RHN-ORG-TRUSTED-SSL-CERT', type="string", help='FOR TESTING ONLY - alternative database label for this CA certificate, default is "RHN-ORG-TRUSTED-SSL-CERT"'),
Option('-v','--verbose', action='count', help='be verbose (accumulable: -vvv means "be *really* verbose").'),
]
values, args = OptionParser(option_list=options).parse_args()
# we take no extra commandline arguments that are not linked to an option
if args:
msg = ("ERROR: these arguments make no sense in this context (try "
"--help): %s\n" % repr(args))
raise ValueError(msg)
if not os.path.exists(values.ca_cert):
sys.stderr.write("ERROR: can't find CA certificate at this location: "
"%s\n" % values.ca_cert)
sys.exit(10)
try:
db_backend = CFG.DB_BACKEND
db_host = CFG.DB_HOST
db_port = CFG.DB_PORT
db_user = CFG.DB_user
db_password = CFG.DB_PASSWORD
db_name = CFG.DB_NAME
rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port,
username=db_user, password=db_password, database=db_name)
except:
sys.stderr.write("""\
ERROR: there was a problem trying to initialize the database:
%s\n""" % fetchTraceback())
sys.exit(11)
if values.verbose:
print 'Public CA SSL certificate: %s' % values.ca_cert
return values
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def main():
""" main routine
10 CA certificate not found
11 DB initialization failure
12 no Organization ID. Something very bad is going on.
13 Couldn't insert the certificate for whatever reason.
"""
values = processCommandline()
def writeError(e):
sys.stderr.write('\nERROR: %s\n' % e)
try:
satCerts.store_rhnCryptoKey(values.label, values.ca_cert, verbosity=values.verbose)
except satCerts.NoOrgIdError, e:
writeError("no organization ID!?!\n\n%s\n" % fetchTraceback())
sys.exit(12)
except satCerts.CaCertInsertionError, e:
        writeError("couldn't insert the CA certificate!\n\n%s\n" % fetchTraceback())
sys.exit(13)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
sys.stderr.write('\nWARNING: intended to be wrapped by another executable\n'
' calling program.\n')
sys.exit(main() or 0)
#===============================================================================
|
dasbruns/netzob
|
src/netzob/Common/Models/Types/Raw.py
|
Python
|
gpl-3.0
| 7,876
| 0.001398
|
# -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | 01001110 01100101 01110100 01111010 01101111 01100010 |
# | |
# | Netzob : Inferring communication protocols |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : http://www.netzob.org |
# | @contact : [email protected] |
# | @sponsors : Amossys, http://www.amossys.fr |
# | Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | File contributors : |
# | - Georges Bossert <georges.bossert (a) supelec.fr> |
# | - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Standard library imports |
# +---------------------------------------------------------------------------+
import random
import os
from bitarray import bitarray
# +---------------------------------------------------------------------------+
# | Related third party imports |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Local application imports |
# +---------------------------------------------------------------------------+
from netzob.Common.Models.Types.AbstractType import AbstractType
class Raw(AbstractType):
"""Raw netzob data type expressed in bytes.
For instance, we can use this type to parse any raw field of 2 bytes:
>>> from netzob.all import *
>>> f = Field(Raw(nbBytes=2))
or with a specific value (default is little endianness)
>>> f = Field(Raw('\x01\x02\x03'))
>>> print f.domain.dataType
Raw='\\x01\\x02\\x03' ((0, 24))
>>> f.domain.dataType.endianness = AbstractType.ENDIAN_BIG
>>> print f.domain.dataType
Raw='\\x01\\x02\\x03' ((0, 24))
"""
def __init__(self, value=None, nbBytes=None, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
if value is not None and not isinstance(value, bitarray):
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
value = TypeConverter.convert(value, Raw, BitArray)
nbBits = self._convertNbBytesinNbBits(nbBytes)
super(Raw, self).__init__(self.__class__.__name__, value, nbBits, unitSize=unitSize, endianness=endianness, sign=sign)
def __str__(self):
if self.value is not None:
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
            from netzob.Common.Models.Types.HexaString import HexaString
return "{0}={1} ({2})".format(self.typeName, repr(TypeConverter.convert(self.value, BitArray, Raw)), self.size)
else:
return "{0}={1} ({2})".format(self.typeName, self.value, self.size)
    def __repr__(self):
if self.value is not None:
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
return str(TypeConverter.convert(self.value, BitArray, self.__class__))
else:
return str(self.value)
def _convertNbBytesinNbBits(self, nbBytes):
nbMinBit = None
nbMaxBit = None
if nbBytes is not None:
if isinstance(nbBytes, int):
nbMinBit = nbBytes * 8
nbMaxBit = nbMinBit
else:
if nbBytes[0] is not None:
nbMinBit = nbBytes[0] * 8
if nbBytes[1] is not None:
nbMaxBit = nbBytes[1] * 8
return (nbMinBit, nbMaxBit)
def generate(self, generationStrategy=None):
"""Generates a random Raw that respects the requested size.
>>> from netzob.all import *
>>> a = Raw(nbBytes=(10))
>>> gen = a.generate()
>>> print len(gen)
80
>>> from netzob.all import *
>>> a = Raw(nbBytes=(10, 20))
>>> gen = a.generate()
>>> print 10<=len(gen) and 20<=len(gen)
True
"""
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
minSize, maxSize = self.size
if maxSize is None:
maxSize = AbstractType.MAXIMUM_GENERATED_DATA_SIZE
if minSize is None:
minSize = 0
generatedSize = random.randint(minSize, maxSize)
return TypeConverter.convert(os.urandom(generatedSize / 8), Raw, BitArray)
@staticmethod
def decode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
return data
@staticmethod
def encode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
return data
@staticmethod
def canParse(data):
"""Computes if specified data can be parsed as raw which is always the case if the data is at least 1 length and aligned on a byte.
>>> from netzob.all import *
>>> Raw.canParse(TypeConverter.convert("hello netzob", ASCII, BitArray))
True
The ascii table is defined from 0 to 127:
>>> Raw.canParse(TypeConverter.convert(128, Decimal, BitArray, src_sign=AbstractType.SIGN_UNSIGNED))
True
:param data: the data to check
:type data: python raw
:return: True if data can be parsed as a Raw which is always the case (if len(data)>0)
:rtype: bool
:raise: TypeError if the data is None
"""
if data is None:
raise TypeError("data cannot be None")
if len(data) == 0:
return False
if len(data) % 8 != 0:
return False
return True
|
BigRoy/vrayformayaUtils
|
tests/attributes_tests.py
|
Python
|
gpl-2.0
| 5,376
| 0.006696
|
import unittest
import maya.cmds as mc
import vrayformayaUtils as vfm
class TestMeshAttributes(unittest.TestCase):
"""
This is a generic TestCase for most v-ray mesh attributes.
Note that it doesn't test every single case of changes, but it should capture overall changes of the code.
"""
def setUp(self):
self.mesh = mc.polyCube()[0]
def test_subdivision(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape)))
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=False)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), False)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=True)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), True)
vfm.attributes.vray_subdivision(self.mesh, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
# Apply to transform without smart convert (should not work)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
for shape in shapes:
vfm.attributes.vray_subdivision(shape, state=True, smartConvert=False)
self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape)))
vfm.attributes.vray_subdivision(shape, state=False, smartConvert=False)
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
def test_vray_subquality(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
# should run without errors:
vfm.attributes.vray_subquality(transform, vrayEdgeLength=1, vrayMaxSubdivs=1, vrayOverrideGlobalSubQual=1, vrayViewDep=1)
vfm.attributes.vray_subquality(transform, vrayEdgeLength=0, vrayMaxSubdivs=0, vrayOverrideGlobalSubQual=0, vrayViewDep=0)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vrayEdgeLength".format(shape)))
self.assertTrue(mc.objExists("{0}.vrayMaxSubdivs".format(shape)))
self.assertTrue(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape)))
            self.assertTrue(mc.objExists("{0}.vrayViewDep".format(shape)))
vfm.attributes.vray_subquality(shapes, smartConvert=False, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayEdgeLength".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayMaxSubdivs".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayViewDep".format(shape)))
def test_vray_user_attributes(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
value = "Testing this attribute"
vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value)
value2 = "Aaaaaaap"
for shape in shapes:
self.assertNotEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2)
vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value2)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2)
vfm.attributes.vray_user_attributes(shapes, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayUserAttributes".format(shape)))
def test_vray_displacement(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
vfm.attributes.vray_displacement(transform, vrayDisplacementAmount=10)
vfm.attributes.vray_displacement(transform, vrayDisplacementShift=5)
vfm.attributes.vray_displacement(transform, vrayDisplacementType=2, vrayDisplacementUseBounds=True,
vrayEnableWaterLevel=True, vrayWaterLevel=2.0)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
vfm.attributes.vray_displacement(shapes, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
vfm.attributes.vray_displacement(shapes, state=0)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
def tearDown(self):
mc.delete(self.mesh)
#import unittest
#import vrayformayaUtils_tests.attributes_tests as attrTest
#reload(attrTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(attrTest.TestMeshAttributes)
#unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
unittest.main()
|
OpusVL/odoo-confutil
|
confutil/account_setup.py
|
Python
|
agpl-3.0
| 4,594
| 0.00566
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Post-installation configuration helpers
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""Common code for scripting installation of a chart of accounts
into a company.
The function you probably want to use is setup_company_accounts()
"""
from datetime import date
import logging
from . import confutil
_logger = logging.getLogger(__name__)
def setup_company_accounts(cr, registry, uid, company, chart_template, code_digits=None, context=None):
"""This sets up accounts, fiscal year and periods for the given company.
company: A res.company object
chart_template: An account.chart.template object
code_digits: The number of digits (the default is usually 6)
context: e.g. {'lang': 'en_GB', 'tz': False, 'uid': openerp.SUPERUSER_ID}
A financial year is set up starting this year on 1st Jan and ending this year on 31st Dec.
"""
unconfigured_companies = unconfigured_company_ids(cr, registry, uid, context=context)
if company.id in unconfigured_companies:
setup_chart_of_accounts(cr, registry, uid,
company_id=company.id,
chart_template_id=chart_template.id,
code_digits=code_digits,
context=context,
)
today = date.today()
fy_name = today.strftime('%Y')
fy_code = 'FY' + fy_name
account_start = today.strftime('%Y-01-01')
account_end = today.strftime('%Y-12-31')
create_fiscal_year(cr, registry, uid,
company_id=company.id,
name=fy_name,
code=fy_code,
start_date=account_start,
end_date=account_end,
context=context,
)
confutil.set_account_settings(cr, registry, uid,
company=company,
changes={
'date_start': account_start,
'date_stop': account_end,
'period': 'month',
},
context=context,
)
def unconfigured_company_ids(cr, registry, uid, context=None):
"""Return list of ids of companies without a chart of accounts.
"""
account_installer = registry['account.installer']
return account_installer.get_unconfigured_cmp(cr, uid, context=context)
def setup_chart_of_accounts(cr, registry, uid, company_id, chart_template_id, code_digits=None, context=None):
chart_wizard = registry['wizard.multi.charts.accounts']
defaults = chart_wizard.default_get(cr, uid, ['bank_accounts_id', 'currency_id'], context=context)
bank_accounts_spec = defaults.pop('bank_accounts_id')
bank_accounts_id = [(0, False, i) for i in bank_accounts_spec]
data = defaults.copy()
data.update({
"chart_template_id": chart_template_id,
'company_id': company_id,
        'bank_accounts_id': bank_accounts_id,
})
onchange = chart_wizard.onchange_chart_template_id(cr, uid, [], data['chart_template_id'], context=context)
data.update(onchange['value'])
if code_digits:
data.update({'code_digits': code_digits})
conf_id = chart_wizard.create(cr, uid, data, context=context)
chart_wizard.execute(cr, uid, [conf_id], context=context)
def create_fiscal_year(cr, registry, uid, company_id, name, code, start_date, end_date, context=None):
fy_model = registry['account.fiscalyear']
fy_data = fy_model.default_get(cr, uid, ['state', 'company_id'], context=context).copy()
fy_data.update({
'company_id': company_id,
'name': name,
'code': code,
'date_start': start_date,
'date_stop': end_date,
})
fy_id = fy_model.create(cr, uid, fy_data, context=context)
fy_model.create_period(cr, uid, [fy_id], context=context)
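# Illustrative call site, not part of this module: driving
# setup_company_accounts() from an OpenERP 7-style post-install hook. The
# chart template XML id ('l10n_uk.l10n_uk_chart_template') is an assumption
# used only for the example.
def example_configure_accounts(cr, registry, uid, context=None):
    data_model = registry['ir.model.data']
    company = registry['res.company'].browse(cr, uid, 1, context=context)
    chart_ref = data_model.get_object_reference(
        cr, uid, 'l10n_uk', 'l10n_uk_chart_template')
    chart_template = registry['account.chart.template'].browse(
        cr, uid, chart_ref[1], context=context)
    setup_company_accounts(cr, registry, uid, company, chart_template,
                           code_digits=6, context=context)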
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Bl4ckb0ne/ring-api
|
ring_api/extra/servers/cherrypy/api/root.py
|
Python
|
gpl-3.0
| 347
| 0.008646
|
import cherrypy, json
from bottle import request, get
from ring_api.server.api import ring, user
class Root(object):
def __init__(self, dring):
self.dring = dring
self.user = user.User(dring)
@cherrypy.expose
def index(self):
return 'todo'
@cherrypy.expose
def routes(self):
        return 'todo'
|
charlietsai/catmap
|
catmap/cli.py
|
Python
|
gpl-3.0
| 2,874
| 0.000348
|
import os
import shutil
usage = {}
usage['import'] = """catmap import <mkm-file>
Open a *.mkm project file and work with it interactively.
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import catmap
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=catmap.__version__)
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
def main(args=None):
"""The CLI main entry point function.
The optional argument args, can be used to
directly supply command line argument like
$ catmap <args>
otherwise args will be taken from STDIN.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
elif args[0] == 'import':
if len(args) < 2:
parser.error('mkm filename expected.')
from catmap import ReactionModel
mkm_file = args[1]
global model
model = ReactionModel(setup_file=mkm_file)
sh(banner='Note: model = catmap.ReactionModel(setup_file=\'%s\')\n# do model.run()\nfor a fully initialized model.' %
args[1])
def sh(banner):
"""Wrapper around interactive ipython shell
that factors out ipython version depencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
                from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
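# Small worked example, illustrative only: match_keys() expands an unambiguous
# command prefix into the full command name and calls parser.error() when the
# prefix is unknown or ambiguous.
def _match_keys_example():
    _options, _args, parser = get_options('import project.mkm', get_parser=True)
    return match_keys('imp', usage, parser)  # -> 'import'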
|
weykent/ansible-runit-sv
|
tests/test_runit_sv.py
|
Python
|
isc
| 14,619
| 0
|
# Copyright (c) weykent <[email protected]>
# See COPYING for details.
import pytest
import runit_sv as _runit_sv_module
SETTABLE_MASK = _runit_sv_module.SETTABLE_MASK
idempotent = pytest.mark.idempotent
def pytest_generate_tests(metafunc):
if 'idempotency_state' not in metafunc.fixturenames:
return
states = ['regular']
if getattr(metafunc.function, 'idempotent', False):
states.append('checked')
metafunc.parametrize(('idempotency_state'), states)
def assert_no_local_failure(contacted):
assert not contacted['local'].get('failed')
def assert_local_failure(contacted):
assert contacted['local'].get('failed')
class FakeAnsibleModuleBailout(BaseException):
def __init__(self, success, params):
super(FakeAnsibleModuleBailout, self).__init__(success, params)
self.success = success
self.params = params
class FakeAnsibleModule(object):
def __init__(self, params, check_mode):
self.params = params
self.check_mode = check_mode
def __call__(self, argument_spec, supports_check_mode):
self.argument_spec = argument_spec
for name, spec in self.argument_spec.iteritems():
if name not in self.params:
self.params[name] = spec.get('default')
return self
def exit_json(self, **params):
raise FakeAnsibleModuleBailout(success=True, params=params)
def fail_json(self, **params):
raise FakeAnsibleModuleBailout(success=False, params=params)
def setup_change_checker(params):
must_change = params.pop('_must_change', False)
must_not_change = params.pop('_must_not_change', False)
if must_change and must_not_change:
raise ValueError('invalid request: must change and must not change')
if must_change:
def check(changed):
assert changed
elif must_not_change:
def check(changed):
assert not changed
else:
check = None
return check
@pytest.fixture(params=['real', 'fake'])
def runit_sv(request, idempotency_state):
if request.param == 'real':
ansible_module = request.getfuncargvalue('ansible_module')
def do(**params):
should_fail = params.pop('_should_fail', False)
params['_runner_kwargs'] = {
'check': params.pop('_check', False),
}
check_change = setup_change_checker(params)
contacted = ansible_module.runit_sv(**params)
if should_fail:
assert_local_failure(contacted)
else:
assert_no_local_failure(contacted)
if check_change is not None:
check_change(contacted['local']['changed'])
elif request.param == 'fake':
def do(**params):
should_fail = params.pop('_should_fail', False)
check = params.pop('_check', False)
            check_change = setup_change_checker(params)
module = FakeAnsibleModule(params, check)
with pytest.raises(FakeAnsibleModuleBailout) as excinfo:
_runit_sv_module.main(module)
assert excinfo.value.success != should_fail
if check_change is not None:
                check_change(excinfo.value.params['changed'])
else:
raise ValueError('unknown param', request.param)
if idempotency_state == 'checked':
_do = do
def do(**params):
_do(_must_change=True, **params)
_do(_must_not_change=True, **params)
return do
@pytest.fixture
def basedir(tmpdir):
tmpdir.join('sv').mkdir()
tmpdir.join('service').mkdir()
tmpdir.join('init.d').mkdir()
return tmpdir
def base_directories(basedir, **overrides):
ret = {'sv_directory': [basedir.join('sv').strpath],
'service_directory': [basedir.join('service').strpath],
'init_d_directory': [basedir.join('init.d').strpath]}
ret.update(overrides)
return ret
def settable_mode(path):
return path.stat().mode & SETTABLE_MASK
def assert_file(path, contents, mode):
assert path.read() == contents and settable_mode(path) == mode
@idempotent
def test_basic_runscript(runit_sv, basedir):
"""
A basic invocation with name and runscript creates the sv directory
containing just the runscript, links the service directory, and links an
LSB service.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 1
assert_file(sv.join('run'), contents='spam eggs', mode=0o755)
assert basedir.join('service', 'testsv').readlink() == sv.strpath
assert basedir.join('init.d', 'testsv').readlink() == '/usr/bin/sv'
@idempotent
def test_log_runscript(runit_sv, basedir):
"""
Adding a log_runscript creates a log/run script as well.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
log_runscript='eggs spam',
**base_directories(basedir))
sv_log = basedir.join('sv', 'testsv', 'log')
assert len(sv_log.listdir()) == 1
assert_file(sv_log.join('run'), contents='eggs spam', mode=0o755)
@idempotent
def test_supervise_link(runit_sv, basedir):
"""
The supervise_link option will create a link to some arbitrary location.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
supervise_link='/spam/eggs',
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 2
assert sv.join('supervise').readlink() == '/spam/eggs'
@idempotent
def test_log_supervise_link(runit_sv, basedir):
"""
The log_supervise_link option will also create a link to some arbitrary
location.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
log_runscript='eggs spam',
log_supervise_link='/eggs/spam',
**base_directories(basedir))
sv_log = basedir.join('sv', 'testsv', 'log')
assert len(sv_log.listdir()) == 2
assert sv_log.join('supervise').readlink() == '/eggs/spam'
@idempotent
def test_extra_files(runit_sv, basedir):
"""
Adding extra_files will copy additional files into the sv directory.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
'eggs': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 3
assert_file(sv.join('spam'), contents='eggs', mode=0o644)
assert_file(sv.join('eggs'), contents='spam', mode=0o644)
@idempotent
def test_extra_scripts(runit_sv, basedir):
"""
Adding extra_scripts will copy additional scripts into the sv directory.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_scripts={
'spam': 'eggs',
'eggs': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 3
assert_file(sv.join('spam'), contents='eggs', mode=0o755)
assert_file(sv.join('eggs'), contents='spam', mode=0o755)
@idempotent
def test_extra_files_and_scripts(runit_sv, basedir):
"""
Adding extra_files and extra_scripts both will create both additional files
and additional scripts.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
'eggs': 'spam',
},
extra_scripts={
'spams': 'eggs',
'eggss': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 5
assert_file(sv.join('spam'), contents='eggs', mode=0o644)
assert_file(sv.join('eggs'), contents='spam', mode=0o644)
assert_file(sv.join('spams'), contents='eggs', mode=0o755)
assert_file(sv.join('eggss'), contents='spam', mode=0o755)
def test_no_overlapping_extra_files_and_scripts(runit_sv, basedir):
"""
If extra_files and extra_scripts both touch the same path, there's an
immediate f
|
foreni-packages/hachoir-regex
|
hachoir_regex/parser.py
|
Python
|
gpl-2.0
| 6,130
| 0.006199
|
"""
Parse string to create Regex object.
TODO:
- Support \: \001, \x00, \0, \ \[, \(, \{, etc.
- Support Python extensions: (?:...), (?P<name>...), etc.
- Support \<, \>, \s, \S, \w, \W, \Z <=> $, \d, \D, \A <=> ^, \b, \B, [[:space:]], etc.
"""
from hachoir_regex import (RegexString, RegexEmpty, RegexRepeat,
RegexDot, RegexWord, RegexStart, RegexEnd,
RegexRange, RegexRangeItem, RegexRangeCharacter)
import re
REGEX_COMMAND_CHARACTERS = '.^$[](){}|+?*\\'
def parseRange(text, start):
r"""
>>> parseRange('[a]b', 1)
(<RegexRange '[a]'>, 3)
>>> parseRange('[a-z]b', 1)
(<RegexRange '[a-z]'>, 5)
>>> parseRange('[^a-z-]b', 1)
(<RegexRange '[^a-z-]'>, 7)
>>> parseRange('[^]-]b', 1)
(<RegexRange '[^]-]'>, 5)
>>> parseRange(r'[\]abc]', 1)
(<RegexRange '[]a-c]'>, 7)
>>> parseRange(r'[a\-x]', 1)
(<RegexRange '[ax-]'>, 6)
"""
index = start
char_range = []
exclude = False
if text[index] == '^':
exclude = True
index += 1
if text[index] == ']':
char_range.append(RegexRangeCharacter(']'))
index += 1
while index < len(text) and text[index] != ']':
if index+1 < len(text) \
and text[index] == '\\':
char_range.append(RegexRangeCharacter(text[index+1]))
index += 2
elif index+1 < len(text) \
and text[index] == '-' and text[index+1] == ']':
break
elif index+3 < len(text) \
and text[index+1] == '-' \
and text[index+2] != ']':
char_range.append(RegexRangeItem(ord(text[index]), ord(text[index+2])))
index += 3
else:
char_range.append(RegexRangeCharacter(text[index]))
index += 1
if index < len(text) and text[index] == '-':
char_range.append(RegexRangeCharacter('-'))
index += 1
if index == len(text) or text[index] != ']':
raise SyntaxError('Invalid range: %s' % text[start-1:index])
return RegexRange(char_range, exclude), index+1
def parseOr(text, start):
"""
>>> parseOr('(a)', 1)
(<RegexString 'a'>, 3)
>>> parseOr('(a|c)', 1)
(<RegexRange '[ac]'>, 5)
>>> parseOr(' (a|[bc]|d)', 2)
(<RegexRange '[a-d]'>, 11)
"""
index = start
# (?:...): Skip Python prefix '?:'
if text[index:index+2] == '?:':
index += 2
if text[index] == '?':
raise NotImplementedError("Doesn't support Python extension (?...)")
regex = None
while True:
new_regex, index = _parse(text, index, "|)")
if regex:
regex = regex | new_regex
else:
regex = new_regex
if len(text) <= index:
raise SyntaxError('Missing closing parenthesis')
if text[index] == ')':
break
index += 1
index += 1
if regex is None:
regex = RegexEmpty()
return regex, index
REPEAT_REGEX = re.compile("([0-9]+)(,[0-9]*)?}")
def parseRepeat(text, start):
"""
>>> parseRepeat('a{0,1}b', 2)
(0, 1, 6)
>>> parseRepeat('a{12}', 2)
(12, 12, 5)
"""
match = REPEAT_REGEX.match(text, start)
if not match:
raise SyntaxError('Unable to parse repetition '+text[start:])
rmin = int(match.group(1))
if match.group(2):
text = match.group(2)[1:]
if text:
rmax = int(text)
else:
rmax = None
else:
rmax = rmin
return (rmin, rmax, match.end(0))
CHAR_TO_FUNC = {'[': parseRange, '(': parseOr}
CHAR_TO_CLASS = {'.': RegexDot, '^': RegexStart, '$': RegexEnd}
CHAR_TO_REPEAT = {'*': (0, None), '?': (0, 1), '+': (1, None)}
def _parse(text, start=0, until=None):
if len(text) == start:
return RegexEmpty(), 0
index = start
regex = RegexEmpty()
last = None
done = False
while index < len(text):
char = text[index]
if until and char in until:
done = True
break
if char in REGEX_COMMAND_CHARACTERS:
if char in CHAR_TO_FUNC:
new_regex, index = CHAR_TO_FUNC[char] (text, index+1)
elif char in CHAR_TO_CLASS:
new_regex = CHAR_TO_CLASS[char]()
index += 1
elif char == '{':
rmin, rmax, index = parseRepeat(text, index+1)
new_regex = RegexRepeat(last, rmin, rmax)
last = None
elif char in CHAR_TO_REPEAT:
rmin, rmax = CHAR_TO_REPEAT[char]
if last is None:
raise SyntaxError('Repetition character (%s) without previous expression' % text[index])
new_regex = RegexRepeat(last, rmin, rmax)
last = None
index += 1
elif char == "\\":
index += 1
if index == len(text):
raise SyntaxError("Antislash (\\) without escaped character")
char = text[index]
if char == 'b':
new_regex = RegexWord()
else:
                if not(char in REGEX_COMMAND_CHARACTERS or char in " '"):
raise SyntaxError("Operator '\\%s' is not supported" % char)
new_regex = RegexString(char)
index += 1
else:
raise NotImplementedError("Operator '%s' is not supported" % char)
if last:
regex = regex + last
last = new_regex
else:
subtext = text[index]
index += 1
if last:
regex = regex + last
last = RegexString(subtext)
if last:
regex = regex + last
return regex, index
def parse(text):
r"""
>>> parse('')
<RegexEmpty ''>
>>> parse('abc')
<RegexString 'abc'>
>>> parse("chats?")
<RegexAnd 'chats?'>
>>> parse('[bc]d')
<RegexAnd '[bc]d'>
>>> parse("\\.")
<RegexString '\.'>
"""
regex, index = _parse(text)
assert index == len(text)
return regex
if __name__ == "__main__":
import doctest
doctest.testmod()
|
lukesanantonio/blendergltf
|
gpu_luts.py
|
Python
|
apache-2.0
| 3,110
| 0.018328
|
from gpu import *
LAMP_TYPES = [
GPU_DYNAMIC_LAMP_DYNVEC,
GPU_DYNAMIC_LAMP_DYNCO,
GPU_DYNAMIC_LAMP_DYNIMAT,
GPU_DYNAMIC_LAMP_DYNPERSMAT,
GPU_DYNAMIC_LAMP_DYNENERGY,
GPU_DYNAMIC_LAMP_DYNENERGY,
GPU_DYNAMIC_LAMP_DYNCOL,
GPU_DYNAMIC_LAMP_DISTANCE,
GPU_DYNAMIC_LAMP_ATT1,
GPU_DYNAMIC_LAMP_ATT2,
GPU_DYNAMIC_LAMP_SPOTSIZE,
GPU_DYNAMIC_LAMP_SPOTBLEND,
]
MIST_TYPES = [
GPU_DYNAMIC_MIST_ENABLE,
GPU_DYNAMIC_MIST_START,
GPU_DYNAMIC_MIST_DISTANCE,
GPU_DYNAMIC_MIST_INTENSITY,
GPU_DYNAMIC_MIST_TYPE,
GPU_DYNAMIC_MIST_COLOR,
]
WORLD_TYPES = [
GPU_DYNAMIC_HORIZON_COLOR,
GPU_DYNAMIC_AMBIENT_COLOR,
]
MATERIAL_TYPES = [
GPU_DYNAMIC_MAT_DIFFRGB,
GPU_DYNAMIC_MAT_REF,
GPU_DYNAMIC_MAT_SPECRGB,
GPU_DYNAMIC_MAT_SPEC,
GPU_DYNAMIC_MAT_HARD,
GPU_DYNAMIC_MAT_EMIT,
GPU_DYNAMIC_MAT_AMB,
GPU_DYNAMIC_MAT_ALPHA,
]
TYPE_TO_NAME = {
GPU_DYNAMIC_OBJECT_VIEWMAT : 'view_mat',
GPU_DYNAMIC_OBJECT_MAT : 'model_mat',
GPU_DYNAMIC_OBJECT_VIEWIMAT : 'inv_view_mat',
GPU_DYNAMIC_OBJECT_IMAT : 'inv_model_mat',
GPU_DYNAMIC_OBJECT_COLOR : 'color',
GPU_DYNAMIC_OBJECT_AUTOBUMPSCALE : 'auto_bump_scale',
    GPU_DYNAMIC_MIST_ENABLE : 'use_mist',
GPU_DYNAMIC_MIST_START : 'start',
GPU_DYNAMIC_MIST_DISTANCE : 'depth',
GPU_DYNAMIC_MIST_INTENSITY : 'intensity',
GPU_DYNAMIC_MIST_TYPE : 'falloff',
GPU_DYNAMIC_MIST_COLOR : 'color',
GPU_DYNAMIC_HORIZON_COLOR : 'horizon_color',
GPU_DYNAMIC_AMBIENT_COLOR : 'ambient_color',
GPU_DYNAMIC_LAMP_DYNVEC : 'dynvec',
GPU_DYNAMIC_LAMP_DYNCO : 'dynco',
GPU_DYNAMIC_LAMP_DYNIMAT : 'dynimat',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'dynpersmat',
GPU_DYNAMIC_LAMP_DYNENERGY : 'energy',
GPU_DYNAMIC_LAMP_DYNCOL : 'color',
GPU_DYNAMIC_LAMP_DISTANCE : 'distance',
GPU_DYNAMIC_LAMP_ATT1 : 'linear_attenuation',
GPU_DYNAMIC_LAMP_ATT2 : 'quadratic_attenuation',
GPU_DYNAMIC_LAMP_SPOTSIZE : 'spot_size',
GPU_DYNAMIC_LAMP_SPOTBLEND : 'spot_blend',
GPU_DYNAMIC_MAT_DIFFRGB : 'diffuse_color',
GPU_DYNAMIC_MAT_REF : 'diffuse_intensity',
GPU_DYNAMIC_MAT_SPECRGB : 'specular_color',
GPU_DYNAMIC_MAT_SPEC : 'specular_intensity',
GPU_DYNAMIC_MAT_HARD : 'specular_hardness',
GPU_DYNAMIC_MAT_EMIT : 'emit',
GPU_DYNAMIC_MAT_AMB : 'ambient',
GPU_DYNAMIC_MAT_ALPHA : 'alpha',
}
TYPE_TO_SEMANTIC = {
GPU_DYNAMIC_LAMP_DYNVEC : 'BL_DYNVEC',
GPU_DYNAMIC_LAMP_DYNCO : 'BL_DYNCO',
GPU_DYNAMIC_LAMP_DYNIMAT : 'BL_DYNIMAT',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'BL_DYNPERSMAT',
CD_ORCO: 'POSITION',
-1: 'NORMAL' # Hack until the gpu module has something for normals
}
DATATYPE_TO_CONVERTER = {
GPU_DATA_1I : lambda x : x,
GPU_DATA_1F : lambda x : x,
GPU_DATA_2F : lambda x : list(x),
GPU_DATA_3F : lambda x : list(x),
GPU_DATA_4F : lambda x : list(x),
}
DATATYPE_TO_GLTF_TYPE = {
GPU_DATA_1I : 5124, # INT
GPU_DATA_1F : 5126, # FLOAT
GPU_DATA_2F : 35664, # FLOAT_VEC2
GPU_DATA_3F : 35665, # FLOAT_VEC3
GPU_DATA_4F : 35666, # FLOAT_VEC4
}
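# Hypothetical consumer of the tables above, not part of the original file:
# turning one uniform record from gpu.export_shader() into a glTF-style
# parameter description. The uniform dict keys are assumptions based on the
# Blender gpu module.
def describe_uniform(uniform):
    return {
        'name': TYPE_TO_NAME.get(uniform['type'], uniform.get('varname')),
        'semantic': TYPE_TO_SEMANTIC.get(uniform['type']),
        'gltf_type': DATATYPE_TO_GLTF_TYPE.get(uniform['datatype']),
    }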
|
msartintarm/site
|
server.py
|
Python
|
mit
| 1,592
| 0.029523
|
import os.path
from tornado import ioloop, httpserver, web, websocket, template
from config import GameConfig
OS = os.path.dirname(__file__)
def server_path(uri):
return os.path.join(OS, uri)
def static_path(uri):
return { "path": server_path("static/" + uri) }
level_1 = GameConfig()
class TarmHandler(web.RequestHandler):
def get(self):
self.render(server_path("html/game.html"), config = level_1)
def write_error(self, code, **kwargs):
self.render(server_path("html/error.html"))
class TarmSocket(websocket.WebSocketHandler):
def open(self, *args):
self.stream.set_nodelay(True)
print("Socket opened.")
def on_message(self, message):
print("Message from browser:", message)
if "load-config" in message:
self.write_message(template.Loader('html').load('config.html').generate(config=level_1))
elif "load-about" in message:
self.write_message(template.Loader('html').load('about.html').generate())
elif "load-audio" in message:
self.write_message(template.Loader('html').load('audio.html').generate())
def start_server():
tarm_app = web.Application(handlers=[
(r"/", TarmHandler),
(r"/socket", TarmSocket),
(r"/images/(.*)", web.StaticFileHandler, static_path("images")),
(r"/textures/(.*)", web.StaticFileHandler, static_path("textures")),
(r"/music/(.*)", web.StaticFileHandler, static_path("audio"))
],
debug=True, gzip=True, static_path=server_path("static"))
httpserver.HTTPServer(tarm_app).listen(8000)
print("Starting server.")
ioloop.IOLoop.instance().start()
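# Illustrative client sketch, not part of the original server: exercising the
# "/socket" endpoint with tornado's websocket client. The server answers the
# load-* commands above with rendered HTML fragments.
from tornado import gen
@gen.coroutine
def probe_socket():
    conn = yield websocket.websocket_connect("ws://localhost:8000/socket")
    conn.write_message("load-about")
    reply = yield conn.read_message()
    print("Fragment starts with:", (reply or "")[:40])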
if __name__ == "__main__":
start_server()
|
Azure/azure-sdk-for-python
|
sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/__init__.py
|
Python
|
mit
| 1,234
| 0.002431
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._policy_tracked_resources_operations import PolicyTrackedResourcesOperations
from ._remediations_operations import RemediationsOperations
from ._policy_events_operations import PolicyEventsOperations
from ._policy_states_operations import PolicyStatesOperations
from ._operations import Operations
from ._policy_metadata_operations import PolicyMetadataOperations
from ._policy_restrictions_operations import PolicyRestrictionsOperations
from ._attestations_operations import AttestationsOperations
__all__ = [
'PolicyTrackedResourcesOperations',
'RemediationsOperations',
'PolicyEventsOperations',
'PolicyStatesOperations',
'Operations',
'PolicyMetadataOperations',
'PolicyRestrictionsOperations',
'AttestationsOperations',
]
|
toastdriven/eliteracing
|
cmdrs/tests/test_models.py
|
Python
|
bsd-3-clause
| 666
| 0
|
import hashlib
import mock
import uuid
from django.test import TestCase
from ..models import Commander
class CommanderTestCase(TestCase):
def test_generate_token(self):
with mock.patch.object(uuid, 'uuid4', return_value='a_test'):
cmdr = Commander(
name='Branch'
)
self.assertEqual(
cmdr.generate_token(),
hashlib.md5('a_test').hexdigest()
)
def test_save(self):
# We need to ensure tokens get auto-populated here.
        cmdr = Commander.objects.create(
name='Branch'
)
self.assertTrue(len(cmdr.api_token) > 0)