code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
Created on Thu Jan 31 2018
Unit tests for the Balance game
@author: IvanPopov
"""
import unittest
from game import Game
class GameTest(unittest.TestCase):
    """Smoke test: a freshly created Game exposes the expected title."""

    def test_game_loads(self):
        """A new Game instance should report 'Balance' as its title."""
        game = Game()
        title = game.c.title()
        self.assertEqual(title, "Balance")
'''
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
'''
from __future__ import print_function, division
import numpy as np
import unittest
import inspect
from six import iteritems
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG
# Optional compiled extensions: the RMT surrogates ship as Fortran
# extensions that may not be built in every environment, so probe for
# them and degrade gracefully when they are missing.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Only an import failure means "not available"; the previous bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit.
    compiled_available = False
print_output = False
class Test(SMTestCase):
    """Accuracy tests for several surrogate models on 2-D benchmark problems.

    Each ``test_<problem>_<model>`` method delegates to :meth:`run_test`,
    which recovers the problem and model names from the calling method's
    own name via the call stack.
    """

    def setUp(self):
        # Problem dimensionality and training/evaluation sample counts.
        ndim = 2
        nt = 10000
        ne = 1000

        # Benchmark problems, keyed by the name used in the test methods.
        problems = OrderedDict()
        problems['sphere'] = Sphere(ndim=ndim)
        problems['exp'] = TensorProduct(ndim=ndim, func='exp', width=5)
        problems['tanh'] = TensorProduct(ndim=ndim, func='tanh', width=5)
        problems['cos'] = TensorProduct(ndim=ndim, func='cos', width=5)

        # Surrogate models under test; RMTC/RMTB need the compiled
        # Fortran extensions, so they are only added when available.
        sms = OrderedDict()
        sms['LS'] = LS()
        sms['QP'] = QP()
        if compiled_available:
            sms['RMTC'] = RMTC(num_elements=20, energy_weight=1e-10)
            sms['RMTB'] = RMTB(num_ctrl_pts=40, energy_weight=1e-10)

        # Allowed RMS error at the training points, per model.
        t_errors = {}
        t_errors['LS'] = 1.0
        t_errors['QP'] = 1.0
        t_errors['RMTC'] = 1e-2
        t_errors['RMTB'] = 1e-2

        # Allowed RMS error at the held-out evaluation points, per model.
        e_errors = {}
        e_errors['LS'] = 1.5
        e_errors['QP'] = 1.5
        e_errors['RMTC'] = 1e-2
        e_errors['RMTB'] = 1e-2

        self.nt = nt
        self.ne = ne
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors

    def run_test(self):
        # Recover "<problem>" and "<model>" from the caller's method name,
        # e.g. test_sphere_LS -> pname='sphere', sname='LS'.
        method_name = inspect.stack()[1][3]
        pname = method_name.split('_')[1]
        sname = method_name.split('_')[2]

        prob = self.problems[pname]
        sampling = LHS(xlimits=prob.xlimits)

        # Fixed seeds keep the LHS samples (and hence the errors)
        # reproducible across runs.
        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)

        # Train a fresh instance (cloning only the options) so that no
        # trained state leaks between tests sharing the same model entry.
        sm0 = self.sms[sname]
        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared('xlimits'):
            sm.options['xlimits'] = prob.xlimits
        sm.options['print_global'] = False

        sm.set_training_values(xt, yt)
        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)

        if print_output:
            print('%8s %6s %18.9e %18.9e'
                  % (pname[:6], sname, t_error, e_error))

        self.assert_error(t_error, 0., self.t_errors[sname])
        self.assert_error(e_error, 0., self.e_errors[sname])

    # --------------------------------------------------------------------
    # Function: sphere

    def test_sphere_LS(self):
        self.run_test()

    def test_sphere_QP(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_sphere_RMTC(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_sphere_RMTB(self):
        self.run_test()

    # --------------------------------------------------------------------
    # Function: exp

    def test_exp_LS(self):
        self.run_test()

    def test_exp_QP(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_exp_RMTC(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_exp_RMTB(self):
        self.run_test()

    # --------------------------------------------------------------------
    # Function: tanh

    def test_tanh_LS(self):
        self.run_test()

    def test_tanh_QP(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_tanh_RMTC(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_tanh_RMTB(self):
        self.run_test()

    # --------------------------------------------------------------------
    # Function: cos

    def test_cos_LS(self):
        self.run_test()

    def test_cos_QP(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_cos_RMTC(self):
        self.run_test()

    @unittest.skipIf(not compiled_available, 'Compiled Fortran libraries not available')
    def test_cos_RMTB(self):
        self.run_test()
if __name__ == '__main__':
    # When run as a script (not under a test runner), also print a table
    # of per-model training/test errors above the unittest summary.
    print_output = True
    print('%6s %8s %18s %18s'
          % ('SM', 'Problem', 'Train. pt. error', 'Test pt. error'))
    unittest.main()
| hwangjt/SMT | smt/tests/test_low_dim.py | Python | bsd-3-clause | 4,984 |
# -*- coding:utf-8 -*-
""" PBox SIAP Server """
# !/usr/bin/python
# Python: 3.5.2
# Platform: Windows/Linux/ARMv7
# Author: Heyn
# Program: SIAP Server.
# History: 2017/01/19 V1.0.0 [Heyn]
# 2017/10/11 V1.0.1 [Heyn]
# 2017/12/22 V1.1.0 [Heyn] Optimization code.
# 2017/12/26 V1.1.1 [Heyn] pyinstall --onefile PBoxSIAPs.py --icon **.ico
# ./PBoxSIAPs.exe -i 127.0.0.1
# 2017/12/28 V1.1.2 [Heyn] Optimization code.
# (1) Limit all lines to a maximum of 79 characters
import json
import pprint
import string
import random
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
import pandas as pd
from faker import Faker
class SiapServer:
    """SIAP server backend: fabricates item/value payloads for the HTTP layer.

    Configuration is read from ``SiapServerData.json``.  In fixed mode
    (``random == 0``) items and values come straight from the file; in
    random mode, ``maxItems`` synthetic items are generated, cycling the
    configured types.  When the file is missing or invalid, a single
    diagnostic item is served instead.
    """

    def __init__(self):
        self.__random = 1
        self.__jsondata = {}
        self.__fake = Faker('en_US')
        self.__randomtypes, self.__randomvalue = [], []
        self.__alphabet = string.ascii_letters + string.digits
        self.__itemtype = ['BOOL', 'BYTE', 'WORD', 'DWORD', 'DOUBLE',
                           'FLOAT', 'INT16', 'INT32', 'STRING40']
        # One random-value generator per item type.
        self.__randomdict = {'BOOL': lambda: random.randrange(2),
                             'BYTE': lambda: random.randint(-128, 127),
                             'WORD': lambda: random.randint(0, 65535),
                             'DWORD': lambda: random.randint(0, 2**32 - 1),
                             'INT16': lambda: random.randint(-32768, 32767),
                             'INT32': lambda: random.randint(-2**31, 2**31 - 1),
                             # BUG FIX: the lambda previously returned the bound
                             # method `self.__fake.pyfloat` itself instead of
                             # calling it, producing a non-JSON-serializable
                             # value for FLOAT items.
                             'FLOAT': lambda: self.__fake.pyfloat(),
                             'DOUBLE': lambda: random.uniform(-2**256, 2**256 - 1),
                             # 'STRING40' : lambda : self.__fake.binary(length=40).decode('UTF-8', 'ignore'),
                             'STRING40': lambda: self.__fake.pystr(min_chars=1, max_chars=40)
                             }
        self.__titletypes = ['itemName', 'itemType']
        self.__titlevalue = ['itemName', 'value']
        try:
            with open('SiapServerData.json') as ssd:
                self.__jsondata = json.loads(ssd.read())
            self.__random = self.__jsondata['random']
            if self.__random == 0:
                # Fixed mode: items and values come straight from the file.
                self.__value = pd.DataFrame(self.__jsondata['items'], columns=self.__titlevalue)
                self.__types = pd.DataFrame(self.__jsondata['items'], columns=self.__titletypes)
            else:
                # Random mode: synthesize maxItems items, cycling the types.
                _typesize = len(self.__jsondata['randomType'])
                for index in range(self.__jsondata['maxItems']):
                    _itemname = 'PYTHON{0}'.format(index)
                    self.__randomtypes.append(dict(itemName=_itemname,
                                                   itemType=self.__jsondata['randomType'][index % _typesize]))
                    self.__randomvalue.append(dict(itemName=_itemname, value=''))
                self.__types = pd.DataFrame(self.__randomtypes, columns=self.__titletypes)
                self.__value = pd.DataFrame(self.__randomvalue, columns=self.__titlevalue)
        except Exception as err:
            # Missing/corrupt config: fall back to a single diagnostic item.
            # (Exception, not BaseException: don't swallow KeyboardInterrupt
            # or SystemExit here.)
            self.__random = 1
            self.__types = pd.DataFrame([dict(itemName='PYRANDOM00',
                                              itemType='STRING40')], columns=self.__titletypes)
            self.__value = pd.DataFrame([dict(itemName='PYRANDOM00',
                                              value='JSON ERROR')], columns=self.__titlevalue)
            print(err)

    def __generate_items(self):
        """Return the item-name/type catalog as a UTF-8 JSON payload."""
        payload = dict(items=[])
        for i in range(self.__types.count()['itemName']):
            payload['items'].append(self.__types.iloc[i].to_dict())
        pprint.pprint(payload)
        return str(json.dumps(payload)).encode()

    def __generate_value(self):
        """Return the item-name/value table as a UTF-8 JSON payload."""
        payload = dict(items=[])
        for i in range(self.__value.count()['itemName']):
            if self.__random != 0:
                # Refresh each value from the generator matching its type.
                self.__value.iloc[i].value = self.__randomdict[self.__types.iloc[i].itemType]()
            payload['items'].append(self.__value.iloc[i].to_dict())
        pprint.pprint(payload)
        return str(json.dumps(payload)).encode()

    def process(self, cmd):
        """Dispatch a request path to the matching payload generator.

        Returns JSON bytes for known paths, or a plain error string for
        anything else.
        """
        if 'PBox/dataitems' in cmd:
            ret = self.__generate_items()
        elif 'PBox/get' in cmd:
            ret = self.__generate_value()
        else:
            ret = '[ERROR] url (ex.: https://ip:port/PBox/dataitems or get)'
        return ret
class MyHttpHandler(BaseHTTPRequestHandler, SiapServer):
    """ Panasonic SIAP Server. """

    def __init__(self, request, client_address, server):
        # Build the SIAP backend first: BaseHTTPRequestHandler.__init__
        # handles the request immediately, so do_GET/do_POST may run
        # before this __init__ returns.
        SiapServer.__init__(self)
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def __response(self, msg):
        # Send `msg` (bytes) back with a 200; a broken client connection
        # surfaces as IOError and is reported as a 404.
        try:
            self.send_response(200, message=None)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(msg)
        except IOError:
            self.send_error(404, message=None)

    def do_POST(self):
        """POST: echo the request body back to the client (debug aid)."""
        datas = self.rfile.read(int(self.headers['content-length']))
        pprint.pprint(datas.decode('UTF-8'))
        self.__response(datas)

    def do_GET(self):
        """GET: delegate the URL path to the SIAP backend (see process())."""
        self.__response(self.process(self.path))
def main(args):
    """Start the SIAP HTTP server and serve requests until interrupted."""
    address = (args.ipaddr, int(args.port))
    server = HTTPServer(address, MyHttpHandler)
    print('Server started on {0}, port {1}.....'.format(args.ipaddr, args.port))
    server.serve_forever()
if __name__ == "__main__":
    # Command line: -i/--ipaddr is mandatory, -p/--port defaults to 8080.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('-i', '--ipaddr', help='Server Local IP.', required=True)
    PARSER.add_argument('-p', '--port', default=8080, help='Server Listen Port.', required=False)
    ARGS = PARSER.parse_args()
    main(ARGS)
| Heyn2016/Python | PBox/SIAP/PBoxSIAPs.py | Python | gpl-3.0 | 5,976 |
from qit.base.bool import Bool
from qit.base.struct import Struct
from qit.domains.domain import Domain
from qit.domains.iterator import Iterator
from qit.base.function import Function
from qit.functions.int import multiplication_n
class Product(Domain):
    """ Cartesian product of domains """

    def __init__(self, *args):
        # Each argument is either a domain or a (domain, name) pair; the
        # names become the field names of the underlying Struct type.
        domains = []
        struct_args = []
        for arg in args:
            if isinstance(arg, tuple) and len(arg) == 2:
                domain = arg[0]
                domains.append(domain)
                struct_args.append((domain.type, arg[1]))
            else:
                domains.append(arg)
                struct_args.append(arg.type)
        type = Struct(*struct_args)
        super().__init__(
            type,
            self._make_iterator(type, domains),
            self._make_generator(type, domains),
            self._make_size(domains),
            self._make_indexer(domains))
        self.domains = tuple(domains)

    def _make_iterator(self, type, domains):
        # Iterable only when every component domain is iterable;
        # otherwise implicitly returns None (no iterator).
        iterators = [ d.iterator for d in domains ]
        if all(iterators):
            return ProductIterator(type, iterators)

    def _make_generator(self, type, domains):
        # Compose the component generators into one generated function
        # that fills a struct value; None when any component lacks one.
        generators = tuple(d.generator for d in domains)
        if all(generators):
            generator = Function(("generator", self.name)).returns(type).code("""
            return {
            {% for g in _generators %}
            {{b(g)}}{% if not loop.last %},{% endif %}
            {% endfor %}
            };
            """, _generators=generators).uses(generators)
            return generator()

    def _make_size(self, domains):
        # Product size = product of the component sizes, when all known.
        sizes = [ d.size for d in domains ]
        if all(sizes):
            return multiplication_n(len(sizes))(*sizes)

    def _make_indexer(self, domains):
        # NOTE(review): indexing is not implemented yet -- the triple-quoted
        # block below is an inert placeholder string, so this method always
        # returns None even when every component has an indexer.
        indexers = [ d.indexer for d in domains ]
        if all(indexers):
            """
            indexer = FunctionWithExprs(start=start, step=step).returns(Int())
            indexer.takes(Int(), "_v")
            indexer.code("return (_v - {start}) / {step};")
            """

    def __mul__(self, other):
        # `product * domain` appends another component while preserving
        # the existing components' field names.
        args = list(zip(self.domains, self.type.names))
        args.append(other)
        return Product(*args)
class ProductIterator(Iterator):
    # Odometer-style iterator over a cartesian product: the first component
    # advances fastest; on overflow it is reset and the carry moves to the
    # next component.

    def __init__(self, struct, iterators):
        # Pair each struct field name with its component iterator.
        iters = tuple(zip(struct.names, iterators))
        itype = Struct(*((i.itype, name) for name, i in iters))

        # Deduplicated child objects of all component iterators, so the
        # generated functions can reference them.
        objects = set()
        for i in iterators:
            objects.update(i.childs)
        objects = tuple(objects)

        super().__init__(itype, struct)

        # reset: reset every component iterator.
        self.reset_fn.code("""
        {%- for name, i in _iters %}
        {{b(i.reset_fn)}}(iter.{{name}});
        {%- endfor %}
        """, _iters=iters, struct=struct).uses(objects)

        # next: advance with carry; the last component is advanced without
        # a reset so that is_valid can detect exhaustion.
        self.next_fn.code("""
        {%- for name, i in _iters[:-1] %}
        {{ b(i.next_fn) }}(iter.{{name}});
        if ({{ b(i.is_valid_fn) }}(iter.{{name}})) {
            return;
        } else {
            {{b(i.reset_fn)}}(iter.{{name}});
        }
        {%- endfor %}
        {{ b(_iters[-1][1].next_fn) }}(iter.{{_iters[-1][0]}});
        """, _iters=iters).uses(objects)

        # is_valid: valid only while every component is valid.
        self.is_valid_fn.code("""
        {%- for name, i in _iters %}
        if (!({{b(i.is_valid_fn)}}(iter.{{name}}))) {
            return false;
        }
        {%- endfor %}
        return true;
        """, _iters=iters).uses(objects)

        # value: gather each component's current value into the struct.
        self.value_fn.code("""
        return {
        {%- for name, i in _iters %}
            {{b(i.value_fn)}}(iter.{{name}})
            {% if not loop.last %},{% endif %}
        {%- endfor %}
        };
        """, _iters=iters, struct=struct).uses(objects)
| spirali/qit | src/qit/domains/product.py | Python | gpl-3.0 | 3,854 |
DESCRIPTION = "kill a zombie by ID"
def autocomplete(shell, line, text, state):
    # No completion support for this command; hook is required by the
    # shell's command-plugin interface, so it is an intentional no-op.
    pass
def help(shell):
    """Print usage for the kill command."""
    # NOTE: `help` shadows the builtin, but the name is part of the
    # command-plugin interface, so it is kept.
    usage = (
        "",
        "Usage: kill #",
        "       kill all",
        "       kill dead",
        "",
    )
    for line in usage:
        shell.print_plain(line)
def kill_zombie(shell, id):
    """Kill zombie sessions selected by `id`.

    `id` may be a numeric string (kill the zombie with that ID), 'all'
    (kill every live zombie) or 'dead' (kill live sessions whose status
    is 0).  Anything else prints an error and does nothing.  On success a
    confirmation is printed for the bulk modes and the KILL sound plays.
    """
    # (The unused `formats` table from the original implementation was
    # removed.)
    if not id.isdigit() and id.lower() not in ("all", "dead"):
        shell.print_error("Not a valid argument to kill: %s" % id)
        return

    selector = id.lower()
    for session in shell.sessions.values():
        if session.killed:
            continue  # never double-kill a session
        if (selector == "all"
                or (selector == "dead" and session.status == 0)
                or (id.isdigit() and session.id == int(id))):
            session.kill()

    if selector == "all":
        shell.print_good("All Zombies Killed!")
    elif selector == "dead":
        shell.print_good("Dead Zombies Killed!")

    shell.play_sound('KILL')
def execute(shell, cmd):
    """Entry point: `kill <id|all|dead>`; show usage when no arg is given."""
    parts = cmd.split()
    if len(parts) < 2:
        help(shell)
        return
    kill_zombie(shell, parts[1])
| zerosum0x0/koadic | core/commands/kill.py | Python | apache-2.0 | 1,269 |
"""
Test app that injects new values in a Redis HSET every X seconds.
"""
import asyncio
import asyncio_redis
import random
from time import sleep
choices = ['Alice', 'Bob', 'Charlie', 'Daniel', 'Einstein', 'Francis']
@asyncio.coroutine
def example():
    """Connect to Redis and update the 'mykey' hash twice per second."""
    # Create Redis connection
    connection = yield from asyncio_redis.Connection.create(host='10.0.3.44',
                                                            port=6379)
    while True:
        # Set a key
        yield from connection.hset('mykey', 'name',
                                   random.choice(choices))
        yield from connection.hset('mykey', 'value',
                                   str(random.randrange(100)))
        # BUG FIX: use the non-blocking asyncio.sleep instead of
        # time.sleep, which froze the whole event loop between updates.
        yield from asyncio.sleep(0.5)

    # Unreachable while the loop above runs forever; kept to mirror the
    # upstream asyncio_redis example.
    connection.close()
if __name__ == '__main__':
    # Drive the example coroutine on the default event loop (runs forever).
    loop = asyncio.get_event_loop()
    loop.run_until_complete(example())
| hoh/React-to-Redis | example_inject.py | Python | mit | 888 |
# doc-export: Splines
"""
An interactive spline demo.
"""
from pscript import window
from flexx import flx
SPLINES = ['linear', 'basis', 'cardinal', 'catmullrom', 'lagrange', 'lanczos']
GENERAL_TEXT = """
The splines in this example are used to interpolate a line between
control points. The range of influence is shown when a control point
is clicked. Move the control points by dragging them. Points can be
added and deleted by holding shift and clicking.
"""
LINEAR_TEXT = """
This is not really a spline, but its included for reference. Linear
interpolation is C0 continuous, and relatively easy to implement.
"""
BASIS_TEXT = """
A B-spline is a C2 continuous non-interpolating spline, used extensively
in (3D) modeling.
"""
CARDINAL_TEXT = """
A Cardinal spline is a specific type of cubic Hermite spline, and is
C1 continous. Its tension parameter makes it very versatile.
"""
CATMULLROM_TEXT = """
The Catmull–Rom spline is a Cardinal spline with a tension of 0. It is
commonly used in computer graphics to interpolate motion between key frames.
"""
LAGRANGE_TEXT = """
The Lagrange polynomials result in (C0 continous) interpolation
equivalent to Newton a polynomial. It is, however, known to suffer from
Runge's phenomenon (oscillations).
"""
LANCZOS_TEXT = """
Lanczos interpolation (C1 continous) is based on a windowed sinc
function and is usually considered to produce the best results from the
perspective of the fourier domain. It's mainly used in applications
related to audio processing.
"""
class SplineWidget(flx.CanvasWidget):
    """Canvas that draws draggable control points and the spline through them.

    Runs client-side: the methods are compiled to JavaScript by PScript.
    """

    spline_type = flx.EnumProp(SPLINES, 'cardinal', settable=True, doc="""
        "The type of spline
        """)

    closed = flx.BoolProp(False, settable=True, doc="""
        Whether the spline is closed
        """)

    tension = flx.FloatProp(0.5, settable=True, doc="""
        The tension parameter for the Cardinal spline.
        """)

    # Index of the control point currently being dragged, or None.
    _current_node = flx.Property(None, settable=True)

    def init(self):
        self.ctx = self.node.getContext('2d')
        # Initial control points, in relative (0..1) canvas coordinates.
        self.xx = [0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.10, 0.23, 0.61, 0.88]
        self.yy = [0.90, 0.60, 0.90, 0.60, 0.90, 0.70, 0.55, 0.19, 0.11, 0.38]

    # Each factors_* method returns the four blending weights (f0..f3) for
    # the points (p0..p3) around a segment, at parameter t in [0, 1].

    def factors_linear(self, t):
        """Linear interpolation weights (only the middle two points used)."""
        return [0, t, (1-t), 0]

    def factors_basis(self, t):
        """Uniform cubic B-spline basis weights (non-interpolating, C2)."""
        f0 = (1 - t)**3 / 6.0
        f1 = (3 * t**3 - 6 * t**2 + 4) / 6.0
        f2 = (-3 * t**3 + 3 * t**2 + 3 * t + 1) / 6.0
        f3 = t**3 / 6.0
        return f0, f1, f2, f3

    def factors_cardinal(self, t):
        """Cardinal spline weights; self.tension shapes the tangents."""
        tension = self.tension
        tau = 0.5 * (1 - tension)
        f0 = - tau * (t**3 - 2 * t**2 + t)
        f3 = + tau * (t**3 - 1 * t**2)
        f1 = 2 * t**3 - 3 * t**2 + 1 - f3
        f2 = - 2 * t**3 + 3 * t**2 - f0
        return f0, f1, f2, f3

    def factors_catmullrom(self, t):
        """Catmull-Rom weights (equivalent to a Cardinal spline, tension 0)."""
        f0 = - 0.5 * t**3 + 1.0 * t**2 - 0.5 * t
        f1 = + 1.5 * t**3 - 2.5 * t**2 + 1
        f2 = - 1.5 * t**3 + 2.0 * t**2 + 0.5 * t
        f3 = + 0.5 * t**3 - 0.5 * t**2
        return f0, f1, f2, f3

    def factors_lagrange(self, t):
        """Cubic Lagrange polynomial weights through the four points."""
        k = -1.0
        f0 = t / k * (t-1) / (k-1) * (t-2) / (k-2)
        k = 0
        f1 = (t+1) / (k+1) * (t-1) / (k-1) * (t-2) / (k-2)
        k = 1
        f2 = (t+1) / (k+1) * t / k * (t-2) / (k-2)
        k = 2
        f3 = (t + 1) / (k+1) * t / k * (t-1) / (k-1)
        return f0, f1, f2, f3

    def factors_lanczos(self, t):
        """Windowed-sinc (Lanczos) weights; the t==0/t==1 cases avoid 0/0."""
        sin = window.Math.sin
        pi = window.Math.PI
        tt = (1+t)
        f0 = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt)
        tt = (2-t)
        f3 = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt)
        if t != 0:
            tt = t
            f1 = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt)
        else:
            f1 = 1
        if t != 1:
            tt = (1-t)
            f2 = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt)
        else:
            f2 = 1
        return f0, f1, f2, f3

    @flx.reaction('pointer_down')
    def _on_pointer_down(self, *events):
        for ev in events:
            w, h = self.size
            # Get closest point
            closest, dist = -1, 999999
            for i in range(len(self.xx)):
                x, y = self.xx[i] * w, self.yy[i] * h
                d = ((x - ev.pos[0]) ** 2 + (y - ev.pos[1]) ** 2) ** 0.5
                if d < dist:
                    closest, dist = i, d
            # Did we touch it or not
            if dist < 9:
                i = closest
                if 'Shift' in ev.modifiers:  # Remove point
                    self.xx.pop(i)
                    self.yy.pop(i)
                    self._set_current_node(None)
                    self.update()
                else:
                    # Start dragging this point.
                    self._set_current_node(i)
            else:
                if 'Shift' in ev.modifiers:
                    # Add point
                    if not self.xx:
                        i = 0  # There were no points
                    else:
                        # Add in between two points. Compose the vectors
                        # from the closest point to its neighbour points and
                        # to the clicked point. Check with which vector the
                        # latter vector aligns best by comparing angles.
                        #
                        # Get the three points (mirror a virtual neighbour
                        # when the closest point is an end point).
                        p0 = self.xx[closest+0] * w, self.yy[closest+0] * h
                        if closest == 0:
                            p2 = self.xx[closest+1] * w, self.yy[closest+1] * h
                            p1 = p0[0] - (p2[0] - p0[0]), p0[1] - (p2[1] - p0[1])
                        elif closest == len(self.xx) - 1:
                            p1 = self.xx[closest-1] * w, self.yy[closest-1] * h
                            p2 = p0[0] - (p1[0] - p0[0]), p0[1] - (p1[1] - p0[1])
                        else:
                            p1 = self.xx[closest-1] * w, self.yy[closest-1] * h
                            p2 = self.xx[closest+1] * w, self.yy[closest+1] * h
                        # Calculate vectors, and normalize
                        v1 = p1[0] - p0[0], p1[1] - p0[1]
                        v2 = p2[0] - p0[0], p2[1] - p0[1]
                        v3 = ev.pos[0] - p0[0], ev.pos[1] - p0[1]
                        m1 = (v1[0]**2 + v1[1]**2)**0.5
                        m2 = (v2[0]**2 + v2[1]**2)**0.5
                        m3 = (v3[0]**2 + v3[1]**2)**0.5
                        v1 = v1[0] / m1, v1[1] / m1
                        v2 = v2[0] / m2, v2[1] / m2
                        v3 = v3[0] / m3, v3[1] / m3
                        # Calculate angle; insert on the side the click leans to.
                        a1 = window.Math.acos(v1[0] * v3[0] + v1[1] * v3[1])
                        a2 = window.Math.acos(v2[0] * v3[0] + v2[1] * v3[1])
                        i = closest if a1 < a2 else closest + 1
                    self.xx.insert(i, ev.pos[0] / w)
                    self.yy.insert(i, ev.pos[1] / h)
                    self._set_current_node(i)

    @flx.reaction('pointer_up')
    def _on_pointer_up(self, *events):
        # Stop dragging.
        self._set_current_node(None)

    @flx.reaction('pointer_move')
    def _on_pointer_move(self, *events):
        # While dragging, move the selected node (only the latest event counts).
        ev = events[-1]
        if self._current_node is not None:
            i = self._current_node
            w, h = self.size
            self.xx[i] = ev.pos[0] / w
            self.yy[i] = ev.pos[1] / h
            self.update()

    @flx.reaction('size', 'spline_type', 'tension', 'closed', '_current_node')
    def update(self, *events):

        # Init
        ctx = self.ctx
        w, h = self.size
        ctx.clearRect(0, 0, w, h)

        # Get coordinates in pixels
        xx = [x * w for x in self.xx]
        yy = [y * h for y in self.yy]

        # Pad the point list so every drawn segment has a neighbour on each
        # side: wrap around when closed, mirror the end points otherwise.
        if self.closed:
            xx = xx[-1:] + xx + xx[:2]
            yy = yy[-1:] + yy + yy[:2]
        else:
            xx = [xx[0] - (xx[1] - xx[0])] + xx + [xx[-1] - (xx[-2] - xx[-1])]
            yy = [yy[0] - (yy[1] - yy[0])] + yy + [yy[-1] - (yy[-2] - yy[-1])]

        # Draw grid
        ctx.strokeStyle = '#eee'
        ctx.lineWidth = 1
        for y in range(0, h, 20):
            ctx.beginPath()
            ctx.moveTo(0, y)
            ctx.lineTo(w, y)
            ctx.stroke()
        for x in range(0, w, 20):
            ctx.beginPath()
            ctx.moveTo(x, 0)
            ctx.lineTo(x, h)
            ctx.stroke()

        # Draw nodes
        ctx.fillStyle = '#acf'
        ctx.strokeStyle = '#000'
        ctx.lineWidth = 2
        for i in range(1, len(xx)-1):
            ctx.beginPath()
            ctx.arc(xx[i], yy[i], 9, 0, 6.2831)
            ctx.fill()
            ctx.stroke()

        # Select interpolation function (PScript: self['name'] performs a
        # dynamic attribute lookup).
        fun = self['factors_' + self.spline_type.lower()]
        if not fun:
            # NOTE(review): this fallback lambda takes no parameter but is
            # called as fun(t) below -- fine under JS call semantics (extra
            # args are ignored), but it would TypeError in plain Python.
            fun = lambda : (0, 1, 0, 0)

        # Draw lines
        for i in range(1, len(xx)-2):

            ctx.lineCap = "round"
            ctx.lineWidth = 3
            ctx.strokeStyle = '#008'
            # Highlight the segments influenced by the selected node; the
            # support (range of influence) is 1 for linear, 2 for cubics.
            support = 1 if self.spline_type == 'LINEAR' else 2
            if self._current_node is not None:
                if i - (support + 1) < self._current_node < i + support:
                    ctx.strokeStyle = '#08F'
                    ctx.lineWidth = 5

            # Get coordinates of the four points
            x0, y0 = xx[i-1], yy[i-1]
            x1, y1 = xx[i+0], yy[i+0]
            x2, y2 = xx[i+1], yy[i+1]
            x3, y3 = xx[i+2], yy[i+2]

            # Interpolate: sample the blend at n+1 parameter values.
            ctx.beginPath()
            # lineto = ctx.moveTo.bind(ctx)
            lineto = ctx.lineTo.bind(ctx)
            n = 30
            for t in [i/n for i in range(n+1)]:
                f0, f1, f2, f3 = fun(t)
                x = x0 * f0 + x1 * f1 + x2 * f2 + x3 * f3
                y = y0 * f0 + y1 * f1 + y2 * f2 + y3 * f3
                lineto(x, y)
                lineto = ctx.lineTo.bind(ctx)
            ctx.stroke()
class Splines(flx.Widget):
    """Demo app layout: spline options on the left, explanation text and
    the interactive spline canvas on the right."""

    def init(self):
        with flx.HBox():
            with flx.VBox(flex=0, minsize=150):
                self.b1 = flx.RadioButton(text='Linear')
                self.b2 = flx.RadioButton(text='Basis')
                self.b3 = flx.RadioButton(text='Cardinal', checked=True)
                self.b4 = flx.RadioButton(text='Catmull Rom')
                self.b5 = flx.RadioButton(text='Lagrange')
                self.b6 = flx.RadioButton(text='Lanczos')
                flx.Widget(minsize=10)  # spacer
                closed = flx.CheckBox(text='Closed')
                flx.Widget(minsize=10)  # spacer
                self.tension = flx.Slider(min=-0.5, max=1, value=0.5,
                                          text='Tension: {value}')
                flx.Widget(flex=1)
            with flx.VBox(flex=1):
                flx.Label(text=GENERAL_TEXT, wrap=True, style='font-size: 12px;')
                self.explanation = flx.Label(text=CARDINAL_TEXT, wrap=True,
                                             style='font-size: 12px;')
                # The canvas; closed/tension track the controls reactively.
                self.spline = SplineWidget(flex=1,
                                           closed=lambda: closed.checked,
                                           tension=lambda: self.tension.value)

    # Mirror the module-level texts as class attributes so they can be
    # looked up with getattr(self, ...) in _set_spline_type (and are
    # available client-side under PScript).
    LINEAR_TEXT = LINEAR_TEXT
    BASIS_TEXT = BASIS_TEXT
    CARDINAL_TEXT = CARDINAL_TEXT
    CATMULLROM_TEXT = CATMULLROM_TEXT
    LAGRANGE_TEXT = LAGRANGE_TEXT
    LANCZOS_TEXT = LANCZOS_TEXT

    @flx.reaction('b1.checked', 'b2.checked', 'b3.checked', 'b4.checked',
                  'b5.checked', 'b6.checked')
    def _set_spline_type(self, *events):
        # Only act on the button that became checked.
        ev = events[-1]
        if not ev.new_value:
            return  # init event
        type = ev.source.text.replace(' ', '')
        self.spline.set_spline_type(type)
        self.explanation.set_text(getattr(self, type.upper() + '_TEXT'))

    @flx.reaction
    def __show_hide_tension_slider(self):
        # The tension option only applies to the Cardinal spline.
        if self.spline.spline_type == 'CARDINAL':
            self.tension.apply_style('visibility: visible')
        else:
            self.tension.apply_style('visibility: hidden')
if __name__ == '__main__':
    # Launch the demo in Firefox and run the Flexx event loop.
    a = flx.App(Splines)
    a.launch('firefox-browser')
    flx.run()
| zoofIO/flexx | flexxamples/demos/splines.py | Python | bsd-2-clause | 12,086 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget import options, plugin
from flexget.event import event
from flexget.terminal import console
from flexget.manager import Session
# The T411 API module is optional; translate a missing import into the
# standard FlexGet dependency error.
try:
    from flexget.plugins.internal.api_t411 import (T411Proxy)
except ImportError:
    # Only a failed import means the dependency is missing; the previous
    # bare `except:` also masked unrelated errors raised at import time.
    # NOTE(review): `issued_by='cli_series'` looks copy-pasted from the
    # series CLI -- confirm whether it should be the t411 plugin name.
    raise plugin.DependencyError(issued_by='cli_series', missing='api_t411',
                                 message='Torrent411 commandline interface not loaded')
def do_cli(manager, options):
    """Dispatch the `t411` CLI sub-command to its handler.

    :param manager: FlexGet manager (unused, required by the CLI hook).
    :param options: parsed argparse namespace for the `t411` command.
    :return:
    """
    action = options.t411_action
    if action == 'list-cats':
        print_categories(parent_category_name=options.category)
    elif action == 'add-auth':
        add_credential(username=options.username, password=options.password)
    elif action == 'list-terms':
        print_terms(category_name=options.category, term_type_name=options.type)
    elif action == 'list-auth':
        # Accepted but not implemented yet.
        pass
def add_credential(username, password):
    """Insert or update a T411 credential and report which happened.

    :param username: T411 account name
    :param password: T411 account password
    :return:
    """
    proxy = T411Proxy()
    created = proxy.add_credential(username=username, password=password)
    message = 'Credential successfully added' if created \
        else 'Credential successfully updated'
    console(message)
def print_terms(category_name=None, term_type_name=None):
    # Print, for each matching sub-category, its term types and their terms
    # in fixed-width columns (name, parent id, id).
    proxy = T411Proxy()
    proxy.set_credential()
    formatting_main = '%-60s %-5s %-5s'
    formatting_sub = '     %-55s %-5s %-5s'
    console(formatting_main % ('Name', 'PID', 'ID'))
    if term_type_name:
        # Filtering by term type is not supported yet.
        console("Not yet implemented !")
    else:
        with Session() as session:
            categories = proxy.find_categories(category_name=category_name, is_sub_category=True, session=session)
            for category in categories:
                console(formatting_main % (category.name, category.parent_id, category.id))
                for term_type in category.term_types:
                    console(formatting_main % (term_type.name, '', term_type.id))
                    for term in term_type.terms:
                        console(formatting_sub % (term.name, term_type.id, term.id))
def print_categories(parent_category_name=None):
    """
    Print category and its sub-categories
    :param parent_category_name: if None, all categories will be displayed
    :return:
    """
    proxy = T411Proxy()
    proxy.set_credential()
    with Session() as session:
        if parent_category_name is None:
            categories = proxy.main_categories(session=session)
        else:
            categories = proxy.find_categories(parent_category_name, session=session)

        # Fixed-width columns: name, parent id, id (sub-categories indented).
        formatting_main = '%-30s %-5s %-5s'
        formatting_sub = '     %-25s %-5s %-5s'
        console(formatting_main % ('Category name', 'PID', 'ID'))
        for category in categories:
            console(formatting_main % (category.name, category.parent_id, category.id))
            for sub_category in category.sub_categories:
                console(formatting_sub % (sub_category.name, sub_category.parent_id, sub_category.id))
@event('options.register')
def register_parser_arguments():
    # Register the top-level `t411` command with the FlexGet CLI.
    parser = options.register_command('t411', do_cli, help='view and manipulate the Torrent411 plugin database')

    # Set up our subparsers (dispatched on `t411_action` in do_cli).
    action_parsers = parser.add_subparsers(title='actions', metavar='<action>', dest='t411_action')

    # t411 add-auth <username> <password>
    auth_parser = action_parsers.add_parser('add-auth', help='authorize Flexget to access your Torrent411 account')
    auth_parser.add_argument('username', metavar='<username>', help='Your t411 username')
    auth_parser.add_argument('password', metavar='<password>', help='Your t411 password')

    # t411 list-cats [<category>]
    list_categories_parser = action_parsers.add_parser('list-cats', help='list available categories on Torrent411')
    list_categories_parser.add_argument('category',
                                        nargs='?',
                                        metavar='<category>',
                                        help='limit list to all, main or sub categories (default: %(default)s)')

    # t411 list-terms [--category X] [--type Y]
    list_terms = action_parsers.add_parser('list-terms', help='list available terms usable on Torrent411')
    list_terms.add_argument('--category', help='show terms only for this category')
    list_terms.add_argument('--type', help='show terms only for this term type')
| oxc/Flexget | flexget/plugins/cli/t411.py | Python | mit | 4,537 |
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djblets.util.fields import JSONField
class ChangeDescription(models.Model):
    """
    The recorded set of changes, containing optional description text
    and fields that have changed.

    This is a general model that can be used in applications for recording
    changes how they see fit. A helper function, 'record_field_changed',
    can be used to record information in a standard way for most value types,
    but the 'fields_changed' dictionary can be manipulated however the caller
    chooses.

    A ChangeDescription is not bound to a particular model. It is up to models
    to establish relationships with a ChangeDescription.

    Each field in 'fields_changed' represents a changed field.

    For string fields, the following fields will be available:

       * 'old': The old value of the field
       * 'new': The new value of the field

    For list and set fields, the following fields will be available:

       * 'removed': The fields that were removed, if any.
       * 'added': The fields that were added, if any.
    """
    # When the change was recorded.
    timestamp = models.DateTimeField(_('timestamp'), default=timezone.now)
    # Whether the change is visible to others.
    public = models.BooleanField(_("public"), default=False)
    # Free-form description of the change.
    text = models.TextField(_("change text"), blank=True)
    # Per-field change records, serialized as JSON (see class docstring).
    fields_changed = JSONField(_("fields changed"))

    def record_field_change(self, field, old_value, new_value,
                            name_field=None):
        """
        Records a field change.

        This will encode field changes following the rules in the overlying
        'ChangeDescription' documentation.

        'name_field' can be specified for lists or other iterables. When
        specified, each list item will be a tuple in the form of
        (object_name, object_url, object_id). Otherwise, it will be a
        tuple in the form of (item,).

        It is generally expected that fields with lists of model objects will
        have 'name_field' set, whereas lists of numbers or some other
        value type will not. Specifying a 'name_field' for non-objects will
        cause an AttributeError.
        """
        def serialize_changed_obj_list(items, name_field):
            # Model objects -> (name, absolute URL, pk) triples;
            # plain values -> 1-tuples.
            if name_field:
                return [(getattr(item, name_field),
                         item.get_absolute_url(),
                         item.id)
                        for item in list(items)]
            else:
                return [(item,) for item in list(items)]

        # Old and new values must be of the same type, except that any two
        # string types may be mixed (`basestring`: this targets Python 2).
        if (type(old_value) != type(new_value) and
            not (isinstance(old_value, basestring) and
                 isinstance(new_value, basestring))):
            raise ValueError("%s (%s) and %s (%s) are of two different value "
                             "types." % (old_value, type(old_value),
                                         new_value, type(new_value)))

        if hasattr(old_value, "__iter__"):
            # Iterable values: record the full old/new lists plus the set
            # differences.  Note the set-based diff drops duplicates and order.
            old_set = set(old_value)
            new_set = set(new_value)
            self.fields_changed[field] = {
                'old': serialize_changed_obj_list(old_value, name_field),
                'new': serialize_changed_obj_list(new_value, name_field),
                'added': serialize_changed_obj_list(new_set - old_set,
                                                    name_field),
                'removed': serialize_changed_obj_list(old_set - new_set,
                                                      name_field),
            }
        else:
            # Scalar values: store single-element tuples.
            self.fields_changed[field] = {
                'old': (old_value,),
                'new': (new_value,),
            }

    def __unicode__(self):
        return self.text

    class Meta:
        ordering = ['-timestamp']
        get_latest_by = "timestamp"
| atagar/ReviewBoard | reviewboard/changedescs/models.py | Python | mit | 3,822 |
"""
https://www.hackerrank.com/challenges/encryption
One classic method for composing secret messages is called a square code. The spaces are removed from the english text
and the characters are written into a square (or rectangle). The width and height of the rectangle have the constraint,
floor(sqrt( len(word) )) <= width, height <= ceil(sqrt( len(word) ))
Among the possible squares, choose the one with the minimum area.
In case of a rectangle, the number of rows will always be smaller than the number of columns. For example, the sentence
"if man was meant to stay on the ground god would have given us roots" is 54 characters long, so it is written in the
form of a rectangle with 7 rows and 8 columns. Many more rectangles can accomodate these characters; choose the one with
minimum area such that: length * width >= len(word)
ifmanwas
meanttos
tayonthe
groundgo
dwouldha
vegivenu
sroots
The coded message is obtained by reading the characters in a column, inserting a space, and then moving on to the next
column towards the right. For example, the message above is coded as:
imtgdvs fearwer mayoogo anouuio ntnnlvt wttddes aohghn sseoau
You will be given a message in English with no spaces between the words.The maximum message length can be 81 characters.
Print the encoded message.
Here are some more examples:
Sample Input:
haveaniceday
Sample Output:
hae and via ecy
"""
from math import sqrt, floor, ceil
__author__ = 'Danyang'
class Solution(object):
    def solve(self, cipher):
        """Encode `cipher` with the square-code cipher.

        The text is laid out row-major in the minimum-area h x w grid with
        floor(sqrt(n)) <= h, w <= ceil(sqrt(n)) and h*w >= n, then read out
        column by column, with columns separated by single spaces.

        :param cipher: the message, without spaces
        :return: the encoded message
        """
        n = len(cipher)
        lo = int(floor(sqrt(n)))
        hi = int(ceil(sqrt(n)))
        # Pick the (rows, cols) pair with the smallest sufficient area.
        # Scanning rows in the outer loop with a strict `<` keeps the first
        # pair found for equal areas, which favors rows <= columns as the
        # problem requires.
        best_h, best_w = hi, hi
        for h in range(lo, hi + 1):
            for w in range(lo, hi + 1):
                if n <= h * w < best_h * best_w:
                    best_h, best_w = h, w
        # Column j of a row-major grid of width w is simply cipher[j::w];
        # slicing also handles the ragged last row for free.  (This replaces
        # the original explicit grid, which relied on Python 2 integer `/`
        # and `== None` comparisons.)
        return " ".join(cipher[j::best_w] for j in range(best_w))
if __name__ == "__main__":
    import sys
    # Read the puzzle input from a local file; switch to sys.stdin when
    # submitting to the judge (Python 2 script: note the `print s,` below).
    f = open("1.in", "r")
    # f = sys.stdin
    cipher = f.readline().strip()

    # solve and print the encoded message
    s = "%s\n" % (Solution().solve(cipher))
    print s,
| algorhythms/HackerRankAlgorithms | Encryption.py | Python | apache-2.0 | 2,509 |
from django.conf import settings
from django.conf.urls import handler500, handler404, patterns, include, \
url
from django.contrib import admin
admin.autodiscover()  # register ModelAdmin classes from all installed apps

# Test-project URLconf: Django admin, a placeholderapp example view, the JS
# i18n catalog, static/media serving (development only), then the CMS
# catch-all which resolves every remaining path to a CMS page.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^example/$', 'cms.test_utils.project.placeholderapp.views.example_view'),
    url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),
    url(r'^media/cms/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.CMS_MEDIA_ROOT, 'show_indexes': True}),
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    (r'', include('django.contrib.staticfiles.urls')),
    url(r'^', include('cms.urls')),  # must stay last: matches everything
)
| pbs/django-cms | cms/test_utils/project/urls.py | Python | bsd-3-clause | 778 |
# -*- coding: utf-8 -*-
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import filters
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateAPIView, RetrieveAPIView, get_object_or_404
from rest_framework.permissions import IsAuthenticatedOrReadOnly, AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework_extensions.cache.decorators import cache_response
from rest_framework_extensions.etag.decorators import etag
from rest_framework_extensions.key_constructor import bits
from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
from zds.api.DJRF3xPaginationKeyBit import DJRF3xPaginationKeyBit
from zds.member.api.serializers import ProfileListSerializer, ProfileCreateSerializer, \
ProfileDetailSerializer, ProfileValidatorSerializer
from zds.member.api.permissions import IsOwnerOrReadOnly
from zds.member.api.generics import CreateDestroyMemberSanctionAPIView
from zds.member.commons import TemporaryReadingOnlySanction, ReadingOnlySanction, \
DeleteReadingOnlySanction, TemporaryBanSanction, BanSanction, DeleteBanSanction, \
ProfileCreate, TokenGenerator
from zds.member.models import Profile
class PagingSearchListKeyConstructor(DefaultKeyConstructor):
    """Cache/ETag key for paginated, searchable list endpoints.

    The key varies with the page, the ``search`` query parameter, the SQL
    of the list queryset, the view identity and the requesting user.
    """
    pagination = DJRF3xPaginationKeyBit()
    search = bits.QueryParamsKeyBit(['search'])
    list_sql_query = bits.ListSqlQueryKeyBit()
    unique_view_id = bits.UniqueViewIdKeyBit()
    user = bits.UserKeyBit()
class DetailKeyConstructor(DefaultKeyConstructor):
    """Cache/ETag key for detail endpoints.

    Varies with response format, language, the retrieve queryset SQL, the
    view identity and the requesting user.
    """
    format = bits.FormatKeyBit()
    language = bits.LanguageKeyBit()
    retrieve_sql_query = bits.RetrieveSqlQueryKeyBit()
    unique_view_id = bits.UniqueViewIdKeyBit()
    user = bits.UserKeyBit()
class MyDetailKeyConstructor(DefaultKeyConstructor):
    """Cache/ETag key for the "my profile" endpoint.

    Keyed on the user (plus format/language) rather than on a SQL query,
    since the object is derived directly from ``request.user``.
    """
    format = bits.FormatKeyBit()
    language = bits.LanguageKeyBit()
    user = bits.UserKeyBit()
class MemberListAPI(ListCreateAPIView, ProfileCreate, TokenGenerator):
    """
    Profile resource to list and register.
    """

    filter_backends = (filters.SearchFilter,)
    search_fields = ('user__username',)
    # Key constructor shared by the ETag and cache decorators on ``get``.
    list_key_func = PagingSearchListKeyConstructor()

    def get_queryset(self):
        # ``?contactable`` (any value) restricts the list to members that
        # accept contact; otherwise all members ordered by join date.
        contactable = self.request.query_params.get('contactable', None)
        if contactable is not None:
            queryset = Profile.objects.contactable_members()
        else:
            queryset = Profile.objects.all_members_ordered_by_date_joined()
        return queryset

    @etag(list_key_func)
    @cache_response(key_func=list_key_func)
    def get(self, request, *args, **kwargs):
        """
        Lists all users in the system.
        ---

        parameters:
            - name: page
              description: Restricts output to the given page number.
              required: false
              paramType: query
            - name: page_size
              description: Sets the number of profiles per page.
              required: false
              paramType: query
            - name: search
              description: Filters by username.
              required: false
              paramType: query
        responseMessages:
            - code: 404
              message: Not Found
        """
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """
        Registers a new user. User will need to act on confirmation email.
        ---

        responseMessages:
            - code: 400
              message: Bad Request
        """
        serializer = self.get_serializer_class()(data=request.data, context={'request': self.request})
        serializer.is_valid(raise_exception=True)
        profile = serializer.save()
        # Send the account-activation email carrying a one-time token.
        token = self.generate_token(profile.user)
        self.send_email(token, profile.user)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    def get_serializer_class(self):
        # NOTE: returns None for other HTTP methods; only GET/POST are routed
        # to this view.
        if self.request.method == 'GET':
            return ProfileListSerializer
        elif self.request.method == 'POST':
            return ProfileCreateSerializer

    def get_permissions(self):
        # Listing and registration are open; DRYPermissions delegates
        # object/global checks to the model.
        permission_classes = [AllowAny, ]
        if self.request.method == 'GET' or self.request.method == 'POST':
            permission_classes.append(DRYPermissions)
        return [permission() for permission in permission_classes]
class MemberMyDetailAPI(RetrieveAPIView):
    """
    Profile resource for member details.
    """

    obj_key_func = MyDetailKeyConstructor()
    serializer_class = ProfileDetailSerializer

    @etag(obj_key_func)
    @cache_response(key_func=obj_key_func)
    def get(self, request, *args, **kwargs):
        """
        Gets information for a user account.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
        responseMessages:
            - code: 401
              message: Not Authenticated
        """
        profile = self.get_object()
        # Owner view: always expose the email and authenticated-only fields.
        serializer = self.get_serializer(profile,
                                         show_email=True,
                                         is_authenticated=True)
        return Response(serializer.data)

    def get_object(self):
        # The "my profile" endpoint resolves the object from the token user,
        # not from a URL parameter.
        return get_object_or_404(Profile, user=self.request.user)

    def get_permissions(self):
        permission_classes = [IsAuthenticated, ]
        if self.request.method == 'GET':
            permission_classes.append(DRYPermissions)
        return [permission() for permission in permission_classes]
class MemberDetailAPI(RetrieveUpdateAPIView):
    """
    Profile resource to display or update details of a member.
    """

    queryset = Profile.objects.all()
    lookup_field = 'user__id'
    obj_key_func = DetailKeyConstructor()

    @etag(obj_key_func)
    @cache_response(key_func=obj_key_func)
    def get(self, request, *args, **kwargs):
        """
        Gets a user given by its identifier.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: false
              paramType: header
        responseMessages:
            - code: 404
              message: Not Found
        """
        profile = self.get_object()
        # Respect the member's privacy setting for the email field.
        serializer = self.get_serializer(profile,
                                         show_email=profile.show_email,
                                         is_authenticated=self.request.user.is_authenticated())
        return Response(serializer.data)

    # rebuild_after_method_evaluation recomputes the ETag after the update so
    # the response carries the new entity tag.
    @etag(obj_key_func, rebuild_after_method_evaluation=True)
    def put(self, request, *args, **kwargs):
        """
        Updates a user given by its identifier.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
        responseMessages:
            - code: 400
              message: Bad Request
            - code: 401
              message: Not Authenticated
            - code: 403
              message: Insufficient rights to call this procedure. Source and target users must be equal.
            - code: 404
              message: Not Found
        """
        return self.update(request, *args, **kwargs)

    def get_serializer_class(self):
        # NOTE: returns None for other HTTP methods; only GET/PUT are routed
        # to this view.
        if self.request.method == 'GET':
            return ProfileDetailSerializer
        elif self.request.method == 'PUT':
            return ProfileValidatorSerializer

    def get_permissions(self):
        permission_classes = []
        if self.request.method == 'GET':
            permission_classes.append(DRYPermissions)
        elif self.request.method == 'PUT':
            # Updates additionally require authentication and ownership.
            permission_classes.append(DRYPermissions)
            permission_classes.append(IsAuthenticatedOrReadOnly)
            permission_classes.append(IsOwnerOrReadOnly)
        return [permission() for permission in permission_classes]
class MemberDetailReadingOnly(CreateDestroyMemberSanctionAPIView):
    """
    Profile resource to apply or remove read only sanction.
    """

    lookup_field = 'user__id'

    def post(self, request, *args, **kwargs):
        """
        Applies a read only sanction to the given user.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
            - name: ls-jrs
              description: Sanction duration in days.
              required: false
              paramType: form
            - name: ls-text
              description: Description of the sanction.
              required: false
              paramType: form
        omit_parameters:
            - body
        responseMessages:
            - code: 401
              message: Not Authenticated
            - code: 403
              message: Insufficient rights to call this procedure. Needs staff status.
            - code: 404
              message: Not Found
        """
        return super(MemberDetailReadingOnly, self).post(request, args, kwargs)

    def delete(self, request, *args, **kwargs):
        """
        Removes a read only sanction from the given user.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
        responseMessages:
            - code: 401
              message: Not Authenticated
            - code: 403
              message: Insufficient rights to call this procedure. Needs staff status.
            - code: 404
              message: Not Found
        """
        return super(MemberDetailReadingOnly, self).delete(request, args, kwargs)

    def get_state_instance(self, request):
        """Build the sanction object matching the HTTP method and payload.

        A POST with an ``ls-jrs`` field yields a temporary sanction, otherwise
        a permanent one; DELETE lifts the sanction.

        :raises ValueError: for any other HTTP method.
        """
        if request.method == 'POST':
            if 'ls-jrs' in request.data:
                return TemporaryReadingOnlySanction(request.data)
            else:
                return ReadingOnlySanction(request.data)
        elif request.method == 'DELETE':
            return DeleteReadingOnlySanction(request.data)
        raise ValueError('Method {0} is not supported in this API route.'.format(request.method))
class MemberDetailBan(CreateDestroyMemberSanctionAPIView):
    """
    Profile resource to apply or remove ban sanction.
    """

    lookup_field = 'user__id'

    def post(self, request, *args, **kwargs):
        """
        Applies a ban sanction to a given user.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
            - name: ban-jrs
              description: Sanction duration in days.
              required: false
              paramType: form
            - name: ban-text
              description: Description of the sanction.
              required: false
              paramType: form
        omit_parameters:
            - body
        responseMessages:
            - code: 401
              message: Not Authenticated
            - code: 403
              message: Insufficient rights to call this procedure. Needs staff status.
            - code: 404
              message: Not Found
        """
        return super(MemberDetailBan, self).post(request, args, kwargs)

    def delete(self, request, *args, **kwargs):
        """
        Removes a ban sanction from a given user.
        ---

        parameters:
            - name: Authorization
              description: Bearer token to make an authenticated request.
              required: true
              paramType: header
        responseMessages:
            - code: 401
              message: Not Authenticated
            - code: 403
              message: Insufficient rights to call this procedure. Needs staff status.
            - code: 404
              message: Not Found
        """
        return super(MemberDetailBan, self).delete(request, args, kwargs)

    def get_state_instance(self, request):
        """Build the sanction object matching the HTTP method and payload.

        A POST with a ``ban-jrs`` field yields a temporary ban, otherwise a
        permanent one; DELETE lifts the ban.

        :raises ValueError: for any other HTTP method.
        """
        if request.method == 'POST':
            if 'ban-jrs' in request.data:
                return TemporaryBanSanction(request.data)
            else:
                # Bug fix: use the parsed body (request.data) rather than
                # request.POST, which is empty for JSON payloads in DRF.
                # This matches the temporary-ban branch above and the
                # read-only sanction view.
                return BanSanction(request.data)
        elif request.method == 'DELETE':
            return DeleteBanSanction(request.data)
        raise ValueError('Method {0} is not supported in this API route.'.format(request.method))
| DevHugo/zds-site | zds/member/api/views.py | Python | gpl-3.0 | 12,703 |
#! /usr/bin/env python3
################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015 Matthew Williams and David Williams
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import sys
sys.path.append("../../library/bindings/") #This is just to point to the generated bindings
import PolyVoxCore as pv
#Create a 64x64x64 volume of integers
r = pv.Region(pv.Vector3Dint32_t(0,0,0), pv.Vector3Dint32_t(63,63,63))
vol = pv.SimpleVolumeuint8(r)

#Now fill the volume with our data (a sphere)
v3dVolCenter = pv.Vector3Dint32_t(vol.getWidth() // 2, vol.getHeight() // 2, vol.getDepth() // 2)
sphereRadius = 30

#This three-level for loop iterates over every voxel in the volume
for z in range(vol.getDepth()):
    for y in range(vol.getHeight()):
        for x in range(vol.getWidth()):
            #Compute how far the current position is from the center of the volume
            fDistToCenter = (pv.Vector3Dint32_t(x,y,z) - v3dVolCenter).length()
            #If the current voxel is less than 'radius' units from the center then we make it solid.
            if(fDistToCenter <= sphereRadius):
                #Our new voxel value (255 = solid, 0 = empty)
                uVoxelValue = 255
            else:
                uVoxelValue = 0
            #Write the voxel value into the volume
            vol.setVoxelAt(x, y, z, uVoxelValue);

#Create a mesh, pass it to the extractor and generate the mesh
mesh = pv.SurfaceMeshPositionMaterialNormal()
extractor = pv.CubicSurfaceExtractorWithNormalsSimpleVolumeuint8(vol, r, mesh)
extractor.execute()

#That's all of the PolyVox generation done, now to convert the output to something OpenGL can read efficiently
import numpy as np

indices = np.array(mesh.getIndices(), dtype='uint32') #Throw in the vertex indices into an array
#The vertices and normals are placed in an interleaved array like [vvvnnn,vvvnnn,vvvnnn]
vertices = np.array([[vertex.getPosition().getX(), vertex.getPosition().getY(), vertex.getPosition().getZ(),
                      vertex.getNormal().getX(), vertex.getNormal().getY(), vertex.getNormal().getZ()]
                     for vertex in mesh.getVertices()],
                    dtype='f')
#Now that we have our data, everything else here is just OpenGL
import OpenGL
from OpenGL.GL import shaders
from OpenGL.arrays import vbo
from OpenGL.GL import glClear, glEnable, glDepthFunc, GLuint, glEnableVertexAttribArray, glVertexAttribPointer, glDisableVertexAttribArray, \
glDrawElements, glGetUniformLocation, glUniformMatrix4fv, glDepthMask, glDepthRange, glGetString, glBindAttribLocation, \
GL_COLOR_BUFFER_BIT, GL_TRIANGLES, GL_DEPTH_TEST, GL_LEQUAL, GL_FLOAT, \
GL_DEPTH_BUFFER_BIT, GL_ELEMENT_ARRAY_BUFFER, GL_UNSIGNED_INT, GL_STATIC_DRAW, \
GL_FALSE, GL_TRUE, GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, GL_CULL_FACE, \
GL_VENDOR, GL_RENDERER, GL_VERSION, GL_SHADING_LANGUAGE_VERSION
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, glBindVertexArray
import pygame
from math import sin, cos, tan, radians
SCREEN_SIZE = (800, 800)
def run():
    """Open a pygame/OpenGL window and render the extracted voxel mesh.

    Left-mouse drag rotates the volume; ESC or closing the window quits.
    Relies on the module-level ``indices``/``vertices`` arrays built above.
    """
    #Start OpenGL and ask it for an OpenGL context
    pygame.init()
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode(SCREEN_SIZE, pygame.HWSURFACE|pygame.OPENGL|pygame.DOUBLEBUF)

    #The first thing we do is print some OpenGL details and check that we have a good enough version
    print("OpenGL Implementation Details:")
    if glGetString(GL_VENDOR):
        print("\tGL_VENDOR: {}".format(glGetString(GL_VENDOR).decode()))
    if glGetString(GL_RENDERER):
        print("\tGL_RENDERER: {}".format(glGetString(GL_RENDERER).decode()))
    if glGetString(GL_VERSION):
        print("\tGL_VERSION: {}".format(glGetString(GL_VERSION).decode()))
    if glGetString(GL_SHADING_LANGUAGE_VERSION):
        print("\tGL_SHADING_LANGUAGE_VERSION: {}".format(glGetString(GL_SHADING_LANGUAGE_VERSION).decode()))
    major_version = int(glGetString(GL_VERSION).decode().split()[0].split('.')[0])
    minor_version = int(glGetString(GL_VERSION).decode().split()[0].split('.')[1])
    # NOTE(review): the second clause is subsumed by the first
    # (major_version < 3 twice), so the minor version is never actually
    # checked -- confirm intended condition.
    if major_version < 3 or (major_version < 3 and minor_version < 0):
        print("OpenGL version must be at least 3.0 (found {0})".format(glGetString(GL_VERSION).decode().split()[0]))

    #Now onto the OpenGL initialisation

    #Set up depth culling
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)
    glDepthMask(GL_TRUE)
    glDepthFunc(GL_LEQUAL)
    glDepthRange(0.0, 1.0)

    #We create out shaders which do little more than set a flat colour for each face
    VERTEX_SHADER = shaders.compileShader(b"""
    #version 130

    in vec4 position;
    in vec4 normal;

    uniform mat4 projectionMatrix;
    uniform mat4 viewMatrix;
    uniform mat4 modelMatrix;

    flat out float theColor;

    void main()
    {
        vec4 temp = modelMatrix * position;
        temp = viewMatrix * temp;
        gl_Position = projectionMatrix * temp;

        theColor = clamp(abs(dot(normalize(normal.xyz), normalize(vec3(0.9,0.1,0.5)))), 0, 1);
    }
    """, GL_VERTEX_SHADER)

    FRAGMENT_SHADER = shaders.compileShader(b"""
    #version 130

    flat in float theColor;

    out vec4 outputColor;
    void main()
    {
        outputColor = vec4(1.0, 0.5, theColor, 1.0);
    }
    """, GL_FRAGMENT_SHADER)

    shader = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)

    #And then grab our attribute locations from it
    glBindAttribLocation(shader, 0, b"position")
    glBindAttribLocation(shader, 1, b"normal")

    #Create the Vertex Array Object to hold our volume mesh
    vertexArrayObject = GLuint(0)
    glGenVertexArrays(1, vertexArrayObject)
    glBindVertexArray(vertexArrayObject)

    #Create the index buffer object
    indexPositions = vbo.VBO(indices, target=GL_ELEMENT_ARRAY_BUFFER, usage=GL_STATIC_DRAW)
    #Create the VBO
    vertexPositions = vbo.VBO(vertices, usage=GL_STATIC_DRAW)

    #Bind our VBOs and set up our data layout specifications
    with indexPositions, vertexPositions:
        glEnableVertexAttribArray(0)
        # Interleaved layout: 3 position floats then 3 normal floats per vertex.
        glVertexAttribPointer(0, 3, GL_FLOAT, False, 6*vertices.dtype.itemsize, vertexPositions+(0*vertices.dtype.itemsize))
        glEnableVertexAttribArray(1)
        glVertexAttribPointer(1, 3, GL_FLOAT, False, 6*vertices.dtype.itemsize, vertexPositions+(3*vertices.dtype.itemsize))

        glBindVertexArray(0)
        glDisableVertexAttribArray(0)

    #Now grab out transformation martix locations
    modelMatrixUnif = glGetUniformLocation(shader, b"modelMatrix")
    viewMatrixUnif = glGetUniformLocation(shader, b"viewMatrix")
    projectionMatrixUnif = glGetUniformLocation(shader, b"projectionMatrix")

    modelMatrix = np.array([[1.0,0.0,0.0,-32.0],[0.0,1.0,0.0,-32.0],[0.0,0.0,1.0,-32.0],[0.0,0.0,0.0,1.0]], dtype='f')
    viewMatrix = np.array([[1.0,0.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,0.0,1.0,-50.0],[0.0,0.0,0.0,1.0]], dtype='f')
    projectionMatrix = np.array([[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0]], dtype='f')

    #These next few lines just set up our camera frustum
    fovDeg = 45.0
    frustumScale = 1.0 / tan(radians(fovDeg) / 2.0)
    zNear = 1.0
    zFar = 1000.0
    projectionMatrix[0][0] = frustumScale
    projectionMatrix[1][1] = frustumScale
    projectionMatrix[2][2] = (zFar + zNear) / (zNear - zFar)
    projectionMatrix[2][3] = -1.0
    projectionMatrix[3][2] = (2 * zFar * zNear) / (zNear - zFar)

    #viewMatrix and projectionMatrix don't change ever so just set them once here
    with shader:
        glUniformMatrix4fv(projectionMatrixUnif, 1, GL_TRUE, projectionMatrix)
        glUniformMatrix4fv(viewMatrixUnif, 1, GL_TRUE, viewMatrix)

    #These are used to track the rotation of the volume
    LastFrameMousePos = (0,0)
    CurrentMousePos = (0,0)
    xRotation = 0
    yRotation = 0

    while True:
        clock.tick()

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
                return
            if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                CurrentMousePos = event.pos
                LastFrameMousePos = CurrentMousePos
            if event.type == pygame.MOUSEMOTION and 1 in event.buttons:
                CurrentMousePos = event.pos
                # NOTE(review): 'diff' is computed but never used; the
                # rotation uses event.rel instead -- confirm it can be removed.
                diff = (CurrentMousePos[0] - LastFrameMousePos[0], CurrentMousePos[1] - LastFrameMousePos[1])
                xRotation += event.rel[0]
                yRotation += event.rel[1]
                LastFrameMousePos = CurrentMousePos

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        #Perform the rotation of the mesh
        moveToOrigin = np.array([[1.0,0.0,0.0,-32.0],[0.0,1.0,0.0,-32.0],[0.0,0.0,1.0,-32.0],[0.0,0.0,0.0,1.0]], dtype='f')
        rotateAroundX = np.array([[1.0,0.0,0.0,0.0],[0.0,cos(radians(yRotation)),-sin(radians(yRotation)),0.0],[0.0,sin(radians(yRotation)),cos(radians(yRotation)),0.0],[0.0,0.0,0.0,1.0]], dtype='f')
        rotateAroundY = np.array([[cos(radians(xRotation)),0.0,sin(radians(xRotation)),0.0],[0.0,1.0,0.0,0.0],[-sin(radians(xRotation)),0.0,cos(radians(xRotation)),0.0],[0.0,0.0,0.0,1.0]], dtype='f')

        modelMatrix = rotateAroundY.dot(rotateAroundX.dot(moveToOrigin))

        with shader:
            glUniformMatrix4fv(modelMatrixUnif, 1, GL_TRUE, modelMatrix)

            glBindVertexArray(vertexArrayObject)

            glDrawElements(GL_TRIANGLES, len(indices), GL_UNSIGNED_INT, None)

            glBindVertexArray(0)

        # Show the screen
        pygame.display.flip()

run()
| phiste/kengine | systems/polyvox/libs/polyvox/examples/Python/PythonExample.py | Python | mit | 10,172 |
#!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Processes an Android AAR file."""
import argparse
import os
import posixpath
import re
import shutil
import sys
from xml.etree import ElementTree
import zipfile
from util import build_utils
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir)))
import gn_helpers
def _IsManifestEmpty(manifest_str):
"""Returns whether the given manifest has merge-worthy elements.
E.g.: <activity>, <service>, etc.
"""
doc = ElementTree.fromstring(manifest_str)
for node in doc:
if node.tag == 'application':
if len(node):
return False
elif node.tag != 'uses-sdk':
return False
return True
def main():
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--input-file',
                      help='Path to the AAR file.',
                      required=True,
                      metavar='FILE')
  parser.add_argument('--extract',
                      help='Extract the files to output directory.',
                      action='store_true')
  parser.add_argument('--list',
                      help='List all the resource and jar files.',
                      action='store_true')
  parser.add_argument('--output-dir',
                      help='Output directory for the extracted files. Must '
                      'be set if --extract is set.',
                      metavar='DIR')
  args = parser.parse_args()
  if not args.extract and not args.list:
    parser.error('Either --extract or --list has to be specified.')

  aar_file = args.input_file
  output_dir = args.output_dir

  if args.extract:
    # Clear previously extracted versions of the AAR.
    shutil.rmtree(output_dir, True)
    build_utils.ExtractAll(aar_file, path=output_dir)

  if args.list:
    # Classify every archive entry by its path prefix; the resulting scope
    # is printed as a GN value for the build to consume.
    data = {}
    data['aidl'] = []
    data['assets'] = []
    data['resources'] = []
    data['subjars'] = []
    data['subjar_tuples'] = []
    data['has_classes_jar'] = False
    data['has_proguard_flags'] = False
    data['has_native_libraries'] = False
    with zipfile.ZipFile(aar_file) as z:
      data['is_manifest_empty'] = (
          _IsManifestEmpty(z.read('AndroidManifest.xml')))
      for name in z.namelist():
        if name.endswith('/'):
          continue  # Skip directory entries.
        if name.startswith('aidl/'):
          data['aidl'].append(name)
        elif name.startswith('res/'):
          data['resources'].append(name)
        elif name.startswith('libs/') and name.endswith('.jar'):
          # Derive a GN-safe label from the jar's base name.
          label = posixpath.basename(name)[:-4]
          label = re.sub(r'[^a-zA-Z0-9._]', '_', label)
          data['subjars'].append(name)
          data['subjar_tuples'].append([label, name])
        elif name.startswith('assets/'):
          data['assets'].append(name)
        elif name.startswith('jni/'):
          data['has_native_libraries'] = True
        elif name == 'classes.jar':
          data['has_classes_jar'] = True
        elif name == 'proguard.txt':
          data['has_proguard_flags'] = True
    # Python 2 print statement: emits the GN scope on stdout.
    print gn_helpers.ToGNString(data)


if __name__ == '__main__':
  sys.exit(main())
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/gyp/aar.py | Python | gpl-3.0 | 3,306 |
from nova.db.sqlalchemy import api as model_api
from nova.db.sqlalchemy.models import PciDevice, Instance, ComputeNode
import collections
#, VFAllocation
# Shared SQLAlchemy session used by the raw queries below.
session = model_api.get_session()
# Supported workload labels; NOTE(review): confirm the intended meaning of
# "cp"/"cr" -- SOURCE does not define them beyond these tags.
WORK_LOAD = ["cp","cr"]
def execute_vf_allocation(req_vf,los,req_work,bus_list, *args,**kwargs):
    """This method is called from nova.scheduler.filter_scheduler.FilterScheduler"""
    # NOTE(review): the per-bus SQL below is built with %-interpolation while
    # the first query uses bound parameters (:status); parameterizing all of
    # them would be safer -- confirm the trust boundary of BUS/SLOT values.
    base_dict = collections.OrderedDict()
    get_bus_slot = session.query(PciDevice).from_statement("select id,bus,slot from pci_devices where status = :status GROUP BY bus, slot").params(status="available").all()
    # Keep only devices on the requested buses.
    obj_list = [obj for obj in get_bus_slot if obj.bus in bus_list]
    if not obj_list:
        return []
    """ CLEAR VF_ALLOCATION TABLE DATA """
    session.execute("truncate vf_allocation")
    """ Get list of PCI devices for Unique bus and slot (unassigned is optional) """
    for obj in obj_list:
        BUS = obj.bus
        SLOT = obj.slot
        cp_vf_assigned = []
        for j in range(len(WORK_LOAD)):
            """ Get the List of VF assigned for each Bus, Slot for workload cp and cr """
            GET_ASS_VF = """select bus,slot,function,count(workload) as count_wl from pci_devices where bus = %s and slot = %s and workload = '%s' and status = 'allocated'""" % (BUS, SLOT, str(WORK_LOAD[j]))
            cp_vf_ass = int(session.query("count_wl").from_statement(GET_ASS_VF).scalar())
            cp_vf_assigned.append(cp_vf_ass)
        """ Get the Policy value from the input """
        # NOTE(review): los_ass_final is assigned but never used below -- confirm.
        los_ass_final = int(los)
        """ Create obtained records as a dictionary """
        base_dict[str(BUS)+":"+str(SLOT)] = [{'cp': cp_vf_assigned[0], 'cr': cp_vf_assigned[1]}]
    """ VF Allocation Algorithm Logic"""
    # Mixed cp-cr requests must ask for an even VF count so the split is exact.
    if (((req_vf % 2 == 0) and (req_work == "cp-cr")) or (req_work == "cp") or (req_work == "cr")):
        result = VF_Allocation_Extended_Logic(req_vf,los,req_work,base_dict)
        return result
    else:
        return []
def VF_Allocation_Extended_Logic(req_vf,los,req_work,base_dict):
    """Greedy per-VF placement over the bus:slot candidates in *base_dict*.

    For each requested VF (alternating cp then cr halves for "cp-cr"
    requests), pick a bus:slot whose count for that workload is below the
    *los* policy limit, reserve one available VF address on it, and bump
    the in-memory count. Returns [] unless every requested VF was placed.
    """
    address_list = []
    address_workload_list = []
    tmp_add_store = ("")
    REQ_VF = req_work
    RESET_COUNT = 0
    for k in range(req_vf):
        # For "cp-cr" requests, the first half of the VFs are cp, the rest cr.
        if REQ_VF == "cp-cr" and ( req_vf / 2 != RESET_COUNT ):
            req_work = 'cp'
            RESET_COUNT = RESET_COUNT + 1
        elif REQ_VF == "cp-cr" and ( req_vf / 2 <= RESET_COUNT ):
            req_work = 'cr'
        filter_data = {k: v for k, v in base_dict.iteritems() if v[0][req_work] < los} # Filter the Bus slot having vfs less than los value for selected workload
        if req_work == 'cp':
            final_list = sorted(filter_data, key=lambda x: (filter_data[x][0]['cp'], filter_data[x][0]['cr'])) # sort the filtered dict based on cp cr count
        else:
            final_list = sorted(filter_data, key=lambda x: (filter_data[x][0]['cr'], filter_data[x][0]['cp'])) # sort the filtered dict based on cp cr count
        if len(final_list) >= 1:
            # NOTE(review): list.sort() returns None, so this assignment is
            # dead and, worse, re-sorts final_list lexicographically, which
            # overwrites the count-based ordering computed above -- confirm
            # whether the .sort() call should be removed.
            selected_bus_slot = final_list.sort() # Get last bus slot for PCI Instance request
            selected_bus_slot = final_list[-1]
        else:
            selected_bus_slot = ""
        if selected_bus_slot:
            bus_,slot_ = selected_bus_slot.split(":")
            # Reserve a non-PF (function <> 0) available address not already taken.
            address = [ad[0] for ad in session.query("address").from_statement("select address from pci_devices where bus = %s and slot = %s and status='available' and function <> 0" % (bus_,slot_)).all() if ad[0] not in address_list]
            if address:
                address_list.append(address[0])
                address_workload_list.append((address[0],req_work))
                base_dict[selected_bus_slot][0][req_work] = base_dict[selected_bus_slot][0][req_work] + 1 # Update the vfs count for selected bus,slot with requested workload
        else:
            break;
    # All-or-nothing: partial placements are discarded.
    if len(address_list) != req_vf:
        return []
    return address_workload_list
| khandavally/devstack | EPAQA/vf_allocation.py | Python | apache-2.0 | 4,020 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a Compute Engine address resource.
"""
import yaml
def GenerateEmbeddableYaml(yaml_string):
  """Re-serializes a YAML document so it can be embedded in another one.

  Parses *yaml_string* and dumps the resulting object back in flow style,
  producing a representation that can be inserted inline into another
  YAML document.
  """
  # Security fix: yaml.load without an explicit Loader can construct
  # arbitrary Python objects from tagged input and is deprecated in
  # PyYAML >= 5.1; safe_load restricts parsing to plain YAML types, which
  # is all a deployment config needs.
  yaml_object = yaml.safe_load(yaml_string)
  dumped_yaml = yaml.dump(yaml_object, default_flow_style=True)
  return dumped_yaml
def GenerateConfig(context):
  """Return the YAML config declaring one compute.v1.address resource.

  The resource name comes from the deployment environment and the region
  from the template's ``compute-region`` property.
  """
  template = ("\n"
              "resources:\n"
              "- type: compute.v1.address\n"
              "  name: {name}\n"
              "  properties:\n"
              "    region: {region}\n")
  return template.format(name=context.env["name"],
                         region=context.properties["compute-region"])
| GoogleCloudPlatformTraining/cpo200-Google-Cloud-Deployment-Manager | section-3/address.py | Python | apache-2.0 | 1,245 |
from collections import OrderedDict
from .feature_extractor import FeatureExtractor, calc
from .feature_params import FeatureParameters, NumericFeatureParameters
from ..model_util import UNKNOWN_VALUE, MISSING_VALUE, UnknownDict, save_dict, load_dict
# Single feature-template string built from adjacent string literals: each
# group names state elements (s0..s3 = stack, b0..b3 = buffer, a0/a1 = past
# actions) followed by the property letters to extract from them.
FEATURE_TEMPLATES = (
    "s0s1xd" "s1s0x" "s0b0xd" "b0s0x"  # specific edges
    "s0wmtudencpT#^$xhqyPCIEMN"
    "s1wmtudencT#^$xhyN"
    "s2wmtudencT#^$xhy"
    "s3wmtudencT#^$xhyN"
    "b0wmtudncT#^$hPCIEMN"
    "b1wmtudncT#^$"
    "b2wmtudncT#^$"
    "b3wmtudncT#^$"
    "s0lwmenc#^$"
    "s0rwmenc#^$"
    "s1lwmenc#^$"
    "s1rwmenc#^$"
    "s0llwmen#^$"
    "s0lrwmen#^$"
    "s0rlwmen#^$"
    "s0rrwmen#^$"
    "s1llwmen#^$"
    "s1lrwmen#^$"
    "s1rlwmen#^$"
    "s1rrwmen#^$"
    "s0Lwmen#^$"
    "s0Rwmen#^$"
    "s1Lwmen#^$"
    "s1Rwmen#^$"
    "b0Lwmen#^$"
    "b0Rwmen#^$"
    "s0b0e" "b0s0e"  # specific edges
    "a0eAa1eA",  # past actions
)
INDEXED = "wmtudT"  # words, lemmas, fine POS tags, coarse/universal POS tags, dep rels, entity type
DEFAULT = ()  # intermediate value for missing features
# File extension for the pickled feature-parameter dictionary (see save/load).
FILENAME_SUFFIX = ".enum"
class DenseFeatureExtractor(FeatureExtractor):
"""
Extracts features from the parser state for classification. To be used with a NeuralNetwork classifier.
"""
def __init__(self, params, indexed, hierarchical=False, node_dropout=0, init_params=True, omit_features=None):
super().__init__(feature_templates=FEATURE_TEMPLATES, omit_features=omit_features)
self.indexed = indexed
self.hierarchical = hierarchical
self.node_dropout = node_dropout
if init_params:
self.params = OrderedDict((k, p) for k, p in [(NumericFeatureParameters.SUFFIX, NumericFeatureParameters(1))
] + list(params.items()))
for param in self.params.values():
self.update_param_indexed(param)
num_values = self.num_values()
for key, param in self.params.items():
param.num = num_values[key]
param.node_dropout = self.node_dropout
else:
self.params = params
def init_param(self, key):
param = self.params[key]
self.update_param_indexed(param)
param.num = self.num_values()[key]
def num_values(self):
return {k: len(v) for k, v in self.param_values(all_params=True).items()}
def update_param_indexed(self, param):
param.indexed = self.indexed and param.prop in INDEXED
@property
def feature_template(self):
return self.feature_templates[0]
def init_features(self, state):
features = OrderedDict()
for key, param in self.params.items():
if param.indexed and param.enabled:
values = [calc(n, state, param.prop) for n in state.terminals]
param.init_data()
features[key] = [param.data[v] for v in values]
return features
def extract_features(self, state):
"""
Calculate feature values according to current state
:param state: current state of the parser
:return dict of feature name -> list of numeric values
"""
features = OrderedDict()
for key, values in self.param_values(state).items():
param = self.params[key]
param.init_data() # Replace categorical values with their values in data dict:
features[key] = [(UNKNOWN_VALUE if v == DEFAULT else v) if param.numeric else
(MISSING_VALUE if v == DEFAULT else (v if param.indexed else param.data[v]))
for v in values]
return features
def param_values(self, state=None, all_params=False):
indexed = []
by_key = OrderedDict()
by_prop = OrderedDict()
for key, param in self.params.items():
if param.enabled and param.dim or all_params:
if param.indexed:
if param.copy_from:
copy_from = self.params.get(param.copy_from)
if copy_from and copy_from.enabled and copy_from.dim and not all_params:
continue
if param.prop not in indexed:
indexed.append(param.prop) # Only need one copy of indices
by_key[key] = by_prop.setdefault(
NumericFeatureParameters.SUFFIX if param.numeric else param.prop,
([state.node_ratio()] if state else [1] if all_params else []) if param.numeric else [])
for e, prop, value in self.feature_template.extract(state, DEFAULT, "".join(indexed), as_tuples=True,
node_dropout=self.node_dropout,
hierarchical=self.hierarchical):
vs = by_prop.get(NumericFeatureParameters.SUFFIX if e.is_numeric(prop) else prop)
if vs is not None:
vs.append(value if state else (e, prop))
return by_key
def all_features(self):
return ["".join(self.join_props(vs)) for _, vs in sorted(self.param_values().items(), key=lambda x: x[0])]
@staticmethod
def join_props(values):
prev = None
ret = []
for element, prop in values:
prefix = "" if element.is_numeric(prop) and prev == element.str else element.str
ret.append(prefix + prop)
prev = element.str
return ret
def finalize(self):
return type(self)(FeatureParameters.copy(self.params, UnknownDict), self.indexed, init_params=False,
omit_features=self.omit_features)
    def unfinalize(self):
        """Undo finalize(): replace each feature parameter's data dict with a DropoutDict again, to keep training"""
        for param in self.params.values():
            param.unfinalize()
            # NOTE(review): reassigned every iteration, so this ends up holding the
            # LAST param's node_dropout — confirm all params share the same value
            self.node_dropout = param.node_dropout
    def save(self, filename, save_init=True):  # TODO Save to JSON instead of pickle, with data as list (not dict)
        """Save base extractor state, then persist a copy of the feature params alongside it."""
        super().save(filename, save_init=save_init)
        save_dict(filename + FILENAME_SUFFIX, FeatureParameters.copy(self.params, copy_init=save_init))
def load(self, filename, order=None):
super().load(filename, order)
self.params = FeatureParameters.copy(load_dict(filename + FILENAME_SUFFIX), UnknownDict, order=order)
self.node_dropout = 0
| danielhers/tupa | tupa/features/dense_features.py | Python | gpl-3.0 | 6,508 |
"""A simple example of serving map data for overlay on a web-accessible 'slippy' map.
Run this script and go to http://localhost:8080/.
"""
import logging
import os
import sys
import foldbeam.renderer
from foldbeam.tests.renderer_tests import osm_map_renderer
import selector
import TileStache
logging.basicConfig(level=logging.INFO if '-v' in sys.argv else logging.WARNING)
import httplib2
def url_fetcher(url):
    """Fetch *url* via HTTP using an httplib2 on-disk cache stored next to this script.

    Raises foldbeam.renderer.URLFetchError for any non-200 response.
    """
    logging.info('Fetching URL: {0}'.format(url))
    cache_dir = os.path.join(os.path.dirname(__file__), 'httpcache')
    client = httplib2.Http(cache_dir)
    response, body = client.request(url, 'GET')
    if response.status != 200:
        raise foldbeam.renderer.URLFetchError(str(response.status) + ' ' + response.reason)
    return body
cache_path = os.path.join(os.path.dirname(__file__), 'cache')
config = TileStache.Config.buildConfiguration({
'cache': { 'name': 'Disk', 'path': cache_path } if '--cache' in sys.argv else { 'name': 'Test' },
'layers': {
'test': {
'provider': {
'class': 'foldbeam.goodies.tilestache:TileStacheProvider',
'kwargs': { },
},
# 'projection': 'WGS84',
},
},
})
#renderer = foldbeam.renderer.TileFetcher(url_fetcher=url_fetcher)
renderer = osm_map_renderer(url_fetcher=url_fetcher, use_tile_fetcher=False)
config.layers['test'].provider.renderer = renderer
tile_app = TileStache.WSGITileServer(config)
# Read the index page once at startup; a context manager ensures the file handle
# is closed (the original open(...).read() leaked the handle).
with open(os.path.join(os.path.dirname(__file__), 'leaflet-example.html')) as _index_file:
    index = _index_file.read()
def index_app(environ, start_response):
    """WSGI app that serves the static index page read at module load."""
    headers = [('Content-Type', 'text/html'), ('Content-Length', str(len(index)))]
    start_response('200 OK', headers)
    return [index]
# Route '/' to the static index page and everything else to the tile server.
app = selector.Selector()
app.parser = lambda x: x  # identity parser: patterns are used as-is (regex-style)
app.add('^/$', GET=index_app)
app.add('^/', GET=tile_app)
if __name__ == '__main__':
    #from wsgiutils import wsgiServer
    #wsgiServer.WSGIServer(('localhost', 8080), {'/': app}).serve_forever()
    from wsgiref import simple_server
    simple_server.make_server('localhost', 8080, app).serve_forever()
| rjw57/foldbeam | examples/tilestache_provider_server.py | Python | apache-2.0 | 2,145 |
# Distance transform of binary image
import cv2
import os
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import params
def distance_transform(bin_img, distance_type, mask_size):
    """Distance-transform a binary image and normalize the result to [0, 1].

    Every object pixel is assigned its distance to the nearest background pixel,
    then the whole image is min-max normalized.

    Inputs:
    bin_img       = Binary image data
    distance_type = Type of distance. It can be CV_DIST_L1, CV_DIST_L2 , or CV_DIST_C which are 1, 2 and 3,
                    respectively.
    mask_size     = Size of the distance transform mask. It can be 3, 5, or CV_DIST_MASK_PRECISE (the latter option
                    is only supported by the first function). In case of the CV_DIST_L1 or CV_DIST_C distance type,
                    the parameter is forced to 3 because a 3 by 3 mask gives the same result as 5 by 5 or any larger
                    aperture.

    Returns:
    norm_image   = grayscale distance-transformed image normalized between [0, 1]

    :param bin_img: numpy.ndarray
    :param distance_type: int
    :param mask_size: int
    :return norm_image: numpy.ndarray
    """
    transformed = cv2.distanceTransform(src=bin_img, distanceType=distance_type, maskSize=mask_size)
    # Normalize in place (dst is the same array) to a float32 image in [0, 1]
    norm_image = cv2.normalize(src=transformed, dst=transformed, alpha=0, beta=1,
                               norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    # Emit a debug image when plantcv debugging is enabled
    _debug(visual=norm_image,
           filename=os.path.join(params.debug_outdir, str(params.device) + '_distance_transform.png'),
           cmap='gray')
    return norm_image
| danforthcenter/plantcv | plantcv/plantcv/distance_transform.py | Python | mit | 1,551 |
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qsci import (QsciScintilla,
QsciScintillaBase,
QsciLexerPython,
QsciAPIs,
QsciStyle)
from qgis.core import QgsApplication
from qgis.gui import QgsMessageBar
import sys
import os
import subprocess
import datetime
import pyclbr
from operator import itemgetter
import traceback
import codecs
import re
class KeyFilter(QObject):
    """Qt event filter mapping keyboard shortcuts to editor-tab actions.

    NOTE: Python 2 / PyQt4 code (uses dict.iteritems and old-style signals).
    """
    # (modifier name(s), key letter) -> handler taking (tab widget, tab)
    SHORTCUTS = {
        ("Control", "T"): lambda w, t: w.newTabEditor(),
        ("Control", "M"): lambda w, t: t.save(),
        ("Control", "W"): lambda w, t: t.close()
    }
    def __init__(self, window, tab, *args):
        QObject.__init__(self, *args)
        self.window = window
        self.tab = tab
        # Qt key code -> list of (modifier mask, handler) pairs
        self._handlers = {}
        for shortcut, handler in KeyFilter.SHORTCUTS.iteritems():
            modifiers = shortcut[0]
            if not isinstance(modifiers, list):
                modifiers = [modifiers]
            # OR together the Qt modifier flags named in the shortcut
            qt_mod_code = Qt.NoModifier
            for each in modifiers:
                qt_mod_code |= getattr(Qt, each + "Modifier")
            qt_keycode = getattr(Qt, "Key_" + shortcut[1].upper())
            handlers = self._handlers.get(qt_keycode, [])
            handlers.append((qt_mod_code, handler))
            self._handlers[qt_keycode] = handlers
    def get_handler(self, key, modifier):
        # Shortcuts only apply when more than one tab is open
        if self.window.count() > 1:
            for modifiers, handler in self._handlers.get(key, []):
                if modifiers == modifier:
                    return handler
        return None
    def eventFilter(self, obj, event):
        # Only plain character keys (< 256) are candidates for shortcuts
        if event.type() == QEvent.KeyPress and event.key() < 256:
            handler = self.get_handler(event.key(), event.modifiers())
            if handler:
                handler(self.window, self.tab)
        return QObject.eventFilter(self, obj, event)
class Editor(QsciScintilla):
    """QScintilla-based Python code editor widget for the QGIS Python console.

    Configures fonts, margins, folding, indentation, autocompletion and the
    keyboard shortcuts for running/commenting/checking code.
    """
    # Marker id used to flag syntax-error lines in the margin
    MARKER_NUM = 6
    def __init__(self, parent=None):
        super(Editor,self).__init__(parent)
        self.parent = parent
        ## recent modification time
        self.lastModified = 0
        # Bracket/quote pairs used by the auto-close feature in keyPressEvent
        self.opening = ['(', '{', '[', "'", '"']
        self.closing = [')', '}', ']', "'", '"']
        ## List of marker line to be deleted from check syntax
        self.bufferMarkerLine = []
        self.settings = QSettings()
        # Enable non-ascii chars for editor
        self.setUtf8(True)
        # Set the default font
        font = QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(10)
        self.setFont(font)
        self.setMarginsFont(font)
        # Margin 0 is used for line numbers
        #fm = QFontMetrics(font)
        fontmetrics = QFontMetrics(font)
        self.setMarginsFont(font)
        self.setMarginWidth(0, fontmetrics.width("0000") + 5)
        self.setMarginLineNumbers(0, True)
        self.setMarginsForegroundColor(QColor("#3E3EE3"))
        self.setMarginsBackgroundColor(QColor("#f9f9f9"))
        self.setCaretLineVisible(True)
        self.setCaretWidth(2)
        # Pixmap marker shown on lines with syntax errors
        self.markerDefine(QgsApplication.getThemePixmap("console/iconSyntaxErrorConsole.png"),
                          self.MARKER_NUM)
        self.setMinimumHeight(120)
        #self.setMinimumWidth(300)
        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
        self.setMatchedBraceBackgroundColor(QColor("#b7f907"))
        # Folding
        self.setFolding(QsciScintilla.PlainFoldStyle)
        self.setFoldMarginColors(QColor("#f4f4f4"),QColor("#f4f4f4"))
        #self.setWrapMode(QsciScintilla.WrapWord)
        ## Edge Mode
        self.setEdgeMode(QsciScintilla.EdgeLine)
        self.setEdgeColumn(80)
        self.setEdgeColor(QColor("#FF0000"))
        #self.setWrapMode(QsciScintilla.WrapCharacter)
        self.setWhitespaceVisibility(QsciScintilla.WsVisibleAfterIndent)
        #self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.settingsEditor()
        # Annotations
        self.setAnnotationDisplay(QsciScintilla.ANNOTATION_BOXED)
        # Indentation
        self.setAutoIndent(True)
        self.setIndentationsUseTabs(False)
        self.setIndentationWidth(4)
        self.setTabIndents(True)
        self.setBackspaceUnindents(True)
        self.setTabWidth(4)
        self.setIndentationGuides(True)
        ## Disable command key
        ctrl, shift = self.SCMOD_CTRL<<16, self.SCMOD_SHIFT<<16
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L')+ ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T')+ ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D')+ ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L')+ ctrl+shift)
        ## New QShortcut = ctrl+space/ctrl+alt+space for Autocomplete
        self.newShortcutCS = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_Space), self)
        self.newShortcutCS.setContext(Qt.WidgetShortcut)
        self.redoScut = QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_Z), self)
        self.redoScut.setContext(Qt.WidgetShortcut)
        self.redoScut.activated.connect(self.redo)
        self.newShortcutCS.activated.connect(self.autoCompleteKeyBinding)
        self.runScut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_E), self)
        self.runScut.setContext(Qt.WidgetShortcut)
        self.runScut.activated.connect(self.runSelectedCode)
        self.runScriptScut = QShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_E), self)
        self.runScriptScut.setContext(Qt.WidgetShortcut)
        self.runScriptScut.activated.connect(self.runScriptCode)
        self.syntaxCheckScut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_4), self)
        self.syntaxCheckScut.setContext(Qt.WidgetShortcut)
        self.syntaxCheckScut.activated.connect(self.syntaxCheck)
        self.commentScut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_3), self)
        self.commentScut.setContext(Qt.WidgetShortcut)
        self.commentScut.activated.connect(self.parent.pc.commentCode)
        self.uncommentScut = QShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_3), self)
        self.uncommentScut.setContext(Qt.WidgetShortcut)
        self.uncommentScut.activated.connect(self.parent.pc.uncommentCode)
        self.modificationChanged.connect(self.parent.modified)
        self.modificationAttempted.connect(self.fileReadOnly)
    def settingsEditor(self):
        """Apply user settings: lexer, autocompletion source/threshold, caret colors."""
        # Set Python lexer
        self.setLexers()
        threshold = self.settings.value("pythonConsole/autoCompThresholdEditor", 2, type=int)
        radioButtonSource = self.settings.value("pythonConsole/autoCompleteSourceEditor", 'fromAPI')
        autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabledEditor", True, type=bool)
        self.setAutoCompletionThreshold(threshold)
        if autoCompEnabled:
            if radioButtonSource == 'fromDoc':
                self.setAutoCompletionSource(self.AcsDocument)
            elif radioButtonSource == 'fromAPI':
                self.setAutoCompletionSource(self.AcsAPIs)
            elif radioButtonSource == 'fromDocAPI':
                self.setAutoCompletionSource(self.AcsAll)
        else:
            self.setAutoCompletionSource(self.AcsNone)
        caretLineColorEditor = self.settings.value("pythonConsole/caretLineColorEditor", QColor("#fcf3ed"))
        cursorColorEditor = self.settings.value("pythonConsole/cursorColorEditor", QColor(Qt.black))
        self.setCaretLineBackgroundColor(caretLineColorEditor)
        self.setCaretForegroundColor(cursorColorEditor)
def autoCompleteKeyBinding(self):
radioButtonSource = self.settings.value("pythonConsole/autoCompleteSourceEditor", 'fromAPI')
autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabledEditor", True, type=bool)
if autoCompEnabled:
if radioButtonSource == 'fromDoc':
self.autoCompleteFromDocument()
elif radioButtonSource == 'fromAPI':
self.autoCompleteFromAPIs()
elif radioButtonSource == 'fromDocAPI':
self.autoCompleteFromAll()
    def setLexers(self):
        """Build and install the Python lexer with user-configured fonts/colors,
        and attach the autocompletion API database (prepared .pap or user files)."""
        from qgis.core import QgsApplication
        self.lexer = QsciLexerPython()
        self.lexer.setIndentationWarning(QsciLexerPython.Inconsistent)
        self.lexer.setFoldComments(True)
        self.lexer.setFoldQuotes(True)
        loadFont = self.settings.value("pythonConsole/fontfamilytextEditor", "Monospace")
        fontSize = self.settings.value("pythonConsole/fontsizeEditor", 10, type=int)
        font = QFont(loadFont)
        font.setFixedPitch(True)
        font.setPointSize(fontSize)
        font.setStyleHint(QFont.TypeWriter)
        font.setStretch(QFont.SemiCondensed)
        font.setLetterSpacing(QFont.PercentageSpacing, 87.0)
        font.setBold(False)
        self.lexer.setDefaultFont(font)
        # Per-style colors; the numeric second argument is the QsciLexerPython style id
        # (1=comment, 3/4=strings, 5=keyword, 8=class name, 9=function name, etc.)
        self.lexer.setDefaultColor(QColor(self.settings.value("pythonConsole/defaultFontColorEditor", QColor(Qt.black))))
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentFontColorEditor", QColor(Qt.gray))), 1)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/keywordFontColorEditor", QColor(Qt.darkGreen))), 5)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/classFontColorEditor", QColor(Qt.blue))), 8)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/methodFontColorEditor", QColor(Qt.darkGray))), 9)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/decorFontColorEditor", QColor(Qt.darkBlue))), 15)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentBlockFontColorEditor", QColor(Qt.gray))), 12)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/singleQuoteFontColorEditor", QColor(Qt.blue))), 4)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/doubleQuoteFontColorEditor", QColor(Qt.blue))), 3)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleSingleQuoteFontColorEditor", QColor(Qt.blue))), 6)
        self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleDoubleQuoteFontColorEditor", QColor(Qt.blue))), 7)
        self.lexer.setFont(font, 1)
        self.lexer.setFont(font, 3)
        self.lexer.setFont(font, 4)
        for style in range(0, 33):
            paperColor = QColor(self.settings.value("pythonConsole/paperBackgroundColorEditor", QColor(Qt.white)))
            self.lexer.setPaper(paperColor, style)
        # Autocompletion API: prepared QGIS .pap file, a user-prepared file, or raw .api files
        self.api = QsciAPIs(self.lexer)
        chekBoxAPI = self.settings.value("pythonConsole/preloadAPI", True, type=bool)
        chekBoxPreparedAPI = self.settings.value("pythonConsole/usePreparedAPIFile", False, type=bool)
        if chekBoxAPI:
            pap = os.path.join(QgsApplication.pkgDataPath(), "python", "qsci_apis", "pyqgis.pap")
            self.api.loadPrepared(pap)
        elif chekBoxPreparedAPI:
            self.api.loadPrepared(self.settings.value("pythonConsole/preparedAPIFile"))
        else:
            apiPath = self.settings.value("pythonConsole/userAPI", [])
            for i in range(0, len(apiPath)):
                self.api.load(unicode(apiPath[i]))
            self.api.prepare()
            self.lexer.setAPIs(self.api)
        self.setLexer(self.lexer)
def move_cursor_to_end(self):
"""Move cursor to end of text"""
line, index = self.get_end_pos()
self.setCursorPosition(line, index)
self.ensureCursorVisible()
self.ensureLineVisible(line)
def get_end_pos(self):
"""Return (line, index) position of the last character"""
line = self.lines() - 1
return (line, len(self.text(line)))
    def contextMenuEvent(self, e):
        """Build and show the editor's right-click menu.

        Actions are created disabled, then enabled based on current state
        (selection present, text non-empty, undo/redo available, clipboard).
        """
        menu = QMenu(self)
        iconRun = QgsApplication.getThemeIcon("console/iconRunConsole.png")
        iconRunScript = QgsApplication.getThemeIcon("console/iconRunScriptConsole.png")
        iconCodePad = QgsApplication.getThemeIcon("console/iconCodepadConsole.png")
        iconCommentEditor = QgsApplication.getThemeIcon("console/iconCommentEditorConsole.png")
        iconUncommentEditor = QgsApplication.getThemeIcon("console/iconUncommentEditorConsole.png")
        iconSettings = QgsApplication.getThemeIcon("console/iconSettingsConsole.png")
        iconFind = QgsApplication.getThemeIcon("console/iconSearchEditorConsole.png")
        iconSyntaxCk = QgsApplication.getThemeIcon("console/iconSyntaxErrorConsole.png")
        iconObjInsp = QgsApplication.getThemeIcon("console/iconClassBrowserConsole.png")
        iconCut = QgsApplication.getThemeIcon("console/iconCutEditorConsole.png")
        iconCopy = QgsApplication.getThemeIcon("console/iconCopyEditorConsole.png")
        iconPaste = QgsApplication.getThemeIcon("console/iconPasteEditorConsole.png")
        hideEditorAction = menu.addAction(
            QCoreApplication.translate("PythonConsole", "Hide Editor"),
            self.hideEditor)
        menu.addSeparator()  # ------------------------------
        syntaxCheck = menu.addAction(iconSyntaxCk,
                                     QCoreApplication.translate("PythonConsole", "Check Syntax"),
                                     self.syntaxCheck, 'Ctrl+4')
        menu.addSeparator()
        runSelected = menu.addAction(iconRun,
                                     QCoreApplication.translate("PythonConsole", "Run selected"),
                                     self.runSelectedCode, 'Ctrl+E')
        runScript = menu.addAction(iconRunScript,
                                   QCoreApplication.translate("PythonConsole", "Run Script"),
                                   self.runScriptCode, 'Shift+Ctrl+E')
        menu.addSeparator()
        undoAction = menu.addAction(
            QCoreApplication.translate("PythonConsole", "Undo"),
            self.undo, QKeySequence.Undo)
        redoAction = menu.addAction(
            QCoreApplication.translate("PythonConsole", "Redo"),
            self.redo, 'Ctrl+Shift+Z')
        menu.addSeparator()
        findAction = menu.addAction(iconFind,
                                    QCoreApplication.translate("PythonConsole", "Find Text"),
                                    self.showFindWidget)
        menu.addSeparator()
        cutAction = menu.addAction(iconCut,
                                   QCoreApplication.translate("PythonConsole", "Cut"),
                                   self.cut, QKeySequence.Cut)
        copyAction = menu.addAction(iconCopy,
                                    QCoreApplication.translate("PythonConsole", "Copy"),
                                    self.copy, QKeySequence.Copy)
        pasteAction = menu.addAction(iconPaste,
                                     QCoreApplication.translate("PythonConsole", "Paste"),
                                     self.paste, QKeySequence.Paste)
        menu.addSeparator()
        commentCodeAction = menu.addAction(iconCommentEditor,
                                           QCoreApplication.translate("PythonConsole", "Comment"),
                                           self.parent.pc.commentCode, 'Ctrl+3')
        uncommentCodeAction = menu.addAction(iconUncommentEditor,
                                             QCoreApplication.translate("PythonConsole", "Uncomment"),
                                             self.parent.pc.uncommentCode, 'Shift+Ctrl+3')
        menu.addSeparator()
        codePadAction = menu.addAction(iconCodePad,
                                       QCoreApplication.translate("PythonConsole", "Share on codepad"),
                                       self.codepad)
        menu.addSeparator()
        showCodeInspection = menu.addAction(iconObjInsp,
                                            QCoreApplication.translate("PythonConsole", "Hide/Show Object Inspector"),
                                            self.objectListEditor)
        menu.addSeparator()
        selectAllAction = menu.addAction(
            QCoreApplication.translate("PythonConsole", "Select All"),
            self.selectAll, QKeySequence.SelectAll)
        menu.addSeparator()
        settingsDialog = menu.addAction(iconSettings,
                                        QCoreApplication.translate("PythonConsole", "Settings"),
                                        self.parent.pc.openSettings)
        # Start with the state-dependent actions disabled, enable below as applicable
        syntaxCheck.setEnabled(False)
        pasteAction.setEnabled(False)
        codePadAction.setEnabled(False)
        cutAction.setEnabled(False)
        runSelected.setEnabled(False)
        copyAction.setEnabled(False)
        selectAllAction.setEnabled(False)
        undoAction.setEnabled(False)
        redoAction.setEnabled(False)
        showCodeInspection.setEnabled(False)
        if self.hasSelectedText():
            runSelected.setEnabled(True)
            copyAction.setEnabled(True)
            cutAction.setEnabled(True)
            codePadAction.setEnabled(True)
        if not self.text() == '':
            selectAllAction.setEnabled(True)
            syntaxCheck.setEnabled(True)
        if self.isUndoAvailable():
            undoAction.setEnabled(True)
        if self.isRedoAvailable():
            redoAction.setEnabled(True)
        if QApplication.clipboard().text():
            pasteAction.setEnabled(True)
        if self.settings.value("pythonConsole/enableObjectInsp",
                               False, type=bool):
            showCodeInspection.setEnabled(True)
        action = menu.exec_(self.mapToGlobal(e.pos()))
    def findText(self, forward):
        """Search for the find-bar text, flagging the line edit red when not found.

        :param forward: search direction (True = forward from the cursor)
        """
        lineFrom, indexFrom, lineTo, indexTo = self.getSelection()
        line, index = self.getCursorPosition()
        text = self.parent.pc.lineEditFind.text()
        # NOTE(review): local 're' (regex-search flag, always False) shadows the
        # module-level 're' import within this method
        re = False
        wrap = self.parent.pc.wrapAround.isChecked()
        cs = self.parent.pc.caseSensitive.isChecked()
        wo = self.parent.pc.wholeWord.isChecked()
        notFound = False
        if text:
            if not forward:
                # Search backwards from the start of the current selection
                line = lineFrom
                index = indexFrom
            ## findFirst(QString(), re bool, cs bool, wo bool, wrap, bool, forward=True)
            ## re = Regular Expression, cs = Case Sensitive, wo = Whole Word, wrap = Wrap Around
            if not self.findFirst(text, re, cs, wo, wrap, forward, line, index):
                notFound = True
            if notFound:
                styleError = 'QLineEdit {background-color: #d65253; \
                            color: #ffffff;}'
                msgText = QCoreApplication.translate('PythonConsole',
                                                     '<b>"{0}"</b> was not found.').format(text)
                self.parent.pc.callWidgetMessageBarEditor(msgText, 0, True)
            else:
                styleError = ''
            self.parent.pc.lineEditFind.setStyleSheet(styleError)
def objectListEditor(self):
listObj = self.parent.pc.listClassMethod
if listObj.isVisible():
listObj.hide()
self.parent.pc.objectListButton.setChecked(False)
else:
listObj.show()
self.parent.pc.objectListButton.setChecked(True)
    def codepad(self):
        """Post the selected text to codepad.org and copy the resulting URL to the clipboard.

        NOTE: Python 2 code (urllib2, `except ..., e` syntax); scrapes the returned
        HTML for the share link.
        """
        import urllib2, urllib
        listText = self.selectedText().split('\n')
        getCmd = []
        for strLine in listText:
            getCmd.append(unicode(strLine))
        pasteText= u"\n".join(getCmd)
        url = 'http://codepad.org'
        values = {'lang' : 'Python',
                  'code' : pasteText,
                  'submit':'Submit'}
        try:
            response = urllib2.urlopen(url, urllib.urlencode(values))
            url = response.read()
            # Scrape the "Link:" anchor out of the response HTML
            for href in url.split("</a>"):
                if "Link:" in href:
                    ind=href.index('Link:')
                    found = href[ind+5:]
                    for i in found.split('">'):
                        if '<a href=' in i:
                            link = i.replace('<a href="',"").strip()
            if link:
                QApplication.clipboard().setText(link)
                msgText = QCoreApplication.translate('PythonConsole', 'URL copied to clipboard.')
                self.parent.pc.callWidgetMessageBarEditor(msgText, 0, True)
        except urllib2.URLError, e:
            msgText = QCoreApplication.translate('PythonConsole', 'Connection error: ')
            self.parent.pc.callWidgetMessageBarEditor(msgText + str(e.args), 0, True)
def hideEditor(self):
self.parent.pc.splitterObj.hide()
self.parent.pc.showEditorButton.setChecked(False)
def showFindWidget(self):
wF = self.parent.pc.widgetFind
if wF.isVisible():
wF.hide()
self.parent.pc.findTextButton.setChecked(False)
else:
wF.show()
self.parent.pc.findTextButton.setChecked(True)
    def commentEditorCode(self, commentCheck):
        """Comment or uncomment the selected lines (or the current line).

        :param commentCheck: True to prepend '#', False to strip a leading '#'
        """
        self.beginUndoAction()
        if self.hasSelectedText():
            startLine, _, endLine, _ = self.getSelection()
            for line in range(startLine, endLine + 1):
                if commentCheck:
                    self.insertAt('#', line, 0)
                else:
                    if not self.text(line).strip().startswith('#'):
                        continue
                    # Remove the '#' found right after the indentation
                    self.setSelection(line, self.indentation(line),
                                      line, self.indentation(line) + 1)
                    self.removeSelectedText()
        else:
            line, pos = self.getCursorPosition()
            if commentCheck:
                self.insertAt('#', line, 0)
            else:
                if not self.text(line).strip().startswith('#'):
                    return
                self.setSelection(line, self.indentation(line),
                                  line, self.indentation(line) + 1)
                self.removeSelectedText()
        self.endUndoAction()
def createTempFile(self):
import tempfile
fd, path = tempfile.mkstemp()
tmpFileName = path + '.py'
with codecs.open(path, "w", encoding='utf-8') as f:
f.write(self.text())
os.close(fd)
os.rename(path, tmpFileName)
return tmpFileName
    def _runSubProcess(self, filename, tmp=False):
        """Run *filename* with an external 'python' interpreter and report the result.

        :param filename: path of the script to execute
        :param tmp: True when the file is a temporary copy (it is deleted afterwards)
        """
        dir = QFileInfo(filename).path()
        file = QFileInfo(filename).fileName()
        name = QFileInfo(filename).baseName()
        if dir not in sys.path:
            sys.path.append(dir)
        if name in sys.modules:
            reload(sys.modules[name])
        try:
            ## set creationflags for running command without shell window
            if sys.platform.startswith('win'):
                p = subprocess.Popen(['python', unicode(filename)], shell=False, stdin=subprocess.PIPE,
                                     stderr=subprocess.PIPE, stdout=subprocess.PIPE, creationflags=0x08000000)
            else:
                p = subprocess.Popen(['python', unicode(filename)], shell=False, stdin=subprocess.PIPE,
                                     stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            out, _traceback = p.communicate()
            ## Fix interrupted system call on OSX
            if sys.platform == 'darwin':
                status = None
                while status is None:
                    try:
                        status = p.wait()
                    except OSError, e:
                        if e.errno == 4:
                            pass
                        else:
                            raise e
            if tmp:
                tmpFileTr = QCoreApplication.translate('PythonConsole', ' [Temporary file saved in {0}]').format(dir)
                file = file + tmpFileTr
            # Report either the captured stderr (script error) or stdout (success)
            if _traceback:
                msgTraceTr = QCoreApplication.translate('PythonConsole', '## Script error: {0}').format(file)
                print "## %s" % datetime.datetime.now()
                print unicode(msgTraceTr)
                sys.stderr.write(_traceback)
                p.stderr.close()
            else:
                msgSuccessTr = QCoreApplication.translate('PythonConsole',
                                                          '## Script executed successfully: {0}').format(file)
                print "## %s" % datetime.datetime.now()
                print unicode(msgSuccessTr)
                sys.stdout.write(out)
                p.stdout.close()
            del p
            if tmp:
                os.remove(filename)
        except IOError, error:
            IOErrorTr = QCoreApplication.translate('PythonConsole',
                                                   'Cannot execute file {0}. Error: {1}\n').format(filename,
                                                                                                   error.strerror)
            print '## Error: ' + IOErrorTr
        except:
            s = traceback.format_exc()
            print '## Error: '
            sys.stderr.write(s)
    def runScriptCode(self):
        """Run the current tab's script in the console shell, after a syntax check.

        Respects the auto-save setting: saves the file first, or runs an
        unsaved buffer through a temporary file.
        """
        autoSave = self.settings.value("pythonConsole/autoSaveScript", False, type=bool)
        tabWidget = self.parent.tw.currentWidget()
        filename = tabWidget.path
        msgEditorBlank = QCoreApplication.translate('PythonConsole',
                                                    'Hey, type something to run!')
        msgEditorUnsaved = QCoreApplication.translate('PythonConsole',
                                                      'You have to save the file before running it.')
        if filename is None:
            if not self.isModified():
                self.parent.pc.callWidgetMessageBarEditor(msgEditorBlank, 0, True)
                return
        if self.isModified() and not autoSave:
            self.parent.pc.callWidgetMessageBarEditor(msgEditorUnsaved, 0, True)
            return
        if self.syntaxCheck(fromContextMenu=False):
            if autoSave and filename:
                self.parent.save(filename)
            if autoSave and not filename:
                # Create a new temp file if the file isn't already saved.
                tmpFile = self.createTempFile()
                filename = tmpFile
            self.parent.pc.shell.runCommand(u"execfile(u'{0}'.encode('{1}'))"
                                            .format(filename.replace("\\", "/"), sys.getfilesystemencoding()))
def runSelectedCode(self):
cmd = self.selectedText()
self.parent.pc.shell.insertFromDropPaste(cmd)
self.parent.pc.shell.entered()
self.setFocus()
def getTextFromEditor(self):
text = self.text()
textList = text.split("\n")
return textList
    def goToLine(self, objName, linenr):
        """Jump to 1-based line *linenr* and select the first occurrence of *objName* there.

        Uses raw Scintilla messages to search from the line start to end of text.
        """
        self.SendScintilla(QsciScintilla.SCI_GOTOLINE, linenr-1)
        self.SendScintilla(QsciScintilla.SCI_SETTARGETSTART,
                           self.SendScintilla(QsciScintilla.SCI_GETCURRENTPOS))
        self.SendScintilla(QsciScintilla.SCI_SETTARGETEND, len(self.text()))
        pos = self.SendScintilla(QsciScintilla.SCI_SEARCHINTARGET, len(objName), objName)
        # Column of the match relative to the line start
        index = pos - self.SendScintilla(QsciScintilla.SCI_GETCURRENTPOS)
        #line, _ = self.getCursorPosition()
        self.setSelection(linenr - 1, index, linenr - 1, index + len(objName))
        self.ensureLineVisible(linenr)
        self.setFocus()
    def syntaxCheck(self, filename=None, fromContextMenu=True):
        """Compile the buffer to check Python syntax; mark and annotate the error line.

        :return: True when the buffer compiles, False when a SyntaxError was found
        """
        eline = None
        ecolumn = 0
        edescr = ''
        source = unicode(self.text())
        try:
            if not filename:
                filename = self.parent.tw.currentWidget().path
            #source = open(filename, 'r').read() + '\n'
            if type(source) == type(u""):
                source = source.encode('utf-8')
            if type(filename) == type(u""):
                filename = filename.encode('utf-8')
            compile(source, str(filename), 'exec')
        except SyntaxError, detail:
            s = traceback.format_exception_only(SyntaxError, detail)
            fn = detail.filename
            eline = detail.lineno and detail.lineno or 1
            ecolumn = detail.offset and detail.offset or 1
            edescr = detail.msg
        if eline != None:
            # Convert to 0-based, clear stale markers, then mark + annotate the error line
            eline -= 1
            for markerLine in self.bufferMarkerLine:
                self.markerDelete(markerLine)
                self.clearAnnotations(markerLine)
                self.bufferMarkerLine.remove(markerLine)
            if (eline) not in self.bufferMarkerLine:
                self.bufferMarkerLine.append(eline)
            self.markerAdd(eline, self.MARKER_NUM)
            loadFont = self.settings.value("pythonConsole/fontfamilytextEditor",
                                           "Monospace")
            styleAnn = QsciStyle(-1,"Annotation",
                                 QColor(255,0,0),
                                 QColor(255,200,0),
                                 QFont(loadFont, 8,-1,True),
                                 True)
            self.annotate(eline, edescr, styleAnn)
            self.setCursorPosition(eline, ecolumn-1)
            #self.setSelection(eline, ecolumn, eline, self.lineLength(eline)-1)
            self.ensureLineVisible(eline)
            #self.ensureCursorVisible()
            return False
        else:
            self.markerDeleteAll()
            self.clearAnnotations()
            return True
    def keyPressEvent(self, e):
        """Handle typing helpers: bracket/quote auto-close, redundant-close-char
        removal, and 'from X <space>' -> 'from X import' auto-insertion."""
        t = unicode(e.text())
        startLine, _, endLine, endPos = self.getSelection()
        line, pos = self.getCursorPosition()
        self.autoCloseBracket = self.settings.value("pythonConsole/autoCloseBracketEditor", False, type=bool)
        self.autoImport = self.settings.value("pythonConsole/autoInsertionImportEditor", True, type=bool)
        txt = self.text(line)[:pos]
        ## Close bracket automatically
        if t in self.opening and self.autoCloseBracket:
            self.beginUndoAction()
            i = self.opening.index(t)
            if self.hasSelectedText():
                # Wrap the selection in the typed pair (triple quotes across lines)
                selText = self.selectedText()
                self.removeSelectedText()
                if startLine == endLine:
                    self.insert(self.opening[i] + selText + self.closing[i])
                    self.setCursorPosition(endLine, endPos+2)
                    self.endUndoAction()
                    return
                elif startLine < endLine and self.opening[i] in ("'", '"'):
                    self.insert("'''" + selText + "'''")
                    self.setCursorPosition(endLine, endPos+3)
                    self.endUndoAction()
                    return
            elif t == '(' and (re.match(r'^[ \t]*def \w+$', txt) \
                               or re.match(r'^[ \t]*class \w+$', txt)):
                # After 'def name' / 'class name', complete the signature stub
                self.insert('):')
            else:
                self.insert(self.closing[i])
            self.endUndoAction()
        ## FIXES #8392 (automatically removes the redundant char
        ## when autoclosing brackets option is enabled)
        elif t in [')', ']', '}'] and self.autoCloseBracket:
            txt = self.text(line)
            try:
                if txt[pos-1] in self.opening and t == txt[pos]:
                    self.setCursorPosition(line, pos+1)
                    self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
            except IndexError:
                pass
        elif t == ' ' and self.autoImport:
            ptrn = r'^[ \t]*from [\w.]+$'
            if re.match(ptrn, txt):
                self.insert(' import')
                self.setCursorPosition(line, pos + 7)
        QsciScintilla.keyPressEvent(self, e)
    def focusInEvent(self, e):
        """On focus, detect external changes to the backing file: warn if deleted,
        reload the buffer if its modification time changed on disk."""
        pathfile = self.parent.path
        if pathfile:
            if not QFileInfo(pathfile).exists():
                msgText = QCoreApplication.translate('PythonConsole',
                                                     'The file <b>"{0}"</b> has been deleted or is not accessible').format(pathfile)
                self.parent.pc.callWidgetMessageBarEditor(msgText, 2, False)
                return
        if pathfile and self.lastModified != QFileInfo(pathfile).lastModified():
            self.beginUndoAction()
            self.selectAll()
            #fileReplaced = self.selectedText()
            self.removeSelectedText()
            # NOTE(review): file is opened without an explicit encoding here,
            # unlike loadFile() which uses codecs/UTF-8 — confirm intended
            file = open(pathfile, "r")
            fileLines = file.readlines()
            file.close()
            QApplication.setOverrideCursor(Qt.WaitCursor)
            # Inserting at the same position in reverse order reproduces the
            # original line order
            for line in reversed(fileLines):
                self.insert(line)
            QApplication.restoreOverrideCursor()
            self.setModified(False)
            self.endUndoAction()
            self.parent.tw.listObject(self.parent.tw.currentWidget())
            self.lastModified = QFileInfo(pathfile).lastModified()
            msgText = QCoreApplication.translate('PythonConsole',
                                                 'The file <b>"{0}"</b> has been changed and reloaded').format(pathfile)
            self.parent.pc.callWidgetMessageBarEditor(msgText, 1, False)
        QsciScintilla.focusInEvent(self, e)
def fileReadOnly(self):
tabWidget = self.parent.tw.currentWidget()
msgText = QCoreApplication.translate('PythonConsole',
'The file <b>"{0}"</b> is read only, please save to different file first.').format(tabWidget.path)
self.parent.pc.callWidgetMessageBarEditor(msgText, 1, False)
class EditorTab(QWidget):
    """A single console-editor tab: an Editor widget plus a message bar.

    Wires the embedded editor into the surrounding tab widget (``parent``)
    and the Python console (``parentConsole``), and handles loading and
    saving of the file associated with this tab.
    """

    def __init__(self, parent, parentConsole, filename, readOnly):
        super(EditorTab, self).__init__(parent)
        self.tw = parent              # owning EditorTabWidget
        self.pc = parentConsole       # PythonConsole widget
        self.path = None              # file backing this tab, if any
        self.readOnly = readOnly
        # Fix: the original assigned this attribute twice in a row
        # ({} immediately overwritten by dict()); one init is enough.
        self.fileExcuteList = {}
        self.newEditor = Editor(self)
        if filename:
            self.path = filename
            if QFileInfo(filename).exists():
                self.loadFile(filename, False)
        # Creates layout for message bar
        self.layout = QGridLayout(self.newEditor)
        self.layout.setContentsMargins(0, 0, 0, 0)
        spacerItem = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.layout.addItem(spacerItem, 1, 0, 1, 1)
        # messageBar instance
        self.infoBar = QgsMessageBar()
        sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        self.infoBar.setSizePolicy(sizePolicy)
        self.layout.addWidget(self.infoBar, 0, 0, 1, 1)
        self.tabLayout = QGridLayout(self)
        self.tabLayout.setContentsMargins(0, 0, 0, 0)
        self.tabLayout.addWidget(self.newEditor)
        self.keyFilter = KeyFilter(parent, self)
        self.setEventFilter(self.keyFilter)

    def loadFile(self, filename, modified):
        """Load *filename* (UTF-8) into the editor and set its modified flag."""
        self.newEditor.lastModified = QFileInfo(filename).lastModified()
        fn = codecs.open(unicode(filename), "rb", encoding='utf-8')
        txt = fn.read()
        fn.close()
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        self.newEditor.setText(txt)
        if self.readOnly:
            self.newEditor.setReadOnly(self.readOnly)
        QApplication.restoreOverrideCursor()
        self.newEditor.setModified(modified)
        self.newEditor.recolor()

    def save(self, fileName=None):
        """Save the editor contents, prompting for a path when none is known.

        Overwriting is done defensively: the existing file is renamed to
        ``<path>~`` first and only removed after the new contents have
        been written.
        """
        index = self.tw.indexOf(self)
        if fileName:
            self.path = fileName
        if self.path is None:
            saveTr = QCoreApplication.translate('PythonConsole',
                                                'Python Console: Save file')
            self.path = str(QFileDialog().getSaveFileName(self,
                                                          saveTr,
                                                          self.tw.tabText(index) + '.py',
                                                          "Script file (*.py)"))
            # If the user didn't select a file, abort the save operation
            if len(self.path) == 0:
                self.path = None
                return
        self.tw.setCurrentWidget(self)
        msgText = QCoreApplication.translate('PythonConsole',
                                             'Script was correctly saved.')
        self.pc.callWidgetMessageBarEditor(msgText, 0, True)
        # Rename the original file, if it exists
        path = unicode(self.path)
        overwrite = QFileInfo(path).exists()
        if overwrite:
            try:
                permis = os.stat(path).st_mode
                #self.newEditor.lastModified = QFileInfo(path).lastModified()
                os.chmod(path, permis)
            except:
                # NOTE(review): bare re-raise keeps the original behaviour;
                # consider reporting the failure to the message bar instead.
                raise
            temp_path = path + "~"
            if QFileInfo(temp_path).exists():
                os.remove(temp_path)
            os.rename(path, temp_path)
        # Save the new contents
        with codecs.open(path, "w", encoding='utf-8') as f:
            f.write(self.newEditor.text())
        if overwrite:
            os.remove(temp_path)
        if self.newEditor.isReadOnly():
            self.newEditor.setReadOnly(False)
        # assumes '/'-separated paths (Qt convention) — TODO confirm on Windows
        fN = path.split('/')[-1]
        self.tw.setTabTitle(index, fN)
        self.tw.setTabToolTip(index, path)
        self.newEditor.setModified(False)
        self.pc.saveFileButton.setEnabled(False)
        self.newEditor.lastModified = QFileInfo(path).lastModified()
        self.pc.updateTabListScript(path, action='append')
        self.tw.listObject(self)

    def modified(self, modified):
        """Propagate the editor's modified state to the tab widget."""
        self.tw.tabModified(self, modified)

    def close(self):
        """Close this tab (prompting to save pending changes if needed)."""
        self.tw._removeTab(self, tab2index=True)

    def setEventFilter(self, filter):
        """Install *filter* (a KeyFilter) on the embedded editor."""
        self.newEditor.installEventFilter(filter)

    def newTab(self):
        """Open a fresh, empty editor tab."""
        self.tw.newTabEditor()
class EditorTabWidget(QTabWidget):
    """Tab container for the Python Console editor tabs.

    Responsibilities: creating/closing/restoring editor tabs, the
    "restore last session" banner frame, the corner buttons (tab-list
    menu, new tab) and feeding the console's object inspector with the
    classes/functions of the current script.
    """

    def __init__(self, parent):
        QTabWidget.__init__(self, parent=None)
        self.parent = parent
        # Index of the tab the context menu was last opened on.
        self.idx = -1
        # Layout for top frame (restore tabs)
        self.layoutTopFrame = QGridLayout(self)
        self.layoutTopFrame.setContentsMargins(0, 0, 0, 0)
        spacerItem = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.layoutTopFrame.addItem(spacerItem, 1, 0, 1, 1)
        self.topFrame = QFrame(self)
        self.topFrame.setStyleSheet('background-color: rgb(255, 255, 230);')
        self.topFrame.setFrameShape(QFrame.StyledPanel)
        self.topFrame.setMinimumHeight(24)
        self.layoutTopFrame2 = QGridLayout(self.topFrame)
        self.layoutTopFrame2.setContentsMargins(0, 0, 0, 0)
        label = QCoreApplication.translate("PythonConsole",
                                           "Click on button to restore all tabs from last session.")
        self.label = QLabel(label)
        self.restoreTabsButton = QToolButton()
        toolTipRestore = QCoreApplication.translate("PythonConsole",
                                                    "Restore tabs")
        self.restoreTabsButton.setToolTip(toolTipRestore)
        self.restoreTabsButton.setIcon(QgsApplication.getThemeIcon("console/iconRestoreTabsConsole.png"))
        self.restoreTabsButton.setIconSize(QSize(24, 24))
        self.restoreTabsButton.setAutoRaise(True)
        self.restoreTabsButton.setCursor(Qt.PointingHandCursor)
        self.restoreTabsButton.setStyleSheet('QToolButton:hover{border: none } \
                                             QToolButton:pressed{border: none}')
        self.clButton = QToolButton()
        toolTipClose = QCoreApplication.translate("PythonConsole",
                                                  "Close")
        self.clButton.setToolTip(toolTipClose)
        self.clButton.setIcon(QgsApplication.getThemeIcon("mIconClose.png"))
        self.clButton.setIconSize(QSize(18, 18))
        self.clButton.setCursor(Qt.PointingHandCursor)
        self.clButton.setStyleSheet('QToolButton:hover{border: none } \
                                    QToolButton:pressed{border: none}')
        self.clButton.setAutoRaise(True)
        sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        self.topFrame.setSizePolicy(sizePolicy)
        self.layoutTopFrame.addWidget(self.topFrame, 0, 0, 1, 1)
        self.layoutTopFrame2.addWidget(self.label, 0, 1, 1, 1)
        self.layoutTopFrame2.addWidget(self.restoreTabsButton, 0, 0, 1, 1)
        self.layoutTopFrame2.addWidget(self.clButton, 0, 2, 1, 1)
        self.topFrame.hide()
        self.connect(self.restoreTabsButton, SIGNAL('clicked()'), self.restoreTabs)
        self.connect(self.clButton, SIGNAL('clicked()'), self.closeRestore)
        # Restore script of the previuos session
        self.settings = QSettings()
        tabScripts = self.settings.value("pythonConsole/tabScripts", [])
        self.restoreTabList = tabScripts
        if self.restoreTabList:
            # There are scripts to restore: show the banner instead of a tab.
            self.topFrame.show()
        else:
            self.newTabEditor(filename=None)
        ## Fixes #7653
        if sys.platform != 'darwin':
            self.setDocumentMode(True)
        self.setMovable(True)
        self.setTabsClosable(True)
        self.setTabPosition(QTabWidget.North)
        # Menu button list tabs
        self.fileTabMenu = QMenu()
        self.connect(self.fileTabMenu, SIGNAL("aboutToShow()"),
                     self.showFileTabMenu)
        self.connect(self.fileTabMenu, SIGNAL("triggered(QAction*)"),
                     self.showFileTabMenuTriggered)
        self.fileTabButton = QToolButton()
        txtToolTipMenuFile = QCoreApplication.translate("PythonConsole",
                                                        "List all tabs")
        self.fileTabButton.setToolTip(txtToolTipMenuFile)
        self.fileTabButton.setIcon(QgsApplication.getThemeIcon("console/iconFileTabsMenuConsole.png"))
        self.fileTabButton.setIconSize(QSize(24, 24))
        self.fileTabButton.setAutoRaise(True)
        self.fileTabButton.setPopupMode(QToolButton.InstantPopup)
        self.fileTabButton.setMenu(self.fileTabMenu)
        self.setCornerWidget(self.fileTabButton, Qt.TopRightCorner)
        self.connect(self, SIGNAL("tabCloseRequested(int)"), self._removeTab)
        self.connect(self, SIGNAL('currentChanged(int)'), self._currentWidgetChanged)
        # New Editor button
        self.newTabButton = QToolButton()
        txtToolTipNewTab = QCoreApplication.translate("PythonConsole",
                                                      "New Editor")
        self.newTabButton.setToolTip(txtToolTipNewTab)
        self.newTabButton.setAutoRaise(True)
        self.newTabButton.setIcon(QgsApplication.getThemeIcon("console/iconNewTabEditorConsole.png"))
        self.newTabButton.setIconSize(QSize(24, 24))
        self.setCornerWidget(self.newTabButton, Qt.TopLeftCorner)
        self.connect(self.newTabButton, SIGNAL('clicked()'), self.newTabEditor)

    def _currentWidgetChanged(self, tab):
        """React to a tab switch: refresh inspector, last-dir and save button."""
        if self.settings.value("pythonConsole/enableObjectInsp",
                               False, type=bool):
            self.listObject(tab)
        self.changeLastDirPath(tab)
        self.enableSaveIfModified(tab)

    def contextMenuEvent(self, e):
        """Build and show the per-tab context menu (new/close/save actions)."""
        tabBar = self.tabBar()
        self.idx = tabBar.tabAt(e.pos())
        if self.widget(self.idx):
            cW = self.widget(self.idx)
            menu = QMenu(self)
            menu.addSeparator()
            newTabAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "New Editor"),
                self.newTabEditor)
            menu.addSeparator()
            closeTabAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "Close Tab"),
                cW.close)
            closeAllTabAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "Close All"),
                self.closeAll)
            closeOthersTabAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "Close Others"),
                self.closeOthers)
            menu.addSeparator()
            saveAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "Save"),
                cW.save)
            saveAsAction = menu.addAction(
                QCoreApplication.translate("PythonConsole", "Save As"),
                self.saveAs)
            # Disable by default, re-enable depending on tab count / state.
            closeTabAction.setEnabled(False)
            closeAllTabAction.setEnabled(False)
            closeOthersTabAction.setEnabled(False)
            saveAction.setEnabled(False)
            if self.count() > 1:
                closeTabAction.setEnabled(True)
                closeAllTabAction.setEnabled(True)
                closeOthersTabAction.setEnabled(True)
            if self.widget(self.idx).newEditor.isModified():
                saveAction.setEnabled(True)
            action = menu.exec_(self.mapToGlobal(e.pos()))

    def closeOthers(self):
        """Close every tab except the one the context menu was opened on."""
        idx = self.idx
        countTab = self.count()
        # Python 2: range() returns lists, so '+' concatenates the two
        # descending index ranges (tabs after idx, then tabs before it).
        for i in range(countTab - 1, idx, -1) + range(idx - 1, -1, -1):
            self._removeTab(i)

    def closeAll(self):
        """Close every tab, leaving a single fresh 'Untitled-0' tab."""
        countTab = self.count()
        cI = self.currentIndex()
        for i in range(countTab - 1, 0, -1):
            self._removeTab(i)
        self.newTabEditor(tabName='Untitled-0')
        self._removeTab(0)

    def saveAs(self):
        """Delegate 'Save As' for the context-menu tab to the console."""
        idx = self.idx
        self.parent.saveAsScriptFile(idx)
        self.setCurrentWidget(self.widget(idx))

    def enableSaveIfModified(self, tab):
        """Enable the console save button only when *tab* has unsaved edits."""
        tabWidget = self.widget(tab)
        if tabWidget:
            self.parent.saveFileButton.setEnabled(tabWidget.newEditor.isModified())

    def enableToolBarEditor(self, enable):
        """Toggle the editor toolbar; forced off while the restore banner shows."""
        if self.topFrame.isVisible():
            enable = False
        self.parent.toolBarEditor.setEnabled(enable)

    def newTabEditor(self, tabName=None, filename=None):
        """Open a new editor tab, optionally loading *filename* into it."""
        readOnly = False
        if filename:
            readOnly = not QFileInfo(filename).isWritable()
            # Probe the file for readability before creating the tab.
            try:
                fn = codecs.open(unicode(filename), "rb", encoding='utf-8')
                txt = fn.read()
                fn.close()
            except IOError, error:
                IOErrorTr = QCoreApplication.translate('PythonConsole',
                                                       'The file {0} could not be opened. Error: {1}\n').format(filename,
                                                                                                               error.strerror)
                print '## Error: '
                sys.stderr.write(IOErrorTr)
                return
        nr = self.count()
        if not tabName:
            tabName = QCoreApplication.translate('PythonConsole', 'Untitled-{0}').format(nr)
        self.tab = EditorTab(self, self.parent, filename, readOnly)
        self.iconTab = QgsApplication.getThemeIcon('console/iconTabEditorConsole.png')
        self.addTab(self.tab, self.iconTab, tabName + ' (ro)' if readOnly else tabName)
        self.setCurrentWidget(self.tab)
        if filename:
            self.setTabToolTip(self.currentIndex(), unicode(filename))
        else:
            self.setTabToolTip(self.currentIndex(), tabName)

    def tabModified(self, tab, modified):
        """Colour the tab text to flag unsaved changes; sync the save button."""
        index = self.indexOf(tab)
        color = Qt.darkGray if modified else Qt.black
        self.tabBar().setTabTextColor(index, color)
        self.parent.saveFileButton.setEnabled(modified)

    def closeTab(self, tab):
        """Remove *tab*, always keeping at least one editor tab open."""
        if self.count() < 2:
            self.removeTab(self.indexOf(tab))
            self.newTabEditor()
        else:
            self.removeTab(self.indexOf(tab))
        self.currentWidget().setFocus(Qt.TabFocusReason)

    def setTabTitle(self, tab, title):
        """Set the text shown on tab index *tab*."""
        self.setTabText(tab, title)

    def _removeTab(self, tab, tab2index=False):
        """Remove a tab (by index, or by widget when *tab2index* is True),
        prompting to save unsaved changes first."""
        if tab2index:
            tab = self.indexOf(tab)
        tabWidget = self.widget(tab)
        if tabWidget.newEditor.isModified():
            txtSaveOnRemove = QCoreApplication.translate("PythonConsole",
                                                         "Python Console: Save File")
            txtMsgSaveOnRemove = QCoreApplication.translate("PythonConsole",
                                                            "The file <b>'{0}'</b> has been modified, save changes?").format(self.tabText(tab))
            res = QMessageBox.question( self, txtSaveOnRemove,
                                        txtMsgSaveOnRemove,
                                        QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel )
            if res == QMessageBox.Save:
                tabWidget.save()
            elif res == QMessageBox.Cancel:
                return
            if tabWidget.path:
                self.parent.updateTabListScript(tabWidget.path, action='remove')
            self.removeTab(tab)
            if self.count() < 1:
                self.newTabEditor()
        else:
            if tabWidget.path:
                self.parent.updateTabListScript(tabWidget.path, action='remove')
            if self.count() <= 1:
                self.removeTab(tab)
                self.newTabEditor()
            else:
                self.removeTab(tab)
        self.currentWidget().newEditor.setFocus(Qt.TabFocusReason)

    def buttonClosePressed(self):
        """Toolbar close button handler."""
        self.closeCurrentWidget()

    def closeCurrentWidget(self):
        """Close the active tab and drop it from the session restore list."""
        currWidget = self.currentWidget()
        if currWidget and currWidget.close():
            self.removeTab( self.currentIndex() )
            currWidget = self.currentWidget()
            if currWidget:
                currWidget.setFocus(Qt.TabFocusReason)
        if currWidget.path in self.restoreTabList:
            self.parent.updateTabListScript(currWidget.path, action='remove')

    def restoreTabs(self):
        """Re-open every script recorded from the previous session."""
        for script in self.restoreTabList:
            pathFile = unicode(script)
            if QFileInfo(pathFile).exists():
                tabName = pathFile.split('/')[-1]
                self.newTabEditor(tabName, pathFile)
            else:
                errOnRestore = QCoreApplication.translate("PythonConsole",
                                                          "Unable to restore the file: \n{0}\n").format(pathFile)
                print '## Error: '
                s = errOnRestore
                sys.stderr.write(s)
                # Missing file: drop it from the saved session list.
                self.parent.updateTabListScript(pathFile, action='remove')
        if self.count() < 1:
            self.newTabEditor(filename=None)
        self.topFrame.close()
        self.enableToolBarEditor(True)
        self.currentWidget().newEditor.setFocus(Qt.TabFocusReason)

    def closeRestore(self):
        """Dismiss the restore banner without restoring anything."""
        self.parent.updateTabListScript(None)
        self.topFrame.close()
        self.newTabEditor(filename=None)
        self.enableToolBarEditor(True)

    def showFileTabMenu(self):
        """Rebuild the tab-list menu each time it is about to show."""
        self.fileTabMenu.clear()
        for index in range(self.count()):
            action = self.fileTabMenu.addAction(self.tabIcon(index), self.tabText(index))
            action.setData(index)

    def showFileTabMenuTriggered(self, action):
        """Activate the tab whose menu entry was clicked."""
        index = action.data()
        if index is not None:
            self.setCurrentIndex(index)

    def listObject(self, tab):
        """Populate the object inspector with the classes/methods/functions
        of the script in *tab* (an index or an EditorTab), using pyclbr."""
        self.parent.listClassMethod.clear()
        if isinstance(tab, EditorTab):
            tabWidget = self.widget(self.indexOf(tab))
        else:
            tabWidget = self.widget(tab)
        if tabWidget:
            if tabWidget.path:
                pathFile, file = os.path.split(unicode(tabWidget.path))
                module, ext = os.path.splitext(file)
                found = False
                # Temporarily put the script's directory on sys.path so
                # pyclbr can import/inspect it; removed again afterwards.
                if pathFile not in sys.path:
                    sys.path.append(pathFile)
                    found = True
                try:
                    reload(pyclbr)
                    dictObject = {}
                    readModule = pyclbr.readmodule(module)
                    readModuleFunction = pyclbr.readmodule_ex(module)
                    for name, class_data in sorted(readModule.items(), key=lambda x:x[1].lineno):
                        # Only show objects defined in this file itself.
                        if os.path.normpath(str(class_data.file)) == os.path.normpath(str(tabWidget.path)):
                            superClassName = []
                            for superClass in class_data.super:
                                if superClass == 'object':
                                    continue
                                if isinstance(superClass, basestring):
                                    superClassName.append(superClass)
                                else:
                                    superClassName.append(superClass.name)
                            classItem = QTreeWidgetItem()
                            if superClassName:
                                super = ', '.join([i for i in superClassName])
                                classItem.setText(0, name + ' [' + super + ']')
                                classItem.setToolTip(0, name + ' [' + super + ']')
                            else:
                                classItem.setText(0, name)
                                classItem.setToolTip(0, name)
                            if sys.platform.startswith('win'):
                                classItem.setSizeHint(0, QSize(18, 18))
                            classItem.setText(1, str(class_data.lineno))
                            iconClass = QgsApplication.getThemeIcon("console/iconClassTreeWidgetConsole.png")
                            classItem.setIcon(0, iconClass)
                            dictObject[name] = class_data.lineno
                            for meth, lineno in sorted(class_data.methods.items(), key=itemgetter(1)):
                                methodItem = QTreeWidgetItem()
                                methodItem.setText(0, meth + ' ')
                                methodItem.setText(1, str(lineno))
                                methodItem.setToolTip(0, meth)
                                iconMeth = QgsApplication.getThemeIcon("console/iconMethodTreeWidgetConsole.png")
                                methodItem.setIcon(0, iconMeth)
                                if sys.platform.startswith('win'):
                                    methodItem.setSizeHint(0, QSize(18, 18))
                                classItem.addChild(methodItem)
                                dictObject[meth] = lineno
                            self.parent.listClassMethod.addTopLevelItem(classItem)
                    for func_name, data in sorted(readModuleFunction.items(), key=lambda x:x[1].lineno):
                        if isinstance(data, pyclbr.Function) and \
                           os.path.normpath(str(data.file)) == os.path.normpath(str(tabWidget.path)):
                            funcItem = QTreeWidgetItem()
                            funcItem.setText(0, func_name + ' ')
                            funcItem.setText(1, str(data.lineno))
                            funcItem.setToolTip(0, func_name)
                            iconFunc = QgsApplication.getThemeIcon("console/iconFunctionTreeWidgetConsole.png")
                            funcItem.setIcon(0, iconFunc)
                            if sys.platform.startswith('win'):
                                funcItem.setSizeHint(0, QSize(18, 18))
                            dictObject[func_name] = data.lineno
                            self.parent.listClassMethod.addTopLevelItem(funcItem)
                    if found:
                        sys.path.remove(pathFile)
                except:
                    # Any failure (usually a syntax error in the script)
                    # is reported as a single "Check Syntax" item.
                    msgItem = QTreeWidgetItem()
                    msgItem.setText(0, QCoreApplication.translate("PythonConsole", "Check Syntax"))
                    msgItem.setText(1, 'syntaxError')
                    iconWarning = QgsApplication.getThemeIcon("console/iconSyntaxErrorConsole.png")
                    msgItem.setIcon(0, iconWarning)
                    self.parent.listClassMethod.addTopLevelItem(msgItem)
                    # s = traceback.format_exc()
                    # print '## Error: '
                    # sys.stderr.write(s)
                    # pass

    def refreshSettingsEditor(self):
        """Re-apply editor settings to every tab and sync the inspector."""
        countTab = self.count()
        for i in range(countTab):
            self.widget(i).newEditor.settingsEditor()
        objInspectorEnabled = self.settings.value("pythonConsole/enableObjectInsp",
                                                  False, type=bool)
        listObj = self.parent.objectListButton
        if self.parent.listClassMethod.isVisible():
            listObj.setChecked(objInspectorEnabled)
        listObj.setEnabled(objInspectorEnabled)
        if objInspectorEnabled:
            cW = self.currentWidget()
            if cW and not self.parent.listClassMethod.isVisible():
                QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
                self.listObject(cW)
                QApplication.restoreOverrideCursor()

    def changeLastDirPath(self, tab):
        """Remember the path of *tab* as the last used directory."""
        tabWidget = self.widget(tab)
        if tabWidget:
            self.settings.setValue("pythonConsole/lastDirPath", tabWidget.path)

    def widgetMessageBar(self, iface, text, level, timed=True):
        """Push *text* onto the current tab's message bar.

        *level* indexes [INFO, WARNING, CRITICAL]; when *timed* the QGIS
        message timeout is used, otherwise the message stays until closed.
        """
        messageLevel = [QgsMessageBar.INFO, QgsMessageBar.WARNING, QgsMessageBar.CRITICAL]
        if timed:
            timeout = iface.messageTimeout()
        else:
            timeout = 0
        currWidget = self.currentWidget()
        currWidget.infoBar.pushMessage(text, messageLevel[level], timeout)
| kiith-sa/QGIS | python/console/console_editor.py | Python | gpl-2.0 | 58,453 |
#
# Generated Thu Mar 17 13:49:32 2011 by generateDS.py version 2.4b.
#
import sys
import tardis.tardis_portal.schema.mets as supermod
# Module-level state recording which ElementTree implementation the
# fallback import cascade below managed to load.
etree_ = None
Verbose_import_ = False
(XMLParser_import_none, XMLParser_import_lxml,
 XMLParser_import_elementtree
 ) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with whichever ElementTree library was imported."""
    using_lxml = (XMLParser_import_library == XMLParser_import_lxml)
    if using_lxml and 'parser' not in kwargs:
        # lxml's compatibility parser skips comments, matching the
        # behaviour of the stdlib ElementTree.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# Globals
#
# Default character encoding used by the generated export methods.
ExternalEncoding = 'ascii'
#
# Data representation classes
#
# Each *Sub class below mirrors a class generated by generateDS in the
# supermod (METS schema) module.  Assigning it to
# ``supermod.<cls>.subclass`` makes the generated factory() return these
# subclasses instead, so application behaviour can be added here without
# touching the generated code.
class metsTypeSub(supermod.metsType):
    def __init__(self, PROFILE=None, LABEL=None, TYPE=None, ID=None, OBJID=None, metsHdr=None, dmdSec=None, amdSec=None, fileSec=None, structMap=None, structLink=None, behaviorSec=None):
        super(metsTypeSub, self).__init__(PROFILE, LABEL, TYPE, ID, OBJID, metsHdr, dmdSec, amdSec, fileSec, structMap, structLink, behaviorSec, )
supermod.metsType.subclass = metsTypeSub
# end class metsTypeSub
class metsHdrSub(supermod.metsHdr):
    def __init__(self, CREATEDATE=None, RECORDSTATUS=None, ADMID=None, LASTMODDATE=None, ID=None, agent=None, altRecordID=None, metsDocumentID=None):
        super(metsHdrSub, self).__init__(CREATEDATE, RECORDSTATUS, ADMID, LASTMODDATE, ID, agent, altRecordID, metsDocumentID, )
supermod.metsHdr.subclass = metsHdrSub
# end class metsHdrSub
class agentSub(supermod.agent):
    def __init__(self, TYPE=None, OTHERTYPE=None, ROLE=None, ID=None, OTHERROLE=None, name=None, note=None):
        super(agentSub, self).__init__(TYPE, OTHERTYPE, ROLE, ID, OTHERROLE, name, note, )
supermod.agent.subclass = agentSub
# end class agentSub
class altRecordIDSub(supermod.altRecordID):
    def __init__(self, TYPE=None, ID=None, valueOf_=None):
        super(altRecordIDSub, self).__init__(TYPE, ID, valueOf_, )
supermod.altRecordID.subclass = altRecordIDSub
# end class altRecordIDSub
class metsDocumentIDSub(supermod.metsDocumentID):
    def __init__(self, TYPE=None, ID=None, valueOf_=None):
        super(metsDocumentIDSub, self).__init__(TYPE, ID, valueOf_, )
supermod.metsDocumentID.subclass = metsDocumentIDSub
# end class metsDocumentIDSub
class fileSecSub(supermod.fileSec):
    def __init__(self, ID=None, fileGrp=None):
        super(fileSecSub, self).__init__(ID, fileGrp, )
supermod.fileSec.subclass = fileSecSub
# end class fileSecSub
class amdSecTypeSub(supermod.amdSecType):
    def __init__(self, ID=None, techMD=None, rightsMD=None, sourceMD=None, digiprovMD=None):
        super(amdSecTypeSub, self).__init__(ID, techMD, rightsMD, sourceMD, digiprovMD, )
supermod.amdSecType.subclass = amdSecTypeSub
# end class amdSecTypeSub
class fileGrpTypeSub(supermod.fileGrpType):
    def __init__(self, VERSDATE=None, ADMID=None, ID=None, USE=None, fileGrp=None, file=None):
        super(fileGrpTypeSub, self).__init__(VERSDATE, ADMID, ID, USE, fileGrp, file, )
supermod.fileGrpType.subclass = fileGrpTypeSub
# end class fileGrpTypeSub
class structMapTypeSub(supermod.structMapType):
    def __init__(self, TYPE=None, ID=None, LABEL=None, div=None):
        super(structMapTypeSub, self).__init__(TYPE, ID, LABEL, div, )
supermod.structMapType.subclass = structMapTypeSub
# end class structMapTypeSub
class divTypeSub(supermod.divType):
    def __init__(self, ADMID=None, TYPE=None, LABEL=None, DMDID=None, ORDERLABEL=None, CONTENTIDS=None, label=None, ORDER=None, ID=None, mptr=None, fptr=None, div=None):
        super(divTypeSub, self).__init__(ADMID, TYPE, LABEL, DMDID, ORDERLABEL, CONTENTIDS, label, ORDER, ID, mptr, fptr, div, )
supermod.divType.subclass = divTypeSub
# end class divTypeSub
class mptrSub(supermod.mptr):
    def __init__(self, arcrole=None, show=None, OTHERLOCTYPE=None, title=None, actuate=None, href=None, role=None, LOCTYPE=None, CONTENTIDS=None, type_=None, ID=None, valueOf_=None):
        super(mptrSub, self).__init__(arcrole, show, OTHERLOCTYPE, title, actuate, href, role, LOCTYPE, CONTENTIDS, type_, ID, valueOf_, )
supermod.mptr.subclass = mptrSub
# end class mptrSub
class fptrSub(supermod.fptr):
    def __init__(self, CONTENTIDS=None, ID=None, FILEID=None, par=None, seq=None, area=None):
        super(fptrSub, self).__init__(CONTENTIDS, ID, FILEID, par, seq, area, )
supermod.fptr.subclass = fptrSub
# end class fptrSub
class parTypeSub(supermod.parType):
    def __init__(self, ID=None, area=None, seq=None):
        super(parTypeSub, self).__init__(ID, area, seq, )
supermod.parType.subclass = parTypeSub
# end class parTypeSub
class seqTypeSub(supermod.seqType):
    def __init__(self, ID=None, area=None, par=None):
        super(seqTypeSub, self).__init__(ID, area, par, )
supermod.seqType.subclass = seqTypeSub
# end class seqTypeSub
class areaTypeSub(supermod.areaType):
    def __init__(self, BEGIN=None, END=None, BETYPE=None, SHAPE=None, COORDS=None, EXTENT=None, CONTENTIDS=None, ADMID=None, ID=None, EXTTYPE=None, FILEID=None, valueOf_=None):
        super(areaTypeSub, self).__init__(BEGIN, END, BETYPE, SHAPE, COORDS, EXTENT, CONTENTIDS, ADMID, ID, EXTTYPE, FILEID, valueOf_, )
supermod.areaType.subclass = areaTypeSub
# end class areaTypeSub
class structLinkTypeSub(supermod.structLinkType):
    def __init__(self, ID=None, smLink=None, smLinkGrp=None):
        super(structLinkTypeSub, self).__init__(ID, smLink, smLinkGrp, )
supermod.structLinkType.subclass = structLinkTypeSub
# end class structLinkTypeSub
class smLinkSub(supermod.smLink):
    def __init__(self, fromxx=None, show=None, title=None, actuate=None, to=None, arcrole=None, ID=None, valueOf_=None):
        super(smLinkSub, self).__init__(fromxx, show, title, actuate, to, arcrole, ID, valueOf_, )
supermod.smLink.subclass = smLinkSub
# end class smLinkSub
class smLinkGrpSub(supermod.smLinkGrp):
    def __init__(self, role=None, title=None, ARCLINKORDER='unordered', ID=None, type_=None, smLocatorLink=None, smArcLink=None):
        super(smLinkGrpSub, self).__init__(role, title, ARCLINKORDER, ID, type_, smLocatorLink, smArcLink, )
supermod.smLinkGrp.subclass = smLinkGrpSub
# end class smLinkGrpSub
class smLocatorLinkSub(supermod.smLocatorLink):
    def __init__(self, title=None, label=None, href=None, role=None, type_=None, ID=None, valueOf_=None):
        super(smLocatorLinkSub, self).__init__(title, label, href, role, type_, ID, valueOf_, )
supermod.smLocatorLink.subclass = smLocatorLinkSub
# end class smLocatorLinkSub
class smArcLinkSub(supermod.smArcLink):
    def __init__(self, ADMID=None, fromxx=None, title=None, show=None, actuate=None, ARCTYPE=None, to=None, arcrole=None, type_=None, ID=None, valueOf_=None):
        super(smArcLinkSub, self).__init__(ADMID, fromxx, title, show, actuate, ARCTYPE, to, arcrole, type_, ID, valueOf_, )
supermod.smArcLink.subclass = smArcLinkSub
# end class smArcLinkSub
class behaviorSecTypeSub(supermod.behaviorSecType):
    def __init__(self, LABEL=None, ID=None, CREATED=None, behaviorSec=None, behavior=None):
        super(behaviorSecTypeSub, self).__init__(LABEL, ID, CREATED, behaviorSec, behavior, )
supermod.behaviorSecType.subclass = behaviorSecTypeSub
# end class behaviorSecTypeSub
class behaviorTypeSub(supermod.behaviorType):
    def __init__(self, ADMID=None, CREATED=None, STRUCTID=None, LABEL=None, GROUPID=None, BTYPE=None, ID=None, interfaceDef=None, mechanism=None):
        super(behaviorTypeSub, self).__init__(ADMID, CREATED, STRUCTID, LABEL, GROUPID, BTYPE, ID, interfaceDef, mechanism, )
supermod.behaviorType.subclass = behaviorTypeSub
# end class behaviorTypeSub
class objectTypeSub(supermod.objectType):
    def __init__(self, arcrole=None, title=None, OTHERLOCTYPE=None, show=None, actuate=None, LABEL=None, href=None, role=None, LOCTYPE=None, type_=None, ID=None, valueOf_=None):
        super(objectTypeSub, self).__init__(arcrole, title, OTHERLOCTYPE, show, actuate, LABEL, href, role, LOCTYPE, type_, ID, valueOf_, )
supermod.objectType.subclass = objectTypeSub
# end class objectTypeSub
class mdSecTypeSub(supermod.mdSecType):
    def __init__(self, STATUS=None, ADMID=None, CREATED=None, ID=None, GROUPID=None, mdRef=None, mdWrap=None):
        super(mdSecTypeSub, self).__init__(STATUS, ADMID, CREATED, ID, GROUPID, mdRef, mdWrap, )
supermod.mdSecType.subclass = mdSecTypeSub
# end class mdSecTypeSub
class mdRefSub(supermod.mdRef):
    def __init__(self, MIMETYPE=None, arcrole=None, XPTR=None, CHECKSUMTYPE=None, show=None, OTHERLOCTYPE=None, CHECKSUM=None, OTHERMDTYPE=None, title=None, actuate=None, MDTYPE=None, LABEL=None, href=None, role=None, LOCTYPE=None, MDTYPEVERSION=None, CREATED=None, type_=None, ID=None, SIZE=None, valueOf_=None):
        super(mdRefSub, self).__init__(MIMETYPE, arcrole, XPTR, CHECKSUMTYPE, show, OTHERLOCTYPE, CHECKSUM, OTHERMDTYPE, title, actuate, MDTYPE, LABEL, href, role, LOCTYPE, MDTYPEVERSION, CREATED, type_, ID, SIZE, valueOf_, )
supermod.mdRef.subclass = mdRefSub
# end class mdRefSub
class mdWrapSub(supermod.mdWrap):
    def __init__(self, MIMETYPE=None, CHECKSUMTYPE=None, CREATED=None, CHECKSUM=None, OTHERMDTYPE=None, MDTYPE=None, LABEL=None, MDTYPEVERSION=None, ID=None, SIZE=None, binData=None, xmlData=None):
        super(mdWrapSub, self).__init__(MIMETYPE, CHECKSUMTYPE, CREATED, CHECKSUM, OTHERMDTYPE, MDTYPE, LABEL, MDTYPEVERSION, ID, SIZE, binData, xmlData, )
supermod.mdWrap.subclass = mdWrapSub
# end class mdWrapSub
class fileTypeSub(supermod.fileType):
    def __init__(self, MIMETYPE=None, ADMID=None, END=None, CHECKSUMTYPE=None, SEQ=None, CREATED=None, CHECKSUM=None, USE=None, ID=None, DMDID=None, BEGIN=None, OWNERID=None, SIZE=None, GROUPID=None, BETYPE=None, FLocat=None, FContent=None, stream=None, transformFile=None, file=None):
        super(fileTypeSub, self).__init__(MIMETYPE, ADMID, END, CHECKSUMTYPE, SEQ, CREATED, CHECKSUM, USE, ID, DMDID, BEGIN, OWNERID, SIZE, GROUPID, BETYPE, FLocat, FContent, stream, transformFile, file, )
supermod.fileType.subclass = fileTypeSub
# end class fileTypeSub
class FLocatSub(supermod.FLocat):
    def __init__(self, arcrole=None, USE=None, title=None, OTHERLOCTYPE=None, show=None, actuate=None, href=None, role=None, LOCTYPE=None, type_=None, ID=None, valueOf_=None):
        super(FLocatSub, self).__init__(arcrole, USE, title, OTHERLOCTYPE, show, actuate, href, role, LOCTYPE, type_, ID, valueOf_, )
supermod.FLocat.subclass = FLocatSub
# end class FLocatSub
class FContentSub(supermod.FContent):
    def __init__(self, USE=None, ID=None, binData=None, xmlData=None):
        super(FContentSub, self).__init__(USE, ID, binData, xmlData, )
supermod.FContent.subclass = FContentSub
# end class FContentSub
class streamSub(supermod.stream):
    def __init__(self, BEGIN=None, END=None, ADMID=None, BETYPE=None, streamType=None, DMDID=None, OWNERID=None, ID=None, valueOf_=None):
        super(streamSub, self).__init__(BEGIN, END, ADMID, BETYPE, streamType, DMDID, OWNERID, ID, valueOf_, )
supermod.stream.subclass = streamSub
# end class streamSub
class transformFileSub(supermod.transformFile):
    def __init__(self, TRANSFORMTYPE=None, TRANSFORMKEY=None, TRANSFORMBEHAVIOR=None, TRANSFORMALGORITHM=None, TRANSFORMORDER=None, ID=None, valueOf_=None):
        super(transformFileSub, self).__init__(TRANSFORMTYPE, TRANSFORMKEY, TRANSFORMBEHAVIOR, TRANSFORMALGORITHM, TRANSFORMORDER, ID, valueOf_, )
supermod.transformFile.subclass = transformFileSub
# end class transformFileSub
class structLinkSub(supermod.structLink):
    def __init__(self, ID=None, smLink=None, smLinkGrp=None):
        super(structLinkSub, self).__init__(ID, smLink, smLinkGrp, )
supermod.structLink.subclass = structLinkSub
# end class structLinkSub
class fileGrpSub(supermod.fileGrp):
    def __init__(self, VERSDATE=None, ADMID=None, ID=None, USE=None, fileGrp=None, file=None):
        super(fileGrpSub, self).__init__(VERSDATE, ADMID, ID, USE, fileGrp, file, )
supermod.fileGrp.subclass = fileGrpSub
# end class fileGrpSub
class metsSub(supermod.mets):
    def __init__(self, PROFILE=None, LABEL=None, TYPE=None, ID=None, OBJID=None, metsHdr=None, dmdSec=None, amdSec=None, fileSec=None, structMap=None, structLink=None, behaviorSec=None):
        super(metsSub, self).__init__(PROFILE, LABEL, TYPE, ID, OBJID, metsHdr, dmdSec, amdSec, fileSec, structMap, structLink, behaviorSec, )
supermod.mets.subclass = metsSub
# end class metsSub
def get_root_tag(node):
    """Return (tag_name, model_class) for an element node.

    The class is looked up by tag name on the generated supermod module;
    ``None`` is returned when no matching class exists.
    """
    tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
    root_class = getattr(supermod, tag, None)
    return tag, root_class
def parse(inFilename):
    """Parse *inFilename* into a METS object tree, echo it to stdout as
    XML, and return the root object.

    Falls back to the generic ``mets`` root class when the document's
    root element is not recognised.
    """
    doc = parsexml_(inFilename)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'mets'
        rootClass = supermod.mets
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    # (Fix: the original assigned ``doc = None`` a second time after the
    # export — dead code, removed.)
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_=rootTag,
                   namespacedef_='')
    return rootObj
def parseString(inString):
    """Build a METS object tree from an XML string, echo it to stdout as
    XML, and return the root object."""
    from StringIO import StringIO
    tree = parsexml_(StringIO(inString))
    root_node = tree.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag, root_class = 'mets', supermod.mets
    root_obj = root_class.factory()
    root_obj.build(root_node)
    tree = None  # let the DOM be garbage collected
    sys.stdout.write('<?xml version="1.0" ?>\n')
    root_obj.export(sys.stdout, 0, name_=root_tag, namespacedef_='')
    return root_obj
def parseLiteral(inFilename):
    """Parse *inFilename* and dump the tree to stdout as a Python literal."""
    tree = parsexml_(inFilename)
    root_node = tree.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag, root_class = 'mets', supermod.mets
    root_obj = root_class.factory()
    root_obj.build(root_node)
    tree = None  # let the DOM be garbage collected
    out = sys.stdout
    out.write('#from ??? import *\n\n')
    out.write('import ??? as model_\n\n')
    out.write('rootObj = model_.mets(\n')
    root_obj.exportLiteral(out, 0, name_="mets")
    out.write(')\n')
    return root_obj
# Help text for command-line invocation; the generator leaves the module
# name as '???'.
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
    """Print command-line usage and exit with status 1."""
    print USAGE_TEXT
    sys.exit(1)
def main():
    """Command-line entry point: parse the single file named in argv."""
    argv = sys.argv[1:]
    if len(argv) != 1:
        usage()
    parse(argv[0])
# Run the command-line driver only when executed as a script.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
| eresearchrmit/hpctardis | tardis/tardis_portal/schema/metssubs.py | Python | bsd-3-clause | 16,179 |
from string import ascii_lowercase as alphabet
def decode(message):
    """Decode an Atbash-ciphered message: a<->z, b<->y, ...

    Characters outside the lowercase alphabet pass through unchanged.
    """
    mirrored = alphabet[::-1]
    table = str.maketrans(alphabet, mirrored)
    return message.translate(table)
| VladKha/CodeWars | 7 kyu/Decoding a message/solve.py | Python | gpl-3.0 | 168 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import sys
import tempfile
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
  """Restrict the 3-class iris dataset to classes 0 and 1 (binary problem)."""
  iris = base.load_iris()
  binary_ids = np.where((iris.target == 0) | (iris.target == 1))
  return base.Dataset(data=iris.data[binary_ids],
                      target=iris.target[binary_ids])
class LinearClassifierTest(test.TestCase):
  """Tests for linear.LinearClassifier: training, evaluation, custom
  metrics/optimizers, save/load, weight columns, and SDCA integration."""
  def testExperimentIntegration(self):
    # Smoke test: a LinearClassifier must plug into the Experiment harness.
    cont_features = [
        feature_column_lib.real_valued_column(
            'feature', dimension=4)
    ]
    exp = experiment.Experiment(
        estimator=linear.LinearClassifier(
            n_classes=3, feature_columns=cont_features),
        train_input_fn=test_data.iris_input_multiclass_fn,
        eval_input_fn=test_data.iris_input_multiclass_fn)
    exp.test()
  def testEstimatorContract(self):
    # LinearClassifier must satisfy the generic Estimator interface.
    estimator_test_utils.assert_estimator_contract(self,
                                                   linear.LinearClassifier)
  def testTrain(self):
    """Tests that loss goes down with training."""
    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)
  def testJointTrain(self):
    """Tests that loss goes down with training with joint weights."""
    def input_fn():
      return {
          'age':
              sparse_tensor.SparseTensor(
                  values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
    # NOTE(review): _joint_weight is a non-public constructor flag of
    # LinearClassifier — confirm it is still supported before relying on it.
    classifier = linear.LinearClassifier(
        _joint_weight=True, feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)
  def testMultiClass_MatrixData(self):
    """Tests multi-class classification using matrix data as input."""
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
  def testMultiClass_MatrixData_Labels1D(self):
    """Same as the last test, but labels shape is [150] instead of [150, 1]."""
    def _input_fn():
      iris = base.load_iris()
      return {
          'feature': constant_op.constant(
              iris.data, dtype=dtypes.float32)
      }, constant_op.constant(
          iris.target, shape=[150], dtype=dtypes.int32)
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, feature_columns=[feature_column])
    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testMultiClass_NpMatrixData(self):
    """Tests multi-class classification using numpy matrix data as input."""
    iris = base.load_iris()
    train_x = iris.data
    train_y = iris.target
    feature_column = feature_column_lib.real_valued_column('', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, feature_columns=[feature_column])
    classifier.fit(x=train_x, y=train_y, steps=100)
    scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testLogisticRegression_MatrixData(self):
    """Tests binary classification using matrix data as input."""
    def _input_fn():
      iris = _prepare_iris_data_for_logistic_regression()
      return {
          'feature': constant_op.constant(
              iris.data, dtype=dtypes.float32)
      }, constant_op.constant(
          iris.target, shape=[100, 1], dtype=dtypes.int32)
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(feature_columns=[feature_column])
    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testLogisticRegression_MatrixData_Labels1D(self):
    """Same as the last test, but labels shape is [100] instead of [100, 1]."""
    def _input_fn():
      iris = _prepare_iris_data_for_logistic_regression()
      return {
          'feature': constant_op.constant(
              iris.data, dtype=dtypes.float32)
      }, constant_op.constant(
          iris.target, shape=[100], dtype=dtypes.int32)
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(feature_columns=[feature_column])
    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testLogisticRegression_NpMatrixData(self):
    """Tests binary classification using numpy matrix data as input."""
    iris = _prepare_iris_data_for_logistic_regression()
    train_x = iris.data
    train_y = iris.target
    feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
    classifier = linear.LinearClassifier(feature_columns=feature_columns)
    classifier.fit(x=train_x, y=train_y, steps=100)
    scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testWeightAndBiasNames(self):
    """Tests that weight and bias names haven't changed."""
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    # 4 input features and 3 output classes.
    self.assertEqual(4, len(classifier.weights_))
    self.assertEqual(3, len(classifier.bias_))
  def testCustomOptimizerByObject(self):
    """Tests multi-class classification using matrix data as input."""
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3,
        optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
        feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByString(self):
    """Tests multi-class classification using matrix data as input."""
    # NOTE(review): despite its name this test passes the optimizer as a
    # callable, while testCustomOptimizerByFunction below passes a string.
    # The two method names look swapped — confirm before renaming.
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    def _optimizer():
      return ftrl.FtrlOptimizer(learning_rate=0.1)
    classifier = linear.LinearClassifier(
        n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByFunction(self):
    """Tests multi-class classification using matrix data as input."""
    # NOTE(review): passes the optimizer by name ('Ftrl'), i.e. as a string —
    # see the naming-swap note on testCustomOptimizerByString above.
    feature_column = feature_column_lib.real_valued_column(
        'feature', dimension=4)
    classifier = linear.LinearClassifier(
        n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""
    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels
    def _my_metric_op(predictions, labels):
      # For the case of binary classification, the 2nd column of "predictions"
      # denotes the model predictions.
      predictions = array_ops.strided_slice(
          predictions, [0, 1], [-1, 2], end_mask=1)
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
    classifier = linear.LinearClassifier(
        feature_columns=[feature_column_lib.real_valued_column('x')])
    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy':
                MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='classes'),
            'my_precision':
                MetricSpec(
                    metric_fn=metric_ops.streaming_precision,
                    prediction_key='classes'),
            'my_metric':
                MetricSpec(
                    metric_fn=_my_metric_op, prediction_key='probabilities')
        })
    self.assertTrue(
        set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
            set(scores.keys())))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(classifier.predict_classes(
        input_fn=predict_input_fn)))
    self.assertEqual(
        _sklearn.accuracy_score([1, 0, 0, 0], predictions),
        scores['my_accuracy'])
    # Tests the case where the prediction_key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              'bad_name':
                  MetricSpec(
                      metric_fn=metric_ops.streaming_auc,
                      prediction_key='bad_type')
          })
    # Tests the case where the 2nd element of the key is neither "classes" nor
    # "probabilities".
    with self.assertRaises(KeyError):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
    # Tests the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              ('bad_length_name', 'classes', 'bad_length'):
                  metric_ops.streaming_accuracy
          })
  def testLogisticFractionalLabels(self):
    """Tests logistic training with fractional labels."""
    def input_fn(num_epochs=None):
      return {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[1], [2]]), num_epochs=num_epochs),
      }, constant_op.constant(
          [[.7], [0]], dtype=dtypes.float32)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(
        feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
    classifier.fit(input_fn=input_fn, steps=500)
    predict_input_fn = functools.partial(input_fn, num_epochs=1)
    predictions_proba = list(
        classifier.predict_proba(input_fn=predict_input_fn))
    # Prediction probabilities mirror the labels column, which proves that the
    # classifier learns from float input.
    self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""
    def _input_fn():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant([[1], [0], [0]])
      return features, labels
    sparse_features = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=2e7)
    ]
    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig()
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
      config._cluster_spec = server_lib.ClusterSpec({})
    classifier = linear.LinearClassifier(
        feature_columns=sparse_features, config=config)
    classifier.fit(input_fn=_input_fn, steps=200)
    loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
    self.assertLess(loss, 0.07)
  def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""
    def input_fn(num_epochs=None):
      return {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([1]), num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    model_dir = tempfile.mkdtemp()
    classifier = linear.LinearClassifier(
        model_dir=model_dir, feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=30)
    predict_input_fn = functools.partial(input_fn, num_epochs=1)
    out1_class = list(
        classifier.predict_classes(
            input_fn=predict_input_fn, as_iterable=True))
    out1_proba = list(
        classifier.predict_proba(
            input_fn=predict_input_fn, as_iterable=True))
    del classifier
    # A fresh estimator pointed at the same model_dir must restore the
    # trained weights and reproduce the predictions exactly.
    classifier2 = linear.LinearClassifier(
        model_dir=model_dir, feature_columns=[age, language])
    out2_class = list(
        classifier2.predict_classes(
            input_fn=predict_input_fn, as_iterable=True))
    out2_proba = list(
        classifier2.predict_proba(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertTrue(np.array_equal(out1_class, out2_class))
    self.assertTrue(np.array_equal(out1_proba, out2_proba))
  def testWeightColumn(self):
    """Tests training with given weight column."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[100.], [3.], [2.], [2.]])
      }
      return features, labels
    def _input_fn_eval():
      # Create 4 rows (y = x)
      labels = constant_op.constant([[1], [1], [1], [1]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels
    classifier = linear.LinearClassifier(
        weight_column_name='w',
        feature_columns=[feature_column_lib.real_valued_column('x')],
        config=run_config.RunConfig(tf_random_seed=3))
    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    # All examples in eval data set are y=x.
    self.assertGreater(scores['labels/actual_label_mean'], 0.9)
    # If there were no weight column, model would learn y=Not(x). Because of
    # weights, it learns y=x.
    self.assertGreater(scores['labels/prediction_mean'], 0.9)
    # All examples in eval data set are y=x. So if weight column were ignored,
    # then accuracy would be zero. Because of weights, accuracy should be close
    # to 1.0.
    self.assertGreater(scores['accuracy'], 0.9)
    scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
    # Considering weights, the mean label should be close to 1.0.
    # If weights were ignored, it would be 0.25.
    self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
    # The classifier has learned y=x. If weight column were ignored in
    # evaluation, then accuracy for the train set would be 0.25.
    # Because weight is not ignored, accuracy is greater than 0.6.
    self.assertGreater(scores_train_set['accuracy'], 0.6)
  def testWeightColumnLoss(self):
    """Test ensures that you can specify per-example weights for loss."""
    def _input_fn():
      features = {
          'age': constant_op.constant([[20], [20], [20]]),
          'weights': constant_op.constant([[100], [1], [1]]),
      }
      labels = constant_op.constant([[1], [0], [0]])
      return features, labels
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(feature_columns=[age])
    classifier.fit(input_fn=_input_fn, steps=100)
    loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
    classifier = linear.LinearClassifier(
        feature_columns=[age], weight_column_name='weights')
    classifier.fit(input_fn=_input_fn, steps=100)
    loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
    self.assertLess(loss_weighted, loss_unweighted)
  def testExport(self):
    """Tests that export model for servo works."""
    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    export_dir = tempfile.mkdtemp()
    classifier.export(export_dir)
  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""
    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(
        feature_columns=[age, language], enable_centered_bias=False)
    classifier.fit(input_fn=input_fn, steps=100)
    self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
  def testEnableCenteredBias(self):
    """Tests that we can enable centered bias."""
    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(
        feature_columns=[age, language], enable_centered_bias=True)
    classifier.fit(input_fn=input_fn, steps=100)
    self.assertIn('centered_bias_weight', classifier.get_variable_names())
  def testTrainOptimizerWithL1Reg(self):
    """Tests l1 regularized model has higher loss."""
    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
    classifier_with_reg = linear.LinearClassifier(
        feature_columns=[language],
        optimizer=ftrl.FtrlOptimizer(
            learning_rate=1.0, l1_regularization_strength=100.))
    loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
        input_fn=input_fn, steps=1)['loss']
    loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
                                            steps=100).evaluate(
                                                input_fn=input_fn,
                                                steps=1)['loss']
    self.assertLess(loss_no_reg, loss_with_reg)
  def testTrainWithMissingFeature(self):
    """Tests that training works with missing features."""
    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['Swahili', 'turkish'],
                  indices=[[0, 0], [2, 0]],
                  dense_shape=[3, 1])
      }, constant_op.constant([[1], [1], [1]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    classifier = linear.LinearClassifier(feature_columns=[language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.07)
  def testSdcaOptimizerRealValuedFeatures(self):
    """Tests LinearClasssifier with SDCAOptimizer and real valued features."""
    def input_fn():
      return {
          'example_id': constant_op.constant(['1', '2']),
          'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
          'sq_footage': constant_op.constant([[800.0], [600.0]]),
          'weights': constant_op.constant([[1.0], [1.0]])
      }, constant_op.constant([[0], [1]])
    maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
    sq_footage = feature_column_lib.real_valued_column('sq_footage')
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[maintenance_cost, sq_footage],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=100)
    loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.05)
  def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
    """Tests SDCAOptimizer with real valued features of higher dimension."""
    # input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
    # where 2 1-dimensional dense features have been replaced by 1 2-dimensional
    # feature.
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2']),
          'dense_feature':
              constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
      }, constant_op.constant([[0], [1]])
    dense_feature = feature_column_lib.real_valued_column(
        'dense_feature', dimension=2)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[dense_feature], optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=100)
    loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.05)
  def testSdcaOptimizerBucketizedFeatures(self):
    """Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
    def input_fn():
      return {
          'example_id': constant_op.constant(['1', '2', '3']),
          'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
          'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
          'weights': constant_op.constant([[1.0], [1.0], [1.0]])
      }, constant_op.constant([[1], [0], [1]])
    price_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('price'),
        boundaries=[500.0, 700.0])
    sq_footage_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id', symmetric_l2_regularization=1.0)
    classifier = linear.LinearClassifier(
        feature_columns=[price_bucket, sq_footage_bucket],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=50)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerSparseFeatures(self):
    """Tests LinearClasssifier with SDCAOptimizer and sparse features."""
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'price':
              constant_op.constant([[0.4], [0.6], [0.3]]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 3], [2, 1]],
                  dense_shape=[3, 5]),
          'weights':
              constant_op.constant([[1.0], [1.0], [1.0]])
      }, constant_op.constant([[1], [0], [1]])
    price = feature_column_lib.real_valued_column('price')
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=50)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerWeightedSparseFeatures(self):
    """LinearClasssifier with SDCAOptimizer and weighted sparse features."""
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'price':
              sparse_tensor.SparseTensor(
                  values=[2., 3., 1.],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 5]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 5])
      }, constant_op.constant([[1], [0], [1]])
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    country_weighted_by_price = feature_column_lib.weighted_sparse_column(
        country, 'price')
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=50)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerCrossedFeatures(self):
    """Tests LinearClasssifier with SDCAOptimizer and crossed features."""
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english', 'italian', 'spanish'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['US', 'IT', 'MX'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1])
      }, constant_op.constant([[0], [0], [1]])
    language = feature_column_lib.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=5)
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    country_language = feature_column_lib.crossed_column(
        [language, country], hash_bucket_size=10)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[country_language], optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=10)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerMixedFeatures(self):
    """Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'price':
              constant_op.constant([[0.6], [0.8], [0.3]]),
          'sq_footage':
              constant_op.constant([[900.0], [700.0], [600.0]]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 3], [2, 1]],
                  dense_shape=[3, 5]),
          'weights':
              constant_op.constant([[3.0], [1.0], [1.0]])
      }, constant_op.constant([[1], [0], [1]])
    price = feature_column_lib.real_valued_column('price')
    sq_footage_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('sq_footage'),
        boundaries=[650.0, 800.0])
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sq_footage_country = feature_column_lib.crossed_column(
        [sq_footage_bucket, country], hash_bucket_size=10)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    classifier = linear.LinearClassifier(
        feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=50)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testEval(self):
    """Tests that eval produces correct metrics.
    """
    def input_fn():
      return {
          'age':
              constant_op.constant([[1], [2]]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['greek', 'chinese'],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1]),
      }, constant_op.constant([[1], [0]])
    language = feature_column_lib.sparse_column_with_hash_bucket('language',
                                                                 100)
    age = feature_column_lib.real_valued_column('age')
    classifier = linear.LinearClassifier(feature_columns=[age, language])
    # Evaluate on trained model
    classifier.fit(input_fn=input_fn, steps=100)
    classifier.evaluate(input_fn=input_fn, steps=1)
    # TODO(ispir): Enable accuracy check after resolving the randomness issue.
    # self.assertLess(evaluated_values['loss/mean'], 0.3)
    # self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
  """Tests for linear.LinearRegressor."""
  def testExperimentIntegration(self):
    # Smoke test: a LinearRegressor must plug into the Experiment harness.
    cont_features = [
        feature_column_lib.real_valued_column(
            'feature', dimension=4)
    ]
    exp = experiment.Experiment(
        estimator=linear.LinearRegressor(feature_columns=cont_features),
        train_input_fn=test_data.iris_input_logistic_fn,
        eval_input_fn=test_data.iris_input_logistic_fn)
    exp.test()
  def testEstimatorContract(self):
    # LinearRegressor must satisfy the generic Estimator interface.
    estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
  def testRegression_MatrixData(self):
    """Tests regression using matrix data as input."""
    cont_features = [
        feature_column_lib.real_valued_column(
            'feature', dimension=4)
    ]
    regressor = linear.LinearRegressor(
        feature_columns=cont_features,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = regressor.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
  def testRegression_TensorData(self):
    """Tests regression using tensor data as input."""
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1.0, 0., 0.2], dtype=dtypes.float32)
    feature_columns = [
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=20),
        feature_column_lib.real_valued_column('age')
    ]
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
  def testLoss(self):
    """Tests loss calculation."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels
    regressor = linear.LinearRegressor(
        feature_columns=[feature_column_lib.real_valued_column('x')],
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
    # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
    self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
  def testLossWithWeights(self):
    """Tests loss calculation with weights: eval loss is weight-averaged."""
    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels
    def _input_fn_eval():
      # 4 rows, with different weights.
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[7.], [1.], [1.], [1.]])
      }
      return features, labels
    regressor = linear.LinearRegressor(
        weight_column_name='w',
        feature_columns=[feature_column_lib.real_valued_column('x')],
        config=run_config.RunConfig(tf_random_seed=1))
    # Train on uniform weights so the model still learns y = 0.25, then
    # evaluate with non-uniform weights to exercise weighted loss averaging.
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
    self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
  def testTrainWithWeights(self):
    """Tests training with given weight column."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[100.], [3.], [2.], [2.]])
      }
      return features, labels
    def _input_fn_eval():
      # Create 4 rows (y = x)
      labels = constant_op.constant([[1.], [1.], [1.], [1.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels
    regressor = linear.LinearRegressor(
        weight_column_name='w',
        feature_columns=[feature_column_lib.real_valued_column('x')],
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # The model should learn (y = x) because of the weights, so the loss should
    # be close to zero.
    self.assertLess(scores['loss'], 0.1)
  def testPredict_AsIterableFalse(self):
    """Tests predict method with as_iterable=False (returns a full array)."""
    labels = [1.0, 0., 0.2]
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(labels, dtype=dtypes.float32)
    feature_columns = [
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=20),
        feature_column_lib.real_valued_column('age')
    ]
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
    # With as_iterable=False the predictions come back as one in-memory array.
    predicted_scores = regressor.predict_scores(
        input_fn=_input_fn, as_iterable=False)
    self.assertAllClose(labels, predicted_scores, atol=0.1)
    # predict() must agree with predict_scores() for a regressor.
    predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
    self.assertAllClose(predicted_scores, predictions)
  def testPredict_AsIterable(self):
    """Tests predict method with as_iterable=True (yields one value per row)."""
    labels = [1.0, 0., 0.2]
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(labels, dtype=dtypes.float32)
    feature_columns = [
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=20),
        feature_column_lib.real_valued_column('age')
    ]
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
    # num_epochs=1 makes the iterable prediction stream terminate after one
    # pass over the data instead of looping forever.
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predicted_scores = list(
        regressor.predict_scores(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(labels, predicted_scores, atol=0.1)
    predictions = list(
        regressor.predict(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(predicted_scores, predictions)
  def testCustomMetrics(self):
    """Tests custom evaluation metrics, including error cases for bad keys."""
    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels
    def _my_metric_op(predictions, labels):
      # Toy metric: sum of elementwise products of predictions and labels.
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
    regressor = linear.LinearRegressor(
        feature_columns=[feature_column_lib.real_valued_column('x')],
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'my_error':
                MetricSpec(
                    metric_fn=metric_ops.streaming_mean_squared_error,
                    prediction_key='scores'),
            'my_metric':
                MetricSpec(
                    metric_fn=_my_metric_op, prediction_key='scores')
        })
    self.assertIn('loss', set(scores.keys()))
    self.assertIn('my_error', set(scores.keys()))
    self.assertIn('my_metric', set(scores.keys()))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(
        regressor.predict_scores(input_fn=predict_input_fn)))
    # The custom 'my_error' metric must match sklearn's MSE on the same data.
    self.assertAlmostEqual(
        _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
        scores['my_error'])
    # Tests the case where the prediction_key is not "scores".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              'bad_name':
                  MetricSpec(
                      metric_fn=metric_ops.streaming_auc,
                      prediction_key='bad_type')
          })
    # Tests the case where the 2nd element of the key is not "scores".
    with self.assertRaises(KeyError):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              ('my_error', 'predictions'):
                  metric_ops.streaming_mean_squared_error
          })
    # Tests the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              ('bad_length_name', 'scores', 'bad_length'):
                  metric_ops.streaming_mean_squared_error
          })
  def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1.0, 0., 0.2], dtype=dtypes.float32)
    feature_columns = [
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=20),
        feature_column_lib.real_valued_column('age')
    ]
    model_dir = tempfile.mkdtemp()
    regressor = linear.LinearRegressor(
        model_dir=model_dir,
        feature_columns=feature_columns,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
    # Drop the trained estimator; a fresh one pointed at the same model_dir
    # must restore the checkpoint and produce identical predictions.
    del regressor
    regressor2 = linear.LinearRegressor(
        model_dir=model_dir, feature_columns=feature_columns)
    predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
    self.assertAllClose(predictions, predictions2)
  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1.0, 0., 0.2], dtype=dtypes.float32)
    feature_columns = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=2e7),
        feature_column_lib.real_valued_column('age')
    ]
    # Fake a two-parameter-server cluster via TF_CONFIG so the estimator
    # builds a partitioned variable placement.
    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig(tf_random_seed=1)
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
      config._cluster_spec = server_lib.ClusterSpec({})
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns, config=config)
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""
    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1.0, 0., 0.2], dtype=dtypes.float32)
    feature_columns = [
        feature_column_lib.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=20),
        feature_column_lib.real_valued_column('age')
    ]
    # Same setup as the basic regression test, but with the centered-bias
    # column disabled; training should still converge to a small loss.
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        enable_centered_bias=False,
        config=run_config.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
  def testRecoverWeights(self):
    """Tests that the regressor recovers the known generating weights."""
    rng = np.random.RandomState(67)
    n = 1000
    n_weights = 10
    bias = 2
    # Synthesize y = x . weights + bias + small Gaussian noise.
    x = rng.uniform(-1, 1, (n, n_weights))
    weights = 10 * rng.randn(n_weights)
    y = np.dot(x, weights)
    y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
    feature_columns = estimator.infer_real_valued_columns_from_input(x)
    regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
    regressor.fit(x, y, batch_size=64, steps=2000)
    # Have to flatten weights since they come in (x, 1) shape.
    self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
    # TODO(ispir): Disable centered_bias.
    # assert abs(bias - regressor.bias_) < 0.1
  def testSdcaOptimizerRealValuedLinearFeatures(self):
    """Tests LinearRegressor with SDCAOptimizer and real valued features."""
    # Exact linear data: y = x . weights, so SDCA should drive the loss to
    # ~0 and recover the generating weights.
    x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
    weights = [[3.0], [-1.2], [0.5]]
    y = np.dot(x, weights)
    def input_fn():
      return {
          'example_id': constant_op.constant(['1', '2', '3']),
          'x': constant_op.constant(x),
          'weights': constant_op.constant([[10.0], [10.0], [10.0]])
      }, constant_op.constant(y)
    x_column = feature_column_lib.real_valued_column('x', dimension=3)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    regressor = linear.LinearRegressor(
        feature_columns=[x_column],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.01)
    self.assertAllClose(
        [w[0] for w in weights], regressor.weights_.flatten(), rtol=0.1)
  def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
    """Tests LinearRegressor with SDCAOptimizer and a mix of features."""
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'price':
              constant_op.constant([[0.6], [0.8], [0.3]]),
          'sq_footage':
              constant_op.constant([[900.0], [700.0], [600.0]]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 3], [2, 1]],
                  dense_shape=[3, 5]),
          'weights':
              constant_op.constant([[3.0], [5.0], [7.0]])
      }, constant_op.constant([[1.55], [-1.25], [-3.0]])
    # Mix of real-valued, bucketized, sparse and crossed columns.
    price = feature_column_lib.real_valued_column('price')
    sq_footage_bucket = feature_column_lib.bucketized_column(
        feature_column_lib.real_valued_column('sq_footage'),
        boundaries=[650.0, 800.0])
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sq_footage_country = feature_column_lib.crossed_column(
        [sq_footage_bucket, country], hash_bucket_size=10)
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id', symmetric_l2_regularization=1.0)
    regressor = linear.LinearRegressor(
        feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.05)
  def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
    """Tests LinearRegressor with SDCAOptimizer and sparse features.

    Fits the same data twice, with and without L1 regularization, and checks
    that L1 trades a higher loss for a smaller L1 norm of the weights.
    """
    def input_fn():
      return {
          'example_id':
              constant_op.constant(['1', '2', '3']),
          'price':
              constant_op.constant([[0.4], [0.6], [0.3]]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 3], [2, 1]],
                  dense_shape=[3, 5]),
          'weights':
              constant_op.constant([[10.0], [10.0], [10.0]])
      }, constant_op.constant([[1.4], [-0.8], [2.6]])
    price = feature_column_lib.real_valued_column('price')
    country = feature_column_lib.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    # Regressor with no L1 regularization.
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    regressor = linear.LinearRegressor(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    no_l1_reg_weights = regressor.weights_
    # Regressor with L1 regularization.
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id', symmetric_l1_regularization=1.0)
    regressor = linear.LinearRegressor(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    l1_reg_weights = regressor.weights_
    # Unregularized loss is lower when there is no L1 regularization.
    self.assertLess(no_l1_reg_loss, l1_reg_loss)
    self.assertLess(no_l1_reg_loss, 0.05)
    # But weights returned by the regressor with L1 regularization have smaller
    # L1 norm.
    l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
    for var_name in sorted(l1_reg_weights):
      l1_reg_weights_norm += sum(
          np.absolute(l1_reg_weights[var_name].flatten()))
      no_l1_reg_weights_norm += sum(
          np.absolute(no_l1_reg_weights[var_name].flatten()))
      print('Var name: %s, value: %s' %
            (var_name, no_l1_reg_weights[var_name].flatten()))
    self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and a
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
[[1 if i % 4 is 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
  def testSdcaOptimizerBiasAndOtherColumns(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""
    def input_fn():
      """Testing the bias weight when there are other features present.
      1/2 of the instances in this input have feature 'a', the rest have
      feature 'b', and we expect the bias to be added to each instance as well.
      0.4 of all instances that have feature 'a' are positive, and 0.2 of all
      instances that have feature 'b' are positive. The labels in the dataset
      are ordered to appear shuffled since SDCA expects shuffled data, and
      converges faster with this pseudo-random ordering.
      If the bias was centered we would expect the weights to be:
      bias: 0.3
      a: 0.1
      b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value for the other columns, and so the expected weights get
      shifted and are:
      bias: 0.2
      a: 0.2
      b: 0.0
      Returns:
        The test dataset.
      """
      num_examples = 200
      half = int(num_examples / 2)
      return {
          'example_id':
              constant_op.constant([str(x + 1) for x in range(num_examples)]),
          'a':
              constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
          'b':
              constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
      }, constant_op.constant(
          # 40% positives in the 'a' half, 20% in the 'b' half, interleaved in
          # a fixed pseudo-random pattern (see docstring).
          [[x]
           for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
           [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    regressor = linear.LinearRegressor(
        feature_columns=[
            feature_column_lib.real_valued_column('a'),
            feature_column_lib.real_valued_column('b')
        ],
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=200)
    # TODO(b/29339026): Change the expected results to expect a centered bias.
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
    self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
    self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
  def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""
    def input_fn():
      """Testing the bias weight when there are other features present.
      1/2 of the instances in this input have feature 'a', the rest have
      feature 'b', and we expect the bias to be added to each instance as well.
      0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
      all instances that have feature 'b' have a label of -1.
      We can expect the weights to be:
      bias: 0.0
      a: 0.1
      b: -0.1
      Returns:
        The test dataset.
      """
      num_examples = 200
      half = int(num_examples / 2)
      return {
          'example_id':
              constant_op.constant([str(x + 1) for x in range(num_examples)]),
          'a':
              constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
          'b':
              constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
      }, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
                              [[-1 if x % 10 == 0 else 0] for x in range(half)])
    sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
        example_id_column='example_id')
    regressor = linear.LinearRegressor(
        feature_columns=[
            feature_column_lib.real_valued_column('a'),
            feature_column_lib.real_valued_column('b')
        ],
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=100)
    # Labels are fabricated to be symmetric, so the bias should be ~0 even
    # without centered-bias handling.
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
    self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
    self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
  """Returns the Boston housing dataset as (features, labels) float32 tensors.

  Features are reshaped to [-1, 13] (13 attributes per house) and labels to
  [-1, 1].
  """
  boston = base.load_boston()
  features = math_ops.cast(
      array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
      dtypes.float32)
  labels = math_ops.cast(
      array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
      dtypes.float32)
  return features, labels
class FeatureColumnTest(test.TestCase):
  """Tests inferring real-valued feature columns from an input_fn."""
  def testTrain(self):
    # Smoke test: inferred columns must be usable for fit/evaluate.
    feature_columns = estimator.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    est = linear.LinearRegressor(feature_columns=feature_columns)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| tomasreimers/tensorflow-emscripten | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | Python | apache-2.0 | 64,221 |
from django_cyverse_auth.models import Token
from rest_framework import serializers
class TokenSerializer(serializers.ModelSerializer):
    """Read-only serializer for auth tokens.

    Renames the model's ``key`` field to ``token`` and exposes the owning
    user's username and the token's expiry time.
    """
    # `source='key'` exposes the Token model's `key` under the name 'token'.
    token = serializers.CharField(read_only=True, source='key')
    # Dotted source traverses the FK to the related user.
    username = serializers.CharField(read_only=True, source='user.username')
    # `source` may name a model method; its return value is rendered as text.
    expires = serializers.CharField(read_only=True, source='get_expired_time')
    class Meta:
        model = Token
        fields = ('token', 'username', 'expires')
| CCI-MOC/GUI-Backend | api/v1/serializers/token_serializer.py | Python | apache-2.0 | 447 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pytest
from pandas.compat import product as cart_product, range
from pandas import DataFrame, MultiIndex, Period, Series, Timedelta, Timestamp
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestCounting(object):
    """Tests for GroupBy.cumcount, GroupBy.ngroup and related counting ops."""
    def test_cumcount(self):
        # cumcount numbers rows 0..k-1 within each group, in original order.
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3])
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.cumcount())
        assert_series_equal(e, se.cumcount())
    def test_cumcount_dupe_index(self):
        # A duplicated index must be carried through unchanged.
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_mi(self):
        # Same as above but with a MultiIndex on the frame.
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=mi)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_cumcount_groupby_not_col(self):
        # Grouping by an external array rather than a column.
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())
    def test_ngroup(self):
        # ngroup labels each row with its group's index (order of appearance
        # of the sorted group keys).
        df = DataFrame({'A': list('aaaba')})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0])
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_distinct(self):
        # All-distinct keys: ngroup is simply 0..n-1.
        df = DataFrame({'A': list('abcde')})
        g = df.groupby('A')
        sg = g.A
        expected = Series(range(5), dtype='int64')
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_one_group(self):
        df = DataFrame({'A': [0] * 5})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.ngroup())
        assert_series_equal(e, se.ngroup())
    def test_ngroup_series_matches_frame(self):
        # Grouping a frame by a Series must match grouping that Series by
        # itself.
        df = DataFrame({'A': list('aaaba')})
        s = Series(list('aaaba'))
        assert_series_equal(df.groupby(s).ngroup(),
                            s.groupby(s).ngroup())
    def test_ngroup_dupe_index(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_mi(self):
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame({'A': list('aaaba')}, index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=mi)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_groupby_not_col(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())
    def test_ngroup_descending(self):
        df = DataFrame(['a', 'a', 'b', 'a', 'b'], columns=['A'])
        g = df.groupby(['A'])
        ascending = Series([0, 0, 1, 0, 1])
        descending = Series([1, 1, 0, 1, 0])
        # Identity that defines ascending=False: (ngroups - 1) - ascending.
        assert_series_equal(descending, (g.ngroups - 1) - ascending)
        assert_series_equal(ascending, g.ngroup(ascending=True))
        assert_series_equal(descending, g.ngroup(ascending=False))
    def test_ngroup_matches_cumcount(self):
        # verify one manually-worked out case works
        df = DataFrame([['a', 'x'], ['a', 'y'], ['b', 'x'],
                        ['a', 'x'], ['b', 'y']], columns=['A', 'X'])
        g = df.groupby(['A', 'X'])
        g_ngroup = g.ngroup()
        g_cumcount = g.cumcount()
        expected_ngroup = Series([0, 1, 2, 0, 3])
        expected_cumcount = Series([0, 0, 0, 1, 0])
        assert_series_equal(g_ngroup, expected_ngroup)
        assert_series_equal(g_cumcount, expected_cumcount)
    def test_ngroup_cumcount_pair(self):
        # brute force comparison for all small series
        for p in cart_product(range(3), repeat=4):
            df = DataFrame({'a': p})
            g = df.groupby(['a'])
            # Reference implementations computed directly from the tuple.
            order = sorted(set(p))
            ngroupd = [order.index(val) for val in p]
            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
            assert_series_equal(g.ngroup(), Series(ngroupd))
            assert_series_equal(g.cumcount(), Series(cumcounted))
    def test_ngroup_respects_groupby_order(self):
        np.random.seed(0)
        df = DataFrame({'a': np.random.choice(list('abcdef'), 100)})
        for sort_flag in (False, True):
            g = df.groupby(['a'], sort=sort_flag)
            df['group_id'] = -1
            df['group_index'] = -1
            # Rebuild group id / within-group position by explicit iteration
            # and compare against ngroup/cumcount.
            for i, (_, group) in enumerate(g):
                df.loc[group.index, 'group_id'] = i
                for j, ind in enumerate(group.index):
                    df.loc[ind, 'group_index'] = j
            assert_series_equal(Series(df['group_id'].values),
                                g.ngroup())
            assert_series_equal(Series(df['group_index'].values),
                                g.cumcount())
    @pytest.mark.parametrize('datetimelike', [
        [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)],
        [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)],
        [Timedelta(x, unit="h") for x in range(1, 4)],
        [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]])
    def test_count_with_datetimelike(self, datetimelike):
        # test for #13393, where DataframeGroupBy.count() fails
        # when counting a datetimelike column.
        df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike})
        res = df.groupby('x').count()
        expected = DataFrame({'y': [2, 1]}, index=['a', 'b'])
        expected.index.name = "x"
        assert_frame_equal(expected, res)
    def test_count_with_only_nans_in_first_group(self):
        # GH21956
        df = DataFrame({'A': [np.nan, np.nan], 'B': ['a', 'b'], 'C': [1, 2]})
        result = df.groupby(['A', 'B']).C.count()
        # NaN keys are dropped, so the result is empty with an empty level.
        mi = MultiIndex(levels=[[], ['a', 'b']],
                        codes=[[], []],
                        names=['A', 'B'])
        expected = Series([], index=mi, dtype=np.int64, name='C')
        assert_series_equal(result, expected, check_index_type=False)
| GuessWhoSamFoo/pandas | pandas/tests/groupby/test_counting.py | Python | bsd-3-clause | 7,838 |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ai_safety_gridworlds.helpers import factory
import numpy as np
from six.moves import range
def get_env(env_name, noops,
            movement_reward=-1, goal_reward=1, side_effect_reward=-1):
  """Get a copy of the environment for simulating the baseline.

  Args:
    env_name: environment name ('box', 'sokocoin1'..'sokocoin3', a
      conveyor_belt variant containing 'sushi' or equal to 'vase', or any
      other name registered with the ai_safety_gridworlds factory).
    noops: whether the environment should include noop actions.
    movement_reward: reward per movement step.
    goal_reward: reward for reaching the goal.
    side_effect_reward: hidden reward applied for side effects (used as both
      wall and corner reward in the sokoban environments).

  Returns:
    Tuple (env, size). `size` is presumably the number of board cells used to
    dimension tabular state representations (e.g. 36 = 6x6 for 'box') — TODO
    confirm; it is None for environments not listed here.
  """
  if env_name == 'box' or 'sokocoin' in env_name:
    levels = {'box': 0, 'sokocoin1': 1, 'sokocoin2': 2, 'sokocoin3': 3}
    sizes = {'box': 36, 'sokocoin1': 100, 'sokocoin2': 72, 'sokocoin3': 100}
    env = factory.get_environment_obj(
        'side_effects_sokoban', noops=noops, movement_reward=movement_reward,
        goal_reward=goal_reward, wall_reward=side_effect_reward,
        corner_reward=side_effect_reward, level=levels[env_name])
    size = sizes[env_name]
  elif 'sushi' in env_name or env_name == 'vase':
    env = factory.get_environment_obj(
        'conveyor_belt', variant=env_name, noops=noops, goal_reward=goal_reward)
    size = 49
  else:
    env = factory.get_environment_obj(env_name)
    size = None
  return env, size
def run_loop(agent, env, number_episodes, anneal):
  """Run `agent` in `env` and collect per-episode statistics.

  Args:
    agent: agent with begin_episode/step/end_episode methods and an `epsilon`
      exploration-rate attribute.
    env: environment with reset/step methods, an `episode_return` attribute
      and a `get_last_performance` method.
    number_episodes: number of episodes to run.
    anneal: if True, linearly anneal `agent.epsilon` from 1 to 0 over the run.

  Returns:
    Tuple of (per-episode returns, per-episode safety performances).
  """
  returns = []
  performances = []
  if anneal:
    agent.epsilon = 1.0
    epsilon_step = 1.0 / number_episodes
  for episode in range(number_episodes):
    # Start a fresh episode.
    timestep = env.reset()
    agent.begin_episode()
    # Roll the episode forward until the environment signals termination.
    done = False
    while not done:
      timestep = env.step(agent.step(timestep))
      done = timestep.last()
    agent.end_episode(timestep)
    returns.append(env.episode_return)
    performances.append(env.get_last_performance())
    if anneal:
      # Linear schedule, clipped at zero.
      agent.epsilon = max(0, agent.epsilon - epsilon_step)
    if episode % 500 == 0:
      print('Episode', episode)
  return returns, performances
def run_agent(baseline, dev_measure, dev_fun, discount, value_discount, beta,
              nonterminal_weight, exact_baseline, anneal, num_episodes,
              num_episodes_noexp, seed, env_name, noops, movement_reward,
              goal_reward, side_effect_reward, agent_class):
  """Run agent.
  Create an agent with the given parameters for the side effects penalty.
  Run the agent for `num_episodes' episodes with an exploration rate that is
  either annealed from 1 to 0 (`anneal=True') or constant (`anneal=False').
  Then run the agent with no exploration for `num_episodes_noexp' episodes.
  Args:
    baseline: baseline state
    dev_measure: deviation measure
    dev_fun: summary function for the deviation measure
    discount: discount factor
    value_discount: discount factor for deviation measure value function.
    beta: weight for side effects penalty
    nonterminal_weight: penalty weight for nonterminal states.
    exact_baseline: whether to use an exact or approximate baseline
    anneal: whether to anneal the exploration rate from 1 to 0 or use a constant
      exploration rate
    num_episodes: number of episodes
    num_episodes_noexp: number of episodes with no exploration
    seed: random seed
    env_name: environment name
    noops: whether the environment has noop actions
    movement_reward: movement reward
    goal_reward: reward for reaching a goal state
    side_effect_reward: hidden reward for causing side effects
    agent_class: Q-learning agent class: QLearning (regular) or QLearningSE
      (with side effects penalty)
  Returns:
    returns: return for each episode
    performances: safety performance for each episode
  """
  # Seed NumPy so epsilon-greedy exploration (and hence results) is
  # reproducible.
  np.random.seed(seed)
  env, state_size = get_env(env_name=env_name,
                            noops=noops,
                            movement_reward=movement_reward,
                            goal_reward=goal_reward,
                            side_effect_reward=side_effect_reward)
  start_timestep = env.reset()
  if exact_baseline:
    # The exact baseline simulates inaction in a separate, noop-enabled copy
    # of the environment.
    baseline_env, _ = get_env(env_name=env_name,
                              noops=True,
                              movement_reward=movement_reward,
                              goal_reward=goal_reward,
                              side_effect_reward=side_effect_reward)
  else:
    baseline_env = None
  agent = agent_class(
      actions=env.action_spec(), baseline=baseline, dev_measure=dev_measure,
      dev_fun=dev_fun, discount=discount, value_discount=value_discount,
      beta=beta, exact_baseline=exact_baseline, baseline_env=baseline_env,
      start_timestep=start_timestep, state_size=state_size,
      nonterminal_weight=nonterminal_weight)
  returns, performances = run_loop(
      agent, env, number_episodes=num_episodes, anneal=anneal)
  if num_episodes_noexp > 0:
    # Evaluation phase: act greedily (no exploration) and append the results.
    agent.epsilon = 0
    returns_noexp, performances_noexp = run_loop(
        agent, env, number_episodes=num_episodes_noexp, anneal=False)
    returns.extend(returns_noexp)
    performances.extend(performances_noexp)
  return returns, performances
| deepmind/deepmind-research | side_effects_penalties/training.py | Python | apache-2.0 | 5,751 |
import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.deletion import CASCADE, PROTECT
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField,
TextField, TimeField,
)
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.db.models.indexes import Index
from django.db.transaction import atomic
from django.test import (
TransactionTestCase, mock, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils.timezone import UTC
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor,
BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed,
TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
    """
    Tests that the schema-alteration code works correctly.
    Be aware that these tests are more liable than most to false results,
    as sometimes the code to check if a test has worked is almost as complex
    as the code it is testing.
    """
    # No installed apps are needed; tables are created and dropped by hand.
    available_apps = []
    # Models whose tables are dropped after every test (see delete_tables()).
    models = [
        Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
        BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note,
        Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
    ]
# Utility functions
    def setUp(self):
        """Reset the list of test-local model classes before each test."""
        # local_models should contain test dependent model classes that will be
        # automatically removed from the app cache on test tear down.
        self.local_models = []
    def tearDown(self):
        """Drop all model tables and purge test-local models from the app registry."""
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                # Auto-created M2M "through" models must be unregistered first,
                # or they would leak into subsequent tests via the app cache.
                for many_to_many in model._meta.many_to_many:
                    through = many_to_many.remote_field.through
                    if through and through._meta.auto_created:
                        del new_apps.all_models['schema'][through._meta.model_name]
                del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.table_name_converter
with atomic():
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if tbl in table_names:
with connection.schema_editor() as editor:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details['columns'] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(self, schema_editor, model, field, field_name, expected_default,
cast_function=None):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
database_default = cursor.fetchall()[0][0]
if cast_function and not type(database_default) == type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {'fks': 0, 'uniques': 0, 'indexes': 0}
for c in constraints.values():
if c['columns'] == [column]:
if c['foreign_key'] == fk_to:
counts['fks'] += 1
if c['unique']:
counts['uniques'] += 1
elif c['index']:
counts['indexes'] += 1
return counts
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_fk_to_proxy(self):
        "Tests that creating a FK to a proxy model creates database constraints."
        class AuthorProxy(Author):
            class Meta:
                app_label = 'schema'
                apps = new_apps
                proxy = True
        class AuthorRef(Model):
            author = ForeignKey(AuthorProxy, on_delete=CASCADE)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register for removal from the app cache in tearDown().
        self.local_models = [AuthorProxy, AuthorRef]
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(AuthorRef)
        # The FK declared against the proxy must resolve to the concrete
        # model's table (schema_author).
        constraints = self.get_constraints(AuthorRef._meta.db_table)
        for details in constraints.values():
            if details['columns'] == ['author_id'] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail('No FK constraint for author_id found')
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_fk_db_constraint(self):
        "Tests that the db_constraint parameter is respected"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Author)
            editor.create_model(BookWeak)
        # Check that initial tables are there
        list(Author.objects.all())
        list(Tag.objects.all())
        list(BookWeak.objects.all())
        # Check that BookWeak doesn't have an FK constraint
        constraints = self.get_constraints(BookWeak._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.fail("FK constraint for author_id found")
        # Make a db_constraint=False FK
        new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
        new_field.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
        # Alter to one with a constraint
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        # Make sure the new FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
                break
        else:
            self.fail("No FK constraint for tag_id found")
        # Alter to one without a constraint again
        # (new_field, created above with db_constraint=False, is reused as
        # the target field of this reverse alteration).
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field2, new_field, strict=True)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
    def _test_m2m_db_constraint(self, M2MFieldClass):
        """Shared body: an M2M with db_constraint=False must create no FK."""
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register for removal from the app cache in tearDown().
        self.local_models = [LocalAuthorWithM2M]
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(LocalAuthorWithM2M)
        # Check that initial tables are there
        list(LocalAuthorWithM2M.objects.all())
        list(Tag.objects.all())
        # Make a db_constraint=False FK
        new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Make sure no FK constraint is present on the auto-created
        # intermediate ("through") table.
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint(self):
        """db_constraint=False is respected for a plain ManyToManyField."""
        self._test_m2m_db_constraint(ManyToManyField)
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint_custom(self):
        """db_constraint=False is respected for a custom M2M field subclass."""
        self._test_m2m_db_constraint(CustomManyToManyField)
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint_inherited(self):
        """db_constraint=False is respected for an inherited M2M field subclass."""
        self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# BooleanField are stored as TINYINT(1) on MySQL.
field_type = columns['awesome'][0]
self.assertEqual(
field_type,
connection.features.introspected_boolean_field_type(new_field, created_separately=True)
)
    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):
            # Weird field that saves the count of items in its value
            def get_default(self):
                # Return the raw default untransformed; get_prep_value()
                # does the conversion on the way to the database.
                return self.default
            def get_prep_value(self, value):
                if value is None:
                    return 0
                return len(value)
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default it needs to cast (to string in this case)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns['thing']
        self.assertEqual(field_type, 'IntegerField')
        # Make sure the values were transformed correctly
        # ({1: 2} has len() == 1, so both existing rows must hold 1).
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b'123')
new_field.set_attributes_from_name('bits')
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns['bits'][0], "TextField")
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05')
old_field = Note._meta.get_field('info')
new_field = DateField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05 3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = TimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
    def test_alter_null_to_not_null(self):
        """
        #23609 - Tests handling of default values when altering from NULL to NOT NULL.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertTrue(columns['height'][1][6])
        # Create some test data
        Author.objects.create(name='Not null author', height=12)
        Author.objects.create(name='Null author')
        # Verify null value
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertIsNone(Author.objects.get(name='Null author').height)
        # Alter the height field to NOT NULL with default
        old_field = Author._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertFalse(columns['height'][1][6])
        # Verify default value: existing non-null values are kept, NULLs are
        # backfilled with the new default (42).
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature('supports_combined_alters')
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_to_fk(self):
        """
        #24447 - Tests adding a FK constraint for an existing column
        """
        class LocalBook(Model):
            author = IntegerField()
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register for removal from the app cache in tearDown().
        self.local_models = [LocalBook]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBook)
        # Ensure no FK constraint exists
        constraints = self.get_constraints(LocalBook._meta.db_table)
        for name, details in constraints.items():
            if details['foreign_key']:
                self.fail('Found an unexpected FK constraint to %s' % details['columns'])
        # Alter the plain IntegerField into a real ForeignKey.
        old_field = LocalBook._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBook, old_field, new_field, strict=True)
        constraints = self.get_constraints(LocalBook._meta.db_table)
        # Ensure FK constraint exists
        for name, details in constraints.items():
            if details['foreign_key'] and details['columns'] == ["author_id"]:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_o2o_to_fk(self):
        """
        #24163 - Tests altering of OneToOneField to ForeignKey
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Ensure the field is right to begin with
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique
        author = Author.objects.create(name="Joe")
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        BookWithO2O.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the OneToOneField to ForeignKey
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        # NOTE(review): the checks below switch to the Book model — this
        # assumes Book and BookWithO2O map to the same database table; verify
        # against the Meta options in models.py.
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique anymore
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is still present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_fk_to_o2o(self):
        """
        #24163 - Tests altering of ForeignKey to OneToOneField
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique
        author = Author.objects.create(name="Joe")
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        Book.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the ForeignKey to OneToOneField
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        # NOTE(review): the checks below switch to BookWithO2O — this assumes
        # Book and BookWithO2O map to the same database table; verify against
        # the Meta options in models.py.
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique now
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for OneToOneField.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
    def test_alter_field_o2o_to_fk(self):
        """
        Altering a OneToOneField into a ForeignKey swaps the unique
        constraint for a plain index (the inverse of fk_to_o2o above).
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Backends that don't enforce FK constraints report zero fks.
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint on OneToOneField is replaced with an index for ForeignKey.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
    def test_alter_field_o2o_keeps_unique(self):
        """
        Altering a OneToOneField in a way that doesn't affect the schema
        (only on_delete changes) keeps its unique constraint in place.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Backends that don't enforce FK constraints report zero fks.
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        # on_delete changed from CASCADE.
        new_field = OneToOneField(Author, PROTECT)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint remains.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
Author.objects.create(name='Bar')
def test_alter_int_pk_to_autofield_pk(self):
"""
Should be able to rename an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
    def test_alter_int_pk_to_int_unique(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        IntegerField(unique=True).
        """
        # Shadow model mapping onto the same table, used below to exercise
        # the final schema through the ORM.
        class IntegerUnique(Model):
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)
            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'
        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        # model requires a new PK
        old_field = IntegerPK._meta.get_field('j')
        new_field = IntegerField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('j')
        # Move the primary key to 'j' first, so 'i' can drop primary_key.
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        old_field = IntegerPK._meta.get_field('i')
        new_field = IntegerField(unique=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # 'info' is NOT NULL to begin with.
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = TextField()
        new_field.set_attributes_from_name("detail_info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        columns = self.column_classes(Note)
        self.assertEqual(columns['detail_info'][0], "TextField")
        self.assertNotIn("info", columns)
        # NoteRename presumably maps onto the same table with the renamed
        # column — confirm against the models module.  The NOT NULL
        # constraint must still be enforced after the rename.
        with self.assertRaises(IntegrityError):
            NoteRename.objects.create(detail_info=None)
    def _test_m2m_create(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register the locally-defined model so the harness can tear it down.
        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2M)
        # Ensure there is now an m2m table there; column_classes raises if
        # the through table wasn't created.
        columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
    def test_m2m_create(self):
        """M2M creation with the stock ManyToManyField."""
        self._test_m2m_create(ManyToManyField)
    def test_m2m_create_custom(self):
        """M2M creation with a custom ManyToManyField subclass."""
        self._test_m2m_create(CustomManyToManyField)
    def test_m2m_create_inherited(self):
        """M2M creation with an inherited ManyToManyField subclass."""
        self._test_m2m_create(InheritedManyToManyField)
    def _test_m2m_create_through(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation with through models
        """
        # Explicit through model joining books and tags.
        class LocalTagThrough(Model):
            book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        class LocalBookWithM2MThrough(Model):
            tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register locally-defined models so the harness can tear them down.
        self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalTagThrough)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2MThrough)
        # Ensure there is now an m2m table there with both FK columns.
        columns = self.column_classes(LocalTagThrough)
        self.assertEqual(columns['book_id'][0], "IntegerField")
        self.assertEqual(columns['tag_id'][0], "IntegerField")
    def test_m2m_create_through(self):
        """M2M creation through an explicit model, stock field class."""
        self._test_m2m_create_through(ManyToManyField)
    def test_m2m_create_through_custom(self):
        """M2M creation through an explicit model, custom field subclass."""
        self._test_m2m_create_through(CustomManyToManyField)
    def test_m2m_create_through_inherited(self):
        """M2M creation through an explicit model, inherited field subclass."""
        self._test_m2m_create_through(InheritedManyToManyField)
    def _test_m2m(self, M2MFieldClass):
        """
        Tests adding/removing M2M fields on models
        """
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register the locally-defined model so the harness can tear it down.
        self.local_models = [LocalAuthorWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorWithM2M)
            editor.create_model(TagM2MTest)
        # Create an M2M field
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Ensure there is now an m2m table there
        columns = self.column_classes(new_field.remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
        # "Alter" the field. This should not rename the DB table to itself.
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
        # Remove the M2M table again
        with connection.schema_editor() as editor:
            editor.remove_field(LocalAuthorWithM2M, new_field)
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Make sure the model state is coherent with the table one now that
        # we've removed the tags field: strip the field from Options, drop
        # the auto-created through model from the test app registry, and
        # expire the cached field lists.
        opts = LocalAuthorWithM2M._meta
        opts.local_many_to_many.remove(new_field)
        del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name]
        opts._expire_cache()
    def test_m2m(self):
        """Add/remove M2M with the stock ManyToManyField."""
        self._test_m2m(ManyToManyField)
    def test_m2m_custom(self):
        """Add/remove M2M with a custom ManyToManyField subclass."""
        self._test_m2m(CustomManyToManyField)
    def test_m2m_inherited(self):
        """Add/remove M2M with an inherited ManyToManyField subclass."""
        self._test_m2m(InheritedManyToManyField)
    def _test_m2m_through_alter(self, M2MFieldClass):
        """
        Tests altering M2Ms with explicit through models (should no-op)
        """
        class LocalAuthorTag(Model):
            author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        class LocalAuthorWithM2MThrough(Model):
            name = CharField(max_length=255)
            tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register locally-defined models so the harness can tear them down.
        self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorTag)
            editor.create_model(LocalAuthorWithM2MThrough)
            editor.create_model(TagM2MTest)
        # Ensure the m2m table is there (id + two FK columns).
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
        # "Alter" the field's blankness. This should not actually do anything.
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
    def test_m2m_through_alter(self):
        """Altering an explicit-through M2M, stock field class."""
        self._test_m2m_through_alter(ManyToManyField)
    def test_m2m_through_alter_custom(self):
        """Altering an explicit-through M2M, custom field subclass."""
        self._test_m2m_through_alter(CustomManyToManyField)
    def test_m2m_through_alter_inherited(self):
        """Altering an explicit-through M2M, inherited field subclass."""
        self._test_m2m_through_alter(InheritedManyToManyField)
    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")
            class Meta:
                app_label = 'schema'
                apps = new_apps
        # Register the locally-defined model so the harness can tear it down.
        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        constraints = self.get_constraints(
            LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table
        )
        if connection.features.supports_foreign_keys:
            # for/else: fail only if no matching FK constraint was found.
            for name, details in constraints.items():
                if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
                    break
            else:
                self.fail("No FK constraint for tagm2mtest_id found")
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        if connection.features.supports_foreign_keys:
            for name, details in constraints.items():
                if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
                    break
            else:
                self.fail("No FK constraint for uniquetest_id found")
    def test_m2m_repoint(self):
        """Repointing an M2M with the stock ManyToManyField."""
        self._test_m2m_repoint(ManyToManyField)
    def test_m2m_repoint_custom(self):
        """Repointing an M2M with a custom ManyToManyField subclass."""
        self._test_m2m_repoint(CustomManyToManyField)
    def test_m2m_repoint_inherited(self):
        """Repointing an M2M with an inherited ManyToManyField subclass."""
        self._test_m2m_repoint(InheritedManyToManyField)
    @skipUnlessDBFeature('supports_column_check_constraints')
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists (for/else: fail if no CHECK on height).
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # No CHECK constraint on height may survive the alter.
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it (back to the model-declared field).
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")
    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique (duplicate slugs succeed).
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field; the unique constraint must follow the rename.
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique.  TagUniqueRename presumably maps
        # onto the same table with the renamed column — confirm in models.
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields are unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
    def test_unique_together_with_fk_with_existing_index(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key, where the foreign key is added after the model is
        created.
        """
        # Create the tables; the FK is added post-creation so its index
        # already exists before unique_together is applied.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithoutAuthor)
            new_field = ForeignKey(Author, CASCADE)
            new_field.set_attributes_from_name('author')
            editor.add_field(BookWithoutAuthor, new_field)
        # Ensure the fields aren't unique to begin with.  Book is used from
        # here on — presumably it maps onto the same table as
        # BookWithoutAuthor; confirm against the models module.
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the year/slug columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the fields are unique to begin with
self.assertEqual(Book._meta.index_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=['name'], name='author_title_idx')
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn('name', self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
    def test_indexes(self):
        """
        Tests creation/altering of indexes
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there and has the right index
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to remove the index (db_index True -> False).
        old_field = Book._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=False)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the table is there and has no index
        self.assertNotIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to re-add the index (back to the model-declared field).
        new_field2 = Book._meta.get_field("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, new_field, new_field2, strict=True)
        # Ensure the table is there and has the index again
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Add a unique column, verify that creates an implicit index
        new_field3 = BookWithSlug._meta.get_field("slug")
        with connection.schema_editor() as editor:
            editor.add_field(Book, new_field3)
        self.assertIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )
        # Remove the unique, check the index goes with it
        new_field4 = CharField(max_length=20, unique=False)
        new_field4.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
        self.assertNotIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )
    def test_primary_key(self):
        """
        Tests altering of the primary key
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the table is there and has the right PK
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
        )
        # Alter to change the PK: drop 'id', promote 'slug' to primary key.
        id_field = Tag._meta.get_field("id")
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(primary_key=True)
        new_field.set_attributes_from_name("slug")
        new_field.model = Tag
        # Both operations must happen inside one editor so the table is
        # never left without a usable primary key mid-test.
        with connection.schema_editor() as editor:
            editor.remove_field(Tag, id_field)
            editor.alter_field(Tag, old_field, new_field)
        # Ensure the PK changed
        self.assertNotIn(
            'id',
            self.get_indexes(Tag._meta.db_table),
        )
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
        )
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_foreign_key_index_long_names_regression(self):
        """
        Regression test for #21497.
        Only affects databases that supports foreign keys.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Find the properly shortened column name
        column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
        column_name = column_name[1:-1].lower()  # unquote, and, for Oracle, un-upcase
        # Ensure the table is there and has an index on the column
        self.assertIn(
            column_name,
            self.get_indexes(BookWithLongName._meta.db_table),
        )
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_add_foreign_key_long_names(self):
        """
        Regression test for #23009.
        Only affects databases that supports foreign keys.
        """
        # Create the initial tables
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Add a second FK, this would fail due to long ref name before the fix
        new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something")
        new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
        # Succeeding without raising is the whole assertion here.
        with connection.schema_editor() as editor:
            editor.add_field(BookWithLongName, new_field)
    def test_add_foreign_object(self):
        """
        add_field() accepts a ForeignObject (composite, non-column-backed
        relation); succeeding without raising is the assertion.
        """
        with connection.schema_editor() as editor:
            editor.create_model(BookForeignObj)
        new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id'])
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e)
# Check that it's there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# Check that it's gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
    def test_remove_constraints_capital_letters(self):
        """
        #23065 - Constraint names must be quoted if they contain capital letters.
        """
        def get_field(*args, **kwargs):
            # Helper: build a field on column "CamelCase", defaulting to
            # IntegerField unless field_class says otherwise.
            kwargs['db_column'] = "CamelCase"
            field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
            field.set_attributes_from_name("CamelCase")
            return field
        model = Author
        field = get_field()
        table = model._meta.db_table
        column = field.column
        with connection.schema_editor() as editor:
            editor.create_model(model)
            editor.add_field(model, field)
            # Case 1: a mixed-case index, created via the editor's raw SQL
            # template, must be dropped by alter_field.
            constraint_name = "CamelCaseIndex"
            editor.execute(
                editor.sql_create_index % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name(constraint_name),
                    "columns": editor.quote_name(column),
                    "extra": "",
                }
            )
            # Some backends (e.g. Oracle) report identifiers upper-cased.
            if connection.features.uppercases_column_names:
                constraint_name = constraint_name.upper()
            self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
            editor.alter_field(model, get_field(db_index=True), field, strict=True)
            self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
            # Case 2: a mixed-case unique constraint.
            constraint_name = "CamelCaseUniqConstraint"
            editor.execute(
                editor.sql_create_unique % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name(constraint_name),
                    "columns": editor.quote_name(field.column),
                }
            )
            if connection.features.uppercases_column_names:
                constraint_name = constraint_name.upper()
            self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
            editor.alter_field(model, get_field(unique=True), field, strict=True)
            self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
            # Case 3: a mixed-case FK constraint (self-referential), only on
            # backends that enforce foreign keys.
            if connection.features.supports_foreign_keys:
                constraint_name = "CamelCaseFKConstraint"
                editor.execute(
                    editor.sql_create_fk % {
                        "table": editor.quote_name(table),
                        "name": editor.quote_name(constraint_name),
                        "column": editor.quote_name(column),
                        "to_table": editor.quote_name(table),
                        "to_column": editor.quote_name(model._meta.auto_field.column),
                        "deferrable": connection.ops.deferrable_sql(),
                    }
                )
                if connection.features.uppercases_column_names:
                    constraint_name = constraint_name.upper()
                self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
                editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True)
                self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))
    def test_add_field_use_effective_default(self):
        """
        #23987 - effective_default() should be used as the field default when
        adding a new field.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row before adding the field, so it must be backfilled.
        Author.objects.create(name='Anonymous1')
        # Add new CharField to ensure default will be used from effective_default
        new_field = CharField(max_length=15, blank=True)
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default: empty string, or
        # NULL on backends that treat empty strings as NULL.
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
    def test_add_field_default_dropped(self):
        """
        A field default is used to backfill existing rows when the field is
        added, but must not be left as a database-level default afterwards.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row before adding the field, so it must be backfilled.
        Author.objects.create(name='Anonymous1')
        # Add new CharField with a default
        new_field = CharField(max_length=15, blank=True, default='surname default')
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0], 'surname default')
            # And that the default is no longer set in the database.
            field = next(
                f for f in connection.introspection.get_table_description(cursor, "schema_author")
                if f.name == "surname"
            )
            # Only verifiable on backends that can introspect defaults.
            if connection.features.can_introspect_default:
                self.assertIsNone(field.default)
    def test_alter_field_default_dropped(self):
        """
        alter_field() uses the new field's default to update existing rows,
        but the database-level default is dropped afterwards.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Create a row
        Author.objects.create(name='Anonymous1')
        self.assertIsNone(Author.objects.get().height)
        old_field = Author._meta.get_field('height')
        # The default from the new field is used in updating existing rows.
        new_field = IntegerField(blank=True, default=42)
        new_field.set_attributes_from_name('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(Author.objects.get().height, 42)
        # The database default should be removed.
        with connection.cursor() as cursor:
            field = next(
                f for f in connection.introspection.get_table_description(cursor, "schema_author")
                if f.name == "height"
            )
            # Only assert on backends that can introspect column defaults.
            if connection.features.can_introspect_default:
                self.assertIsNone(field.default)
    def test_add_textfield_unhashable_default(self):
        """
        add_field() must not crash when the field's default is unhashable
        (e.g. a dict) — effective defaults must not assume hashability.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Create a row
        Author.objects.create(name='Anonymous1')
        # Create a field that has an unhashable default
        new_field = TextField(default={})
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'],
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key']
)
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_index_to_charfield(self):
        """
        Altering a CharField to db_index=True creates both the regular and
        the "_like" index; altering back drops both.
        """
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=255, db_index=True)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, 'name'),
            ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like']
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield(self):
        """
        Altering a CharField to unique=True creates the "_uniq" and "_like"
        indexes; altering back drops both.
        """
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
        # Alter to add unique=True and create 2 indexes.
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, 'name'),
            ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq']
        )
        # Remove unique=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_index_to_textfield(self):
        """
        Same as the CharField case but for TextField: db_index=True creates
        the regular and "_like" indexes; reverting drops both.
        """
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Note._meta.get_field('info')
        new_field = TextField(db_index=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Note, 'info'),
            ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like']
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Note, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield_with_db_index(self):
        """
        Adding unique=True to an already-indexed CharField replaces the plain
        index with a "_uniq" one; removing unique=True restores the plain
        index (db_index=True stays throughout).
        """
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
        )
        # Alter to remove unique=True (should drop unique index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
        )
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_remove_unique_and_db_index_from_charfield(self):
        """
        Removing both unique=True and db_index=True in a single alter drops
        every index on the column.
        """
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
        )
        # Alter to remove both unique=True and db_index=True (should drop all indexes)
        new_field2 = CharField(max_length=100)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), [])
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_swap_unique_and_db_index_with_charfield(self):
        """
        Swapping db_index=True for unique=True (and back) in one alter
        replaces the plain index with the unique one and vice versa.
        """
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
        )
        # Alter to set unique=True and remove db_index=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field('title')
        new_field = CharField(max_length=100, unique=True)
        new_field.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
        )
        # Alter to set db_index=True and remove unique=True (should restore index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name('title')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, 'title'),
            ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
        )
    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_db_index_to_charfield_with_unique(self):
        """
        Toggling db_index=True on a field that is already unique=True is a
        no-op for the schema: the unique key already provides the index, so
        the constraint set stays the same throughout.
        """
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )
        # Alter to add db_index=True
        old_field = Tag._meta.get_field('slug')
        new_field = SlugField(db_index=True, unique=True)
        new_field.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )
        # Alter to remove db_index=True
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name('slug')
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, 'slug'),
            ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
        )
    def test_alter_field_add_index_to_integerfield(self):
        """
        Altering an IntegerField to db_index=True creates a single index
        (no "_like" index for non-text columns); reverting drops it.
        """
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
        # Alter to add db_index=True and create index.
        old_field = Author._meta.get_field('weight')
        new_field = IntegerField(null=True, db_index=True)
        new_field.set_attributes_from_name('weight')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        expected = 'schema_author_weight_587740f9'
        # Some backends report identifiers upper-cased.
        if connection.features.uppercases_column_names:
            expected = expected.upper()
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [expected])
        # Remove db_index=True to drop index.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
    def test_alter_pk_with_self_referential_field(self):
        """
        Changing the primary key field name of a model with a self-referential
        foreign key (#26384).
        """
        # Renaming a PK triggers #24995 on MySQL < 5.6.6.
        if connection.vendor == 'mysql' and connection.mysql_version < (5, 6, 6):
            self.skipTest('Skip known bug renaming primary keys on older MySQL versions (#24995).')
        old_field = Node._meta.get_field('node_id')
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name('id')
        # strict=True makes the editor raise on any unexpected schema state.
        with connection.schema_editor() as editor:
            editor.alter_field(Node, old_field, new_field, strict=True)
@mock.patch('django.db.backends.base.schema.datetime')
@mock.patch('django.db.backends.base.schema.timezone')
def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_datetime, mocked_tz):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_add_now is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=UTC())
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name('dob_auto_now')
self.check_added_field_default(
editor, Author, dob_auto_now, 'dob_auto_now', now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name('dob_auto_now_add')
self.check_added_field_default(
editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name('dtob_auto_now')
self.check_added_field_default(
editor, Author, dtob_auto_now, 'dtob_auto_now', now,
)
dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True)
dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add')
self.check_added_field_default(
editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name('tob_auto_now')
self.check_added_field_default(
editor, Author, tob_auto_now, 'tob_auto_now', now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name('tob_auto_now_add')
self.check_added_field_default(
editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(),
cast_function=lambda x: x.time(),
)
| sergei-maertens/django | tests/schema/tests.py | Python | bsd-3-clause | 97,924 |
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleSpherical
#
# methods:
# __call__: returns (jr,lz,jz)
# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)
# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)
#
###############################################################################
import copy
import numpy
from scipy import integrate
from ..potential import epifreq, omegac, _dim
from ..potential.Potential import _evaluatePotentials
from ..potential.Potential import flatten as flatten_potential
from .actionAngle import actionAngle
from .actionAngleAxi import actionAngleAxi, potentialAxi
class actionAngleSpherical(actionAngle):
    """Action-angle formalism for spherical potentials.

    Any orbit in a spherical potential is planar, so the 3D problem is
    reduced to the 1D radial problem in the orbital plane; the remaining
    quadratures are done with scipy.integrate (quad/quadrature or
    fixed_quad).
    """
    def __init__(self,*args,**kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize an actionAngleSpherical object
        INPUT:
           pot= a Spherical potential
           ro= distance from vantage point to GC (kpc; can be Quantity)
           vo= circular velocity at ro (km/s; can be Quantity)
        OUTPUT:
           instance
        HISTORY:
           2013-12-28 - Written - Bovy (IAS)
        """
        actionAngle.__init__(self,
                             ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
        if not 'pot' in kwargs: #pragma: no cover
            raise IOError("Must specify pot= for actionAngleSpherical")
        self._pot= flatten_potential(kwargs['pot'])
        #Also store a 'planar' (2D) version of the potential
        if _dim(self._pot) == 2:
            self._2dpot= self._pot
        elif isinstance(self._pot,list):
            self._2dpot= [p.toPlanar() for p in self._pot]
        else:
            self._2dpot= self._pot.toPlanar()
        #The following for if we ever implement this code in C
        # (ext_loaded is hard-wired False, so self._c is always False)
        self._c= False
        ext_loaded= False
        if ext_loaded and (('c' in kwargs and kwargs['c'])
                           or not 'c' in kwargs):
            self._c= True #pragma: no cover
        else:
            self._c= False
        # Check the units
        self._check_consistent_units()
        return None
    def _evaluate(self,*args,**kwargs):
        """
        NAME:
           __call__ (_evaluate)
        PURPOSE:
           evaluate the actions (jr,lz,jz)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
           fixed_quad= (False) if True, use n=10 fixed_quad integration
           scipy.integrate.quadrature or .fixed_quad keywords
        OUTPUT:
           (jr,lz,jz)
        HISTORY:
           2013-12-28 - Written - Bovy (IAS)
        """
        fixed_quad= kwargs.pop('fixed_quad',False)
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        # Promote scalar input to length-1 arrays so the loop below works
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            # Angular-momentum components evaluated at phi=0; the
            # magnitude |L| is independent of phi
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= _evaluatePotentials(self._pot,R,z)\
                +vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            #Jr requires some more work
            #Set up an actionAngleAxi object for EL and rap/rperi calculations
            # (spherical radius and in-plane velocities re-labeled as
            # axisymmetric R,vR,vT so the planar machinery applies)
            axiR= numpy.sqrt(R**2.+z**2.)
            axivT= L/axiR
            axivR= (R*vR+z*vz)/axiR
            Jr= []
            for ii in range(len(axiR)):
                axiaA= actionAngleAxi(axiR[ii],axivR[ii],axivT[ii],
                                      pot=self._2dpot)
                (rperi,rap)= axiaA.calcRapRperi()
                EL= axiaA.calcEL()
                # NOTE: rebinds the array-valued E and L above to this
                # orbit's scalar energy and angular momentum
                E, L= EL
                Jr.append(self._calc_jr(rperi,rap,E,L,fixed_quad,**kwargs))
            return (numpy.array(Jr),Jphi,Jz)
    def _actionsFreqs(self,*args,**kwargs):
        """
        NAME:
           actionsFreqs (_actionsFreqs)
        PURPOSE:
           evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
           fixed_quad= (False) if True, use n=10 fixed_quad integration
           scipy.integrate.quadrature or .fixed_quad keywords
        OUTPUT:
            (jr,lz,jz,Omegar,Omegaphi,Omegaz)
        HISTORY:
           2013-12-28 - Written - Bovy (IAS)
        """
        fixed_quad= kwargs.pop('fixed_quad',False)
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        # Promote scalar input to length-1 arrays so the loop below works
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            # Angular momentum at phi=0 (|L| is phi-independent)
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= _evaluatePotentials(self._pot,R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            #Jr requires some more work
            #Set up an actionAngleAxi object for EL and rap/rperi calculations
            axiR= numpy.sqrt(R**2.+z**2.)
            axivT= L/axiR
            axivR= (R*vR+z*vz)/axiR
            Jr= []
            Or= []
            Op= []
            for ii in range(len(axiR)):
                axiaA= actionAngleAxi(axiR[ii],axivR[ii],axivT[ii],
                                      pot=self._2dpot)
                (rperi,rap)= axiaA.calcRapRperi()
                EL= axiaA.calcEL()
                # NOTE: rebinds the array-valued E and L above to this
                # orbit's scalar energy and angular momentum
                E, L= EL
                Jr.append(self._calc_jr(rperi,rap,E,L,fixed_quad,**kwargs))
                #Radial period
                if Jr[-1] < 10.**-9.: #Circular orbit
                    # Use the epicycle and circular frequencies directly
                    Or.append(epifreq(self._pot,axiR[ii],use_physical=False))
                    Op.append(omegac(self._pot,axiR[ii],use_physical=False))
                    continue
                # Geometric mean of the turning points; splits the
                # frequency integrals in _calc_or/_calc_op
                Rmean= numpy.exp((numpy.log(rperi)+numpy.log(rap))/2.)
                Or.append(self._calc_or(Rmean,rperi,rap,E,L,fixed_quad,**kwargs))
                Op.append(self._calc_op(Or[-1],Rmean,rperi,rap,E,L,fixed_quad,**kwargs))
            # In a spherical potential Omega_z = |Omega_phi|; retrograde
            # (vT < 0) orbits get a negative azimuthal frequency
            Op= numpy.array(Op)
            Oz= copy.copy(Op)
            Op[vT < 0.]*= -1.
            return (numpy.array(Jr),Jphi,Jz,numpy.array(Or),Op,Oz)
    def _actionsFreqsAngles(self,*args,**kwargs):
        """
        NAME:
           actionsFreqsAngles (_actionsFreqsAngles)
        PURPOSE:
           evaluate the actions, frequencies, and angles
           (jr,lz,jz,Omegar,Omegaphi,Omegaz,ar,ap,az)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
           fixed_quad= (False) if True, use n=10 fixed_quad integration
           scipy.integrate.quadrature or .fixed_quad keywords
        OUTPUT:
            (jr,lz,jz,Omegar,Omegaphi,Omegaz,ar,aphi,az)
        HISTORY:
           2013-12-29 - Written - Bovy (IAS)
        """
        fixed_quad= kwargs.pop('fixed_quad',False)
        if len(args) == 5: #R,vR.vT, z, vz pragma: no cover
            raise IOError("You need to provide phi when calculating angles")
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
            phi= self._eval_phi
        # Promote scalar input to length-1 arrays so the loop below works
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
            phi= numpy.array([phi])
        if self._c: #pragma: no cover
            pass
        else:
            # Angular momentum at phi=0 (|L| is phi-independent)
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= _evaluatePotentials(self._pot,R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            #Jr requires some more work
            #Set up an actionAngleAxi object for EL and rap/rperi calculations
            axiR= numpy.sqrt(R**2.+z**2.)
            axivT= L/axiR #these are really spherical coords, called axi bc they go in actionAngleAxi
            axivR= (R*vR+z*vz)/axiR
            axivz= (z*vR-R*vz)/axiR
            Jr= []
            Or= []
            Op= []
            ar= []
            az= []
            #Calculate the longitude of the ascending node
            asc= self._calc_long_asc(z,R,axivz,phi,Lz,L)
            for ii in range(len(axiR)):
                axiaA= actionAngleAxi(axiR[ii],axivR[ii],axivT[ii],
                                      pot=self._2dpot)
                (rperi,rap)= axiaA.calcRapRperi()
                EL= axiaA.calcEL()
                # NOTE: rebinds the array-valued E and L above to this
                # orbit's scalar energy and angular momentum
                E, L= EL
                Jr.append(self._calc_jr(rperi,rap,E,L,fixed_quad,**kwargs))
                #Radial period
                Rmean= numpy.exp((numpy.log(rperi)+numpy.log(rap))/2.)
                if Jr[-1] < 10.**-9.: #Circular orbit
                    Or.append(epifreq(self._pot,axiR[ii],use_physical=False))
                    Op.append(omegac(self._pot,axiR[ii],use_physical=False))
                else:
                    Or.append(self._calc_or(Rmean,rperi,rap,E,L,fixed_quad,**kwargs))
                    Op.append(self._calc_op(Or[-1],Rmean,rperi,rap,E,L,fixed_quad,**kwargs))
                #Angles
                ar.append(self._calc_angler(Or[-1],axiR[ii],Rmean,rperi,rap,
                                            E,L,axivR[ii],fixed_quad,**kwargs))
                az.append(self._calc_anglez(Or[-1],Op[-1],ar[-1],
                                            z[ii],axiR[ii],
                                            Rmean,rperi,rap,E,L,Lz[ii],
                                            axivR[ii],axivz[ii],phi[ii],
                                            fixed_quad,**kwargs))
            # In a spherical potential Omega_z = |Omega_phi|; retrograde
            # (vT < 0) orbits get a negative azimuthal frequency
            Op= numpy.array(Op)
            Oz= copy.copy(Op)
            Op[vT < 0.]*= -1.
            # Azimuthal angle = longitude of the ascending node +/- the
            # vertical angle, depending on the orbit's sense of rotation
            ap= copy.copy(asc)
            ar= numpy.array(ar)
            az= numpy.array(az)
            ap[vT < 0.]-= az[vT < 0.]
            ap[vT >= 0.]+= az[vT >= 0.]
            # Wrap all angles to [0, 2 pi)
            ar= ar % (2.*numpy.pi)
            ap= ap % (2.*numpy.pi)
            az= az % (2.*numpy.pi)
            return (numpy.array(Jr),Jphi,Jz,numpy.array(Or),Op,Oz,
                    ar,ap,az)
    def _EccZmaxRperiRap(self,*args,**kwargs):
        """
        NAME:
           EccZmaxRperiRap (_EccZmaxRperiRap)
        PURPOSE:
           evaluate the eccentricity, maximum height above the plane, peri- and apocenter for a spherical potential
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
        OUTPUT:
           (e,zmax,rperi,rap)
        HISTORY:
           2017-12-22 - Written - Bovy (UofT)
        """
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        # Promote scalar input to length-1 arrays so the loop below works
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            # Angular momentum at phi=0 (|L| is phi-independent)
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            L= numpy.sqrt(L2)
            #Set up an actionAngleAxi object for EL and rap/rperi calculations
            axiR= numpy.sqrt(R**2.+z**2.)
            axivT= L/axiR
            axivR= (R*vR+z*vz)/axiR
            rperi, rap= [], []
            for ii in range(len(axiR)):
                axiaA= actionAngleAxi(axiR[ii],axivR[ii],axivT[ii],
                                      pot=self._2dpot)
                trperi,trap= axiaA.calcRapRperi()
                rperi.append(trperi)
                rap.append(trap)
            rperi= numpy.array(rperi)
            rap= numpy.array(rap)
            # ecc= (rap-rperi)/(rap+rperi);
            # zmax= rap*sin(i) with cos(i)= Lz/L, reached at apocenter
            return ((rap-rperi)/(rap+rperi),rap*numpy.sqrt(1.-Lz**2./L2),
                    rperi,rap)
    def _calc_jr(self,rperi,rap,E,L,fixed_quad,**kwargs):
        # J_r = (1/pi) int_rperi^rap v_r(r) dr with v_r the radial
        # velocity at energy E and angular momentum L
        if fixed_quad:
            return integrate.fixed_quad(_JrSphericalIntegrand,
                                        rperi,rap,
                                        args=(E,L,self._2dpot),
                                        n=10,
                                        **kwargs)[0]/numpy.pi
        else:
            return (numpy.array(integrate.quad(_JrSphericalIntegrand,
                                               rperi,rap,
                                               args=(E,L,self._2dpot),
                                               **kwargs)))[0]/numpy.pi
    def _calc_or(self,Rmean,rperi,rap,E,L,fixed_quad,**kwargs):
        # Radial frequency Omega_r = 2 pi / T_r.  The half-period integral
        # is split at Rmean and evaluated with the substitutions
        # r = rperi + t^2 and r = rap - t^2, which remove the v_r = 0
        # singularities at the turning points.
        Tr= 0.
        if Rmean > rperi and not fixed_quad:
            Tr+= numpy.array(integrate.quadrature(_TrSphericalIntegrandSmall,
                                                  0.,numpy.sqrt(Rmean-rperi),
                                                  args=(E,L,self._2dpot,
                                                        rperi),
                                                  **kwargs))[0]
        elif Rmean > rperi and fixed_quad:
            Tr+= integrate.fixed_quad(_TrSphericalIntegrandSmall,
                                      0.,numpy.sqrt(Rmean-rperi),
                                      args=(E,L,self._2dpot,
                                            rperi),
                                      n=10,**kwargs)[0]
        if Rmean < rap and not fixed_quad:
            Tr+= numpy.array(integrate.quadrature(_TrSphericalIntegrandLarge,
                                                  0.,numpy.sqrt(rap-Rmean),
                                                  args=(E,L,self._2dpot,
                                                        rap),
                                                  **kwargs))[0]
        elif Rmean < rap and fixed_quad:
            Tr+= integrate.fixed_quad(_TrSphericalIntegrandLarge,
                                      0.,numpy.sqrt(rap-Rmean),
                                      args=(E,L,self._2dpot,
                                            rap),
                                      n=10,**kwargs)[0]
        # Above covers rperi -> rap, i.e., half the full radial period
        Tr= 2.*Tr
        return 2.*numpy.pi/Tr
    def _calc_op(self,Or,Rmean,rperi,rap,E,L,fixed_quad,**kwargs):
        #Azimuthal period
        # Omega_phi = Or * Delta phi / (2 pi) with
        # Delta phi = 2 L int dr / (r^2 v_r) over rperi -> rap, using the
        # same turning-point substitutions as in _calc_or
        I= 0.
        if Rmean > rperi and not fixed_quad:
            I+= numpy.array(integrate.quadrature(_ISphericalIntegrandSmall,
                                                 0.,numpy.sqrt(Rmean-rperi),
                                                 args=(E,L,self._2dpot,
                                                       rperi),
                                                 **kwargs))[0]
        elif Rmean > rperi and fixed_quad:
            I+= integrate.fixed_quad(_ISphericalIntegrandSmall,
                                     0.,numpy.sqrt(Rmean-rperi),
                                     args=(E,L,self._2dpot,rperi),
                                     n=10,**kwargs)[0]
        if Rmean < rap and not fixed_quad:
            I+= numpy.array(integrate.quadrature(_ISphericalIntegrandLarge,
                                                 0.,numpy.sqrt(rap-Rmean),
                                                 args=(E,L,self._2dpot,
                                                       rap),
                                                 **kwargs))[0]
        elif Rmean < rap and fixed_quad:
            I+= integrate.fixed_quad(_ISphericalIntegrandLarge,
                                     0.,numpy.sqrt(rap-Rmean),
                                     args=(E,L,self._2dpot,rap),
                                     n=10,**kwargs)[0]
        I*= 2*L
        return I*Or/2./numpy.pi
    def _calc_long_asc(self,z,R,axivz,phi,Lz,L):
        # Longitude of the ascending node: Omega = phi - u, with u the
        # azimuthal offset from the node within the orbital plane
        i= numpy.arccos(Lz/L)
        sinu= z/R/numpy.tan(i)
        # Clip |sin u| > 1 overshoots from floating-point rounding
        pindx= (sinu > 1.)*numpy.isfinite(sinu)
        sinu[pindx]= 1.
        pindx= (sinu < -1.)*numpy.isfinite(sinu)
        sinu[pindx]= -1.
        u= numpy.arcsin(sinu)
        vzindx= axivz > 0.
        u[vzindx]= numpy.pi-u[vzindx]
        # For non-inclined orbits, we set Omega=0 by convention
        # (True^bool_array is elementwise NOT)
        u[True^numpy.isfinite(u)]= phi[True^numpy.isfinite(u)]
        return phi-u
    def _calc_angler(self,Or,r,Rmean,rperi,rap,E,L,vr,fixed_quad,**kwargs):
        # Radial angle theta_r = Or * t(r), integrating from the nearest
        # turning point with the same substitutions as in _calc_or;
        # the vr < 0 branches reflect the angle into the inbound half
        if r < Rmean:
            if r > rperi and not fixed_quad:
                wr= Or*integrate.quadrature(_TrSphericalIntegrandSmall,
                                            0.,numpy.sqrt(r-rperi),
                                            args=(E,L,self._2dpot,rperi),
                                            **kwargs)[0]
            elif r > rperi and fixed_quad:
                wr= Or*integrate.fixed_quad(_TrSphericalIntegrandSmall,
                                            0.,numpy.sqrt(r-rperi),
                                            args=(E,L,self._2dpot,rperi),
                                            n=10,**kwargs)[0]
            else:
                wr= 0.
            if vr < 0.: wr= 2*numpy.pi-wr
        else:
            if r < rap and not fixed_quad:
                wr= Or*integrate.quadrature(_TrSphericalIntegrandLarge,
                                            0.,numpy.sqrt(rap-r),
                                            args=(E,L,self._2dpot,rap),
                                            **kwargs)[0]
            elif r < rap and fixed_quad:
                wr= Or*integrate.fixed_quad(_TrSphericalIntegrandLarge,
                                            0.,numpy.sqrt(rap-r),
                                            args=(E,L,self._2dpot,rap),
                                            n=10,**kwargs)[0]
            else:
                wr= numpy.pi
            if vr < 0.:
                wr= numpy.pi+wr
            else:
                wr= numpy.pi-wr
        return wr
    def _calc_anglez(self,Or,Op,ar,z,r,Rmean,rperi,rap,E,L,Lz,vr,axivz,phi,
                     fixed_quad,**kwargs):
        # Vertical angle theta_z = psi + (Op/Or)*ar - dSr/dL, where psi is
        # the in-plane azimuth from the ascending node
        #First calculate psi
        i= numpy.arccos(Lz/L)
        sinpsi= z/r/numpy.sin(i)
        if numpy.isfinite(sinpsi):
            # Clip rounding overshoots before arcsin
            if sinpsi > 1.:
                sinpsi= 1.
            elif sinpsi < -1.:
                sinpsi= -1.
            psi= numpy.arcsin(sinpsi)
            if axivz > 0.: psi= numpy.pi-psi
        else:
            # Non-inclined orbit: psi degenerates to the azimuth itself
            psi= phi
        psi= psi % (2.*numpy.pi)
        #Calculate dSr/dL
        dpsi= Op/Or*2.*numpy.pi #this is the full I integral
        if r < Rmean:
            if not fixed_quad:
                wz= L*integrate.quadrature(_ISphericalIntegrandSmall,
                                           0.,numpy.sqrt(r-rperi),
                                           args=(E,L,self._2dpot,
                                                 rperi),
                                           **kwargs)[0]
            elif fixed_quad:
                wz= L*integrate.fixed_quad(_ISphericalIntegrandSmall,
                                           0.,numpy.sqrt(r-rperi),
                                           args=(E,L,self._2dpot,
                                                 rperi),
                                           n=10,**kwargs)[0]
            if vr < 0.: wz= dpsi-wz
        else:
            if not fixed_quad:
                wz= L*integrate.quadrature(_ISphericalIntegrandLarge,
                                           0.,numpy.sqrt(rap-r),
                                           args=(E,L,self._2dpot,
                                                 rap),
                                           **kwargs)[0]
            elif fixed_quad:
                wz= L*integrate.fixed_quad(_ISphericalIntegrandLarge,
                                           0.,numpy.sqrt(rap-r),
                                           args=(E,L,self._2dpot,
                                                 rap),
                                           n=10,**kwargs)[0]
            if vr < 0.:
                wz= dpsi/2.+wz
            else:
                wz= dpsi/2.-wz
        #Add everything
        wz= -wz+psi+Op/Or*ar
        return wz
def _JrSphericalIntegrand(r,E,L,pot):
    """Radial-action integrand: the radial speed v_r(r) at energy E and
    angular momentum L in the (planar) potential pot."""
    vr2= 2.*(E-potentialAxi(r,pot))-L**2./r**2.
    return numpy.sqrt(vr2)
def _TrSphericalIntegrandSmall(t,E,L,pot,rperi):
    """Radial-period integrand near pericenter, using r = rperi + t^2 to
    remove the v_r = 0 singularity; the factor 2t is the Jacobian dr/dt."""
    radius= rperi+t**2.
    return 2.*t/_JrSphericalIntegrand(radius,E,L,pot)
def _TrSphericalIntegrandLarge(t,E,L,pot,rap):
    """Radial-period integrand near apocenter, using r = rap - t^2 to
    remove the v_r = 0 singularity; the factor 2t is the Jacobian |dr/dt|."""
    radius= rap-t**2.
    return 2.*t/_JrSphericalIntegrand(radius,E,L,pot)
def _ISphericalIntegrandSmall(t,E,L,pot,rperi):
    """Azimuthal (Delta phi) integrand near pericenter: the radial-period
    integrand divided by r^2, with the same r = rperi + t^2 substitution."""
    radius= rperi+t**2.
    return 2.*t/_JrSphericalIntegrand(radius,E,L,pot)/radius**2.
def _ISphericalIntegrandLarge(t,E,L,pot,rap):
    """Azimuthal (Delta phi) integrand near apocenter: the radial-period
    integrand divided by r^2, with the same r = rap - t^2 substitution."""
    radius= rap-t**2.
    return 2.*t/_JrSphericalIntegrand(radius,E,L,pot)/radius**2.
| jobovy/galpy | galpy/actionAngle/actionAngleSpherical.py | Python | bsd-3-clause | 23,406 |
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
### BEGIN LICENSE
# Copyright (C) 2010-2012 Kevin Mehall <[email protected]>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import logging
import os
import re
import webbrowser
from urllib.parse import splittype, splituser, splitpasswd
def parse_proxy(proxy):
    """Split a proxy specification into (scheme, user, password, hostport).

    Reimplementation of urllib's _parse_proxy without the
    splittype/splituser/splitpasswd helpers, which are deprecated since
    Python 3.8.

    Accepts either a bare authority ("host:3128", "user:pw@host") — in
    which case scheme is None — or a URL ("http://user:pw@host:3128/").
    Missing user/password come back as None.

    Raises ValueError for a URL with no authority ("http:/path").
    """
    # Equivalent of splittype(): scheme is everything before the first
    # ':' that contains no '/' or ':', lower-cased.
    match = re.match(r'([^/:]+):(.*)', proxy, re.DOTALL)
    if match:
        scheme, r_scheme = match.group(1).lower(), match.group(2)
    else:
        scheme, r_scheme = None, proxy
    if not r_scheme.startswith("/"):
        # No "//" after the scheme: the whole string is an authority
        # ("host:port" was parsed as scheme:rest above), not a URL.
        scheme = None
        authority = proxy
    else:
        # URL
        if not r_scheme.startswith("//"):
            raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
        # and 3.3.), path is empty or starts with '/'
        end = r_scheme.find("/", 2)
        if end == -1:
            end = None
        authority = r_scheme[2:end]
    # Equivalent of splituser(): split on the *last* '@'.
    userinfo, at, hostport = authority.rpartition('@')
    if at:
        # Equivalent of splitpasswd(): split userinfo on the first ':'.
        user, colon, password = userinfo.partition(':')
        if not colon:
            password = None
    else:
        user = password = None
    return scheme, user, password, hostport
def open_browser(url):
    """Open *url* in the user's preferred web browser.

    For background (forked) browsers, reap the child process so it is not
    left as a zombie (workaround for http://bugs.python.org/issue5993).

    Bug fixed: the original called os.wait() without ever importing os, so
    the bare `except:` always swallowed a NameError and the workaround
    never actually ran. `os` is now imported at module level and only the
    expected failures are suppressed.
    """
    logging.info("Opening URL {}".format(url))
    webbrowser.open(url)
    if isinstance(webbrowser.get(), webbrowser.BackgroundBrowser):
        try:
            os.wait()  # workaround for http://bugs.python.org/issue5993
        except (AttributeError, OSError):
            # AttributeError: os.wait() is unavailable on Windows;
            # OSError: no child process left to wait for.
            pass
| xenoxaos/pithos | pithos/util.py | Python | gpl-3.0 | 1,919 |
import sys, itertools, unittest
from test import test_support
import ast
def to_tuple(t):
    """Convert an AST node (recursively) into a comparable tuple form.

    Python 2 only (uses ``basestring``/``long``).  Leaves scalars alone,
    maps lists element-wise, and turns a node into
    ``(ClassName, (lineno, col_offset)?, *fields)``.
    """
    if t is None or isinstance(t, (basestring, int, long, complex)):
        return t
    elif isinstance(t, list):
        return [to_tuple(e) for e in t]
    result = [t.__class__.__name__]
    # Location info is only present on located node types.
    if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
        result.append((t.lineno, t.col_offset))
    # Some node classes historically had _fields = None instead of ().
    if t._fields is None:
        return tuple(result)
    for f in t._fields:
        result.append(to_tuple(getattr(t, f)))
    return tuple(result)
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# None
"None",
# FunctionDef
"def f(): pass",
# FunctionDef with arg
"def f(a): pass",
# FunctionDef with arg and default value
"def f(a=0): pass",
# FunctionDef with varargs
"def f(*args): pass",
# FunctionDef with kwargs
"def f(**kwargs): pass",
# FunctionDef with all kind of args
"def f(a, b=1, c=None, d=[], e={}, *args, **kwargs): pass",
# ClassDef
"class C:pass",
# ClassDef, new style class
"class C(object): pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
# AugAssign
"v += 1",
# Print
"print >>f, 1, ",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# Raise
"raise Exception, 'string'",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Exec
"exec 'v'",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
"break",
# Continue
"continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"[(a,b) for a,b in c]",
"((a,b) for a,b in c)",
"((a,b) for (a,b) in c)",
# Multiline generator expression (test for .lineno & .col_offset)
"""(
(
Aa
,
Bb
)
for
Aa
,
Bb in Cc
)""",
# dictcomp
"{a : b for w in x for m in p if g}",
# dictcomp with naked tuple
"{a : b for v,w in x}",
# setcomp
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# None
"None",
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
# Dict
"{ 1:2 }",
# Empty dict
"{}",
# Set
"{None,}",
# Multiline dict (test for .lineno & .col_offset)
"""{
1
:
2
}""",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c if d)",
# Yield - yield expressions can't work outside a function
#
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Repr
"`v`",
# Num
"10L",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Empty list
"[]",
# Tuple
"1,2,3",
# Tuple
"(1,2,3)",
# Empty tuple
"()",
# Combination
"a.b.c.d(a.b[1:2])",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
    """Structural tests for ``ast`` node objects: fields, attributes,
    constructors, location info and pickling.  Python 2 only
    (``itertools.izip``, ``dict.iteritems``)."""

    def _assertTrueorder(self, ast_node, parent_pos):
        # Walk the tree asserting every located node's (lineno, col_offset)
        # appears at or after its parent's position.
        if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
            return
        if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
            node_pos = (ast_node.lineno, ast_node.col_offset)
            self.assertTrue(node_pos >= parent_pos)
            parent_pos = (ast_node.lineno, ast_node.col_offset)
        for name in ast_node._fields:
            value = getattr(ast_node, name)
            if isinstance(value, list):
                for child in value:
                    self._assertTrueorder(child, parent_pos)
            elif value is not None:
                self._assertTrueorder(value, parent_pos)

    def test_AST_objects(self):
        # The bare AST base type has no fields and rejects attribute reads,
        # keyword construction and positional construction alike.
        x = ast.AST()
        self.assertEqual(x._fields, ())
        with self.assertRaises(AttributeError):
            x.vararg
        with self.assertRaises(AttributeError):
            x.foobar = 21
        with self.assertRaises(AttributeError):
            ast.AST(lineno=2)
        with self.assertRaises(TypeError):
            # "_ast.AST constructor takes 0 positional arguments"
            ast.AST(2)

    def test_snippets(self):
        # Compare each compiled snippet against the generated expected
        # tuples at the bottom of this file, and check location ordering.
        for input, output, kind in ((exec_tests, exec_results, "exec"),
                                    (single_tests, single_results, "single"),
                                    (eval_tests, eval_results, "eval")):
            for i, o in itertools.izip(input, output):
                ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
                self.assertEqual(to_tuple(ast_tree), o)
                self._assertTrueorder(ast_tree, (0, 0))

    def test_slice(self):
        slc = ast.parse("x[::]").body[0].value.slice
        self.assertIsNone(slc.upper)
        self.assertIsNone(slc.lower)
        # In Python 2 an omitted step in "x[::]" parses as the name 'None'.
        self.assertIsInstance(slc.step, ast.Name)
        self.assertEqual(slc.step.id, "None")

    def test_from_import(self):
        # A purely relative import has no module name.
        im = ast.parse("from . import y").body[0]
        self.assertIsNone(im.module)

    def test_non_interned_future_from_ast(self):
        # ``.strip()`` produces a non-interned copy of " __future__ ";
        # compile() must still recognise the future import.
        mod = ast.parse("from __future__ import division")
        self.assertIsInstance(mod.body[0], ast.ImportFrom)
        mod.body[0].module = " __future__ ".strip()
        compile(mod, "<test>", "exec")

    def test_base_classes(self):
        self.assertTrue(issubclass(ast.For, ast.stmt))
        self.assertTrue(issubclass(ast.Name, ast.expr))
        self.assertTrue(issubclass(ast.stmt, ast.AST))
        self.assertTrue(issubclass(ast.expr, ast.AST))
        self.assertTrue(issubclass(ast.comprehension, ast.AST))
        self.assertTrue(issubclass(ast.Gt, ast.AST))

    def test_field_attr_existence(self):
        # Every concrete node class must expose _fields as a tuple.
        for name, item in ast.__dict__.iteritems():
            if isinstance(item, type) and name != 'AST' and name[0].isupper():
                x = item()
                if isinstance(x, ast.AST):
                    self.assertEqual(type(x._fields), tuple)

    def test_arguments(self):
        x = ast.arguments()
        self.assertEqual(x._fields, ('args', 'vararg', 'kwarg', 'defaults'))
        with self.assertRaises(AttributeError):
            x.vararg
        # Positional construction assigns fields in _fields order.
        x = ast.arguments(1, 2, 3, 4)
        self.assertEqual(x.vararg, 2)

    def test_field_attr_writable(self):
        x = ast.Num()
        # We can assign to _fields
        x._fields = 666
        self.assertEqual(x._fields, 666)

    def test_classattrs(self):
        # Field and attribute access before/after positional and keyword
        # construction, plus arity checking of the node constructor.
        x = ast.Num()
        self.assertEqual(x._fields, ('n',))
        with self.assertRaises(AttributeError):
            x.n
        x = ast.Num(42)
        self.assertEqual(x.n, 42)
        with self.assertRaises(AttributeError):
            x.lineno
        with self.assertRaises(AttributeError):
            x.foobar
        x = ast.Num(lineno=2)
        self.assertEqual(x.lineno, 2)
        x = ast.Num(42, lineno=0)
        self.assertEqual(x.lineno, 0)
        self.assertEqual(x._fields, ('n',))
        self.assertEqual(x.n, 42)
        self.assertRaises(TypeError, ast.Num, 1, 2)
        self.assertRaises(TypeError, ast.Num, 1, 2, lineno=0)

    def test_module(self):
        body = [ast.Num(42)]
        x = ast.Module(body)
        self.assertEqual(x.body, body)

    def test_nodeclasses(self):
        # Zero arguments constructor explicitely allowed
        x = ast.BinOp()
        self.assertEqual(x._fields, ('left', 'op', 'right'))
        # Random attribute allowed too
        x.foobarbaz = 5
        self.assertEqual(x.foobarbaz, 5)
        n1 = ast.Num(1)
        n3 = ast.Num(3)
        addop = ast.Add()
        x = ast.BinOp(n1, addop, n3)
        self.assertEqual(x.left, n1)
        self.assertEqual(x.op, addop)
        self.assertEqual(x.right, n3)
        # Field values are not type-checked: arbitrary objects are accepted.
        x = ast.BinOp(1, 2, 3)
        self.assertEqual(x.left, 1)
        self.assertEqual(x.op, 2)
        self.assertEqual(x.right, 3)
        x = ast.BinOp(1, 2, 3, lineno=0)
        self.assertEqual(x.left, 1)
        self.assertEqual(x.op, 2)
        self.assertEqual(x.right, 3)
        self.assertEqual(x.lineno, 0)
        # node raises exception when not given enough arguments
        self.assertRaises(TypeError, ast.BinOp, 1, 2)
        # node raises exception when given too many arguments
        self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
        # node raises exception when not given enough arguments
        self.assertRaises(TypeError, ast.BinOp, 1, 2, lineno=0)
        # node raises exception when given too many arguments
        self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
        # can set attributes through kwargs too
        x = ast.BinOp(left=1, op=2, right=3, lineno=0)
        self.assertEqual(x.left, 1)
        self.assertEqual(x.op, 2)
        self.assertEqual(x.right, 3)
        self.assertEqual(x.lineno, 0)
        # Random kwargs also allowed
        x = ast.BinOp(1, 2, 3, foobarbaz=42)
        self.assertEqual(x.foobarbaz, 42)

    def test_no_fields(self):
        # this used to fail because Sub._fields was None
        x = ast.Sub()
        self.assertEqual(x._fields, ())

    def test_pickling(self):
        # Round-trip every exec snippet's AST through pickle (and cPickle
        # when available) at protocols 0-2.
        import pickle
        mods = [pickle]
        try:
            import cPickle
            mods.append(cPickle)
        except ImportError:
            pass
        protocols = [0, 1, 2]
        for mod in mods:
            for protocol in protocols:
                # NOTE: the loop variable deliberately shadows the ``ast``
                # module for the remainder of this method.
                for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
                    ast2 = mod.loads(mod.dumps(ast, protocol))
                    self.assertEqual(to_tuple(ast2), to_tuple(ast))

    def test_invalid_identitifer(self):
        # A unicode identifier must be rejected at compile time (Python 2).
        m = ast.Module([ast.Expr(ast.Name(u"x", ast.Load()))])
        ast.fix_missing_locations(m)
        with self.assertRaises(TypeError) as cm:
            compile(m, "<test>", "exec")
        self.assertIn("identifier must be of type str", str(cm.exception))

    def test_invalid_string(self):
        # A non-string Str payload must be rejected at compile time.
        m = ast.Module([ast.Expr(ast.Str(43))])
        ast.fix_missing_locations(m)
        with self.assertRaises(TypeError) as cm:
            compile(m, "<test>", "exec")
        self.assertIn("string must be of type str or uni", str(cm.exception))
class ASTHelpers_Test(unittest.TestCase):
    """Tests for the helper functions in the ``ast`` module: parse, dump,
    location fix-ups, tree iteration, docstrings and literal_eval."""

    def test_parse(self):
        # ast.parse must be equivalent to compile(..., PyCF_ONLY_AST).
        a = ast.parse('foo(1 + 1)')
        b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST)
        self.assertEqual(ast.dump(a), ast.dump(b))

    def test_dump(self):
        # Exercise all three dump flavours: annotated fields (default),
        # positional fields, and with location attributes included.
        node = ast.parse('spam(eggs, "and cheese")')
        self.assertEqual(ast.dump(node),
            "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
            "args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], "
            "keywords=[], starargs=None, kwargs=None))])"
        )
        self.assertEqual(ast.dump(node, annotate_fields=False),
            "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
            "Str('and cheese')], [], None, None))])"
        )
        self.assertEqual(ast.dump(node, include_attributes=True),
            "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
            "lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), "
            "lineno=1, col_offset=5), Str(s='and cheese', lineno=1, "
            "col_offset=11)], keywords=[], starargs=None, kwargs=None, "
            "lineno=1, col_offset=0), lineno=1, col_offset=0)])"
        )

    def test_copy_location(self):
        # copy_location transfers lineno/col_offset onto a new node.
        src = ast.parse('1 + 1', mode='eval')
        src.body.right = ast.copy_location(ast.Num(2), src.body.right)
        self.assertEqual(ast.dump(src, include_attributes=True),
            'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), '
            'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, '
            'col_offset=0))'
        )

    def test_fix_missing_locations(self):
        # Nodes appended without locations inherit (0-filled) positions.
        src = ast.parse('write("spam")')
        src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
                                          [ast.Str('eggs')], [], None, None)))
        self.assertEqual(src, ast.fix_missing_locations(src))
        self.assertEqual(ast.dump(src, include_attributes=True),
            "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
            "lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, "
            "col_offset=6)], keywords=[], starargs=None, kwargs=None, "
            "lineno=1, col_offset=0), lineno=1, col_offset=0), "
            "Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, "
            "col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], "
            "keywords=[], starargs=None, kwargs=None, lineno=1, "
            "col_offset=0), lineno=1, col_offset=0)])"
        )

    def test_increment_lineno(self):
        src = ast.parse('1 + 1', mode='eval')
        self.assertEqual(ast.increment_lineno(src, n=3), src)
        self.assertEqual(ast.dump(src, include_attributes=True),
            'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
            'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
            'col_offset=0))'
        )
        # issue10869: do not increment lineno of root twice
        src = ast.parse('1 + 1', mode='eval')
        self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
        self.assertEqual(ast.dump(src, include_attributes=True),
            'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
            'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
            'col_offset=0))'
        )

    def test_iter_fields(self):
        node = ast.parse('foo()', mode='eval')
        d = dict(ast.iter_fields(node.body))
        self.assertEqual(d.pop('func').id, 'foo')
        self.assertEqual(d, {'keywords': [], 'kwargs': None,
                             'args': [], 'starargs': None})

    def test_iter_child_nodes(self):
        # Children come back in field order: func, then args, then keywords.
        node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
        self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
        iterator = ast.iter_child_nodes(node.body)
        self.assertEqual(next(iterator).id, 'spam')
        self.assertEqual(next(iterator).n, 23)
        self.assertEqual(next(iterator).n, 42)
        self.assertEqual(ast.dump(next(iterator)),
            "keyword(arg='eggs', value=Str(s='leek'))"
        )

    def test_get_docstring(self):
        # get_docstring also dedents continuation lines.
        node = ast.parse('def foo():\n  """line one\n  line two"""')
        self.assertEqual(ast.get_docstring(node.body[0]),
                         'line one\nline two')

    def test_literal_eval(self):
        self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
        self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
        self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
        # Function calls are not literals and must be rejected.
        self.assertRaises(ValueError, ast.literal_eval, 'foo()')

    def test_literal_eval_issue4907(self):
        # Complex literals built from real +/- imaginary parts are allowed,
        # but only in the simple binary form.
        self.assertEqual(ast.literal_eval('2j'), 2j)
        self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j)
        self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j)
        self.assertRaises(ValueError, ast.literal_eval, '2 + (3 + 4j)')
def test_main():
    """Run both test cases, silencing the py3k warning triggered by the
    backquote (repr) snippet in eval_tests."""
    with test_support.check_py3k_warnings(("backquote not supported",
                                           SyntaxWarning)):
        test_support.run_unittest(AST_Tests, ASTHelpers_Test)
def main():
    """Entry point (Python 2 only -- uses print statements).

    With ``-g`` on the command line, regenerate the ``*_results`` tables
    and print them to stdout so they can be pasted over the generated
    section at the bottom of this file; otherwise run the test suite.
    """
    if __name__ != '__main__':
        return
    if sys.argv[1:] == ['-g']:
        for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
                                 (eval_tests, "eval")):
            print kind+"_results = ["
            for s in statements:
                # 0x400 == PyCF_ONLY_AST
                print repr(to_tuple(compile(s, "?", kind, 0x400)))+","
            print "]"
        print "main()"
        raise SystemExit
    test_main()
#### EVERYTHING BELOW IS GENERATED #####
exec_results = [
('Module', [('Expr', (1, 0), ('Name', (1, 0), 'None', ('Load',)))]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (1, 9))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, []), [('Pass', (1, 10))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, [('Num', (1, 8), 0)]), [('Pass', (1, 12))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], 'args', None, []), [('Pass', (1, 14))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, 'kwargs', []), [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',)), ('Name', (1, 9), 'b', ('Param',)), ('Name', (1, 14), 'c', ('Param',)), ('Name', (1, 22), 'd', ('Param',)), ('Name', (1, 28), 'e', ('Param',))], 'args', 'kwargs', [('Num', (1, 11), 1), ('Name', (1, 16), 'None', ('Load',)), ('List', (1, 24), [], ('Load',)), ('Dict', (1, 30), [], [])]), [('Pass', (1, 52))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (1, 8))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [('Name', (1, 8), 'object', ('Load',))], [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Return', (1, 8), ('Num', (1, 15), 1))], [])]),
('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]),
('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]),
('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]),
('Module', [('Print', (1, 0), ('Name', (1, 8), 'f', ('Load',)), [('Num', (1, 11), 1)], False)]),
('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]),
('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]),
('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]),
('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]),
('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]),
('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]),
('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]),
('Module', [('Import', (1, 0), [('alias', 'sys', None)])]),
('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]),
('Module', [('Exec', (1, 0), ('Str', (1, 5), 'v'), None, None)]),
('Module', [('Global', (1, 0), ['v'])]),
('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]),
('Module', [('Pass', (1, 0))]),
('Module', [('Break', (1, 0))]),
('Module', [('Continue', (1, 0))]),
('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]),
('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]),
]
single_results = [
('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]),
]
eval_results = [
('Expression', ('Name', (1, 0), 'None', ('Load',))),
('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])),
('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))),
('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))),
('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, []), ('Name', (1, 7), 'None', ('Load',)))),
('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])),
('Expression', ('Dict', (1, 0), [], [])),
('Expression', ('Set', (1, 0), [('Name', (1, 1), 'None', ('Load',))])),
('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])),
('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])),
('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))),
('Expression', ('Repr', (1, 0), ('Name', (1, 1), 'v', ('Load',)))),
('Expression', ('Num', (1, 0), 10L)),
('Expression', ('Str', (1, 0), 'string')),
('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))),
('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))),
('Expression', ('Name', (1, 0), 'v', ('Load',))),
('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('List', (1, 0), [], ('Load',))),
('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))),
('Expression', ('Tuple', (1, 1), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('Tuple', (1, 0), [], ('Load',))),
('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)),
]
main()
| j5shi/Thruster | pylibs/test/test_ast.py | Python | gpl-2.0 | 25,341 |
from sympy import (
Abs, acos, Add, atan, Basic, besselsimp, binomial, collect,
collect_const, combsimp, cos, cosh, cot, coth, count_ops, denom,
Derivative, diff, Dummy, E, Eq, erf, exp, exp_polar, expand,
expand_multinomial, exptrigsimp, factor, factorial, FallingFactorial,
Float, fraction, Function, gamma, GoldenRatio, hyper, hyper,
hypersimp, I, Integer, Integral, integrate, log, logcombine, Matrix,
Mul, nsimplify, O, oo, ordered, pi, Piecewise, polar_lift, polarify,
posify, powdenest, powsimp, rad, radsimp, Rational, ratsimp,
ratsimpmodprime, rcollect, RisingFactorial, root, S, separatevars,
signsimp, simplify, sin, sinh, solve, sqrt, Subs, Symbol, symbols,
sympify, tan, tanh, trigsimp, Wild, zoo, Sum)
from sympy.core.mul import _keep_coeff, _unevaluated_Mul as umul
from sympy.simplify.simplify import (
collect_sqrt, fraction_expand, _unevaluated_Add, nthroot)
from sympy.utilities.pytest import XFAIL, slow
from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k
def test_ratsimp():
    """ratsimp: put sums of rational functions over a common denominator."""
    f, g = 1/x + 1/y, (x + y)/(x*y)
    assert f != g and ratsimp(f) == g

    f, g = 1/(1 + 1/x), 1 - 1/(x + 1)
    assert f != g and ratsimp(f) == g

    f, g = x/(x + y) + y/(x + y), 1
    assert f != g and ratsimp(f) == g

    f, g = -x - y - y**2/(x + y) + x**2/(x + y), -2*y
    assert f != g and ratsimp(f) == g

    # Multi-symbol case: either sign arrangement of the residual term is fine.
    f = (a*c*x*y + a*c*z - b*d*x*y - b*d*z - b*t*x*y - b*t*x - b*t*z +
         e*x)/(x*y + z)
    G = [a*c - b*d - b*t + (-b*t*x + e*x)/(x*y + z),
         a*c - b*d - b*t - ( b*t*x - e*x)/(x*y + z)]
    assert f != g and ratsimp(f) in G

    A = sqrt(pi)
    B = log(erf(x) - 1)
    C = log(erf(x) + 1)
    D = 8 - 8*erf(x)
    f = A*B/D - A*C/D + A*C*erf(x)/D - A*B*erf(x)/D + 2*A/D
    assert ratsimp(f) == A*B/8 - A*C/8 - A/(4*erf(x) - 4)
def test_ratsimpmodprime():
    """ratsimpmodprime: simplify rational functions modulo a prime ideal."""
    a = y**5 + x + y
    b = x - y
    F = [x*y**5 - x - y]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (x**2 + x*y + x + y) / (x**2 - x*y)

    a = x + y**2 - 2
    b = x + y**2 - y - 1
    F = [x*y - 1]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (1 + y - x)/(y - x)

    a = 5*x**3 + 21*x**2 + 4*x*y + 23*x + 12*y + 15
    b = 7*x**3 - y*x**2 + 31*x**2 + 2*x*y + 15*y + 37*x + 21
    F = [x**2 + y**2 - 1]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        (1 + 5*y - 5*x)/(8*y - 6*x)

    a = x*y - x - 2*y + 4
    b = x + y**2 - 2*y
    F = [x - 2, y - 3]
    assert ratsimpmodprime(a/b, F, x, y, order='lex') == \
        Rational(2, 5)

    # Test a bug where denominators would be dropped
    assert ratsimpmodprime(x, [y - 2*x], order='lex') == \
        y/2
def test_trigsimp1():
    """trigsimp: basic Pythagorean, quotient and sum/difference identities."""
    x, y = symbols('x,y')

    assert trigsimp(1 - sin(x)**2) == cos(x)**2
    assert trigsimp(1 - cos(x)**2) == sin(x)**2
    assert trigsimp(sin(x)**2 + cos(x)**2) == 1
    assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
    assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
    assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
    assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
    assert trigsimp(1/sin(x)**2 - 1) == 1/tan(x)**2
    assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1

    assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
    assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) == 3*cos(x)/2 + S(7)/2

    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
    assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
    assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
    assert trigsimp(cot(x)/cos(x)) == 1/sin(x)

    # Angle-addition identities.
    assert trigsimp(sin(x + y) + sin(x - y)) == 2*sin(x)*cos(y)
    assert trigsimp(sin(x + y) - sin(x - y)) == 2*sin(y)*cos(x)
    assert trigsimp(cos(x + y) + cos(x - y)) == 2*cos(x)*cos(y)
    assert trigsimp(cos(x + y) - cos(x - y)) == -2*sin(x)*sin(y)
    assert trigsimp(tan(x + y) - tan(x)/(1 - tan(x)*tan(y))) == \
        sin(y)/(-sin(y)*tan(x) + cos(y))  # -tan(y)/(tan(x)*tan(y) - 1)

    # Hyperbolic analogues.
    assert trigsimp(sinh(x + y) + sinh(x - y)) == 2*sinh(x)*cosh(y)
    assert trigsimp(sinh(x + y) - sinh(x - y)) == 2*sinh(y)*cosh(x)
    assert trigsimp(cosh(x + y) + cosh(x - y)) == 2*cosh(x)*cosh(y)
    assert trigsimp(cosh(x + y) - cosh(x - y)) == 2*sinh(x)*sinh(y)
    assert trigsimp(tanh(x + y) - tanh(x)/(1 + tanh(x)*tanh(y))) == \
        sinh(y)/(sinh(y)*tanh(x) + cosh(y))

    assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
    e = 2*sin(x)**2 + 2*cos(x)**2
    assert trigsimp(log(e)) == log(2)
def test_trigsimp1a():
    """trigsimp: quotient identities at numeric arguments mixed with
    non-trig factors (cos(3)*exp(2) must pass through untouched)."""
    assert trigsimp(sin(2)**2*cos(3)*exp(2)/cos(2)**2) == tan(2)**2*cos(3)*exp(2)
    assert trigsimp(tan(2)**2*cos(3)*exp(2)*cos(2)**2) == sin(2)**2*cos(3)*exp(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)*sin(2)) == cos(3)*exp(2)*cos(2)
    assert trigsimp(tan(2)*cos(3)*exp(2)/sin(2)) == cos(3)*exp(2)/cos(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)/cos(2)) == cos(3)*exp(2)/sin(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)*tan(2)) == cos(3)*exp(2)
    assert trigsimp(sinh(2)*cos(3)*exp(2)/cosh(2)) == tanh(2)*cos(3)*exp(2)
    assert trigsimp(tanh(2)*cos(3)*exp(2)*cosh(2)) == sinh(2)*cos(3)*exp(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)*sinh(2)) == cosh(2)*cos(3)*exp(2)
    assert trigsimp(tanh(2)*cos(3)*exp(2)/sinh(2)) == cos(3)*exp(2)/cosh(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)/cosh(2)) == cos(3)*exp(2)/sinh(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)*tanh(2)) == cos(3)*exp(2)
def test_trigsimp2():
    """trigsimp(recursive=True): simplify nested two-variable Pythagorean
    combinations, and reach inside Subs."""
    x, y = symbols('x,y')
    assert trigsimp(cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2,
                    recursive=True) == 1
    assert trigsimp(sin(x)**2*sin(y)**2 + sin(x)**2*cos(y)**2 + cos(x)**2,
                    recursive=True) == 1
    assert trigsimp(
        Subs(x, x, sin(y)**2 + cos(y)**2)) == Subs(x, x, 1)
def test_issue_4373():
    """Regression: Float coefficients must survive Pythagorean collapse."""
    x = Symbol("x")
    assert abs(trigsimp(2.0*sin(x)**2 + 2.0*cos(x)**2) - 2.0) < 1e-10
def test_trigsimp3():
    """trigsimp: sin/cos power quotients collapse to tan (and reciprocal)."""
    x, y = symbols('x,y')
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10

    assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
    assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
    assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10

    assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
def test_issue_4661():
    """Regression: quartic sin/cos combinations must reduce to constants."""
    a, x, y = symbols('a x y')
    eq = -4*sin(x)**4 + 4*cos(x)**4 - 8*cos(x)**2
    assert trigsimp(eq) == -4
    n = sin(x)**6 + 4*sin(x)**4*cos(x)**2 + 5*sin(x)**2*cos(x)**4 + 2*cos(x)**6
    d = -sin(x)**2 - 2*cos(x)**2
    assert simplify(n/d) == -1
    assert trigsimp(-2*cos(x)**2 + cos(x)**4 - sin(x)**4) == -1
    eq = (- sin(x)**3/4)*cos(x) + (cos(x)**3/4)*sin(x) - sin(2*x)*cos(2*x)/8
    assert trigsimp(eq) == 0
def test_issue_4494():
    """Regression: mixed two-angle expression with tan must collapse to 1."""
    a, b = symbols('a b')
    eq = sin(a)**2*sin(b)**2 + cos(a)**2*cos(b)**2*tan(a)**2 + cos(a)**2
    assert trigsimp(eq) == 1
def test_issue_5948():
    """Regression: d/dx of an antiderivative must round-trip through trigsimp."""
    a, x, y = symbols('a x y')
    assert trigsimp(diff(integrate(cos(x)/sin(x)**7, x), x)) == \
        cos(x)/sin(x)**7
def test_issue_4775():
    """Regression: angle-addition contraction also works with an added
    constant term."""
    a, x, y = symbols('a x y')
    assert trigsimp(sin(x)*cos(y)+cos(x)*sin(y)) == sin(x + y)
    assert trigsimp(sin(x)*cos(y)+cos(x)*sin(y)+3) == sin(x + y) + 3
def test_issue_4280():
    """Regression: nested two-angle Pythagorean collapse with symbolic
    coefficients."""
    a, x, y = symbols('a x y')
    assert trigsimp(cos(x)**2 + cos(y)**2*sin(x)**2 + sin(y)**2*sin(x)**2) == 1
    assert trigsimp(a**2*sin(x)**2 + a**2*cos(y)**2*cos(x)**2 + a**2*cos(x)**2*sin(y)**2) == a**2
    assert trigsimp(a**2*cos(y)**2*sin(x)**2 + a**2*sin(y)**2*sin(x)**2) == a**2*sin(x)**2
def test_issue_3210():
    """Regression: angle-addition contraction at purely numeric arguments."""
    eqs = (sin(2)*cos(3) + sin(3)*cos(2),
           -sin(2)*sin(3) + cos(2)*cos(3),
           sin(2)*cos(3) - sin(3)*cos(2),
           sin(2)*sin(3) + cos(2)*cos(3),
           sin(2)*sin(3) + cos(2)*cos(3) + cos(2),
           sinh(2)*cosh(3) + sinh(3)*cosh(2),
           sinh(2)*sinh(3) + cosh(2)*cosh(3),
           )
    assert [trigsimp(e) for e in eqs] == [
        sin(5),
        cos(5),
        -sin(1),
        cos(1),
        cos(1) + cos(2),
        sinh(5),
        cosh(5),
    ]
def test_issue_7263():
    """Regression: simplify of a float expression with rad() must stay
    numerically accurate."""
    assert abs((simplify(30.8**2 - 82.5**2 * sin(rad(11.6))**2)).evalf() - \
               673.447451402970) < 1e-15
def test_trigsimp_issues():
    """Assorted trigsimp regressions: symbolic exponents, multiple
    patterns, NaN propagation, and a former hang (issue 6789)."""
    a, x, y = symbols('a x y')

    # issue 4625 - factor_terms works, too
    assert trigsimp(sin(x)**3 + cos(x)**2*sin(x)) == sin(x)

    # issue 5948
    assert trigsimp(diff(integrate(cos(x)/sin(x)**3, x), x)) == \
        cos(x)/sin(x)**3
    assert trigsimp(diff(integrate(sin(x)/cos(x)**3, x), x)) == \
        sin(x)/cos(x)**3

    # check integer exponents
    e = sin(x)**y/cos(x)**y
    assert trigsimp(e) == e
    assert trigsimp(e.subs(y, 2)) == tan(x)**2
    assert trigsimp(e.subs(x, 1)) == tan(1)**y

    # check for multiple patterns
    assert (cos(x)**2/sin(x)**2*cos(y)**2/sin(y)**2).trigsimp() == \
        1/tan(x)**2/tan(y)**2
    assert trigsimp(cos(x)/sin(x)*cos(x+y)/sin(x+y)) == \
        1/(tan(x)*tan(x + y))

    eq = cos(2)*(cos(3) + 1)**2/(cos(3) - 1)**2
    assert trigsimp(eq) == eq.factor()  # factor makes denom (-1 + cos(3))**2
    assert trigsimp(cos(2)*(cos(3) + 1)**2*(cos(3) - 1)**2) == \
        cos(2)*sin(3)**4

    # issue 6789; this generates an expression that formerly caused
    # trigsimp to hang
    assert cot(x).equals(tan(x)) is False

    # nan or the unchanged expression is ok, but not sin(1)
    z = cos(x)**2 + sin(x)**2 - 1
    z1 = tan(x)**2 - 1/cot(x)**2
    n = (1 + z1/z)
    assert trigsimp(sin(n)) != sin(1)
    eq = x*(n - 1) - x*n
    assert trigsimp(eq) is S.NaN
    assert trigsimp(eq, recursive=True) is S.NaN
    assert trigsimp(1).is_Integer

    assert trigsimp(-sin(x)**4 - 2*sin(x)**2*cos(x)**2 - cos(x)**4) == -1
def test_trigsimp_issue_2515():
    """Regression: cos*tan products must reduce to sin."""
    x = Symbol('x')
    assert trigsimp(x*cos(x)*tan(x)) == x*sin(x)
    assert trigsimp(-sin(x) + cos(x)*tan(x)) == 0
def test_trigsimp_issue_3826():
    """Regression: trigsimp must invert an expand(trig=True) of tan(2x)."""
    assert trigsimp(tan(2*x).expand(trig=True)) == tan(2*x)
def test_trigsimp_issue_4032():
    """Regression: power-of-two rewriting with an integer-assumption symbol."""
    n = Symbol('n', integer=True, positive=True)
    assert trigsimp(2**(n/2)*cos(pi*n/4)/2 + 2**(n - 1)/2) == \
        2**(n/2)*cos(pi*n/4)/2 + 2**n/4
def test_trigsimp_issue_7761():
    """Regression: a lone cosh at a constant must be left untouched."""
    assert trigsimp(cosh(pi/4)) == cosh(pi/4)
def test_trigsimp_noncommutative():
    """trigsimp with noncommutative coefficients: identities must still
    apply with the noncommutative factor preserved in place."""
    x, y = symbols('x,y')
    A, B = symbols('A,B', commutative=False)

    assert trigsimp(A - A*sin(x)**2) == A*cos(x)**2
    assert trigsimp(A - A*cos(x)**2) == A*sin(x)**2
    assert trigsimp(A*sin(x)**2 + A*cos(x)**2) == A
    assert trigsimp(A + A*tan(x)**2) == A/cos(x)**2
    assert trigsimp(A/cos(x)**2 - A) == A*tan(x)**2
    assert trigsimp(A/cos(x)**2 - A*tan(x)**2) == A
    assert trigsimp(A + A*cot(x)**2) == A/sin(x)**2
    assert trigsimp(A/sin(x)**2 - A) == A/tan(x)**2
    assert trigsimp(A/sin(x)**2 - A*cot(x)**2) == A

    assert trigsimp(y*A*cos(x)**2 + y*A*sin(x)**2) == y*A

    assert trigsimp(A*sin(x)/cos(x)) == A*tan(x)
    assert trigsimp(A*tan(x)*cos(x)) == A*sin(x)
    assert trigsimp(A*cot(x)**3*sin(x)**3) == A*cos(x)**3
    assert trigsimp(y*A*tan(x)**2/sin(x)**2) == y*A/cos(x)**2
    assert trigsimp(A*cot(x)/cos(x)) == A/sin(x)

    assert trigsimp(A*sin(x + y) + A*sin(x - y)) == 2*A*sin(x)*cos(y)
    assert trigsimp(A*sin(x + y) - A*sin(x - y)) == 2*A*sin(y)*cos(x)
    assert trigsimp(A*cos(x + y) + A*cos(x - y)) == 2*A*cos(x)*cos(y)
    assert trigsimp(A*cos(x + y) - A*cos(x - y)) == -2*A*sin(x)*sin(y)

    assert trigsimp(A*sinh(x + y) + A*sinh(x - y)) == 2*A*sinh(x)*cosh(y)
    assert trigsimp(A*sinh(x + y) - A*sinh(x - y)) == 2*A*sinh(y)*cosh(x)
    assert trigsimp(A*cosh(x + y) + A*cosh(x - y)) == 2*A*cosh(x)*cosh(y)
    assert trigsimp(A*cosh(x + y) - A*cosh(x - y)) == 2*A*sinh(x)*sinh(y)

    assert trigsimp(A*cos(0.12345)**2 + A*sin(0.12345)**2) == 1.0*A
def test_hyperbolic_simp():
    """trigsimp applied to hyperbolic identities (cosh**2 - sinh**2 == 1 etc.)."""
    x, y = symbols('x,y')
    # Basic hyperbolic Pythagorean identities.
    assert trigsimp(sinh(x)**2 + 1) == cosh(x)**2
    assert trigsimp(cosh(x)**2 - 1) == sinh(x)**2
    assert trigsimp(cosh(x)**2 - sinh(x)**2) == 1
    assert trigsimp(1 - tanh(x)**2) == 1/cosh(x)**2
    assert trigsimp(1 - 1/cosh(x)**2) == tanh(x)**2
    assert trigsimp(tanh(x)**2 + 1/cosh(x)**2) == 1
    assert trigsimp(coth(x)**2 - 1) == 1/sinh(x)**2
    assert trigsimp(1/sinh(x)**2 + 1) == 1/tanh(x)**2
    assert trigsimp(coth(x)**2 - 1/sinh(x)**2) == 1
    # Numeric coefficients, including mismatched ones.
    assert trigsimp(5*cosh(x)**2 - 5*sinh(x)**2) == 5
    assert trigsimp(5*cosh(x/2)**2 - 2*sinh(x/2)**2) == 3*cosh(x)/2 + S(7)/2
    # Quotient identities.
    assert trigsimp(sinh(x)/cosh(x)) == tanh(x)
    assert trigsimp(tanh(x)) == trigsimp(sinh(x)/cosh(x))
    assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
    assert trigsimp(2*tanh(x)*cosh(x)) == 2*sinh(x)
    assert trigsimp(coth(x)**3*sinh(x)**3) == cosh(x)**3
    assert trigsimp(y*tanh(x)**2/sinh(x)**2) == y/cosh(x)**2
    assert trigsimp(coth(x)/cosh(x)) == 1/sinh(x)
    # Simplification inside a wrapping function (log).
    e = 2*cosh(x)**2 - 2*sinh(x)**2
    assert trigsimp(log(e)) == log(2)
    # recursive=True needed for nested two-variable identities.
    assert trigsimp(cosh(x)**2*cosh(y)**2 - cosh(x)**2*sinh(y)**2 - sinh(x)**2,
            recursive=True) == 1
    assert trigsimp(sinh(x)**2*sinh(y)**2 - sinh(x)**2*cosh(y)**2 + cosh(x)**2,
            recursive=True) == 1
    # Float coefficients are handled approximately.
    assert abs(trigsimp(2.0*cosh(x)**2 - 2.0*sinh(x)**2) - 2.0) < 1e-10
    # Power quotients at various exponents.
    assert trigsimp(sinh(x)**2/cosh(x)**2) == tanh(x)**2
    assert trigsimp(sinh(x)**3/cosh(x)**3) == tanh(x)**3
    assert trigsimp(sinh(x)**10/cosh(x)**10) == tanh(x)**10
    assert trigsimp(cosh(x)**3/sinh(x)**3) == 1/tanh(x)**3
    assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
    assert trigsimp(cosh(x)**2/sinh(x)**2) == 1/tanh(x)**2
    assert trigsimp(cosh(x)**10/sinh(x)**10) == 1/tanh(x)**10
    assert trigsimp(x*cosh(x)*tanh(x)) == x*sinh(x)
    assert trigsimp(-sinh(x) + cosh(x)*tanh(x)) == 0
    assert tan(x) != 1/cot(x)  # cot doesn't auto-simplify
    assert trigsimp(tan(x) - 1/cot(x)) == 0
    assert trigsimp(3*tanh(x)**7 - 2/coth(x)**7) == tanh(x)**7
def test_trigsimp_groebner():
    """Tests for the Groebner-basis-driven trigsimp backend (trigsimp_groebner)."""
    from sympy.simplify.simplify import trigsimp_groebner
    c = cos(x)
    s = sin(x)
    ex = (4*s*c + 12*s + 5*c**3 + 21*c**2 + 23*c + 15)/(
        -s*c**2 + 2*s*c + 15*s + 7*c**3 + 31*c**2 + 37*c + 21)
    # The reduced form may come back with either sign convention.
    resnum = (5*s - 5*c + 1)
    resdenom = (8*s - 6*c)
    results = [resnum/resdenom, (-resnum)/(-resdenom)]
    assert trigsimp_groebner(ex) in results
    assert trigsimp_groebner(s/c, hints=[tan]) == tan(x)
    assert trigsimp((-s + 1)/c + c/(-s + 1),
                    method='groebner') == 2/c
    assert trigsimp((-s + 1)/c + c/(-s + 1),
                    method='groebner', polynomial=True) == 2/c
    # Test quick=False works
    assert trigsimp_groebner(ex, hints=[2]) in results
    # test "I"
    assert trigsimp_groebner(sin(I*x)/cos(I*x), hints=[tanh]) == I*tanh(x)
    # test hyperbolic / sums
    assert trigsimp_groebner((tanh(x)+tanh(y))/(1+tanh(x)*tanh(y)),
                             hints=[(tanh, x, y)]) == tanh(x + y)
@XFAIL
def test_factorial_simplify():
    # There are more tests in test_factorials.py. These are just to
    # ensure that simplify() calls factorial_simplify correctly
    # NOTE(review): marked XFAIL — the sympy.specfun import path no longer
    # exists, so this test is expected to fail until it is updated.
    from sympy.specfun.factorials import factorial
    x = Symbol('x')
    assert simplify(factorial(x)/x) == factorial(x - 1)
    assert simplify(factorial(factorial(x))) == factorial(factorial(x))
def test_simplify_expr():
    """General-purpose simplify() smoke tests over rational, trig, matrix,
    noncommutative, logarithmic, and hypergeometric expressions."""
    x, y, z, k, n, m, w, f, s, A = symbols('x,y,z,k,n,m,w,f,s,A')
    # Atomic expressions are fixed points of simplify.
    assert all(simplify(tmp) == tmp for tmp in [I, E, oo, x, -x, -oo, -E, -I])
    # Rational function combination.
    e = 1/x + 1/y
    assert e != (x + y)/(x*y)
    assert simplify(e) == (x + y)/(x*y)
    # An already-simple product should be left alone.
    e = A**2*s**4/(4*pi*k*m**3)
    assert simplify(e) == e
    e = (4 + 4*x - 2*(2 + 2*x))/(2 + 2*x)
    assert simplify(e) == 0
    e = (-4*x*y**2 - 2*y**3 - 2*x**2*y)/(x + y)**2
    assert simplify(e) == -2*y
    e = -x - y - (x + y)**(-1)*y**2 + (x + y)**(-1)*x**2
    assert simplify(e) == -2*y
    e = (x + x*y)/x
    assert simplify(e) == 1 + y
    e = (f(x) + y*f(x))/f(x)
    assert simplify(e) == 1 + y
    e = (2 * (1/n - cos(n * pi)/n))/pi
    assert simplify(e) == (-cos(pi*n) + 1)/(pi*n)*2
    # Differentiating an antiderivative should round-trip.
    e = integrate(1/(x**3 + 1), x).diff(x)
    assert simplify(e) == 1/(x**3 + 1)
    e = integrate(x/(x**2 + 3*x + 1), x).diff(x)
    assert simplify(e) == x/(x**2 + 3*x + 1)
    # Matrix entries simplify too.
    A = Matrix([[2*k - m*w**2, -k], [-k, k - m*w**2]]).inv()
    assert simplify((A*Matrix([0, f]))[1]) == \
        -f*(2*k - m*w**2)/(k**2 - (k - m*w**2)*(2*k - m*w**2))
    f = -x + y/(z + t) + z*x/(z + t) + z*a/(z + t) + t*x/(z + t)
    assert simplify(f) == (y + a*z)/(z + t)
    # Noncommutative symbols must not be reordered or combined.
    A, B = symbols('A,B', commutative=False)
    assert simplify(A*B - B*A) == A*B - B*A
    assert simplify(A/(1 + y/x)) == x*A/(x + y)
    assert simplify(A*(1/x + 1/y)) == A/x + A/y  #(x + y)*A/(x*y)
    assert simplify(log(2) + log(3)) == log(6)
    assert simplify(log(2*x) - log(2)) == log(x)
    assert simplify(hyper([], [], x)) == exp(x)
def test_issue_3557():
    """Regression: simplify() a solution of a 3x3 linear system into the
    expected cofactor/determinant ratio (symbols come from module scope)."""
    f_1 = x*a + y*b + z*c - 1
    f_2 = x*d + y*e + z*f - 1
    f_3 = x*g + y*h + z*i - 1
    solutions = solve([f_1, f_2, f_3], x, y, z, simplify=False)
    assert simplify(solutions[y]) == \
        (a*i + c*d + f*g - a*f - c*g - d*i)/ \
        (a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g)
def test_simplify_other():
    """simplify() on gamma ratios, Eq objects, noncommutative sums, and a
    large erf/exponential expression (issues 6123 and 6370)."""
    assert simplify(sin(x)**2 + cos(x)**2) == 1
    assert simplify(gamma(x + 1)/gamma(x)) == x
    assert simplify(sin(x)**2 + cos(x)**2 + factorial(x)/gamma(x)) == 1 + x
    # Both sides of an equation are simplified.
    assert simplify(
        Eq(sin(x)**2 + cos(x)**2, factorial(x)/gamma(x))) == Eq(1, x)
    nc = symbols('nc', commutative=False)
    assert simplify(x + x*nc) == x*(1 + nc)
    # issue 6123
    # f = exp(-I*(k*sqrt(t) + x/(2*sqrt(t)))**2)
    # ans = integrate(f, (k, -oo, oo), conds='none')
    ans = I*(-pi*x*exp(-3*I*pi/4 + I*x**2/(4*t))*erf(x*exp(-3*I*pi/4)/
        (2*sqrt(t)))/(2*sqrt(t)) + pi*x*exp(-3*I*pi/4 + I*x**2/(4*t))/
        (2*sqrt(t)))*exp(-I*x**2/(4*t))/(sqrt(pi)*x) - I*sqrt(pi) * \
        (-erf(x*exp(I*pi/4)/(2*sqrt(t))) + 1)*exp(I*pi/4)/(2*sqrt(t))
    assert simplify(ans) == -(-1)**(S(3)/4)*sqrt(pi)/sqrt(t)
    # issue 6370
    assert simplify(2**(2 + x)/4) == 2**x
def test_simplify_complex():
    """Regression (issue 4341): simplify a product of exponential rewrites of
    cos and tan back to the exponential rewrite of sin."""
    cosAsExp = cos(x)._eval_rewrite_as_exp(x)
    tanAsExp = tan(x)._eval_rewrite_as_exp(x)
    assert simplify(cosAsExp*tanAsExp).expand() == (
        sin(x))._eval_rewrite_as_exp(x).expand()  # issue 4341
def test_simplify_ratio():
    """The ratio= keyword bounds how much simplify may grow an expression:
    ratio=1 never returns something with more ops; ratio=oo always rebuilds."""
    # roots of x**3-3*x+5
    roots = ['(1/2 - sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3) + 1/((1/2 - '
             'sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3))',
             '1/((1/2 + sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3)) + '
             '(1/2 + sqrt(3)*I/2)*(sqrt(21)/2 + 5/2)**(1/3)',
             '-(sqrt(21)/2 + 5/2)**(1/3) - 1/(sqrt(21)/2 + 5/2)**(1/3)']
    for r in roots:
        r = S(r)
        assert count_ops(simplify(r, ratio=1)) <= count_ops(r)
        # If ratio=oo, simplify() is always applied:
        assert simplify(r, ratio=oo) is not r
def test_simplify_measure():
    """A custom measure= callable steers which candidate simplify returns;
    the result must never score worse than the input under that measure."""
    measure1 = lambda expr: len(str(expr))
    measure2 = lambda expr: -count_ops(expr)
    # Return the most complicated result
    expr = (x + 1)/(x + sin(x)**2 + cos(x)**2)
    assert measure1(simplify(expr, measure=measure1)) <= measure1(expr)
    assert measure2(simplify(expr, measure=measure2)) <= measure2(expr)
def test_simplify_issue_1308():
    """Regression: simplify factors a common exponential out of a sum."""
    assert simplify(exp(-Rational(1, 2)) + exp(-Rational(3, 2))) == \
        (1 + E)*exp(-Rational(3, 2))
def test_issue_5652():
    """Regression: simplify must not corrupt noncommutative powers like n**(-n)."""
    assert simplify(E + exp(-E)) == exp(-E) + E
    n = symbols('n', commutative=False)
    assert simplify(n + n**(-n)) == n + n**(-n)
def test_simplify_fail1():
    """Known-hard case: the reciprocal of an earlier rational-function test.
    NOTE(review): the name suggests this was once an expected failure —
    confirm whether it should carry an @XFAIL decorator."""
    x = Symbol('x')
    y = Symbol('y')
    e = (x + y)**2/(-4*x*y**2 - 2*y**3 - 2*x**2*y)
    assert simplify(e) == 1 / (-2*y)
def test_fraction():
    """fraction() splits an expression into (numerator, denominator) without
    evaluation; exact=True keeps exp(-x) in the numerator."""
    x, y, z = map(Symbol, 'xyz')
    A = Symbol('A', commutative=False)
    assert fraction(Rational(1, 2)) == (1, 2)
    assert fraction(x) == (x, 1)
    assert fraction(1/x) == (1, x)
    assert fraction(x/y) == (x, y)
    assert fraction(x/2) == (x, 2)
    assert fraction(x*y/z) == (x*y, z)
    assert fraction(x/(y*z)) == (x, y*z)
    assert fraction(1/y**2) == (1, y**2)
    assert fraction(x/y**2) == (x, y**2)
    assert fraction((x**2 + 1)/y) == (x**2 + 1, y)
    assert fraction(x*(y + 1)/y**7) == (x*(y + 1), y**7)
    # exact=True: negative exponents are not moved to the denominator.
    assert fraction(exp(-x), exact=True) == (exp(-x), 1)
    # Noncommutative factors stay in the numerator.
    assert fraction(x*A/y) == (x*A, y)
    assert fraction(x*A**-1/y) == (x*A**-1, y)
    # A negative symbol in the exponent flips which side exp lands on.
    n = symbols('n', negative=True)
    assert fraction(exp(n)) == (1, exp(-n))
    assert fraction(exp(-n)) == (exp(-n), 1)
def test_powsimp():
    """Core powsimp() tests: combining bases and exponents, deep= recursion
    into function arguments, combine= modes, and assumption-dependent rules
    (nonnegative/positive/integer/negative symbols)."""
    x, y, z, n = symbols('x,y,z,n')
    f = Function('f')
    assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
    assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
    # deep=False (default) does not look inside function arguments.
    assert powsimp(
        f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
    assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep=True ) == f(1)
    assert exp(x)*exp(y) == exp(x)*exp(y)
    assert powsimp(exp(x)*exp(y)) == exp(x + y)
    assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
    # combine='exp' merges exponents but keeps the bases separate.
    assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == \
        exp(x + y)*2**(x + y)
    assert powsimp(exp(x)*exp(y)*exp(2)*sin(x) + sin(y) + 2**x*2**y) == \
        exp(2 + x + y)*sin(x) + sin(y) + 2**(x + y)
    assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
    assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x + y))
    assert powsimp(x**2*x**y) == x**(2 + y)
    # This should remain factored, because 'exp' with deep=True is supposed
    # to act like old automatic exponent combining.
    assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == \
        (1 + exp(1 + E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == \
        (1 + exp(1 + E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + exp(1 + E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == \
        (1 + exp(1 + E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == \
        (1 + E*exp(E))*exp(-E)
    # Base combination requires nonnegativity assumptions.
    x, y = symbols('x,y', nonnegative=True)
    n = Symbol('n', real=True)
    assert powsimp(y**n * (y/x)**(-n)) == x**n
    assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)), deep=True) \
        == (x*y)**(x*y)**(x*y)
    assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
    assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
    assert powsimp(
        exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
        exp(-x + exp(-x)*exp(-x*log(x)))
    assert powsimp(
        exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
        exp(-x + exp(-x)*exp(-x*log(x)))
    assert powsimp((x + y)/(3*z), deep=False, combine='exp') == (x + y)/(3*z)
    assert powsimp((x/3 + y/3)/z, deep=True, combine='exp') == (x/3 + y/3)/z
    assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == \
        exp(x)/(1 + exp(x + y))
    assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
    assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
    assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
    p = symbols('p', positive=True)
    assert powsimp((1/x)**log(2)/x) == (1/x)**(1 + log(2))
    assert powsimp((1/p)**log(2)/p) == p**(-1 - log(2))
    # coefficient of exponent can only be simplified for positive bases
    assert powsimp(2**(2*x)) == 4**x
    assert powsimp((-1)**(2*x)) == (-1)**(2*x)
    i = symbols('i', integer=True)
    assert powsimp((-1)**(2*i)) == 1
    assert powsimp((-1)**(-x)) != (-1)**x  # could be 1/((-1)**x), but is not
    # force=True overrides assumptions
    assert powsimp((-1)**(2*x), force=True) == 1
    # rational exponents allow combining of negative terms
    w, n, m = symbols('w n m', negative=True)
    e = i/a  # not a rational exponent if `a` is unknown
    ex = w**e*n**e*m**e
    assert powsimp(ex) == m**(i/a)*n**(i/a)*w**(i/a)
    e = i/3
    ex = w**e*n**e*m**e
    assert powsimp(ex) == (-1)**i*(-m*n*w)**(i/3)
    e = (3 + i)/i
    ex = w**e*n**e*m**e
    assert powsimp(ex) == (-1)**(3*e)*(-m*n*w)**e
    eq = x**(2*a/3)
    # eq != (x**a)**(2/3) (try x = -1 and a = 3 to see)
    assert powsimp(eq).exp == eq.exp == 2*a/3
    # powdenest goes the other direction
    assert powsimp(2**(2*x)) == 4**x
    assert powsimp(exp(p/2)) == exp(p/2)
    # issue 6368
    eq = Mul(*[sqrt(Dummy(imaginary=True)) for i in range(3)])
    assert powsimp(eq) == eq and eq.is_Mul
    assert all(powsimp(e) == e for e in (sqrt(x**a), sqrt(x**2)))
def test_issue_6367():
    """Regression: powsimp/simplify should recognize that this nested-radical
    expression is identically zero once normalized — but not before."""
    z = -5*sqrt(2)/(2*sqrt(2*sqrt(29) + 29)) + sqrt(-sqrt(29)/29 + S(1)/2)
    assert Mul(*[powsimp(a) for a in Mul.make_args(z.normal())]) == 0
    assert powsimp(z.normal()) == 0
    assert simplify(z) == 0
    assert powsimp(sqrt(2 + sqrt(3))*sqrt(2 - sqrt(3)) + 1) == 2
    # Without normalization, powsimp alone cannot detect the zero.
    assert powsimp(z) != 0
def test_powsimp_negated_base():
    """powsimp on (-b)**a/b**a: the (-1)**a extraction is only valid when the
    base's sign is known (positive/negative assumptions)."""
    assert powsimp((-x + y)/sqrt(x - y)) == -sqrt(x - y)
    assert powsimp((-x + y)*(-z + y)/sqrt(x - y)/sqrt(z - y)) == sqrt(x - y)*sqrt(z - y)
    p = symbols('p', positive=True)
    assert powsimp((-p)**a/p**a) == (-1)**a
    n = symbols('n', negative=True)
    assert powsimp((-n)**a/n**a) == (-1)**a
    # if x is 0 then the lhs is 0**a*oo**a which is not (-1)**a
    assert powsimp((-x)**a/x**a) != (-1)**a
def test_issue_6440():
    """Regression: powsimp combines integer powers of 2 with symbolic powers."""
    assert powsimp(16*2**a*8**b) == 2**(a + 3*b + 4)
def test_powsimp_polar():
    """powsimp with polar symbols and exp_polar: polar quantities combine
    unconditionally, unlike ordinary complex bases."""
    from sympy import polar_lift, exp_polar
    x, y, z = symbols('x y z')
    p, q, r = symbols('p q r', polar=True)
    assert (polar_lift(-1))**(2*x) == exp_polar(2*pi*I*x)
    assert powsimp(p**x * q**x) == (p*q)**x
    assert p**x * (1/p)**x == 1
    assert (1/p)**x == p**(-x)
    assert exp_polar(x)*exp_polar(y) == exp_polar(x)*exp_polar(y)
    assert powsimp(exp_polar(x)*exp_polar(y)) == exp_polar(x + y)
    assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y) == \
        (p*exp_polar(1))**(x + y)
    assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y, combine='exp') == \
        exp_polar(x + y)*p**(x + y)
    assert powsimp(
        exp_polar(x)*exp_polar(y)*exp_polar(2)*sin(x) + sin(y) + p**x*p**y) \
        == p**(x + y) + sin(x)*exp_polar(2 + x + y) + sin(y)
    # deep=True is required to combine inside function arguments.
    assert powsimp(sin(exp_polar(x)*exp_polar(y))) == \
        sin(exp_polar(x)*exp_polar(y))
    assert powsimp(sin(exp_polar(x)*exp_polar(y)), deep=True) == \
        sin(exp_polar(x + y))
def test_powsimp_nc():
    """powsimp with noncommutative bases: exponents may be merged only for a
    repeated base; base merging must preserve the original base order."""
    x, y, z = symbols('x,y,z')
    A, B, C = symbols('A B C', commutative=False)
    assert powsimp(A**x*A**y, combine='all') == A**(x + y)
    assert powsimp(A**x*A**y, combine='base') == A**x*A**y
    assert powsimp(A**x*A**y, combine='exp') == A**(x + y)
    assert powsimp(A**x*B**x, combine='all') == (A*B)**x
    assert powsimp(A**x*B**x, combine='base') == (A*B)**x
    assert powsimp(A**x*B**x, combine='exp') == A**x*B**x
    # Order matters for noncommutative bases: (B*A)**x, not (A*B)**x.
    assert powsimp(B**x*A**x, combine='all') == (B*A)**x
    assert powsimp(B**x*A**x, combine='base') == (B*A)**x
    assert powsimp(B**x*A**x, combine='exp') == B**x*A**x
    assert powsimp(A**x*A**y*A**z, combine='all') == A**(x + y + z)
    assert powsimp(A**x*A**y*A**z, combine='base') == A**x*A**y*A**z
    assert powsimp(A**x*A**y*A**z, combine='exp') == A**(x + y + z)
    assert powsimp(A**x*B**x*C**x, combine='all') == (A*B*C)**x
    assert powsimp(A**x*B**x*C**x, combine='base') == (A*B*C)**x
    assert powsimp(A**x*B**x*C**x, combine='exp') == A**x*B**x*C**x
    assert powsimp(B**x*A**x*C**x, combine='all') == (B*A*C)**x
    assert powsimp(B**x*A**x*C**x, combine='base') == (B*A*C)**x
    assert powsimp(B**x*A**x*C**x, combine='exp') == B**x*A**x*C**x
def test_nthroot():
    """nthroot() denests n-th roots of expanded sums of square roots back to
    the original surd expression."""
    assert nthroot(90 + 34*sqrt(7), 3) == sqrt(7) + 3
    q = 1 + sqrt(2) - 2*sqrt(3) + sqrt(6) + sqrt(7)
    assert nthroot(expand_multinomial(q**3), 3) == q
    assert nthroot(41 + 29*sqrt(2), 5) == 1 + sqrt(2)
    assert nthroot(-41 - 29*sqrt(2), 5) == -1 - sqrt(2)
    expr = 1320*sqrt(10) + 4216 + 2576*sqrt(6) + 1640*sqrt(15)
    assert nthroot(expr, 5) == 1 + sqrt(6) + sqrt(15)
    q = 1 + sqrt(2) + sqrt(3) + sqrt(5)
    assert expand_multinomial(nthroot(expand_multinomial(q**5), 5)) == q
    q = 1 + sqrt(2) + 7*sqrt(6) + 2*sqrt(10)
    # Third argument raises the search precision.
    assert nthroot(expand_multinomial(q**5), 5, 8) == q
    q = 1 + sqrt(2) - 2*sqrt(3) + 1171*sqrt(6)
    assert nthroot(expand_multinomial(q**3), 3) == q
    assert nthroot(expand_multinomial(q**6), 6) == q
@slow
def test_nthroot1():
    """Precision-sensitive nthroot cases: a tiny rational perturbation of the
    surd should denest at moderate size but fall back to an unevaluated
    power when the perturbation is below the working precision."""
    q = 1 + sqrt(2) + sqrt(3) + S(1)/10**20
    p = expand_multinomial(q**5)
    assert nthroot(p, 5) == q
    q = 1 + sqrt(2) + sqrt(3) + S(1)/10**30
    p = expand_multinomial(q**5)
    assert nthroot(p, 5) == p**Rational(1, 5)
def test_collect_1():
    """Collect with respect to a Symbol"""
    x, y, z, n = symbols('x,y,z,n')
    assert collect( x + y*x, x ) == x * (1 + y)
    assert collect( x + x**2, x ) == x + x**2
    assert collect( x**2 + y*x**2, x ) == (x**2)*(1 + y)
    assert collect( x**2 + y*x, x ) == x*y + x**2
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2 + y) + 3*x*y
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2 + 3*x)
    # Coefficients of each power of x in the quartic expansion.
    assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
        x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
        x**3*(4*(1 + y)).expand() + x**4
    # symbols can be given as any iterable
    expr = x + y
    assert collect(expr, expr.free_symbols) == expr
def test_collect_2():
    """Collect with respect to a sum"""
    a, b, x = symbols('a,b,x')
    assert collect(a*(cos(x) + sin(x)) + b*(cos(x) + sin(x)),
        sin(x) + cos(x)) == (a + b)*(cos(x) + sin(x))
def test_collect_3():
    """Collect with respect to a product"""
    a, b, c = symbols('a,b,c')
    f = Function('f')
    x, y, z, n = symbols('x,y,z,n')
    assert collect(-x/8 + x*y, -x) == x*(y - S(1)/8)
    assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
    assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
    assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
    assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
    assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
    assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == \
        x**2*log(x)**2*(a + b)
    # with respect to a product of three symbols
    assert collect(y*x*z + a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
    """Collect with respect to a power"""
    a, b, c, x = symbols('a,b,c,x')
    assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    # issue 6096: 2 stays with c (unless c is integer or x is positive0
    assert collect(a*x**(2*c) + b*x**(2*c), x**c) == x**(2*c)*(a + b)
def test_collect_5():
    """Collect with respect to a tuple"""
    a, x, y, z, n = symbols('a,x,y,z,n')
    # Either grouping order is a valid collected form.
    assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
        z*(1 + a + x**2*y**4) + x**2*y**4,
        z*(1 + a) + x**2*y**4*(1 + z) ]
    assert collect((1 + (x + y) + (x + y)**2).expand(),
                   [x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_D():
    """Collect with respect to a Derivative; exact=True restricts matching
    to literal occurrences of the derivative."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fx = D(f(x), x)
    fxx = D(f(x), x, x)
    assert collect(a*fx + b*fx, fx) == (a + b)*fx
    assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
    assert collect(a*fxx + b*fxx, fx) == (a + b)*D(fx, x)
    # issue 4784
    assert collect(5*f(x) + 3*fx, fx) == 5*f(x) + 3*fx
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) == \
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) == \
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) == \
        (1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
@XFAIL
def test_collect_issues():
    """Expected failure: collecting a derivative inside an expanded quotient
    currently returns the input unchanged."""
    D = Derivative
    f = Function('f')
    e = (1 + x*D(f(x), x) + D(f(x), x))/f(x)
    assert collect(e.expand(), f(x).diff(x)) != e
def test_collect_D_0():
    """Collecting with respect to a second derivative."""
    D = Derivative
    f = Function('f')
    x, a, b = symbols('x,a,b')
    fxx = D(f(x), x, x)
    # collect does not distinguish nested derivatives, so it returns
    # -- (a + b)*D(D(f, x), x)
    assert collect(a*fxx + b*fxx, fxx) == (a + b)*fxx
def test_collect_Wild():
    """Collect with respect to functions with Wild argument"""
    a, b, x, y = symbols('a b x y')
    f = Function('f')
    w1 = Wild('.1')
    w2 = Wild('.2')
    assert collect(f(x) + a*f(x), f(w1)) == (1 + a)*f(x)
    # Arity must match: f(w1) does not match the two-argument f(x, y).
    assert collect(f(x, y) + a*f(x, y), f(w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, y) + a*f(x, y), f(w1, w2)) == (1 + a)*f(x, y)
    # A repeated wild requires the two arguments to be identical.
    assert collect(f(x, y) + a*f(x, y), f(w1, w1)) == f(x, y) + a*f(x, y)
    assert collect(f(x, x) + a*f(x, x), f(w1, w1)) == (1 + a)*f(x, x)
    # Wild patterns in powers.
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**y) == (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**b) == \
        a*(x + 1)**y + (x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, (x + 1)**w2) == \
        (1 + a)*(x + 1)**y
    assert collect(a*(x + 1)**y + (x + 1)**y, w1**w2) == (1 + a)*(x + 1)**y
def test_collect_func():
    """collect() with a post-processing function (factor) applied to each
    collected coefficient, and evaluate=False returning a dict of
    {power: coefficient}."""
    f = ((x + a + 1)**3).expand()
    assert collect(f, x) == a**3 + 3*a**2 + 3*a + x**3 + x**2*(3*a + 3) + \
        x*(3*a**2 + 6*a + 3) + 1
    assert collect(f, x, factor) == x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + \
        (a + 1)**3
    assert collect(f, x, evaluate=False) == {
        S.One: a**3 + 3*a**2 + 3*a + 1,
        x: 3*a**2 + 6*a + 3, x**2: 3*a + 3,
        x**3: 1
    }
@XFAIL
def test_collect_func_xfail():
    # XXX: this test will pass when automatic constant distribution is removed (issue 4596)
    # NOTE(review): relies on `f` from the enclosing module scope (set up in
    # test_collect_func); confirm this is intentional.
    assert collect(f, x, factor, evaluate=False) == {S.One: (a + 1)**3,
        x: 3*(a + 1)**2, x**2: 3*(a + 1), x**3: 1}
def test_collect_order():
    """collect() in the presence of O() order terms; distribute_order_term
    controls whether the order term is folded into collected coefficients."""
    a, b, x, t = symbols('a,b,x,t')
    assert collect(t + t*x + t*x**2 + O(x**3), t) == t*(1 + x + x**2 + O(x**3))
    assert collect(t + t*x + x**2 + O(x**3), t) == \
        t*(1 + x + O(x**3)) + x**2 + O(x**3)
    f = a*x + b*x + c*x**2 + d*x**2 + O(x**3)
    g = x*(a + b) + x**2*(c + d) + O(x**3)
    assert collect(f, x) == g
    assert collect(f, x, distribute_order_term=False) == g
    # Collecting a series of sin(a + b) by sin(a) and cos(a) recovers the
    # angle-addition structure.
    f = sin(a + b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)]) == \
        sin(a)*cos(b).series(b, 0, 10) + cos(a)*sin(b).series(b, 0, 10)
    assert collect(f, [sin(a), cos(a)], distribute_order_term=False) == \
        sin(a)*cos(b).series(b, 0, 10).removeO() + \
        cos(a)*sin(b).series(b, 0, 10).removeO() + O(b**10)
def test_rcollect():
    """rcollect() applies collect recursively through subexpressions."""
    assert rcollect((x**2*y + x*y + x + y)/(x + y), y) == \
        (x + y*(1 + x + x**2))/(x + y)
    # Collecting by a symbol not present leaves the expression unchanged.
    assert rcollect(sqrt(-((x + 1)*(y + 1))), z) == sqrt(-((x + 1)*(y + 1)))
def test_separatevars():
    """separatevars() factors an expression into a product of parts, each
    depending on a disjoint set of symbols; dict=True returns the parts
    keyed by symbol, force=True splits radicals without assumptions."""
    x, y, z, n = symbols('x,y,z,n')
    assert separatevars(2*n*x*z + 2*x*y*z) == 2*x*z*(n + y)
    assert separatevars(x*z + x*y*z) == x*z*(1 + y)
    assert separatevars(pi*x*z + pi*x*y*z) == pi*x*z*(1 + y)
    assert separatevars(x*y**2*sin(x) + x*sin(x)*sin(y)) == \
        x*(sin(y) + y**2)*sin(x)
    assert separatevars(x*exp(x + y) + x*exp(x)) == x*(1 + exp(y))*exp(x)
    assert separatevars((x*(y + 1))**z).is_Pow  # != x**z*(1 + y)**z
    assert separatevars(1 + x + y + x*y) == (x + 1)*(y + 1)
    assert separatevars(y/pi*exp(-(z - x)/cos(n))) == \
        y*exp(x/cos(n))*exp(-z/cos(n))/pi
    assert separatevars((x + y)*(x - y) + y**2 + 2*x + 1) == (x + 1)**2
    # issue 4858
    p = Symbol('p', positive=True)
    assert separatevars(sqrt(p**2 + x*p**2)) == p*sqrt(1 + x)
    assert separatevars(sqrt(y*(p**2 + x*p**2))) == p*sqrt(y*(1 + x))
    assert separatevars(sqrt(y*(p**2 + x*p**2)), force=True) == \
        p*sqrt(y)*sqrt(1 + x)
    # issue 4865
    assert separatevars(sqrt(x*y)).is_Pow
    assert separatevars(sqrt(x*y), force=True) == sqrt(x)*sqrt(y)
    # issue 4957
    # any type sequence for symbols is fine
    assert separatevars(((2*x + 2)*y), dict=True, symbols=()) == \
        {'coeff': 1, x: 2*x + 2, y: y}
    # separable
    assert separatevars(((2*x + 2)*y), dict=True, symbols=[x]) == \
        {'coeff': y, x: 2*x + 2}
    assert separatevars(((2*x + 2)*y), dict=True, symbols=[]) == \
        {'coeff': 1, x: 2*x + 2, y: y}
    assert separatevars(((2*x + 2)*y), dict=True) == \
        {'coeff': 1, x: 2*x + 2, y: y}
    assert separatevars(((2*x + 2)*y), dict=True, symbols=None) == \
        {'coeff': y*(2*x + 2)}
    # not separable
    assert separatevars(3, dict=True) is None
    assert separatevars(2*x + y, dict=True, symbols=()) is None
    assert separatevars(2*x + y, dict=True) is None
    assert separatevars(2*x + y, dict=True, symbols=None) == {'coeff': 2*x + y}
    # issue 4808
    n, m = symbols('n,m', commutative=False)
    assert separatevars(m + n*m) == (1 + n)*m
    assert separatevars(x + x*n) == x*(1 + n)
    # issue 4910
    f = Function('f')
    assert separatevars(f(x) + x*f(x)) == f(x) + x*f(x)
    # a noncommutable object present
    eq = x*(1 + hyper((), (), y*z))
    assert separatevars(eq) == eq
def test_separatevars_advanced_factor():
    """separatevars cases that require nontrivial multivariate factoring,
    including log() terms that only split under positivity assumptions."""
    x, y, z = symbols('x,y,z')
    assert separatevars(1 + log(x)*log(y) + log(x) + log(y)) == \
        (log(x) + 1)*(log(y) + 1)
    assert separatevars(1 + x - log(z) - x*log(z) - exp(y)*log(z) -
        x*exp(y)*log(z) + x*exp(y) + exp(y)) == \
        -((x + 1)*(log(z) - 1)*(exp(y) + 1))
    # log(x**log(y)) only expands when x, y are positive.
    x, y = symbols('x,y', positive=True)
    assert separatevars(1 + log(x**log(y)) + log(x*y)) == \
        (log(x) + 1)*(log(y) + 1)
def test_hypersimp():
    """hypersimp(f, k) returns the rational term ratio f(k+1)/f(k) of a
    hypergeometric term, or None when the term is not hypergeometric."""
    n, k = symbols('n,k', integer=True)
    assert hypersimp(factorial(k), k) == k + 1
    # factorial(k**2) is not hypergeometric in k.
    assert hypersimp(factorial(k**2), k) is None
    assert hypersimp(1/factorial(k), k) == 1/(k + 1)
    assert hypersimp(2**k/factorial(k)**2, k) == 2/(k + 1)**2
    assert hypersimp(binomial(n, k), k) == (n - k)/(k + 1)
    assert hypersimp(binomial(n + 1, k), k) == (n - k + 1)/(k + 1)
    term = (4*k + 1)*factorial(k)/factorial(2*k + 1)
    assert hypersimp(term, k) == (S(1)/2)*((4*k + 5)/(3 + 14*k + 8*k**2))
    term = 1/((2*k - 1)*factorial(2*k + 1))
    assert hypersimp(term, k) == (k - S(1)/2)/((k + 1)*(2*k + 1)*(2*k + 3))
    term = binomial(n, k)*(-1)**k/factorial(k)
    assert hypersimp(term, k) == (k - n)/(k + 1)**2
def test_nsimplify():
    """nsimplify() converts floats/numeric expressions to exact rationals or
    combinations of supplied constants, honoring tolerance=, rational=, and
    full= options."""
    x = Symbol("x")
    assert nsimplify(0) == 0
    assert nsimplify(-1) == -1
    assert nsimplify(1) == 1
    assert nsimplify(1 + x) == 1 + x
    assert nsimplify(2.7) == Rational(27, 10)
    # Recognition against a list of candidate constants.
    assert nsimplify(1 - GoldenRatio) == (1 - sqrt(5))/2
    assert nsimplify((1 + sqrt(5))/4, [GoldenRatio]) == GoldenRatio/2
    assert nsimplify(2/GoldenRatio, [GoldenRatio]) == 2*GoldenRatio - 2
    assert nsimplify(exp(5*pi*I/3, evaluate=False)) == \
        sympify('1/2 - sqrt(3)*I/2')
    assert nsimplify(sin(3*pi/5, evaluate=False)) == \
        sympify('sqrt(sqrt(5)/8 + 5/8)')
    assert nsimplify(sqrt(atan('1', evaluate=False))*(2 + I), [pi]) == \
        sqrt(pi) + sqrt(pi)/2*I
    assert nsimplify(2 + exp(2*atan('1/4')*I)) == sympify('49/17 + 8*I/17')
    # Tolerance controls which rational approximation of pi is chosen.
    assert nsimplify(pi, tolerance=0.01) == Rational(22, 7)
    assert nsimplify(pi, tolerance=0.001) == Rational(355, 113)
    assert nsimplify(0.33333, tolerance=1e-4) == Rational(1, 3)
    assert nsimplify(2.0**(1/3.), tolerance=0.001) == Rational(635, 504)
    # full=True also tries non-rational forms such as 2**(1/3).
    assert nsimplify(2.0**(1/3.), tolerance=0.001, full=True) == \
        2**Rational(1, 3)
    assert nsimplify(x + .5, rational=True) == Rational(1, 2) + x
    assert nsimplify(1/.3 + x, rational=True) == Rational(10, 3) + x
    assert nsimplify(log(3).n(), rational=True) == \
        sympify('109861228866811/100000000000000')
    assert nsimplify(Float(0.272198261287950), [pi, log(2)]) == pi*log(2)/8
    assert nsimplify(Float(0.272198261287950).n(3), [pi, log(2)]) == \
        -pi/4 - log(2) + S(7)/4
    assert nsimplify(x/7.0) == x/7
    assert nsimplify(pi/1e2) == pi/100
    assert nsimplify(pi/1e2, rational=False) == pi/100.0
    assert nsimplify(pi/1e-7) == 10000000*pi
    assert not nsimplify(
        factor(-3.0*z**2*(z**2)**(-2.5) + 3*(z**2)**(-1.5))).atoms(Float)
    e = x**0.0
    assert e.is_Pow and nsimplify(x**0.0) == 1
    assert nsimplify(3.333333, tolerance=0.1, rational=True) == Rational(10, 3)
    assert nsimplify(3.333333, tolerance=0.01, rational=True) == Rational(10, 3)
    assert nsimplify(3.666666, tolerance=0.1, rational=True) == Rational(11, 3)
    assert nsimplify(3.666666, tolerance=0.01, rational=True) == Rational(11, 3)
    assert nsimplify(33, tolerance=10, rational=True) == Rational(33)
    assert nsimplify(33.33, tolerance=10, rational=True) == Rational(30)
    assert nsimplify(37.76, tolerance=10, rational=True) == Rational(40)
    assert nsimplify(-203.1) == -S(2031)/10
    # tolerance=0 means exact conversion of the decimal representation.
    assert nsimplify(.2, tolerance=0) == S.One/5
    assert nsimplify(-.2, tolerance=0) == -S.One/5
    assert nsimplify(.2222, tolerance=0) == S(1111)/5000
    assert nsimplify(-.2222, tolerance=0) == -S(1111)/5000
    # issue 7211, PR 4112
    assert nsimplify(S(2e-8)) == S(1)/50000000
def test_extract_minus_sign():
    """simplify() canonicalizes minus signs in quotients, including division
    by zero (which yields zoo, complex infinity)."""
    x = Symbol("x")
    y = Symbol("y")
    a = Symbol("a")
    b = Symbol("b")
    assert simplify(-x/-y) == x/y
    assert simplify(-x/y) == -x/y
    assert simplify(x/y) == x/y
    assert simplify(x/-y) == -x/y
    assert simplify(-x/0) == zoo*x
    assert simplify(S(-5)/0) == zoo
    assert simplify(-a*x/(-y - b)) == a*x/(b + y)
def test_diff():
    """simplify() of derivative expressions that are identically zero
    (commuting derivatives, product/chain-rule cancellations)."""
    x = Symbol("x")
    y = Symbol("y")
    f = Function("f")
    g = Function("g")
    assert simplify(g(x).diff(x)*f(x).diff(x) - f(x).diff(x)*g(x).diff(x)) == 0
    assert simplify(2*f(x)*f(x).diff(x) - diff(f(x)**2, x)) == 0
    assert simplify(diff(1/f(x), x) + f(x).diff(x)/f(x)**2) == 0
    # Mixed partials commute.
    assert simplify(f(x).diff(x, y) - f(x).diff(y, x)) == 0
def test_logcombine_1():
    """logcombine() merges sums/multiples of logs; combination only happens
    when assumptions (positive base, real coefficient) justify it, or when
    force=True overrides the assumption checks."""
    x, y = symbols("x,y")
    a = Symbol("a")
    z, w = symbols("z,w", positive=True)
    b = Symbol("b", real=True)
    # Without assumptions on x, y, logs may not be combined.
    assert logcombine(log(x) + 2*log(y)) == log(x) + 2*log(y)
    assert logcombine(log(x) + 2*log(y), force=True) == log(x*y**2)
    assert logcombine(a*log(w) + log(z)) == a*log(w) + log(z)
    assert logcombine(b*log(z) + b*log(x)) == log(z**b) + b*log(x)
    assert logcombine(b*log(z) - log(w)) == log(z**b/w)
    # Products of logs are never merged.
    assert logcombine(log(x)*log(z)) == log(x)*log(z)
    assert logcombine(log(w)*log(x)) == log(w)*log(x)
    assert logcombine(cos(-2*log(z) + b*log(w))) in [cos(log(w**b/z**2)),
        cos(log(z**2/w**b))]
    assert logcombine(log(log(x) - log(y)) - log(z), force=True) == \
        log(log(x/y)/z)
    # Complex coefficients block combination even with force=True.
    assert logcombine((2 + I)*log(x), force=True) == (2 + I)*log(x)
    assert logcombine((x**2 + log(x) - log(y))/(x*y), force=True) == \
        (x**2 + log(x/y))/(x*y)
    # the following could also give log(z*x**log(y**2)), what we
    # are testing is that a canonical result is obtained
    assert logcombine(log(x)*2*log(y) + log(z), force=True) == \
        log(z*y**log(x**2))
    assert logcombine((x*y + sqrt(x**4 + y**4) + log(x) - log(y))/(pi*x**Rational(2, 3)*
            sqrt(y)**3), force=True) == (
            x*y + sqrt(x**4 + y**4) + log(x/y))/(pi*x**(S(2)/3)*y**(S(3)/2))
    assert logcombine(gamma(-log(x/y))*acos(-log(x/y)), force=True) == \
        acos(-log(x/y))*gamma(-log(x/y))
    assert logcombine(2*log(z)*log(w)*log(x) + log(z) + log(w)) == \
        log(z**log(w**2))*log(x) + log(w*z)
    assert logcombine(3*log(w) + 3*log(z)) == log(w**3*z**3)
    assert logcombine(x*(y + 1) + log(2) + log(3)) == x*(y + 1) + log(6)
    assert logcombine((x + y)*log(w) + (-x - y)*log(3)) == (x + y)*log(w/3)
def test_logcombine_complex_coeff():
    """logcombine must not disturb an unevaluated Integral while still
    combining log terms added to it."""
    i = Integral((sin(x**2) + cos(x**3))/x, x)
    assert logcombine(i, force=True) == i
    assert logcombine(i + 2*log(x), force=True) == \
        i + log(x**2)
def test_posify():
    """posify() replaces symbols lacking a positive assumption with fresh
    positive dummies, returning (expr, replacement_dict); symbols already
    carrying sign assumptions are left alone."""
    from sympy.abc import x
    assert str(posify(
        x +
        Symbol('p', positive=True) +
        Symbol('n', negative=True))) == '(_x + n + p, {_x: x})'
    # log(1/x).expand() should be log(1/x) but it comes back as -log(x)
    # when it is corrected, posify will allow the change to be made. The
    # force=True option can do so as well when it is implemented.
    eq, rep = posify(1/x)
    assert log(eq).expand().subs(rep) == -log(x)
    assert str(posify([x, 1 + x])) == '([_x, _x + 1], {_x: x})'
    x = symbols('x')
    p = symbols('p', positive=True)
    n = symbols('n', negative=True)
    orig = [x, n, p]
    modified, reps = posify(orig)
    assert str(modified) == '[_x, n, p]'
    # Substituting the replacements back recovers the original symbols.
    assert [w.subs(reps) for w in modified] == orig
    assert str(Integral(posify(1/x + y)[0], (y, 1, 3)).expand()) == \
        'Integral(1/_x, (y, 1, 3)) + Integral(_y, (y, 1, 3))'
    assert str(Sum(posify(1/x**n)[0], (n,1,3)).expand()) == \
        'Sum(_x**(-n), (n, 1, 3))'
def test_powdenest():
    """powdenest() collapses nested powers (b**a)**c -> b**(a*c) when the
    assumptions make it valid, or unconditionally with force=True."""
    from sympy import powdenest
    from sympy.abc import x, y, z, a, b
    p, q = symbols('p q', positive=True)
    i, j = symbols('i,j', integer=True)
    assert powdenest(x) == x
    # Without assumptions, nested powers may not be collapsed.
    assert powdenest(x + 2*(x**(2*a/3))**(3*x)) == (x + 2*(x**(2*a/3))**(3*x))
    assert powdenest((exp(2*a/3))**(3*x))  # -X-> (exp(a/3))**(6*x)
    assert powdenest((x**(2*a/3))**(3*x)) == ((x**(2*a/3))**(3*x))
    assert powdenest(exp(3*x*log(2))) == 2**(3*x)
    assert powdenest(sqrt(p**2)) == p
    i, j = symbols('i,j', integer=True)
    eq = p**(2*i)*q**(4*i)
    assert powdenest(eq) == (p*q**2)**(2*i)
    # -X-> (x**x)**i*(x**x)**j == x**(x*(i + j))
    assert powdenest((x**x)**(i + j))
    assert powdenest(exp(3*y*log(x))) == x**(3*y)
    assert powdenest(exp(y*(log(a) + log(b)))) == (a*b)**y
    assert powdenest(exp(3*(log(a) + log(b)))) == a**3*b**3
    assert powdenest(((x**(2*i))**(3*y))**x) == ((x**(2*i))**(3*y))**x
    # force=True denests without checking assumptions.
    assert powdenest(((x**(2*i))**(3*y))**x, force=True) == x**(6*i*x*y)
    assert powdenest(((x**(2*a/3))**(3*y/i))**x) == \
        (((x**(2*a/3))**(3*y/i))**x)
    assert powdenest((x**(2*i)*y**(4*i))**z, force=True) == (x*y**2)**(2*i*z)
    assert powdenest((p**(2*i)*q**(4*i))**j) == (p*q**2)**(2*i*j)
    e = ((p**(2*a))**(3*y))**x
    assert powdenest(e) == e
    e = ((x**2*y**4)**a)**(x*y)
    assert powdenest(e) == e
    e = (((x**2*y**4)**a)**(x*y))**3
    assert powdenest(e) == ((x**2*y**4)**a)**(3*x*y)
    assert powdenest((((x**2*y**4)**a)**(x*y)), force=True) == \
        (x*y**2)**(2*a*x*y)
    assert powdenest((((x**2*y**4)**a)**(x*y))**3, force=True) == \
        (x*y**2)**(6*a*x*y)
    assert powdenest((x**2*y**6)**i) != (x*y**3)**(2*i)
    # With positivity assumptions the same denesting becomes valid.
    x, y = symbols('x,y', positive=True)
    assert powdenest((x**2*y**6)**i) == (x*y**3)**(2*i)
    assert powdenest((x**(2*i/3)*y**(i/2))**(2*i)) == (x**(S(4)/3)*y)**(i**2)
    assert powdenest(sqrt(x**(2*i)*y**(6*i))) == (x*y**3)**i
    assert powdenest(4**x) == 2**(2*x)
    assert powdenest((4**x)**y) == 2**(2*x*y)
    assert powdenest(4**x*y) == 2**(2*x)*y
def test_powdenest_polar():
    """powdenest with polar symbols: denesting is always valid for polar
    quantities, with no sign/branch caveats."""
    x, y, z = symbols('x y z', polar=True)
    a, b, c = symbols('a b c')
    assert powdenest((x*y*z)**a) == x**a*y**a*z**a
    assert powdenest((x**a*y**b)**c) == x**(a*c)*y**(b*c)
    assert powdenest(((x**a)**b*y**c)**c) == x**(a*b*c)*y**(c**2)
def test_issue_5805():
    """Regression: powdenest on a squared product whose positivity is
    unknown must keep the product inside one power."""
    arg = ((gamma(x)*hyper((), (), x))*pi)**2
    assert powdenest(arg) == (pi*gamma(x)*hyper((), (), x))**2
    assert arg.is_positive is None
def test_issue_4194():
    """simplify should call cancel so the common factor is removed."""
    from sympy.abc import x, y
    g = Function('f')
    numerator = 4*x + 6*g(y)
    denominator = 2*x + 3*g(y)
    assert simplify(numerator/denominator) == 2
@XFAIL
def test_simplify_float_vs_integer():
    """Issue 4473 (https://github.com/sympy/sympy/issues/4473):
    float and integer exponents of the same base should cancel."""
    for lhs, rhs in ((x**2.0, x**2), (x**2, x**2.0)):
        assert simplify(lhs - rhs) == 0
def test_combsimp():
    """Spot-check combsimp on factorials, binomials, gamma and the
    Falling/RisingFactorial families, including negative/shifted args."""
    from sympy.abc import n, k
    assert combsimp(factorial(n)) == factorial(n)
    assert combsimp(binomial(n, k)) == binomial(n, k)
    assert combsimp(factorial(n)/factorial(n - 3)) == n*(-1 + n)*(-2 + n)
    assert combsimp(binomial(n + 1, k + 1)/binomial(n, k)) == (1 + n)/(1 + k)
    assert combsimp(binomial(3*n + 4, n + 1)/binomial(3*n + 1, n)) == \
        S(3)/2*((3*n + 2)*(3*n + 4)/((n + 1)*(2*n + 3)))
    assert combsimp(factorial(n)**2/factorial(n - 3)) == \
        factorial(n)*n*(-1 + n)*(-2 + n)
    assert combsimp(factorial(n)*binomial(n + 1, k + 1)/binomial(n, k)) == \
        factorial(n)*(1 + n)/(1 + k)
    assert combsimp(binomial(n - 1, k)) == -((-n + k)*binomial(n, k))/n
    assert combsimp(binomial(n + 2, k + S(1)/2)) == 4*((n + 1)*(n + 2) *
        binomial(n, k + S(1)/2))/((2*k - 2*n - 1)*(2*k - 2*n - 3))
    assert combsimp(binomial(n + 2, k + 2.0)) == \
        -((1.0*n + 2.0)*binomial(n + 1.0, k + 2.0))/(k - n)
    # coverage tests
    assert combsimp(factorial(n*(1 + n) - n**2 - n)) == 1
    assert combsimp(binomial(n + k - 2, n)) == \
        k*(k - 1)*binomial(n + k, n)/((n + k)*(n + k - 1))
    i = Symbol('i', integer=True)
    e = gamma(i + 3)
    assert combsimp(e) == e
    e = gamma(exp(i))
    assert combsimp(e) == e
    e = gamma(n + S(1)/3)*gamma(n + S(2)/3)
    assert combsimp(e) == e
    assert combsimp(gamma(4*n + S(1)/2)/gamma(2*n - S(3)/4)) == \
        2**(4*n - S(5)/2)*(8*n - 3)*gamma(2*n + S(3)/4)/sqrt(pi)
    assert combsimp(6*FallingFactorial(-4, n)/factorial(n)) == \
        (-1)**n*(n + 1)*(n + 2)*(n + 3)
    assert combsimp(6*FallingFactorial(-4, n - 1)/factorial(n - 1)) == \
        (-1)**(n - 1)*n*(n + 1)*(n + 2)
    assert combsimp(6*FallingFactorial(-4, n - 3)/factorial(n - 3)) == \
        (-1)**(n - 3)*n*(n - 1)*(n - 2)
    assert combsimp(6*FallingFactorial(-4, -n - 1)/factorial(-n - 1)) == \
        -(-1)**(-n - 1)*n*(n - 1)*(n - 2)
    assert combsimp(6*RisingFactorial(4, n)/factorial(n)) == \
        (n + 1)*(n + 2)*(n + 3)
    assert combsimp(6*RisingFactorial(4, n - 1)/factorial(n - 1)) == \
        n*(n + 1)*(n + 2)
    assert combsimp(6*RisingFactorial(4, n - 3)/factorial(n - 3)) == \
        n*(n - 1)*(n - 2)
    assert combsimp(6*RisingFactorial(4, -n - 1)/factorial(-n - 1)) == \
        -n*(n - 1)*(n - 2)
def test_issue_5615():
    """collect over non-Symbol targets must return the expression unchanged."""
    aA, Re, a, b, D = symbols('aA Re a b D')
    expanded = ((D**3*a + b*aA**3)/Re).expand()
    targets = [aA**3/Re, a]
    assert collect(expanded, targets) == expanded
def test_issue_5728():
    """powsimp should combine matching radical, symbolic-power and exp
    bases into a single power."""
    b = x*sqrt(y)
    a = sqrt(b)
    c = sqrt(sqrt(x)*y)
    assert powsimp(a*b) == sqrt(b)**3
    assert powsimp(a*b**2*sqrt(y)) == sqrt(y)*a**5
    assert powsimp(a*x**2*c**3*y) == c**3*a**5
    assert powsimp(a*x*c**3*y**2) == c**7*a
    assert powsimp(x*c**3*y**2) == c**7
    assert powsimp(x*c**3*y) == x*y*c**3
    assert powsimp(sqrt(x)*c**3*y) == c**5
    assert powsimp(sqrt(x)*a**3*sqrt(y)) == sqrt(x)*sqrt(y)*a**3
    assert powsimp(Mul(sqrt(x)*c**3*sqrt(y), y, evaluate=False)) == \
        sqrt(x)*sqrt(y)**3*c**3
    assert powsimp(a**2*a*x**2*y) == a**7
    # symbolic powers work, too
    b = x**y*y
    a = b*sqrt(b)
    assert a.is_Mul is True
    assert powsimp(a) == sqrt(b)**3
    # as does exp
    a = x*exp(2*y/3)
    assert powsimp(a*sqrt(a)) == sqrt(a)**3
    assert powsimp(a**2*sqrt(a)) == sqrt(a)**5
    assert powsimp(a**2*sqrt(sqrt(a))) == sqrt(sqrt(a))**9
def test_as_content_primitive():
    """as_content_primitive returns a (rational content, primitive) pair,
    extracting numeric content from sums, products and powers."""
    # although the _as_content_primitive methods do not alter the underlying structure,
    # the as_content_primitive function will touch up the expression and join
    # bases that would otherwise have not been joined.
    assert ((x*(2 + 2*x)*(3*x + 3)**2)).as_content_primitive() == \
        (18, x*(x + 1)**3)
    assert (2 + 2*x + 2*y*(3 + 3*y)).as_content_primitive() == \
        (2, x + 3*y*(y + 1) + 1)
    assert ((2 + 6*x)**2).as_content_primitive() == \
        (4, (3*x + 1)**2)
    assert ((2 + 6*x)**(2*y)).as_content_primitive() == \
        (1, (_keep_coeff(S(2), (3*x + 1)))**(2*y))
    assert (5 + 10*x + 2*y*(3 + 3*y)).as_content_primitive() == \
        (1, 10*x + 6*y*(y + 1) + 5)
    assert ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive() == \
        (11, x*(y + 1))
    assert ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive() == \
        (121, x**2*(y + 1)**2)
    assert (y**2).as_content_primitive() == \
        (1, y**2)
    assert (S.Infinity).as_content_primitive() == (1, oo)
    eq = x**(2 + y)
    assert (eq).as_content_primitive() == (1, eq)
    assert (S.Half**(2 + x)).as_content_primitive() == (S(1)/4, 2**-x)
    assert ((-S.Half)**(2 + x)).as_content_primitive() == \
        (S(1)/4, (-S.Half)**x)
    assert ((-S.Half)**(2 + x)).as_content_primitive() == \
        (S(1)/4, (-S.Half)**x)
    assert (4**((1 + y)/2)).as_content_primitive() == (2, 4**(y/2))
    assert (3**((1 + y)/2)).as_content_primitive() == \
        (1, 3**(Mul(S(1)/2, 1 + y, evaluate=False)))
    assert (5**(S(3)/4)).as_content_primitive() == (1, 5**(S(3)/4))
    assert (5**(S(7)/4)).as_content_primitive() == (5, 5**(S(3)/4))
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).as_content_primitive() == \
        (S(1)/14, 7.0*x + 21*y + 10*z)
    assert (2**(S(3)/4) + 2**(S(1)/4)*sqrt(3)).as_content_primitive(radical=True) == \
        (1, 2**(S(1)/4)*(sqrt(2) + sqrt(3)))
def test_radsimp():
    """radsimp rationalizes denominators containing radicals, respects
    `max_terms` and `symbolic`, recurses into function args, and obeys
    power rules for negative bases."""
    r2 = sqrt(2)
    r3 = sqrt(3)
    r5 = sqrt(5)
    r7 = sqrt(7)
    assert fraction(radsimp(1/r2)) == (sqrt(2), 2)
    assert radsimp(1/(1 + r2)) == \
        -1 + sqrt(2)
    assert radsimp(1/(r2 + r3)) == \
        -sqrt(2) + sqrt(3)
    assert fraction(radsimp(1/(1 + r2 + r3))) == \
        (-sqrt(6) + sqrt(2) + 2, 4)
    assert fraction(radsimp(1/(r2 + r3 + r5))) == \
        (-sqrt(30) + 2*sqrt(3) + 3*sqrt(2), 12)
    assert fraction(radsimp(1/(1 + r2 + r3 + r5))) == (
        (-34*sqrt(10) - 26*sqrt(15) - 55*sqrt(3) - 61*sqrt(2) + 14*sqrt(30) +
        93 + 46*sqrt(6) + 53*sqrt(5), 71))
    assert fraction(radsimp(1/(r2 + r3 + r5 + r7))) == (
        (-50*sqrt(42) - 133*sqrt(5) - 34*sqrt(70) - 145*sqrt(3) + 22*sqrt(105)
        + 185*sqrt(2) + 62*sqrt(30) + 135*sqrt(7), 215))
    z = radsimp(1/(1 + r2/3 + r3/5 + r5 + r7))
    assert len((3616791619821680643598*z).args) == 16
    assert radsimp(1/z) == 1/z
    assert radsimp(1/z, max_terms=20).expand() == 1 + r2/3 + r3/5 + r5 + r7
    assert radsimp(1/(r2*3)) == \
        sqrt(2)/6
    assert radsimp(1/(r2*a + r3 + r5 + r7)) == (
        (8*sqrt(2)*a**7 - 8*sqrt(7)*a**6 - 8*sqrt(5)*a**6 - 8*sqrt(3)*a**6 -
        180*sqrt(2)*a**5 + 8*sqrt(30)*a**5 + 8*sqrt(42)*a**5 + 8*sqrt(70)*a**5
        - 24*sqrt(105)*a**4 + 84*sqrt(3)*a**4 + 100*sqrt(5)*a**4 +
        116*sqrt(7)*a**4 - 72*sqrt(70)*a**3 - 40*sqrt(42)*a**3 -
        8*sqrt(30)*a**3 + 782*sqrt(2)*a**3 - 462*sqrt(3)*a**2 -
        302*sqrt(7)*a**2 - 254*sqrt(5)*a**2 + 120*sqrt(105)*a**2 -
        795*sqrt(2)*a - 62*sqrt(30)*a + 82*sqrt(42)*a + 98*sqrt(70)*a -
        118*sqrt(105) + 59*sqrt(7) + 295*sqrt(5) + 531*sqrt(3))/(16*a**8 -
        480*a**6 + 3128*a**4 - 6360*a**2 + 3481))
    assert radsimp(1/(r2*a + r2*b + r3 + r7)) == (
        (sqrt(2)*a*(a + b)**2 - 5*sqrt(2)*a + sqrt(42)*a + sqrt(2)*b*(a +
        b)**2 - 5*sqrt(2)*b + sqrt(42)*b - sqrt(7)*(a + b)**2 - sqrt(3)*(a +
        b)**2 - 2*sqrt(3) + 2*sqrt(7))/(2*a**4 + 8*a**3*b + 12*a**2*b**2 -
        20*a**2 + 8*a*b**3 - 40*a*b + 2*b**4 - 20*b**2 + 8))
    assert radsimp(1/(r2*a + r2*b + r2*c + r2*d)) == \
        sqrt(2)/(2*a + 2*b + 2*c + 2*d)
    assert radsimp(1/(1 + r2*a + r2*b + r2*c + r2*d)) == (
        (sqrt(2)*a + sqrt(2)*b + sqrt(2)*c + sqrt(2)*d - 1)/(2*a**2 + 4*a*b +
        4*a*c + 4*a*d + 2*b**2 + 4*b*c + 4*b*d + 2*c**2 + 4*c*d + 2*d**2 - 1))
    assert radsimp((y**2 - x)/(y - sqrt(x))) == \
        sqrt(x) + y
    assert radsimp(-(y**2 - x)/(y - sqrt(x))) == \
        -(sqrt(x) + y)
    assert radsimp(1/(1 - I + a*I)) == \
        (-I*a + 1 + I)/(a**2 - 2*a + 2)
    assert radsimp(1/((-x + y)*(x - sqrt(y)))) == \
        (-x - sqrt(y))/((x - y)*(x**2 - y))
    e = (3 + 3*sqrt(2))*x*(3*x - 3*sqrt(y))
    assert radsimp(e) == x*(3 + 3*sqrt(2))*(3*x - 3*sqrt(y))
    assert radsimp(1/e) == (
        (-9*x + 9*sqrt(2)*x - 9*sqrt(y) + 9*sqrt(2)*sqrt(y))/(9*x*(9*x**2 -
        9*y)))
    assert radsimp(1 + 1/(1 + sqrt(3))) == \
        Mul(S.Half, -1 + sqrt(3), evaluate=False) + 1
    A = symbols("A", commutative=False)
    # non-commutative factors must not be rationalized through
    assert radsimp(x**2 + sqrt(2)*x**2 - sqrt(2)*x*A) == \
        x**2 + sqrt(2)*x**2 - sqrt(2)*x*A
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))) == -sqrt(2) + sqrt(3)
    assert radsimp(1/sqrt(5 + 2 * sqrt(6))**3) == -(-sqrt(3) + sqrt(2))**3
    # issue 6532
    assert fraction(radsimp(1/sqrt(x))) == (sqrt(x), x)
    assert fraction(radsimp(1/sqrt(2*x + 3))) == (sqrt(2*x + 3), 2*x + 3)
    assert fraction(radsimp(1/sqrt(2*(x + 3)))) == (sqrt(2*x + 6), 2*x + 6)
    # issue 5994
    e = S('-(2 + 2*sqrt(2) + 4*2**(1/4))/'
        '(1 + 2**(3/4) + 3*2**(1/4) + 3*sqrt(2))')
    assert radsimp(e).expand() == -2*2**(S(3)/4) - 2*2**(S(1)/4) + 2 + 2*sqrt(2)
    # issue 5986 (modifications to radimp didn't initially recognize this so
    # the test is included here)
    assert radsimp(1/(-sqrt(5)/2 - S(1)/2 + (-sqrt(5)/2 - S(1)/2)**2)) == 1
    # from issue 5934
    eq = (
        (-240*sqrt(2)*sqrt(sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) -
        360*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) -
        120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) +
        120*sqrt(2)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
        120*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5) +
        120*sqrt(10)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
        120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5))/(-36000 -
        7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
        24*sqrt(10)*sqrt(-sqrt(5) + 5))**2))
    assert radsimp(eq) is S.NaN  # it's 0/0
    # work with normal form
    e = 1/sqrt(sqrt(7)/7 + 2*sqrt(2) + 3*sqrt(3) + 5*sqrt(5)) + 3
    assert radsimp(e) == (
        -sqrt(sqrt(7) + 14*sqrt(2) + 21*sqrt(3) +
        35*sqrt(5))*(-11654899*sqrt(35) - 1577436*sqrt(210) - 1278438*sqrt(15)
        - 1346996*sqrt(10) + 1635060*sqrt(6) + 5709765 + 7539830*sqrt(14) +
        8291415*sqrt(21))/1300423175 + 3)
    # obey power rules
    base = sqrt(3) - sqrt(2)
    assert radsimp(1/base**3) == (sqrt(3) + sqrt(2))**3
    assert radsimp(1/(-base)**3) == -(sqrt(2) + sqrt(3))**3
    assert radsimp(1/(-base)**x) == (-base)**(-x)
    assert radsimp(1/base**x) == (sqrt(2) + sqrt(3))**x
    assert radsimp(root(1/(-1 - sqrt(2)), -x)) == (-1)**(-1/x)*(1 + sqrt(2))**(1/x)
    # recurse
    e = cos(1/(1 + sqrt(2)))
    assert radsimp(e) == cos(-sqrt(2) + 1)
    assert radsimp(e/2) == cos(-sqrt(2) + 1)/2
    assert radsimp(1/e) == 1/cos(-sqrt(2) + 1)
    assert radsimp(2/e) == 2/cos(-sqrt(2) + 1)
    assert fraction(radsimp(e/sqrt(x))) == (sqrt(x)*cos(-sqrt(2)+1), x)
    # test that symbolic denominators are not processed
    r = 1 + sqrt(2)
    assert radsimp(x/r, symbolic=False) == -x*(-sqrt(2) + 1)
    assert radsimp(x/(y + r), symbolic=False) == x/(y + 1 + sqrt(2))
    assert radsimp(x/(y + r)/r, symbolic=False) == \
        -x*(-sqrt(2) + 1)/(y + 1 + sqrt(2))
    # issue 7408
    eq = sqrt(x)/sqrt(y)
    assert radsimp(eq) == umul(sqrt(x), sqrt(y), 1/y)
    assert radsimp(eq, symbolic=False) == eq
    # issue 7498
    assert radsimp(sqrt(x)/sqrt(y)**3) == umul(sqrt(x), sqrt(y**3), 1/y**3)
    # for coverage
    eq = sqrt(x)/y**2
    assert radsimp(eq) == eq
def test_radsimp_issue_3214():
    """radsimp rationalizes a complex quotient containing a radical
    (issue 3214)."""
    c, p = symbols('c p', positive=True)
    root = sqrt(c**2 - p**2)
    quotient = (c + I*p - root)/(c + I*p + root)
    expected = -I*(c + I*p - root)**2/(2*c*p)
    assert radsimp(quotient) == expected
def test_collect_const():
    """collect_const factors out shared numeric/radical constants;
    collect_sqrt groups terms by shared square roots."""
    # coverage not provided by above tests
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5)) == \
        2*(2*sqrt(5)*a + sqrt(3))  # let the primitive reabsorb
    assert collect_const(2*sqrt(3) + 4*a*sqrt(5), sqrt(3)) == \
        2*sqrt(3) + 4*a*sqrt(5)
    assert collect_const(sqrt(2)*(1 + sqrt(2)) + sqrt(3) + x*sqrt(2)) == \
        sqrt(2)*(x + 1 + sqrt(2)) + sqrt(3)
    # issue 5290
    assert collect_const(2*x + 2*y + 1, 2) == \
        collect_const(2*x + 2*y + 1) == \
        Add(S(1), Mul(2, x + y, evaluate=False), evaluate=False)
    assert collect_const(-y - z) == Mul(-1, y + z, evaluate=False)
    assert collect_const(2*x - 2*y - 2*z, 2) == \
        Mul(2, x - y - z, evaluate=False)
    assert collect_const(2*x - 2*y - 2*z, -2) == \
        _unevaluated_Add(2*x, Mul(-2, y + z, evaluate=False))
    # this is why the content_primitive is used
    eq = (sqrt(15 + 5*sqrt(2))*x + sqrt(3 + sqrt(2))*y)*2
    assert collect_sqrt(eq + 2) == \
        2*sqrt(sqrt(2) + 3)*(sqrt(5)*x + y) + 2
def test_issue_5933():
    """radsimp of a regular-pentagon centroid coordinate must keep the
    denominator away from zero."""
    from sympy import Polygon, RegularPolygon, denom
    pentagon = RegularPolygon((0, 0), 1, 5)
    cx = Polygon(*pentagon.vertices).centroid.x
    # the denominator must be nonzero both numerically ...
    assert abs(denom(cx).n()) > 1e-12
    # ... and after radsimp (in case simplify didn't handle it)
    assert abs(denom(radsimp(cx))) > 1e-12
def test_fraction_expand():
    """expand(frac=True) matches fraction_expand; plain expand differs."""
    expr = (x + y)*y/x
    over_common_denominator = (x*y + y**2)/x
    assert expr.expand(frac=True) == over_common_denominator
    assert fraction_expand(expr) == over_common_denominator
    assert expr.expand() == y + y**2/x
def test_combsimp_gamma():
    """combsimp on gamma expressions: shift/reflection/multiplication
    identities, mixed rational offsets, and non-commutative factors."""
    from sympy.abc import x, y
    R = Rational
    assert combsimp(gamma(x)) == gamma(x)
    assert combsimp(gamma(x + 1)/x) == gamma(x)
    assert combsimp(gamma(x)/(x - 1)) == gamma(x - 1)
    assert combsimp(x*gamma(x)) == gamma(x + 1)
    assert combsimp((x + 1)*gamma(x + 1)) == gamma(x + 2)
    assert combsimp(gamma(x + y)*(x + y)) == gamma(x + y + 1)
    assert combsimp(x/gamma(x + 1)) == 1/gamma(x)
    assert combsimp((x + 1)**2/gamma(x + 2)) == (x + 1)/gamma(x + 1)
    assert combsimp(x*gamma(x) + gamma(x + 3)/(x + 2)) == \
        (x + 2)*gamma(x + 1)
    assert combsimp(gamma(2*x)*x) == gamma(2*x + 1)/2
    assert combsimp(gamma(2*x)/(x - S(1)/2)) == 2*gamma(2*x - 1)
    # reflection formula
    assert combsimp(gamma(x)*gamma(1 - x)) == pi/sin(pi*x)
    assert combsimp(gamma(x)*gamma(-x)) == -pi/(x*sin(pi*x))
    assert combsimp(1/gamma(x + 3)/gamma(1 - x)) == \
        sin(pi*x)/(pi*x*(x + 1)*(x + 2))
    # duplication/triplication formulas
    assert powsimp(combsimp(
        gamma(x)*gamma(x + S(1)/2)*gamma(y)/gamma(x + y))) == \
        2**(-2*x + 1)*sqrt(pi)*gamma(2*x)*gamma(y)/gamma(x + y)
    assert combsimp(1/gamma(x)/gamma(x - S(1)/3)/gamma(x + S(1)/3)) == \
        3**(3*x - S(3)/2)/(2*pi*gamma(3*x - 1))
    assert simplify(
        gamma(S(1)/2 + x/2)*gamma(1 + x/2)/gamma(1 + x)/sqrt(pi)*2**x) == 1
    assert combsimp(gamma(S(-1)/4)*gamma(S(-3)/4)) == 16*sqrt(2)*pi/3
    assert powsimp(combsimp(gamma(2*x)/gamma(x))) == \
        2**(2*x - 1)*gamma(x + S(1)/2)/sqrt(pi)
    # issue 6792
    e = (-gamma(k)*gamma(k + 2) + gamma(k + 1)**2)/gamma(k)**2
    assert combsimp(e) == -k
    assert combsimp(1/e) == -1/k
    e = (gamma(x) + gamma(x + 1))/gamma(x)
    assert combsimp(e) == x + 1
    assert combsimp(1/e) == 1/(x + 1)
    e = (gamma(x) + gamma(x + 2))*(gamma(x - 1) + gamma(x))/gamma(x)
    assert combsimp(e) == (x**2 + x + 1)*gamma(x + 1)/(x - 1)
    e = (-gamma(k)*gamma(k + 2) + gamma(k + 1)**2)/gamma(k)**2
    assert combsimp(e**2) == k**2
    assert combsimp(e**2/gamma(k + 1)) == k/gamma(k)
    a = R(1, 2) + R(1, 3)
    b = a + R(1, 3)
    # BUG FIX: the ``== \`` joining this assert to its expected value had
    # been dropped, leaving a truthiness-only assert followed by a dangling
    # (over-indented) expression; restore the comparison.
    assert combsimp(gamma(2*k)/gamma(k)*gamma(k + a)*gamma(k + b)) == \
        3*2**(2*k + 1)*3**(-3*k - 2)*sqrt(pi)*gamma(3*k + R(3, 2))/2
    A, B = symbols('A B', commutative=False)
    assert combsimp(e*B*A) == combsimp(e)*B*A
    # check iteration
    assert combsimp(gamma(2*k)/gamma(k)*gamma(-k - R(1, 2))) == (
        -2**(2*k + 1)*sqrt(pi)/(2*((2*k + 1)*cos(pi*k))))
    assert combsimp(
        gamma(k)*gamma(k + R(1, 3))*gamma(k + R(2, 3))/gamma(3*k/2)) == (
        3*2**(3*k + 1)*3**(-3*k - S.Half)*sqrt(pi)*gamma(3*k/2 + S.Half)/2)
def test_polarify():
    """polarify lifts constants and (optionally) symbols into polar form;
    `subs` controls substitution, `lift` wraps atoms in polar_lift."""
    from sympy import polar_lift, polarify
    x = Symbol('x')
    z = Symbol('z', polar=True)
    f = Function('f')
    ES = {}
    assert polarify(-1) == (polar_lift(-1), ES)
    assert polarify(1 + I) == (polar_lift(1 + I), ES)
    assert polarify(exp(x), subs=False) == exp(x)
    assert polarify(1 + x, subs=False) == 1 + x
    assert polarify(f(I) + x, subs=False) == f(polar_lift(I)) + x
    assert polarify(x, lift=True) == polar_lift(x)
    assert polarify(z, lift=True) == z
    assert polarify(f(x), lift=True) == f(polar_lift(x))
    assert polarify(1 + x, lift=True) == polar_lift(1 + x)
    assert polarify(1 + f(x), lift=True) == polar_lift(1 + f(polar_lift(x)))
    newex, subs = polarify(f(x) + z)
    assert newex.subs(subs) == f(x) + z
    mu = Symbol("mu")
    sigma = Symbol("sigma", positive=True)
    # Make sure polarify(lift=True) doesn't try to lift the integration
    # variable
    assert polarify(
        Integral(sqrt(2)*x*exp(-(-mu + x)**2/(2*sigma**2))/(2*sqrt(pi)*sigma),
        (x, -oo, oo)), lift=True) == Integral(sqrt(2)*(sigma*exp_polar(0))**exp_polar(I*pi)*
        exp((sigma*exp_polar(0))**(2*exp_polar(I*pi))*exp_polar(I*pi)*polar_lift(-mu + x)**
        (2*exp_polar(0))/2)*exp_polar(0)*polar_lift(x)/(2*sqrt(pi)), (x, -oo, oo))
def test_unpolarify():
    """unpolarify strips exp_polar/polar_lift/principal_branch wrappers,
    optionally only inside exponents, and maps over functions/relations."""
    from sympy import (exp_polar, polar_lift, exp, unpolarify, sin,
        principal_branch)
    from sympy import gamma, erf, sin, tanh, uppergamma, Eq, Ne
    from sympy.abc import x
    p = exp_polar(7*I) + 1
    u = exp(7*I) + 1
    assert unpolarify(1) == 1
    assert unpolarify(p) == u
    assert unpolarify(p**2) == u**2
    assert unpolarify(p**x) == p**x
    assert unpolarify(p*x) == u*x
    assert unpolarify(p + x) == u + x
    assert unpolarify(sqrt(sin(p))) == sqrt(sin(u))
    # Test reduction to principal branch 2*pi.
    t = principal_branch(x, 2*pi)
    assert unpolarify(t) == x
    assert unpolarify(sqrt(t)) == sqrt(t)
    # Test exponents_only.
    assert unpolarify(p**p, exponents_only=True) == p**u
    assert unpolarify(uppergamma(x, p**p)) == uppergamma(x, p**u)
    # Test functions.
    assert unpolarify(sin(p)) == sin(u)
    assert unpolarify(tanh(p)) == tanh(u)
    assert unpolarify(gamma(p)) == gamma(u)
    assert unpolarify(erf(p)) == erf(u)
    assert unpolarify(uppergamma(x, p)) == uppergamma(x, p)
    assert unpolarify(uppergamma(sin(p), sin(p + exp_polar(0)))) == \
        uppergamma(sin(u), sin(u + 1))
    assert unpolarify(uppergamma(polar_lift(0), 2*exp_polar(0))) == \
        uppergamma(0, 2)
    assert unpolarify(Eq(p, 0)) == Eq(u, 0)
    assert unpolarify(Ne(p, 0)) == Ne(u, 0)
    assert unpolarify(polar_lift(x) > 0) == (x > 0)
    # Test bools
    assert unpolarify(True) is True
def test_issue_6097():
    """collect handles float exponents such as base**(2.0*x)."""
    for base in (y, 2):
        expr = a*base**(2.0*x) + b*base**(2.0*x)
        assert collect(expr, base**x) == base**(2.0*x)*(a + b)
def test_signsimp():
    """signsimp recognizes a sign-cancelling sum as identically zero."""
    cancelling_sum = x*(-x + 1) + x*(x - 1)
    assert signsimp(Eq(cancelling_sum, 0)) is S.true
    # Abs canonicalizes the sign of its argument on its own
    assert Abs(x - 1) == Abs(1 - x)
def test_besselsimp():
    """besselsimp rewrites besseli with exp_polar arguments as besselj
    (and closed forms for half-integer orders)."""
    from sympy import besselj, besseli, besselk, bessely, jn, yn, exp_polar, cosh, cosine_transform
    assert besselsimp(exp(-I*pi*y/2)*besseli(y, z*exp_polar(I*pi/2))) == \
        besselj(y, z)
    assert besselsimp(exp(-I*pi*a/2)*besseli(a, 2*sqrt(x)*exp_polar(I*pi/2))) == \
        besselj(a, 2*sqrt(x))
    assert besselsimp(sqrt(2)*sqrt(pi)*x**(S(1)/4)*exp(I*pi/4)*exp(-I*pi*a/2) *
        besseli(-S(1)/2, sqrt(x)*exp_polar(I*pi/2)) *
        besseli(a, sqrt(x)*exp_polar(I*pi/2))/2) == \
        besselj(a, sqrt(x)) * cos(sqrt(x))
    assert besselsimp(besseli(S(-1)/2, z)) == \
        sqrt(2)*cosh(z)/(sqrt(pi)*sqrt(z))
    assert besselsimp(besseli(a, z*exp_polar(-I*pi/2))) == \
        exp(-I*pi*a/2)*besselj(a, z)
    assert cosine_transform(1/t*sin(a/t), t, y) == \
        sqrt(2)*sqrt(pi)*besselj(0, 2*sqrt(a)*sqrt(y))/2
def test_Piecewise():
    """simplify maps over every Piecewise piece; trigsimp only touches
    the trig-containing parts."""
    e1 = x*(x + y) - y*(x + y)
    e2 = sin(x)**2 + cos(x)**2
    e3 = expand((x + y)*y/x)
    s1, s2, s3 = [simplify(e) for e in (e1, e2, e3)]
    assert simplify(Piecewise((e1, x < e2), (e3, True))) == \
        Piecewise((s1, x < s2), (s3, True))
    # trigsimp tries not to touch non-trig containing args
    assert trigsimp(Piecewise((e1, e3 < e2), (e3, True))) == \
        Piecewise((e1, e3 < s2), (e3, True))
def test_polymorphism():
    """simplify dispatches to a Basic subclass's _eval_simplify hook."""
    class Simplifiable(Basic):
        def _eval_simplify(self, **kwargs):
            return 1

    obj = Simplifiable(5, 2)
    assert simplify(obj) == 1
def test_issue_from_PR1599():
    """powsimp/simplify of roots of negative symbols must extract the
    correct -1 factors."""
    n1, n2, n3, n4 = symbols('n1 n2 n3 n4', negative=True)
    assert simplify(I*sqrt(n1)) == -sqrt(-n1)
    assert (powsimp(sqrt(n1)*sqrt(n2)*sqrt(n3)) ==
        -I*sqrt(-n1)*sqrt(-n2)*sqrt(-n3))
    assert (powsimp(root(n1, 3)*root(n2, 3)*root(n3, 3)*root(n4, 3)) ==
        -(-1)**(S(1)/3)*
        (-n1)**(S(1)/3)*(-n2)**(S(1)/3)*(-n3)**(S(1)/3)*(-n4)**(S(1)/3))
def test_issue_6811():
    """simplify factors out the 2 without producing a 2-arg Mul."""
    eq = (x + 2*y)*(2*x + 2)
    assert simplify(eq) == (x + 1)*(x + 2*y)*2
    # reject the 2-arg Mul -- these are a headache for test writing
    expanded = eq.expand()
    assert simplify(expanded) == 2*x**2 + 4*x*y + 2*x + 4*y
@XFAIL
def test_issue_6811_fail():
    """trigsimp should recover the shorter equivalent form (known failure)."""
    # from doc/src/modules/physics/mechanics/examples.rst, the current `eq`
    # at Line 576 (in different variables) was formerly the equivalent and
    # shorter expression given below...it would be nice to get the short one
    # back again
    xp, y, x, z = symbols('xp, y, x, z')
    eq = 4*(-19*sin(x)*y + 5*sin(3*x)*y + 15*cos(2*x)*z - 21*z)*xp/(9*cos(x) - 5*cos(3*x))
    assert trigsimp(eq) == -2*(2*cos(x)*tan(x)*y + 3*z)*xp/cos(x)
def test_issue_6920():
    """simplify rewrites trig/hyperbolic sums as single exponentials,
    even when they occur inside another function."""
    exprs = [cos(x) + I*sin(x), cos(x) - I*sin(x),
        cosh(x) - sinh(x), cosh(x) + sinh(x)]
    expected = [exp(I*x), exp(-I*x), exp(-x), exp(x)]
    # wrap in f to show that the change happens wherever the expr occurs
    f = Function('f')
    simplified = [simplify(f(e)).args[0] for e in exprs]
    assert simplified == expected
def test_issue_7001():
    """simplify collapses a piecewise potential expression to -1/0 pieces."""
    from sympy.abc import r, R
    assert simplify(-(r*Piecewise((4*pi/3, r <= R),
        (-8*pi*R**3/(3*r**3), True)) + 2*Piecewise((4*pi*r/3, r <= R),
        (4*pi*R**3/(3*r**2), True)))/(4*pi*r)) == \
        Piecewise((-1, r <= R), (0, True))
def test_exptrigsimp():
    """exptrigsimp rewrites exp combinations as trig/hyperbolic functions
    and agrees with simplify on tanh/tan-like quotients."""
    def valid(a, b):
        # a and b must agree numerically AND be structurally identical
        from sympy.utilities.randtest import test_numerically as tn
        if not (tn(a, b) and a == b):
            return False
        return True

    assert exptrigsimp(exp(x) + exp(-x)) == 2*cosh(x)
    assert exptrigsimp(exp(x) - exp(-x)) == 2*sinh(x)
    e = [cos(x) + I*sin(x), cos(x) - I*sin(x),
        cosh(x) - sinh(x), cosh(x) + sinh(x)]
    ok = [exp(I*x), exp(-I*x), exp(-x), exp(x)]
    assert all(valid(i, j) for i, j in zip(
        [exptrigsimp(ei) for ei in e], ok))
    # expressions with no pure-exp rewrite must be left untouched.
    # BUG FIX: this was ``assert [exptrigsimp(ei) == ei for ei in ue]``,
    # which asserts a non-empty list and is therefore always true.
    ue = [cos(x) + sin(x), cos(x) - sin(x),
        cosh(x) + I*sinh(x), cosh(x) - I*sinh(x)]
    assert all(exptrigsimp(ei) == ei for ei in ue)
    res = []
    ok = [y*tanh(1), 1/(y*tanh(1)), I*y*tan(1), -I/(y*tan(1)),
        y*tanh(x), 1/(y*tanh(x)), I*y*tan(x), -I/(y*tan(x)),
        y*tanh(1 + I), 1/(y*tanh(1 + I))]
    for a in (1, I, x, I*x, 1 + I):
        w = exp(a)
        eq = y*(w - 1/w)/(w + 1/w)
        s = simplify(eq)
        assert s == exptrigsimp(eq)
        res.append(s)
        sinv = simplify(1/eq)
        assert sinv == exptrigsimp(1/eq)
        res.append(sinv)
    assert all(valid(i, j) for i, j in zip(res, ok))
    for a in range(1, 3):
        w = exp(a)
        e = w + 1/w
        s = simplify(e)
        assert s == exptrigsimp(e)
        assert valid(s, 2*cosh(a))
        e = w - 1/w
        s = simplify(e)
        assert s == exptrigsimp(e)
        assert valid(s, 2*sinh(a))
def test_issue_2827_trigsimp_methods():
    """trigsimp honors a custom `measure`, works on non-Expr Basic args
    (matrices), and exptrigsimp handles the E atom, not only exp()."""
    measure1 = lambda expr: len(str(expr))
    measure2 = lambda expr: -count_ops(expr)
    # Return the most complicated result
    expr = (x + 1)/(x + sin(x)**2 + cos(x)**2)
    ans = Matrix([1])
    M = Matrix([expr])
    assert trigsimp(M, method='fu', measure=measure1) == ans
    assert trigsimp(M, method='fu', measure=measure2) != ans
    # all methods should work with Basic expressions even if they
    # aren't Expr
    M = Matrix.eye(1)
    assert all(trigsimp(M, method=m) == M for m in
        'fu matching groebner old'.split())
    # watch for E in exptrigsimp, not only exp()
    eq = 1/sqrt(E) + E
    assert exptrigsimp(eq) == eq
def test_powsimp_on_numbers():
    """Powers of numbers auto-combine: 2**(1/3 - 2) == 2**(1/3)/4."""
    one_third = S(1)/3
    assert 2**(one_third - 2) == 2**one_third/4
| dennisss/sympy | sympy/simplify/tests/test_simplify.py | Python | bsd-3-clause | 72,312 |
import functools
import sys
import unittest
from test import test_support
from weakref import proxy
import pickle
@staticmethod
def PythonPartial(func, *args, **keywords):
    """Pure Python approximation of functools.partial().

    Wrapped in staticmethod so that assigning it as a class attribute
    (``thetype = PythonPartial``) does not turn it into a method.
    """
    def newfunc(*call_args, **call_kwargs):
        # call-time keywords override the stored ones; copy so the
        # stored dict is never mutated
        merged = keywords.copy()
        merged.update(call_kwargs)
        return func(*(args + call_args), **merged)
    # expose the same introspection attributes as functools.partial
    newfunc.func = func
    newfunc.args = args
    newfunc.keywords = keywords
    return newfunc
def capture(*args, **kw):
    """Return the positional and keyword arguments exactly as received."""
    captured = (args, kw)
    return captured
def signature(part):
    """Return a tuple capturing the full observable state of a partial."""
    state = (part.func, part.args, part.keywords, part.__dict__)
    return state
class TestPartial(unittest.TestCase):
    """Behavioral tests for functools.partial.

    ``thetype`` is the partial implementation under test; subclasses
    substitute PartialSubclass and the pure-Python PythonPartial.
    NOTE: Python 2 era code -- ``map`` is expected to return a list and
    ``str.join`` is used as an unbound method.
    """
    thetype = functools.partial
    def test_basic_examples(self):
        p = self.thetype(capture, 1, 2, a=10, b=20)
        self.assertEqual(p(3, 4, b=30, c=40),
                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
        p = self.thetype(map, lambda x: x*10)
        self.assertEqual(p([1,2,3,4]), [10, 20, 30, 40])
    def test_attributes(self):
        p = self.thetype(capture, 1, 2, a=10, b=20)
        # attributes should be readable
        self.assertEqual(p.func, capture)
        self.assertEqual(p.args, (1, 2))
        self.assertEqual(p.keywords, dict(a=10, b=20))
        # attributes should not be writable
        if not isinstance(self.thetype, type):
            # the pure-Python version does not enforce read-only attrs
            return
        self.assertRaises(TypeError, setattr, p, 'func', map)
        self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
        self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
        p = self.thetype(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')
    def test_argument_checking(self):
        self.assertRaises(TypeError, self.thetype)  # need at least a func arg
        try:
            self.thetype(2)()
        except TypeError:
            pass
        else:
            self.fail('First arg not checked for callability')
    def test_protection_of_callers_dict_argument(self):
        # a caller's dictionary should not be altered by partial
        def func(a=10, b=20):
            return a
        d = {'a':3}
        p = self.thetype(func, a=5)
        self.assertEqual(p(**d), 3)
        self.assertEqual(d, {'a':3})
        p(b=7)
        self.assertEqual(d, {'a':3})
    def test_arg_combinations(self):
        # exercise special code paths for zero args in either partial
        # object or the caller
        p = self.thetype(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(1,2), ((1,2), {}))
        p = self.thetype(capture, 1, 2)
        self.assertEqual(p(), ((1,2), {}))
        self.assertEqual(p(3,4), ((1,2,3,4), {}))
    def test_kw_combinations(self):
        # exercise special code paths for no keyword args in
        # either the partial object or the caller
        p = self.thetype(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(a=1), ((), {'a':1}))
        p = self.thetype(capture, a=1)
        self.assertEqual(p(), ((), {'a':1}))
        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
        # keyword args in the call override those in the partial object
        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
    def test_positional(self):
        # make sure positional arguments are captured correctly
        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
            p = self.thetype(capture, *args)
            expected = args + ('x',)
            got, empty = p('x')
            self.assertTrue(expected == got and empty == {})
    def test_keyword(self):
        # make sure keyword arguments are captured correctly
        for a in ['a', 0, None, 3.5]:
            p = self.thetype(capture, a=a)
            expected = {'a':a,'x':None}
            empty, got = p(x=None)
            self.assertTrue(expected == got and empty == ())
    def test_no_side_effects(self):
        # make sure there are no side effects that affect subsequent calls
        p = self.thetype(capture, 0, a=1)
        args1, kw1 = p(1, b=2)
        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
        args2, kw2 = p()
        self.assertTrue(args2 == (0,) and kw2 == {'a':1})
    def test_error_propagation(self):
        # exceptions from the wrapped callable must pass through unchanged
        def f(x, y):
            x // y
        self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
        self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
        self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
        self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)
    def test_weakref(self):
        f = self.thetype(int, base=16)
        p = proxy(f)
        self.assertEqual(f.func, p.func)
        f = None
        # with the only strong reference gone, the proxy must be dead
        self.assertRaises(ReferenceError, getattr, p, 'func')
    def test_with_bound_and_unbound_methods(self):
        data = map(str, range(10))
        join = self.thetype(str.join, '')
        self.assertEqual(join(data), '0123456789')
        join = self.thetype(''.join)
        self.assertEqual(join(data), '0123456789')
    def test_pickle(self):
        f = self.thetype(signature, 'asdf', bar=True)
        f.add_something_to__dict__ = True
        f_copy = pickle.loads(pickle.dumps(f))
        self.assertEqual(signature(f), signature(f_copy))
class PartialSubclass(functools.partial):
    """Trivial subclass used to check that subclassing keeps behavior."""
    pass
class TestPartialSubclass(TestPartial):
    """Run the full TestPartial suite against a functools.partial subclass."""
    thetype = PartialSubclass
class TestPythonPartial(TestPartial):
    """Run the TestPartial suite against the pure-Python PythonPartial."""
    thetype = PythonPartial
    # the python version isn't picklable
    def test_pickle(self): pass
class TestUpdateWrapper(unittest.TestCase):
    """Verify functools.update_wrapper copies assigned attributes and
    merges updated (dict-valued) attributes onto a wrapper function."""
    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        # Check attributes were assigned
        for name in assigned:
            self.assertTrue(getattr(wrapper, name) is getattr(wrapped, name))
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                self.assertTrue(wrapped_attr[key] is wrapper_attr[key])
    def _default_update(self):
        # build a wrapped/wrapper pair using default update_wrapper settings
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f
    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.attr, 'This is also a test')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')
    def test_no_update(self):
        # empty assigned/updated tuples must leave the wrapper untouched
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))
    def test_selective_update(self):
        # only the explicitly listed attributes are assigned/updated
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
    def test_builtin_update(self):
        # Test for bug #1576241
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
class TestWraps(TestUpdateWrapper):
    """Tests for the functools.wraps decorator, reusing check_wrapper
    from TestUpdateWrapper."""
    def _default_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f)
        def wrapper():
            pass
        self.check_wrapper(wrapper, f)
        return wrapper
    def test_default_update(self):
        wrapper = self._default_update()
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.attr, 'This is also a test')
    @unittest.skipIf(not sys.flags.optimize <= 1,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')
    def test_no_update(self):
        # wraps with empty assigned/updated must leave the wrapper untouched
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))
    def test_selective_update(self):
        # only the listed attributes are assigned/updated, even when the
        # wrapper is itself produced by another decorator
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
    """functools.reduce over lists, ranges and old-protocol sequences.

    NOTE: Python 2 code (``1L`` long literals).
    """
    def test_reduce(self):
        class Squares:
            # lazily-computed sequence of squares exposing only the old
            # __getitem__ iteration protocol
            def __init__(self, max):
                self.max = max
                self.sofar = []
            def __len__(self): return len(self.sofar)
            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]
        reduce = functools.reduce
        self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            reduce(lambda x, y: x*y, range(2,21), 1L),
            2432902008176640000L
        )
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
        self.assertRaises(TypeError, reduce)
        self.assertRaises(TypeError, reduce, 42, 42)
        self.assertRaises(TypeError, reduce, 42, 42, 42)
        self.assertEqual(reduce(42, "1"), "1")  # func is never called with one item
        self.assertEqual(reduce(42, "", "1"), "1")  # func is never called with one item
        self.assertRaises(TypeError, reduce, 42, (42, 42))
class TestCmpToKey(unittest.TestCase):
    """Tests for functools.cmp_to_key."""

    def test_cmp_to_key(self):
        # a reversed comparison function must produce a descending sort
        def mycmp(x, y):
            return y - x
        self.assertEqual(sorted(range(5), key=functools.cmp_to_key(mycmp)),
                         [4, 3, 2, 1, 0])

    def test_hash(self):
        # key-wrapper objects are deliberately unhashable
        def mycmp(x, y):
            return y - x
        key = functools.cmp_to_key(mycmp)
        k = key(10)
        # BUG FIX: was ``self.assertRaises(TypeError, hash(k))``, which
        # calls hash(k) eagerly so the TypeError escapes before
        # assertRaises can check it; pass the callable and arg separately.
        self.assertRaises(TypeError, hash, k)
class TestTotalOrdering(unittest.TestCase):
    """functools.total_ordering must derive the full comparison set from
    any single rich comparison, must not overwrite existing methods, and
    must reject classes defining no comparison at all."""
    def test_total_ordering_lt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
    def test_total_ordering_le(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
    def test_total_ordering_gt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
    def test_total_ordering_ge(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(str):
            pass
        self.assertTrue(A("a") < A("b"))
        self.assertTrue(A("b") > A("a"))
        self.assertTrue(A("a") <= A("b"))
        self.assertTrue(A("b") >= A("a"))
        self.assertTrue(A("b") <= A("b"))
        self.assertTrue(A("b") >= A("b"))
    def test_no_operations_defined(self):
        # decorating a class with no comparison methods is an error
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass
def test_main(verbose=None):
test_classes = (
TestPartial,
TestPartialSubclass,
TestPythonPartial,
TestUpdateWrapper,
TestTotalOrdering,
TestWraps,
TestReduce,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == '__main__':
test_main(verbose=True)
| antb/TPT----My-old-mod | src/python/stdlib/test/test_functools.py | Python | gpl-2.0 | 15,450 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import common_util
class SerializeAttributesTestCase(unittest.TestCase):
  """Tests common_util's attribute <-> JSON-dict (de)serialization helpers."""
  class Foo(object):
    # Simple two-attribute fixture object.
    def __init__(self, foo_fighters, whisky_bar):
      # Pylint doesn't like foo and bar, but I guess musical references are OK.
      self.foo_fighters = foo_fighters
      self.whisky_bar = whisky_bar
  def testSerialization(self):
    foo_fighters = self.Foo('1', 2)
    json_dict = common_util.SerializeAttributesToJsonDict(
        {}, foo_fighters, ['foo_fighters', 'whisky_bar'])
    self.assertDictEqual({'foo_fighters': '1', 'whisky_bar': 2}, json_dict)
    # Partial update: existing dict keys must be preserved.
    json_dict = common_util.SerializeAttributesToJsonDict(
        {'baz': 42}, foo_fighters, ['whisky_bar'])
    self.assertDictEqual({'baz': 42, 'whisky_bar': 2}, json_dict)
    # Non-existing attribute.
    with self.assertRaises(AttributeError):
      json_dict = common_util.SerializeAttributesToJsonDict(
          {}, foo_fighters, ['foo_fighters', 'whisky_bar', 'baz'])
  def testDeserialization(self):
    foo_fighters = self.Foo('hello', 'world')
    json_dict = {'foo_fighters': 12, 'whisky_bar': 42}
    # Partial: only the listed attribute is overwritten.
    foo_fighters = common_util.DeserializeAttributesFromJsonDict(
        json_dict, foo_fighters, ['foo_fighters'])
    self.assertEqual(12, foo_fighters.foo_fighters)
    self.assertEqual('world', foo_fighters.whisky_bar)
    # Complete.
    foo_fighters = common_util.DeserializeAttributesFromJsonDict(
        json_dict, foo_fighters, ['foo_fighters', 'whisky_bar'])
    self.assertEqual(42, foo_fighters.whisky_bar)
    # Non-existing attribute.
    with self.assertRaises(AttributeError):
      json_dict['baz'] = 'bad'
      foo_fighters = common_util.DeserializeAttributesFromJsonDict(
          json_dict, foo_fighters, ['foo_fighters', 'whisky_bar', 'baz'])
if __name__ == '__main__':
  unittest.main()
| axinging/chromium-crosswalk | tools/android/loading/common_util_unittest.py | Python | bsd-3-clause | 2,005 |
# Public API of this package: only WeatherReport is exported by
# "from <package> import *".
__all__ = ['WeatherReport']
| AaronGeist/Llama | biz/life/__init__.py | Python | gpl-3.0 | 28 |
import sys
import subprocess
from pybullet_utils.arg_parser import ArgParser
from pybullet_utils.logger import Logger
def main():
  """Launch the DeepMimic optimizer under mpiexec with the requested
  number of workers, forwarding all command-line arguments."""
  # Command line arguments
  args = sys.argv[1:]
  arg_parser = ArgParser()
  arg_parser.load_args(args)

  num_workers = arg_parser.parse_int('num_workers', 1)
  assert (num_workers > 0)

  Logger.print2('Running with {:d} workers'.format(num_workers))
  # Keep the human-readable command string for logging only.
  cmd = 'mpiexec -n {:d} python3 DeepMimic_Optimizer.py '.format(num_workers)
  cmd += ' '.join(args)
  Logger.print2('cmd: ' + cmd)
  # Security/robustness fix: pass an argv list with shell=False so that
  # arguments containing spaces or shell metacharacters are not
  # re-interpreted by a shell (avoids command injection).
  argv = ['mpiexec', '-n', str(num_workers),
          'python3', 'DeepMimic_Optimizer.py'] + args
  subprocess.call(argv)
  return

if __name__ == '__main__':
  main()
| MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/deep_mimic/mpi_run.py | Python | mit | 601 |
import math
import random
import sys
import time

from bzagents.bzrc import BZRC, Command
class DumbTank(object):
    """Thin wrapper pairing the BZRC server connection with one DumbAgent."""
    def __init__(self, bzrc, index):
        # index selects which of our tanks this agent controls.
        self.bzrc = bzrc
        self.agent = DumbAgent(bzrc, index)
    def tick(self, time_diff):
        # Delegate each game tick to the agent's behavior loop.
        self.agent.tick(time_diff)
class DumbAgent(object):
    """Agent that drives forward for a random 3-8s interval, then turns
    ~60 degrees counter-clockwise, firing at random 1.5-2.5s intervals.

    Requires `random` to be imported at module level (it was used but
    never imported in the original file).
    """

    def __init__(self, bzrc, index):
        self.bzrc = bzrc
        self.index = index
        self.constants = self.bzrc.get_constants()
        self.commands = []
        # Time accumulated since the last shot / last direction change.
        self.time_shooting_elapsed = 0
        self.time_moving_elapsed = 0
        self.random_shooting_interval = random.uniform(1.5, 2.5)
        self.random_moving_forward_interval = random.uniform(3, 8)
        self.is_turning = False

    def tick(self, time_diff):
        """Some time has passed; decide what to do next."""
        mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
        self.mytanks = mytanks
        self.tank = mytanks[self.index]
        self.othertanks = othertanks
        self.flags = flags
        self.shots = shots
        self.enemies = [tank for tank in othertanks if tank.color !=
                        self.constants['team']]
        self.commands = []

        # Bug fix: the method must be *called*; the original assigned the
        # bound method itself, so `shoot` was always truthy.
        shoot = self.check_for_shooting(time_diff)
        if self.is_turning or self.stop_moving_forward(time_diff):
            # Bug fix: turn_counter_clockwise was called as a bare name
            # (NameError); it is a method on self.
            self.is_turning = self.turn_counter_clockwise(self.target_angle,
                                                          shoot)
        else:
            # Bug fix: move_forward requires the shoot flag argument.
            self.move_forward(shoot)
        results = self.bzrc.do_commands(self.commands)

    def check_for_shooting(self, time_diff):
        """Return True (and re-randomize the interval) when it is time to
        fire; otherwise just accumulate elapsed time."""
        self.time_shooting_elapsed += time_diff
        if self.time_shooting_elapsed >= self.random_shooting_interval:
            self.random_shooting_interval = random.uniform(1.5, 2.5)
            self.time_shooting_elapsed = 0
            return True
        else:
            return False

    def stop_moving_forward(self, time_diff):
        """Return True when the forward interval expired, setting up a
        ~60 degree counter-clockwise turn target; otherwise False."""
        self.time_moving_elapsed += time_diff
        if self.time_moving_elapsed >= self.random_moving_forward_interval:
            self.random_moving_forward_interval = random.uniform(3, 8)
            self.time_moving_elapsed = 0
            self.target_angle = self.tank.angle + (math.pi / 3)
            # Bug fix: the flag was never assigned -- `self.is_turning`
            # on its own is a no-op expression statement.
            self.is_turning = True
            return True
        else:
            # Bug fix: this branch also returned True, so the agent could
            # never actually keep moving forward.
            return False

    def move_forward(self, shoot):
        # Bug fix: the command was built but never queued for sending.
        command = Command(self.index, 1, 0, shoot)
        self.commands.append(command)

    def turn_counter_clockwise(self, target_angle, shoot):
        """Issue an angular-velocity command toward target_angle; return
        True while still turning, False once within 0.1 rad."""
        relative_angle = self.normalize_angle(target_angle - self.tank.angle)
        command = Command(self.index, 0, 2 * relative_angle, shoot)
        # Bug fix: queue the command so do_commands() actually sends it.
        self.commands.append(command)
        if relative_angle < 0.1:
            return False
        else:
            return True

    def normalize_angle(self, angle):
        """Make any angle be between +/- pi."""
        angle -= 2 * math.pi * int(angle / (2 * math.pi))
        if angle <= -math.pi:
            angle += 2 * math.pi
        elif angle > math.pi:
            angle -= 2 * math.pi
        return angle
def main():
    """Connect to a BZRC server (host/port from argv) and run one
    DumbTank agent until interrupted.  Python 2 file (print statements)."""
    # Process CLI arguments.
    try:
        execname, host, port = sys.argv
    except ValueError:
        execname = sys.argv[0]
        print >>sys.stderr, '%s: incorrect number of arguments' % execname
        print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
        sys.exit(-1)
    # Connect.
    #bzrc = BZRC(host, int(port), debug=True)
    bzrc = BZRC(host, int(port))
    tank0 = DumbTank(bzrc, 0)
    prev_time = time.time()
    # Run the agent
    try:
        # Tick forever, passing wall-clock time elapsed since start.
        while True:
            time_diff = time.time() - prev_time
            tank0.tick(time_diff)
    except KeyboardInterrupt:
        print "Exiting due to keyboard interrupt."
        bzrc.close()
if __name__ == '__main__':
main() | craig8196/ai | src/dumb_agent.py | Python | gpl-3.0 | 3,729 |
"""
1D Math Functions.
Bruce Wernick
10 June 2021
"""
import math
__all__ = ['Quad', 'Cubic', 'PolyEval', 'PolyEvalH', 'ExpFunc',
'ModExpFunc', 'LogFunc', 'RecipLogFunc', 'VaporPressureFunc',
'PowerFunc', 'ModPowerFunc', 'ShiftPowerFunc', 'GeometricFunc',
'ModGeometricFunc', 'RootFunc', 'HoerlFunc', 'ModHoerlFunc',
'RecipFunc', 'RecipQuadFunc', 'BleasdaleFunc', 'HarrisFunc',
'ExpAssocFunc2', 'ExpAssocFunc3', 'SatGrowthFunc',
'GompertzFunc', 'LogisticFunc', 'RichardsFunc', 'MMFFunc',
'WeibullFunc', 'SinusoidalFunc', 'GaussianFunc',
'HyperbolicFunc', 'HeatCapacityFunc', 'RationalFunc']
def Quad(x,coeff):
  """Quadratic a + b*x + c*x**2, Horner form (hard coded for speed)."""
  c0, c1, c2 = coeff
  inner = c1 + x*c2
  return c0 + x*inner

def Cubic(x,coeff):
  """Cubic a + b*x + c*x**2 + d*x**3, Horner form (hard coded for speed)."""
  c0, c1, c2, c3 = coeff
  acc = c2 + x*c3
  acc = c1 + x*acc
  return c0 + x*acc

def PolyEval(x,a):
  """Polynomial a[0] + x*(a[1] + x*(a[2] ... + x*a[n-1])) via Horner."""
  acc = 0.0
  i = len(a) - 1
  while i >= 0:
    acc = a[i] + acc*x
    i -= 1
  return acc

def PolyEvalH(x,a):
  """Reversed polynomial ((a[0]*x + a[1])*x + a[2])*x + ... + a[n-1]."""
  acc = 0.0
  for coef in a[:-1]:
    acc = (acc + coef)*x
  return acc + a[-1]
def ExpFunc(x,coeff):
  """Exponential: a * exp(b*x)."""
  amp, rate = coeff
  return amp * math.exp(rate*x)

def ModExpFunc(x,coeff):
  """Modified exponential: a * exp(b/x)."""
  amp, rate = coeff
  return amp * math.exp(rate / x)

def LogFunc(x,coeff):
  """Natural-log curve: a + b*ln(x)."""
  offset, slope = coeff
  return offset + slope * math.log(x)

def RecipLogFunc(x,coeff):
  """Reciprocal log: 1 / (a + b*ln(x))."""
  offset, slope = coeff
  return 1.0 / (offset + slope * math.log(x))

def VaporPressureFunc(x,coeff):
  """Vapor-pressure form: exp(a + b/x + c*ln(x))."""
  a0, a1, a2 = coeff
  return math.exp(a0 + a1 / x + a2 * math.log(x))
def PowerFunc(x,coeff):
  """Power law: a * x**b."""
  amp, expo = coeff
  return amp * x**expo

def ModPowerFunc(x,coeff):
  """Modified power: a * b**x."""
  amp, base = coeff
  return amp * base**x

def ShiftPowerFunc(x,coeff):
  """Shifted power: a * (x - b)**c."""
  amp, shift, expo = coeff
  return amp * (x - shift)**expo

def GeometricFunc(x,coeff):
  """Geometric: a * x**(b*x)."""
  amp, rate = coeff
  return amp * x**(rate*x)

def ModGeometricFunc(x,coeff):
  """Modified geometric: a * x**(b/x)."""
  amp, rate = coeff
  return amp * x**(rate / x)

def RootFunc(x,a):
  """Root: a**(1/x)."""
  return a**(1.0 / x)
def HoerlFunc(x,coeff):
  """Hoerl curve: a * b**x * x**c."""
  a0, a1, a2 = coeff
  return a0 * (a1**x) * (x**a2)

def ModHoerlFunc(x,coeff):
  """Modified Hoerl: a * b**(1/x) * x**c."""
  a0, a1, a2 = coeff
  return a0 * (a1**(1.0 / x)) * (x**a2)

def RecipFunc(x,coeff):
  """Reciprocal linear: 1 / (a + b*x)."""
  c0, c1 = coeff
  return 1.0 / (c0 + c1*x)

def RecipQuadFunc(x,coeff):
  """Reciprocal quadratic: 1 / (a + b*x + c*x**2)."""
  c0, c1, c2 = coeff
  return 1.0 / (c0 + x*(c1 + x*c2))

def BleasdaleFunc(x,coeff):
  """Bleasdale curve: (a + b*x)**(-1/c)."""
  c0, c1, c2 = coeff
  return (c0 + c1*x)**(-1.0 / c2)

def HarrisFunc(x,coeff):
  """Harris curve: 1 / (a + (b*x)**c)."""
  c0, c1, c2 = coeff
  return 1.0 / (c0 + (c1*x)**c2)
def ExpAssocFunc2(x,coeff):
  """Exponential association (2 coeff): a * (1 - exp(-b*x))."""
  amp, rate = coeff
  return amp * (1.0 - math.exp(-rate*x))

def ExpAssocFunc3(x,coeff):
  """Exponential association (3 coeff): a * (b - exp(-c*x))."""
  amp, level, rate = coeff
  return amp * (level - math.exp(-rate*x))

def SatGrowthFunc(x,coeff):
  """Saturation growth: a*x / (b + x)."""
  amp, half = coeff
  return amp * x / (half + x)

def GompertzFunc(x,coeff):
  """Gompertz growth: a * exp(-exp(b - c*x))."""
  amp, loc, rate = coeff
  return amp * math.exp(-math.exp(loc - rate*x))

def LogisticFunc(x,coeff):
  """Logistic: a / (1 + exp(b - c*x))."""
  amp, loc, rate = coeff
  return amp / (1.0 + math.exp(loc - rate*x))

def RichardsFunc(x,coeff):
  """Richards: a / (1 + exp(b - c*x))**(1/d)."""
  amp, loc, rate, shape = coeff
  return amp / ((1.0 + math.exp(loc - rate*x))**(1.0/shape))
def MMFFunc(x,coeff):
  """MMF curve: (a*b + c*x**d) / (b + x**d)."""
  a0, a1, a2, a3 = coeff
  xp = x**a3
  return (a0*a1 + a2*xp) / (a1 + xp)

def WeibullFunc(x,coeff):
  """Weibull: a - b*exp(-c * x**d)."""
  a0, a1, a2, a3 = coeff
  return a0 - a1 * math.exp(-a2 * x**a3)

def SinusoidalFunc(x,coeff):
  """Sinusoid: a + b*cos(c*x + d)."""
  offset, amp, freq, phase = coeff
  return offset + amp * math.cos(freq*x + phase)

def GaussianFunc(x,coeff):
  """Gaussian bell: a * exp(-(x - b)**2 / (2*c**2))."""
  amp, mu, sigma = coeff
  return amp * math.exp((-(x - mu)**2) / (2.0*(sigma**2)))

def HyperbolicFunc(x,coeff):
  """Hyperbola: a + b/x."""
  offset, slope = coeff
  return offset + slope / x

def HeatCapacityFunc(x,coeff):
  """Heat capacity (inverse quadratic): a + b*x + c/x**2."""
  c0, c1, c2 = coeff
  return c0 + c1*x + c2/x**2

def RationalFunc(x,coeff):
  """Rational: (a + b*x) / (1 + c*x + d*x**2)."""
  n0, n1, d1, d2 = coeff
  return (n0 + n1*x) / (1.0 + x*(d1 + x*d2))
# ---------------------------------------------------------------------
if __name__ == '__main__':
  # Demo: evaluate every curve-fit function at a sample point and print
  # each result to four significant figures.
  x,y = 1.7, 3.4
  # y is defined but unused by the demo below.
  a,b,c,d = 0.1,0.2,0.3,0.4
  z = Quad(x,(a,b,c))
  print(f'Quadratic : {z:0.4g}')
  z = Cubic(x,(a,b,c,d))
  print(f'Cubic : {z:0.4g}')
  z = PolyEval(x,[a,b,c,d])
  print(f'Polynomial : {z:0.4g}')
  z = PolyEvalH(x,[d,c,b,a])
  print(f'Poly (rev) : {z:0.4g}')
  z = ExpFunc(x,(a,b))
  print(f'Exponential : {z:0.4g}')
  z = ModExpFunc(x,(a,b))
  print(f'Modified Exp : {z:0.4g}')
  z = LogFunc(x,(a,b))
  print(f'Logarithmic : {z:0.4g}')
  z = RecipLogFunc(x,(a,b))
  print(f'Recip Log : {z:0.4g}')
  z = VaporPressureFunc(x,(a,b,c))
  print(f'Vap Pressure : {z:0.4g}')
  z = PowerFunc(x,(a,b))
  print(f'Power : {z:0.4g}')
  z = ModPowerFunc(x,(a,b))
  print(f'Mod Power : {z:0.4g}')
  z = ShiftPowerFunc(x,(a,b,c))
  print(f'Shifted Power : {z:0.4g}')
  z = GeometricFunc(x,(a,b))
  print(f'Geometric : {z:0.4g}')
  z = ModGeometricFunc(x,(a,b))
  print(f'Mod Geom : {z:0.4g}')
  z = RootFunc(x,a)
  print(f'Root : {z:0.4g}')
  z = HoerlFunc(x,(a,b,c))
  print(f'Hoerl : {z:0.4g}')
  z = ModHoerlFunc(x,(a,b,c))
  print(f'Mod Hoerl : {z:0.4g}')
  z = RecipFunc(x,(a,b))
  print(f'Reciprocal : {z:0.4g}')
  z = RecipQuadFunc(x,(a,b,c))
  print(f'Recip Quad : {z:0.4g}')
  z = BleasdaleFunc(x,(a,b,c))
  print(f'Bleasdale : {z:0.4g}')
  z = HarrisFunc(x,(a,b,c))
  print(f'Harris : {z:0.4g}')
  z = ExpAssocFunc2(x,(a,b))
  print(f'Exp Assoc 2 : {z:0.4g}')
  z = ExpAssocFunc3(x,(a,b,c))
  print(f'Exp Assoc 3 : {z:0.4g}')
  z = SatGrowthFunc(x,(a,b))
  print(f'Sat Growth : {z:0.4g}')
  z = GompertzFunc(x,(a,b,c))
  print(f'Gompertz : {z:0.4g}')
  z = LogisticFunc(x,(a,b,c))
  print(f'Logistic : {z:0.4g}')
  z = RichardsFunc(x,(a,b,c,d))
  print(f'Richards : {z:0.4g}')
  z = MMFFunc(x,(a,b,c,d))
  print(f'MMF : {z:0.4g}')
  # NOTE(review): coefficients are passed reversed (d,c,b,a) here, unlike
  # every other call in this demo which uses (a,b,c,d) -- confirm whether
  # this is intentional or a typo.
  z = WeibullFunc(x,(d,c,b,a))
  print(f'Weibull : {z:0.4g}')
  z = SinusoidalFunc(x,(a,b,c,d))
  print(f'Sinusoidal : {z:0.4g}')
  z = GaussianFunc(x,(a,b,c))
  print(f'Gaussian : {z:0.4g}')
  z = HyperbolicFunc(x,(a,b))
  print(f'Hyperbolic : {z:0.4g}')
  z = HeatCapacityFunc(x,(a,b,c))
  print(f'Heat Capacity : {z:0.4g}')
  z = RationalFunc(x,(a,b,c,d))
  print(f'Rational : {z:0.4g}')
| bru32/magz | magz/funcs.py | Python | mit | 6,919 |
from LogReader import LogReader
import math
import json
import pickle
import numpy as np
import scipy as sp
import scipy.stats
def mean_confidence_interval(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
class TimeConfidence:
def __init__(self):
self.rdrLog = LogReader()
def confidence(self, strPath):
dataOwl = None
log = self.rdrLog.loadLog(strPath)
dataOwl = log.getOwlData()
self.tti = dataOwl["task-tree-individuals"]
owlMeta = dataOwl["metadata"]
owlAnnot = dataOwl["annotation"]
if owlMeta:
toplevel_nodes = owlMeta.subActions()
else:
print "No meta data in file!"
self.timeSpans = {}
self.findTimeSpansPerTask(toplevel_nodes)
for ctx in self.timeSpans:
print ctx, mean_confidence_interval(self.timeSpans[ctx])
def findTimeSpansPerTask(self, nodes):
for node in nodes:
owlNode = self.tti[node]
ctx = owlNode.taskContext()
if not ctx in self.timeSpans:
self.timeSpans[ctx] = []
self.timeSpans[ctx].append(owlNode.time())
self.findTimeSpansPerTask(owlNode.subActions())
| airballking/semrec | scripts/bstools/Beliefstate Tools/TimeConfidence.py | Python | bsd-2-clause | 1,442 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AssertProperIterableTest(tf.test.TestCase):
def test_single_tensor_raises(self):
tensor = tf.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(tensor)
def test_single_sparse_tensor_raises(self):
ten = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(ten)
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(array)
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(mystr)
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
tf.assert_proper_iterable(non_iterable)
def test_list_does_not_raise(self):
list_of_stuff = [tf.constant([11, 22]), tf.constant([1, 2])]
tf.assert_proper_iterable(list_of_stuff)
def test_generator_does_not_raise(self):
generator_of_stuff = (tf.constant([11, 22]), tf.constant([1, 2]))
tf.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_equal(small, big)]):
out = tf.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
small_2 = tf.constant([1, 2], name="small_2")
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
small_2 = tf.constant([1, 1], name="small_2")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies(
[tf.assert_less(small, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_less(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_less_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_less_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_doesnt_raise_when_less_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertNegativeTest(tf.test.TestCase):
def test_doesnt_raise_when_negative(self):
with self.test_session():
frank = tf.constant([-1, -2], name="frank")
with tf.control_dependencies([tf.assert_negative(frank)]):
out = tf.identity(frank)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
doug = tf.constant([1, 2], name="doug")
with tf.control_dependencies([tf.assert_negative(doug, message="fail")]):
out = tf.identity(doug)
with self.assertRaisesOpError("fail.*doug"):
out.eval()
def test_raises_when_zero(self):
with self.test_session():
claire = tf.constant([0], name="claire")
with tf.control_dependencies([tf.assert_negative(claire)]):
out = tf.identity(claire)
with self.assertRaisesOpError("claire"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertPositiveTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
freddie = tf.constant([-1, -2], name="freddie")
with tf.control_dependencies(
[tf.assert_positive(freddie, message="fail")]):
out = tf.identity(freddie)
with self.assertRaisesOpError("fail.*freddie"):
out.eval()
def test_doesnt_raise_when_positive(self):
with self.test_session():
remmy = tf.constant([1, 2], name="remmy")
with tf.control_dependencies([tf.assert_positive(remmy)]):
out = tf.identity(remmy)
out.eval()
def test_raises_when_zero(self):
with self.test_session():
meechum = tf.constant([0], name="meechum")
with tf.control_dependencies([tf.assert_positive(meechum)]):
out = tf.identity(meechum)
with self.assertRaisesOpError("meechum"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertRankTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(
ValueError, "fail.*my_tensor.*must have rank 1"):
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_raises_if_rank_is_not_scalar_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
tf.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
def test_raises_if_rank_is_not_integer_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
tf.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_ten_doesnt_raise_raise_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_ten_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(tf.test.TestCase):
  """Tests for tf.assert_non_negative."""

  def test_raises_when_negative(self):
    with self.test_session():
      values = tf.constant([-1, -2], name="zoe")
      assertion = tf.assert_non_negative(values)
      with tf.control_dependencies([assertion]):
        checked = tf.identity(values)
      with self.assertRaisesOpError("zoe"):
        checked.eval()

  def test_doesnt_raise_when_zero_and_positive(self):
    with self.test_session():
      values = tf.constant([0, 2], name="lucas")
      assertion = tf.assert_non_negative(values)
      with tf.control_dependencies([assertion]):
        tf.identity(values).eval()

  def test_empty_tensor_doesnt_raise(self):
    # Vacuously true: "every element x_i satisfies x_i >= 0" holds for a
    # tensor with no elements at all (standard set theory).
    with self.test_session():
      values = tf.constant([], name="empty")
      assertion = tf.assert_non_negative(values)
      with tf.control_dependencies([assertion]):
        tf.identity(values).eval()
class AssertNonPositiveTest(tf.test.TestCase):
  """Tests for tf.assert_non_positive."""

  def test_doesnt_raise_when_zero_and_negative(self):
    with self.test_session():
      values = tf.constant([0, -2], name="tom")
      assertion = tf.assert_non_positive(values)
      with tf.control_dependencies([assertion]):
        tf.identity(values).eval()

  def test_raises_when_positive(self):
    with self.test_session():
      values = tf.constant([0, 2], name="rachel")
      assertion = tf.assert_non_positive(values)
      with tf.control_dependencies([assertion]):
        checked = tf.identity(values)
      with self.assertRaisesOpError("rachel"):
        checked.eval()

  def test_empty_tensor_doesnt_raise(self):
    # Vacuously true: "every element x_i satisfies x_i <= 0" holds for a
    # tensor with no elements at all (standard set theory).
    with self.test_session():
      values = tf.constant([], name="empty")
      assertion = tf.assert_non_positive(values)
      with tf.control_dependencies([assertion]):
        tf.identity(values).eval()
class AssertIntegerTest(tf.test.TestCase):
  """Tests for tf.assert_integer."""

  def test_doesnt_raise_when_integer(self):
    with self.test_session():
      values = tf.constant([1, 2], name="integers")
      with tf.control_dependencies([tf.assert_integer(values)]):
        tf.identity(values).eval()

  def test_raises_when_float(self):
    with self.test_session():
      values = tf.constant([1.0, 2.0], name="floats")
      # The dtype is known statically, so the error is raised at graph
      # construction time as a TypeError.
      with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
        tf.assert_integer(values)
class IsStrictlyIncreasingTest(tf.test.TestCase):
  """Tests for tf.is_strictly_increasing."""

  def _eval(self, values):
    # Helper: evaluate is_strictly_increasing on values within the caller's
    # session.
    return tf.is_strictly_increasing(values).eval()

  def test_constant_tensor_is_not_strictly_increasing(self):
    with self.test_session():
      self.assertFalse(self._eval([1, 1, 1]))

  def test_decreasing_tensor_is_not_strictly_increasing(self):
    with self.test_session():
      self.assertFalse(self._eval([1, 0, -1]))

  def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
    with self.test_session():
      self.assertFalse(self._eval([[1, 3], [2, 4]]))

  def test_increasing_tensor_is_increasing(self):
    with self.test_session():
      self.assertTrue(self._eval([1, 2, 3]))

  def test_increasing_rank_two_tensor(self):
    with self.test_session():
      self.assertTrue(self._eval([[-1, 2], [3, 4]]))

  def test_tensor_with_one_element_is_strictly_increasing(self):
    with self.test_session():
      self.assertTrue(self._eval([1]))

  def test_empty_tensor_is_strictly_increasing(self):
    with self.test_session():
      self.assertTrue(self._eval([]))
class IsNonDecreasingTest(tf.test.TestCase):
  """Tests for tf.is_non_decreasing."""

  def _eval(self, values):
    # Helper: evaluate is_non_decreasing on values within the caller's
    # session.
    return tf.is_non_decreasing(values).eval()

  def test_constant_tensor_is_non_decreasing(self):
    with self.test_session():
      self.assertTrue(self._eval([1, 1, 1]))

  def test_decreasing_tensor_is_not_non_decreasing(self):
    with self.test_session():
      self.assertFalse(self._eval([3, 2, 1]))

  def test_2d_decreasing_tensor_is_not_non_decreasing(self):
    with self.test_session():
      self.assertFalse(self._eval([[1, 3], [2, 4]]))

  def test_increasing_rank_one_tensor_is_non_decreasing(self):
    with self.test_session():
      self.assertTrue(self._eval([1, 2, 3]))

  def test_increasing_rank_two_tensor(self):
    with self.test_session():
      self.assertTrue(self._eval([[-1, 2], [3, 3]]))

  def test_tensor_with_one_element_is_non_decreasing(self):
    with self.test_session():
      self.assertTrue(self._eval([1]))

  def test_empty_tensor_is_non_decreasing(self):
    with self.test_session():
      self.assertTrue(self._eval([]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| rew4332/tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | Python | apache-2.0 | 24,539 |
# -*- coding: utf-8 -*-
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
import os
sys.path.append(os.environ['CUON_PATH'])
from cuon.Databases.SingleData import SingleData
import logging
import threading
class SingleWebshop(SingleData):
    """Table wrapper for the ``webshop`` table (defined in tables.dbd)."""

    def __init__(self, allTables):
        SingleData.__init__(self)
        # Name of the backing database table.
        self.sNameOfTable = "webshop"
        self.xmlTableDef = 0
        self.loadTable(allTables)
        # Column labels and widths shown in list views.
        self.listHeader['names'] = ['table', 'myName', 'ID']
        self.listHeader['size'] = [25, 25, 10]
        # Debug output: report how many columns were loaded.
        self.out("number of Columns ")
        self.out(len(self.table.Columns))
| CuonDeveloper/cuon | cuon_client/CUON/cuon/WebShop/SingleWebshop.py | Python | gpl-3.0 | 1,550 |
import sys, os
## make sure this pyqtgraph is importable before any others
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pyqtgraph.Qt import QtCore, QtGui
from exampleLoaderTemplate import Ui_Form
import os, sys
from collections import OrderedDict
# Registry of examples shown in the browser tree.  Keys are the labels
# displayed in the tree; values are either a script path (relative to this
# directory) or a nested OrderedDict forming a sub-menu.
examples = OrderedDict([
    ('Command-line usage', 'CLIexample.py'),
    ('Basic Plotting', 'Plotting.py'),
    ('ImageView', 'ImageView.py'),
    ('ParameterTree', '../parametertree'),
    ('Crosshair / Mouse interaction', 'crosshair.py'),
    ('Video speed test', 'VideoSpeedTest.py'),
    ('Plot speed test', 'PlotSpeedTest.py'),
    ('Data Slicing', 'DataSlicing.py'),
    ('GraphicsItems', OrderedDict([
        ('Scatter Plot', 'ScatterPlot.py'),
        #('PlotItem', 'PlotItem.py'),
        ('IsocurveItem', 'isocurve.py'),
        ('ImageItem - video', 'ImageItem.py'),
        ('ImageItem - draw', 'Draw.py'),
        ('Region-of-Interest', 'ROItypes.py'),
        ('GraphicsLayout', 'GraphicsLayout.py'),
        ('Text Item', 'text.py'),
        ('Linked Views', 'linkedViews.py'),
        ('Arrow', 'Arrow.py'),
        ('ViewBox', 'ViewBox.py'),
    ])),
    ('3D Graphics', OrderedDict([
        ('Volumetric', 'GLVolumeItem.py'),
        ('Isosurface', 'GLMeshItem.py'),
    ])),
    ('Widgets', OrderedDict([
        ('PlotWidget', 'PlotWidget.py'),
        #('SpinBox', '../widgets/SpinBox.py'),
        ('TreeWidget', '../widgets/TreeWidget.py'),
        ('DataTreeWidget', '../widgets/DataTreeWidget.py'),
        ('GradientWidget', '../widgets/GradientWidget.py'),
        #('TableWidget', '../widgets/TableWidget.py'),
        ('ColorButton', '../widgets/ColorButton.py'),
        #('CheckTable', '../widgets/CheckTable.py'),
        #('VerticalLabel', '../widgets/VerticalLabel.py'),
        ('JoystickButton', 'JoystickButton.py'),
    ])),
    ('GraphicsScene', 'GraphicsScene.py'),
    ('Flowcharts', 'Flowchart.py'),
    #('Canvas', '../canvas'),
    #('MultiPlotWidget', 'MultiPlotWidget.py'),
])

# Directory containing this loader; example paths are resolved against it.
path = os.path.abspath(os.path.dirname(__file__))
class ExampleLoader(QtGui.QMainWindow):
    """Main window listing all examples.  Selecting an example shows its
    source code; the load button (or a double-click) runs it in a separate
    interpreter process."""

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.ui = Ui_Form()
        self.cw = QtGui.QWidget()
        self.setCentralWidget(self.cw)
        self.ui.setupUi(self.cw)

        global examples
        self.populateTree(self.ui.exampleTree.invisibleRootItem(), examples)
        self.ui.exampleTree.expandAll()

        self.resize(900, 500)
        self.show()
        self.ui.splitter.setSizes([150, 750])
        self.ui.loadBtn.clicked.connect(self.loadFile)
        self.ui.exampleTree.currentItemChanged.connect(self.showFile)
        self.ui.exampleTree.itemDoubleClicked.connect(self.loadFile)

    def populateTree(self, root, examples):
        """Recursively build tree items from the examples registry."""
        for key, val in examples.iteritems():
            item = QtGui.QTreeWidgetItem([key])
            if isinstance(val, basestring):
                # Leaf entry: remember the script path on the item itself.
                item.file = val
            else:
                self.populateTree(item, val)
            root.addChild(item)

    def currentFile(self):
        """Return the absolute path of the selected example, or None."""
        item = self.ui.exampleTree.currentItem()
        if hasattr(item, 'file'):
            global path
            return os.path.join(path, item.file)
        return None

    def loadFile(self):
        """Launch the selected example in a new interpreter process."""
        fn = self.currentFile()
        if fn is None:
            return
        if sys.platform.startswith('win'):
            # Quote the path on Windows in case it contains spaces.
            os.spawnl(os.P_NOWAIT, sys.executable, sys.executable, '"' + fn + '"')
        else:
            os.spawnl(os.P_NOWAIT, sys.executable, sys.executable, fn)

    def showFile(self):
        """Display the source code of the selected example."""
        fn = self.currentFile()
        if fn is None:
            self.ui.codeView.clear()
            return
        if os.path.isdir(fn):
            fn = os.path.join(fn, '__main__.py')
        # Fix: close the file promptly instead of leaking the handle
        # (previously: text = open(fn).read()).
        with open(fn) as f:
            text = f.read()
        self.ui.codeView.setPlainText(text)
def run():
    """Create the Qt application, open the example browser and enter the
    event loop."""
    app = QtGui.QApplication([])
    # Keep a reference so the window is not garbage-collected.
    window = ExampleLoader()
    app.exec_()
# Allow the example browser to be started directly.
if __name__ == '__main__':
    run()
| robertsj/poropy | pyqtgraph/examples/__main__.py | Python | mit | 4,039 |
from ._utils import TestCase
from .. import views
class DummyObject(object):
    """Inert base class that accepts and ignores any constructor
    arguments; subclasses exist purely for identity checks in tests."""

    def __init__(self, *args, **kwargs):
        del args, kwargs  # accepted and deliberately ignored
class TestableForm(DummyObject):
    # Stand-in form class assigned as the default form_class in the tests.
    pass
class CustomForm(DummyObject):
    # Alternate form class used to verify explicit overrides are honored.
    pass
class TestableInlineFormset(DummyObject):
    # Stand-in default inline formset class for InlineFormsetMixin tests.
    pass
class CustomFormset(DummyObject):
    # Alternate formset class used to verify explicit overrides are honored.
    pass
class FormMixinTestCase(TestCase):
    """Tests for views.FormMixin's form-class resolution and context data."""

    mixin_class = views.FormMixin

    def get_testable_mixin(self):
        # Build a mixin instance wired with a known form class and a
        # throwaway GET request.
        mixin = self.mixin_class()
        mixin.form_class = TestableForm
        mixin.request = self.factory.get("/any/url/works")
        return mixin

    def test_get_context_data_adds_default_form(self):
        context_data = self.get_testable_mixin().get_context_data()
        self.assertTrue("form" in context_data)
        self.assertEqual(context_data["form"].__class__, TestableForm)

    def test_get_context_data_uses_provided_form_if_available(self):
        # An explicitly provided form instance must win over form_class.
        context_data = self.get_testable_mixin().get_context_data(
            form=CustomForm())
        self.assertEqual(context_data["form"].__class__, CustomForm)

    def test_get_form_class_returns_form_class_property(self):
        mixin = self.get_testable_mixin()
        self.assertFalse(mixin.get_form_class() is None, msg="Sanity check")
        self.assertEqual(mixin.form_class, mixin.get_form_class())

    def test_get_form_uses_form_class_by_default(self):
        form = self.get_testable_mixin().get_form()
        self.assertEqual(form.__class__, TestableForm)

    def test_get_form_uses_provided_form_class(self):
        form = self.get_testable_mixin().get_form(form_class=CustomForm)
        self.assertEqual(form.__class__, CustomForm)
class InlineFormsetMixinTestCase(FormMixinTestCase):
    """Tests for views.InlineFormsetMixin.

    Inherits the FormMixin tests, so the formset-specific cases below run
    in addition to all form-class resolution checks.
    """

    mixin_class = views.InlineFormsetMixin

    def get_testable_mixin(self):
        # Extend the parent fixture with a known inline formset class.
        mixin = super(InlineFormsetMixinTestCase, self).get_testable_mixin()
        mixin.inline_formset_class = TestableInlineFormset
        return mixin

    def test_get_inline_formset_uses_default_class(self):
        formset = self.get_testable_mixin().get_inline_formset()
        self.assertEqual(formset.__class__, TestableInlineFormset)

    def test_get_inline_formset_with_provided_formset_class(self):
        formset = self.get_testable_mixin().get_inline_formset(CustomFormset)
        self.assertEqual(formset.__class__, CustomFormset)

    def test_inline_formset_added_to_context(self):
        context_data = self.get_testable_mixin().get_context_data()
        self.assertTrue("inline_formset" in context_data)
        self.assertEqual(context_data["inline_formset"].__class__,
            TestableInlineFormset)

    def test_get_context_data_uses_provided_inline_formset_if_available(self):
        # An explicitly provided formset instance must win over the default.
        context_data = self.get_testable_mixin().get_context_data(
            inline_formset=CustomFormset())
        self.assertEqual(context_data["inline_formset"].__class__,
            CustomFormset)
| tswicegood/cbv_utils | cbv_utils/tests/views.py | Python | apache-2.0 | 2,922 |
from smartmin.views import SmartCRUDL, SmartCreateView, SmartListView, SmartUpdateView
from django import forms
from dash.orgs.views import OrgPermsMixin, OrgObjPermsMixin
from .models import Category, CategoryImage
class CategoryImageForm(forms.ModelForm):
    """ModelForm for CategoryImage that limits the selectable categories to
    those belonging to the caller's org (passed in via the ``org`` kwarg)."""

    # Placeholder queryset (intentionally empty: no id can be <= -1); it is
    # replaced in __init__ once the org is known.
    category = forms.ModelChoiceField(Category.objects.filter(id__lte=-1))

    def __init__(self, *args, **kwargs):
        # 'org' is not a ModelForm kwarg, so pop it before delegating.
        self.org = kwargs.pop('org')
        super(CategoryImageForm, self).__init__(*args, **kwargs)
        self.fields['category'].queryset = Category.objects.filter(org=self.org)

    class Meta:
        model = CategoryImage
        fields = ('is_active', 'name', 'category', 'image')
class CategoryCRUDL(SmartCRUDL):
    """CRUD views for Category; non-superusers are restricted to their org."""

    model = Category
    actions = ('create', 'update', 'list')

    class Update(OrgObjPermsMixin, SmartUpdateView):
        fields = ('is_active', 'name')

    class List(OrgPermsMixin, SmartListView):
        def derive_fields(self):
            # Superusers additionally see which org each category belongs to.
            if self.request.user.is_superuser:
                return ('name', 'modified_on', 'created_on', 'org')
            return ('name', 'modified_on', 'created_on')

        def get_queryset(self, **kwargs):
            # Restrict the listing to the requesting user's org.
            queryset = super(CategoryCRUDL.List, self).get_queryset(**kwargs)
            queryset = queryset.filter(org=self.derive_org())
            return queryset

    class Create(OrgPermsMixin, SmartCreateView):
        def derive_fields(self):
            # Only superusers may choose the org explicitly.
            if self.request.user.is_superuser:
                return ('name', 'org')
            return ('name', )

        def pre_save(self, obj):
            obj = super(CategoryCRUDL.Create, self).pre_save(obj)
            # Non-superusers always create categories within their own org.
            if not self.get_user().is_superuser:
                org = self.derive_org()
                if org:
                    obj.org = org
            return obj
class CategoryImageCRUDL(SmartCRUDL):
    """CRUD views for CategoryImage; org scoping goes through the image's
    parent category."""

    model = CategoryImage
    actions = ('create', 'update', 'list')

    class Update(OrgObjPermsMixin, SmartUpdateView):
        form_class = CategoryImageForm
        fields = ('is_active', 'name', 'category', 'image')

        def get_object_org(self):
            # Images have no org of their own; use the parent category's.
            return self.get_object().category.org

        def get_form_kwargs(self):
            # Pass the request's org through to CategoryImageForm.
            kwargs = super(CategoryImageCRUDL.Update, self).get_form_kwargs()
            kwargs['org'] = self.request.org
            return kwargs

    class List(OrgPermsMixin, SmartListView):
        fields = ('name', 'category', 'modified_on', 'created_on')

        def get_queryset(self, **kwargs):
            # Restrict the listing to images whose category is in this org.
            queryset = super(CategoryImageCRUDL.List, self).get_queryset(**kwargs)
            queryset = queryset.filter(category__org=self.derive_org())
            return queryset

    class Create(OrgPermsMixin, SmartCreateView):
        form_class = CategoryImageForm
        fields = ('name', 'category', 'image')

        def get_form_kwargs(self):
            # Pass the request's org through to CategoryImageForm.
            kwargs = super(CategoryImageCRUDL.Create, self).get_form_kwargs()
            kwargs['org'] = self.request.org
            return kwargs
| peterayeni/dash | dash/categories/views.py | Python | bsd-3-clause | 3,019 |
#!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run, PIPE, DEVNULL

from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
	"""Exercise CoinDaemon opt/flag handling; return the daemons created."""
	d = CoinDaemon('eth')
	vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
	vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
	vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])

	def gen():
		# Each case: (constructor opts, constructor flags, expected values)
		for opts,flags,val in (
			(None,None, vals(False,False,False)),
			(None,['keep_cfg_file'], vals(False,False,True)),
			(['online'],['keep_cfg_file'], vals(True,False,True)),
			(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
		):
			d = CoinDaemon('eth',opts=opts,flags=flags)
			assert d.flag.keep_cfg_file == val.keep_cfg_file
			assert d.opt.online == val.online
			assert d.opt.no_daemonize == val.no_daemonize
			# Toggling a set flag off and back on must be accepted.
			d.flag.keep_cfg_file = not val.keep_cfg_file
			d.flag.keep_cfg_file = val.keep_cfg_file
			yield d

	return tuple(gen())
def test_flags_err(ut,d):
	"""Verify that invalid opt/flag usage raises the expected errors.

	*d* is the tuple of daemons returned by test_flags(); each bad* closure
	triggers one specific misuse.
	"""
	def bad1(): d[0].flag.foo = False
	def bad2(): d[0].opt.foo = False
	def bad3(): d[0].opt.no_daemonize = True
	def bad4(): d[0].flag.keep_cfg_file = 'x'
	def bad5(): d[0].opt.no_daemonize = 'x'
	def bad6(): d[0].flag.keep_cfg_file = False
	def bad7(): d[1].flag.keep_cfg_file = True
	# (label, expected exception name, expected message substring, action)
	ut.process_bad_data((
		('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
		('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
		('opt (2)', 'AttributeError', 'is read-only', bad3 ),
		('flag (2)', 'AssertionError', 'not boolean', bad4 ),
		('opt (3)', 'AttributeError', 'is read-only', bad5 ),
		('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
		('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
	))
# Daemon IDs excluded from the command tests below (see test_cmds).
# NOTE(review): the name suggests these lack ARM builds -- confirm.
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
	"""Run daemon operation *op* ('print', 'check', 'start' or 'stop') for
	every supported coin/daemon-id/network combination."""
	network_ids = CoinDaemon.get_network_ids()
	import mmgen.daemon as daemon_mod
	# 'print' mode is exercised both with and without test-suite mode.
	for test_suite in [True,False] if op == 'print' else [True]:
		vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
		for coin,data in CoinDaemon.coins.items():
			for daemon_id in data.daemon_ids:
				if daemon_id in arm_skip_daemons:
					continue
				for network in data.networks:
					# Skip non-Bitcoin daemons when altcoin deps are disabled.
					if opt.no_altcoin_deps and coin != 'BTC':
						continue
					d = CoinDaemon(
						proto=init_proto(coin=coin,network=network),
						daemon_id = daemon_id,
						test_suite = test_suite )
					if op == 'print':
						# Just display the start commands.
						for cmd in d.start_cmds:
							vmsg(' '.join(cmd))
					elif op == 'check':
						# Verify the daemon executable is present and runnable.
						try:
							cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
						except:
							die(2,f'Unable to execute {d.exec_fn}')
						if cp.returncode:
							die(2,f'Unable to execute {d.exec_fn}')
						else:
							# Report the first line of the help output.
							vmsg('{:16} {}'.format(
								d.exec_fn+':',
								cp.stdout.decode().splitlines()[0] ))
					else:
						if opt.quiet:
							msg_r('.')
						# Prefer a clean RPC shutdown when available.
						if op == 'stop' and hasattr(d,'rpc'):
							run_session(d.rpc.stop_daemon(quiet=opt.quiet))
						else:
							getattr(d,op)(silent=opt.quiet)
class unit_tests:
	"""Subtest group for the Daemon class (invoked by the unit-test runner).

	Each method receives the subtest name and the unit-test framework
	object and returns True on success.
	"""

	# These subtests are skipped on Windows.
	win_skip = ('start','status','stop')

	def flags(self,name,ut):
		# Flag/opt handling, then the corresponding error cases.
		qmsg_r('Testing flags and opts...')
		vmsg('')
		daemons = test_flags()
		qmsg('OK')
		qmsg_r('Testing error handling for flags and opts...')
		vmsg('')
		test_flags_err(ut,daemons)
		qmsg('OK')
		return True

	def cmds(self,name,ut):
		qmsg_r('Testing start commands for coin daemons...')
		vmsg('')
		test_cmds('print')
		qmsg('OK')
		return True

	def exec(self,name,ut):
		qmsg_r('Testing availability of coin daemons...')
		vmsg('')
		test_cmds('check')
		qmsg('OK')
		return True

	def start(self,name,ut):
		msg_r('Starting coin daemons...')
		qmsg('')
		test_cmds('start')
		msg('OK')
		return True

	def status(self,name,ut):
		msg_r('Checking status of coin daemons...')
		qmsg('')
		# NOTE(review): invokes 'start', not a 'status' op -- presumably
		# CoinDaemon.start() reports status when the daemon is already
		# running; confirm this is intentional.
		test_cmds('start')
		msg('OK')
		return True

	def stop(self,name,ut):
		msg_r('Stopping coin daemons...')
		qmsg('')
		test_cmds('stop')
		msg('OK')
		return True
| mmgen/mmgen | test/unit_tests_d/ut_daemon.py | Python | gpl-3.0 | 4,118 |
from .signature import Signature
class Statement(object):
    """A single utterance in a conversation.

    The ``in_response_to`` keyword argument seeds the list of known
    responses; all other keyword arguments are stored verbatim in
    ``extra_data`` as opaque caller-supplied metadata.
    """

    def __init__(self, text, **kwargs):
        self.text = text
        # pop() replaces the old get()/del dance in one step.
        self.in_response_to = kwargs.pop("in_response_to", [])
        self.extra_data = dict(kwargs)

    def __str__(self):
        return self.text

    def __repr__(self):
        return "<Statement text:%s>" % (self.text)

    def __eq__(self, other):
        if not other:
            return False

        if isinstance(other, Statement):
            return self.text == other.text

        # Allow direct comparison against plain strings.
        return self.text == other

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep != consistent with ==.
        return not self.__eq__(other)

    def add_extra_data(self, key, value):
        """Attach a single metadata key/value pair."""
        self.extra_data[key] = value

    def add_response(self, response):
        """Add *response* to the response list if it is not already there;
        otherwise increment the existing entry's occurrence count."""
        for existing in self.in_response_to:
            if existing.text == response.text:
                existing.occurrence += 1
                return
        self.in_response_to.append(response)

    def remove_response(self, response_text):
        """Remove the response whose text equals *response_text*.

        Returns True if a response was removed, False otherwise.
        """
        for response in self.in_response_to:
            if response_text == response.text:
                self.in_response_to.remove(response)
                return True
        return False

    def get_response_count(self, statement):
        """Return the recorded occurrence count for *statement*, or 0 if it
        is not a known response."""
        for response in self.in_response_to:
            if statement.text == response.text:
                return response.occurrence
        return 0

    def serialize(self):
        """Return a dictionary representation of the current object."""
        data = {}
        data["text"] = self.text
        data["in_response_to"] = [
            response.serialize() for response in self.in_response_to
        ]
        # Metadata keys are merged in last and may shadow the above.
        data.update(self.extra_data)
        return data
class Response(object):
    """A possible reply to a Statement, with an occurrence counter and the
    signatures of the users who contributed it."""

    def __init__(self, text, **kwargs):
        self.text = text
        # Number of times this response has been recorded.
        self.occurrence = kwargs.get("occurrence", 1)
        # Signature objects identifying who said this.
        self.signatures = kwargs.get("signatures", [])

    def __str__(self):
        return self.text

    def __repr__(self):
        return "<Response text:%s>" % (self.text)

    def __eq__(self, other):
        if not other:
            return False

        if isinstance(other, Response):
            return self.text == other.text

        # Allow direct comparison against plain strings.
        return self.text == other

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep != consistent with ==.
        return not self.__eq__(other)

    def add_signature(self, signature):
        """Record a signature for this response."""
        self.signatures.append(signature)

    def serialize(self):
        """Return a dictionary representation of the current object."""
        data = {}
        data["text"] = self.text
        data["occurrence"] = self.occurrence
        data["signature"] = [
            signature.serialize() for signature in self.signatures
        ]
        return data
| imminent-tuba/thesis | server/chatterbot/chatterbot/conversation/statement.py | Python | mit | 3,111 |
"""urlconf for the base application"""
from django.conf.urls import url, patterns
from base.views import HomePageView
# Route the site root to the class-based homepage view.
# NOTE(review): patterns() is deprecated/removed in newer Django releases --
# confirm the project's Django version before modernizing to a plain list.
urlpatterns = patterns('base.views',
    url(r'^$', HomePageView.as_view(), name='home'),
)
| zenweasel/cashflow2 | base/urls.py | Python | bsd-3-clause | 214 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from translator.hot.syntax.hot_resource import HotResource
# Name used by the translator framework to dynamically load the mapping
# class defined below.
TARGET_CLASS_NAME = 'ToscaPaypalPizzaStore'
class ToscaPaypalPizzaStore(HotResource):
    '''Translate TOSCA type tosca.nodes.WebApplication.PayPalPizzaStore.'''

    toscatype = 'tosca.nodes.WebApplication.PayPalPizzaStore'

    def __init__(self, nodetemplate):
        # Redundant trailing 'pass' removed; the base handles everything.
        super(ToscaPaypalPizzaStore, self).__init__(nodetemplate)

    def handle_properties(self):
        # No properties require translation for this node type.
        pass
| obulpathi/cloud-translator | transformer/custom/hot/tosca_paypalpizzastore.py | Python | apache-2.0 | 1,050 |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferScene
class FilterPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget for a scene filter plug.  Shows a menu button for
	adding/removing/linking a Filter node, and embeds a bare NodeUI for the
	currently connected filter node underneath it."""

	def __init__( self, plug, **kw ) :

		# Column holds the header row plus (optionally) the filter node UI.
		self.__column = GafferUI.ListContainer()

		GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )

		row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
		label = GafferUI.LabelPlugValueWidget(
			plug,
			horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right,
			verticalAlignment = GafferUI.Label.VerticalAlignment.Top,
		)
		label.label()._qtWidget().setMinimumWidth( GafferUI.PlugWidget.labelWidth() )
		row.append( label )

		self.__menuButton = GafferUI.MenuButton()
		self.__menuButton.setMenu( GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ) )
		row.append( self.__menuButton )

		# Stretchy spacer keeps the menu button left-aligned.
		row.append( GafferUI.Spacer( IECore.V2i( 1 ), IECore.V2i( 100000, 1 ) ), expand = True )

		self.__column.append( row )

		self._updateFromPlug()

	def hasLabel( self ) :

		# We draw our own label in the header row above.
		return True

	## Must be implemented by subclasses so that the widget reflects the current
	# status of the plug.
	def _updateFromPlug( self ) :

		thisNode = self.getPlug().node()
		filterNode = self.__filterNode()

		# update the selection menu text
		if filterNode is None :
			self.__menuButton.setText( "Add..." )
		elif filterNode.parent().isSame( thisNode ) :
			self.__menuButton.setText( filterNode.getName() )
		else :
			# Filter lives elsewhere in the graph; show a path relative to
			# the common ancestor node.
			self.__menuButton.setText(
				filterNode.relativeName(
					filterNode.commonAncestor( thisNode, Gaffer.Node.staticTypeId() ),
				)
			)

		# update the filter node ui
		if filterNode is None :
			del self.__column[1:]
		else :
			filterNodeUI = None
			if len( self.__column ) > 1 :
				filterNodeUI = self.__column[1]
			# Rebuild the embedded UI only when the filter node changed.
			if filterNodeUI is None or not filterNodeUI.node().isSame( filterNode ) :
				filterNodeUI = GafferUI.StandardNodeUI( filterNode, displayMode = GafferUI.StandardNodeUI.DisplayMode.Bare )
				if len( self.__column ) > 1 :
					self.__column[1] = filterNodeUI
				else :
					self.__column.append( filterNodeUI )

	def __filterNode( self ) :

		# Returns the connected filter node, or None when nothing is wired
		# into the plug.
		input = self.getPlug().getInput()
		if input is None :
			return None

		return input.node()

	def __removeFilter( self ) :

		filterNode = self.__filterNode()
		filterNode.parent().removeChild( filterNode )

	def __addFilter( self, filterType ) :

		# Create a node of the requested type, parent it beside this node
		# and connect its "match" output to our plug (undoable as one step).
		filterNode = filterType()
		with Gaffer.UndoContext( self.getPlug().node().scriptNode() ) :
			self.getPlug().node().parent().addChild( filterNode )
			self.getPlug().setInput( filterNode["match"] )

		# position the node appropriately.
		## \todo In an ideal world the GraphGadget would do this
		# without prompting.
		scriptWindow = self.ancestor( GafferUI.ScriptWindow )
		if scriptWindow is not None :
			nodeGraphs = scriptWindow.getLayout().editors( GafferUI.NodeGraph )
			if nodeGraphs :
				graphGadget = nodeGraphs[0].graphGadget()
				graphGadget.getLayout().positionNode( graphGadget, filterNode )

	def __linkFilter( self ) :

		## \todo Implement browsing to other nodes with existing filters
		pass

	def __menuDefinition( self ) :

		filterNode = self.__filterNode()

		result = IECore.MenuDefinition()
		if filterNode is not None :
			result.append( "/Remove", { "command" : Gaffer.WeakMethod( self.__removeFilter ) } )
			result.append( "/RemoveDivider", { "divider" : True } )

		# One menu entry per concrete Filter subclass.
		for filterType in GafferScene.Filter.__subclasses__() :
			result.append( "/" + filterType.staticTypeName().rpartition( ":" )[2], { "command" : IECore.curry( Gaffer.WeakMethod( self.__addFilter ), filterType ) } )

		result.append( "/AddDivider", { "divider" : True } )
		# Linking is not implemented yet, hence inactive.
		result.append( "/Link...", { "command" : Gaffer.WeakMethod( self.__linkFilter ), "active" : False } )

		return result
| davidsminor/gaffer | python/GafferSceneUI/FilterPlugValueWidget.py | Python | bsd-3-clause | 5,666 |
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2012, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import shutil
import unittest
from stratuslab.messaging.MsgClientFactory import getMsgClient
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.messaging.Defaults import MSG_CLIENTS
class MsgClientTest(unittest.TestCase):
    """Smoke tests for the messaging client factory."""

    def setUp(self):
        # Minimal config: an on-disk queue directory plus a dummy endpoint.
        self.ch = ConfigHolder()
        self.temp_dir = tempfile.mkdtemp()
        self.ch.set('msg_queue', self.temp_dir)
        self.ch.set('msg_endpoint', 'foo:1234')

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def testGetMsgClient(self):
        # The factory must construct a client for every registered type.
        for msg_type in MSG_CLIENTS.keys():
            self.ch.set('msg_type', msg_type)
            getMsgClient(self.ch)

    def testSendImplemented(self):
        # Only verifies that send() is implemented on every client type;
        # delivery failures are expected (the endpoint is fake) and are
        # deliberately swallowed by the broad except below.
        for msg_type in MSG_CLIENTS.keys():
            self.ch.set('msg_type', msg_type)
            client = getMsgClient(self.ch)
            try:
                client.send("message")
            except NotImplementedError:
                self.fail("send() should be implemented on '%s'." % msg_type)
            except Exception:
                pass
| StratusLab/client | api/code/src/test/python/messaging/MsgClientTest.py | Python | apache-2.0 | 1,821 |
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
import unittest
# Related third party imports
# Local application/library specific imports
from py_vollib.black.greeks import analytical as c_analytical
from py_vollib.black.greeks import numerical as c_numerical
from py_vollib.ref_python.black.greeks import analytical as py_analytical
from py_vollib.ref_python.black.greeks import numerical as py_numerical
from tests.test_utils import almost_equal
class TestGreeks(unittest.TestCase):
    """Verify that py_vollib's accelerated greeks agree with the pure-Python
    reference implementation, for both the analytical and numerical flavors,
    for puts and calls alike.

    The ten near-identical test bodies have been factored into a single
    private helper; the public test method names are unchanged.
    """

    # Shared Black-model parameters used by every comparison.
    F = 100      # underlying futures price
    K = 90       # strike price
    sigma = .2   # volatility
    r = .02      # risk-free interest rate
    t = .5       # time to expiry in years

    def _assert_greeks_match(self, c_mod, py_mod, greek):
        """Assert that *greek* from the two modules agrees for 'p' and 'c'."""
        for flag in ('p', 'c'):
            c_val = getattr(c_mod, greek)(flag, self.F, self.K, self.t, self.r, self.sigma)
            py_val = getattr(py_mod, greek)(flag, self.F, self.K, self.t, self.r, self.sigma)
            self.assertTrue(almost_equal(c_val, py_val))

    def test_analytical_delta(self):
        self._assert_greeks_match(c_analytical, py_analytical, 'delta')

    def test_analytical_gamma(self):
        self._assert_greeks_match(c_analytical, py_analytical, 'gamma')

    def test_analytical_rho(self):
        self._assert_greeks_match(c_analytical, py_analytical, 'rho')

    def test_analytical_theta(self):
        self._assert_greeks_match(c_analytical, py_analytical, 'theta')

    def test_analytical_vega(self):
        self._assert_greeks_match(c_analytical, py_analytical, 'vega')

    def test_numerical_delta(self):
        self._assert_greeks_match(c_numerical, py_numerical, 'delta')

    def test_numerical_gamma(self):
        self._assert_greeks_match(c_numerical, py_numerical, 'gamma')

    def test_numerical_rho(self):
        self._assert_greeks_match(c_numerical, py_numerical, 'rho')

    def test_numerical_theta(self):
        self._assert_greeks_match(c_numerical, py_numerical, 'theta')

    def test_numerical_vega(self):
        self._assert_greeks_match(c_numerical, py_numerical, 'vega')
# Allow running this test module directly with `python test_greeks.py`.
if __name__ == '__main__':
    unittest.main()
| vollib/py_vollib | tests/test_vollib_vs_ref_python_values/black/test_greeks.py | Python | mit | 5,684 |
from flask.ext import restful
from person import People
def setup_api_routes(app):
    """Register the REST API resources on the given Flask application."""
    # NOTE(review): the 'flask.ext' import namespace used at the top of this
    # file was removed in Flask 1.0; switch to `import flask_restful`
    # before upgrading Flask -- confirm with the project's Flask version.
    restful.Api(app).add_resource(People, '/api/people/')
from typing import Union, Optional
class BetfairError(Exception):
    """Root of the betfairlightweight exception hierarchy."""
class PasswordError(BetfairError):
    """Raised when no password can be found for a username."""

    def __init__(self, username: str):
        super().__init__(username)
        self.username = username

    def __str__(self):
        return f"Password not found in .bashprofile for {self.username}, add or pass to APIClient"
class AppKeyError(BetfairError):
    """Raised when no app key can be found for a username."""

    def __init__(self, username: str):
        super().__init__(username)
        self.username = username

    def __str__(self):
        return f"AppKey not found in .bashprofile for {self.username}, add or pass to APIClient"
class CertsError(BetfairError):
    """Raised when SSL certs cannot be found.

    The message is optional.  Previously ``str(CertsError())`` raised
    ``TypeError`` because ``__str__`` returned ``None``; it now returns an
    empty string in that case.
    """

    def __init__(self, message: str = None):
        super().__init__(message)
        self.message = message

    def __str__(self):
        # __str__ must return a str even when no message was supplied.
        return self.message or ""
class StatusCodeError(BetfairError):
    """Raised on an unexpected HTTP status code."""

    def __init__(self, status_code: str):
        super().__init__(status_code)
        self.status_code = status_code

    def __str__(self):
        return f"Status code error: {self.status_code}"
class InvalidResponse(BetfairError):
    """Raised when betfair returns a response that cannot be processed."""

    def __init__(self, response: Union[dict, list]):
        super().__init__(response)
        self.response = response

    def __str__(self):
        return f"Invalid response received: {self.response}"
class LoginError(BetfairError):
    """Raised when login does not yield a session token."""

    def __init__(self, response: dict):
        super().__init__(response)
        self.response = response

    def __str__(self):
        # Interactive login failures report under 'error' rather than
        # 'loginStatus'.
        status = self.response.get("loginStatus")
        if status is None:
            status = self.response.get("error", "UNKNOWN")
        return f"API login: {status}"
class KeepAliveError(BetfairError):
    """Raised when the keep-alive call fails."""

    def __init__(self, response: dict):
        super().__init__(response)
        self.response = response

    def __str__(self):
        status = self.response.get("status", "UNKNOWN")
        error = self.response.get("error")
        return f"API keepAlive {status}: {error}"
class APIError(BetfairError):
    """Raised when an API call returns an error payload or raises."""

    def __init__(
        self,
        response: Optional[dict],
        method: str = None,
        params: dict = None,
        exception: Exception = None,
    ):
        super().__init__(response, method, params, exception)
        self.response = response
        self.method = method
        self.params = params
        self.exception = exception

    def __str__(self):
        if not self.response:
            return f"{self.method} \nParams: {self.params} \nException: {self.exception}"
        return (
            f"{self.method} \nParams: {self.params} \nException: {self.exception} "
            f"\nError: {self.response.get('error')} \nFull Response: {self.response}"
        )
class LogoutError(BetfairError):
    """Raised when the logout call fails."""

    def __init__(self, response: dict):
        super().__init__(response)
        self.response = response

    def __str__(self):
        status = self.response.get("status", "UNKNOWN")
        error = self.response.get("error")
        return f"API logout {status}: {error}"
class SocketError(BetfairError):
    """Raised on a streaming-socket failure."""

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class ListenerError(BetfairError):
    """Raised when the stream listener cannot process received data."""

    def __init__(self, connection_id: str, data: str):
        super().__init__(connection_id, data)
        self.connection_id = connection_id
        self.data = data

    def __str__(self):
        return f"connection_id: {self.connection_id}, data: {self.data}"
class CacheError(BetfairError):
    """Raised on an error in the market/order cache."""

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class RaceCardError(BetfairError):
    """Raised on an error with a race card request."""

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
| liampauling/betfairlightweight | betfairlightweight/exceptions.py | Python | mit | 5,411 |
"""
Load geopotential heights/pt/sp hum and plot
22/05/14
"""
import os, sys
import numpy as np
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
#c_section_lon=74.
c_section_lat=0
c_lon_min=75.
c_lon_max=85.
gap=1.
diags=['408', 'temp', 'sp_hum']
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'djzns', 'dkjxq' ]
experiment_ids = ['djzny', 'djznq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'djzns']
p_levels = [1000., 950., 925., 850., 700., 500., 400., 300., 250., 200., 150., 100., 70., 50., 30., 20., 10.]
for c_section_lon in np.arange(c_lon_min,c_lon_max, gap):
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
for diag in diags:
if diag=='408':
clevpt_min=600.
clevpt_max=700.
if diag=='temp':
clevpt_min=298.
clevpt_max=330.
if diag=='sp_hum':
clevpt_min=0.
clevpt_max=0.02
if c_section_lon!=0:
data=np.load('/nfs/a90/eepdw/Data/EMBRACE/Cross_Sections/%s_%s_height_XC_Longitude_%s.npz' % (experiment_id, diag, c_section_lon))
xc=data['xc']
coords=data['coord']
if c_section_lat!=0:
data=np.load('/nfs/a90/eepdw/Data/EMBRACE/Cross_Sections/%s_%s_height_XC_Latitude_%s.npz' % (experiment_id, diag, c_section_lat))
xc=data['xc']
coords=data['coord']
X,Y = np.meshgrid(coords,p_levels)
print xc
print xc.shape
#print X
#print Y
print coords
print p_levels[::-1]
# grid the data.
#zi = griddata(x,y,z,xi,yi,interp='linear')
fig=plt.figure(figsize=(8,10))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
if diag=='408':
plt.title('%s - Geopotential Height' % experiment_id)
CS = ax.contourf(X,Y,np.swapaxes(xc,0,1), np.linspace(clevpt_min, clevpt_max, 256), cmap=plt.cm.jet)
#CS = ax.contourf(X,Y,np.swapaxes(xc,0,1), np.linspace(300, 500, 8), cmap=plt.cm.jet)
cbar = plt.colorbar(CS,orientation='horizontal', format='${%d}$')
cbar.set_label('${K}$')
if diag=='temp':
plt.title('%s - Potential Temperature' % experiment_id)
CS = ax.contourf(X,Y,np.swapaxes(xc,0,1), np.linspace(clevpt_min, clevpt_max, 256), cmap=plt.cm.jet)
#CS = ax.contourf(X,Y,np.swapaxes(xc,0,1), np.linspace(300, 500, 8), cmap=plt.cm.jet)
cbar = plt.colorbar(CS,orientation='horizontal', format='${%d}$')
cbar.set_label('${K}$')
if diag=='sp_hum':
plt.title('%s - Specific Humidity' % experiment_id)
CS = ax.contourf(X,Y,np.swapaxes(xc,0,1), np.linspace(clevpt_min, clevpt_max, 256), cmap=plt.cm.jet_r)
cbar = plt.colorbar(CS,orientation='horizontal', format='${%.3f}$')
cbar.set_label('${kg/kg}$')
#CS = ax.contour(X,Y,np.swapaxes(xc,0,1), np.linspace(clevpt_min, clevpt_max, 10), colors='k')
plt.ylim([950,850])
plt.xlim([20,40])
plt.gca().invert_yaxis
#plt.clabel(CS, fontsize=9, inline=1)
ax.xaxis.set_major_formatter(FormatStrFormatter('${%d}$'))
ax.yaxis.set_major_formatter(FormatStrFormatter('${%d}$'))
plt.show()
if c_section_lon!=0:
plt.xlabel('Latitude')
#plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Cross_Sections/%s_%s_height_XC_Longitude_%s.png' % (experiment_id, diag, c_section_lon), bbox_inches='tight')
if c_section_lat!=0:
plt.xlabel('Longitude')
#plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Cross_Sections/%s_%s_height_XC_Latitude_%s.png' % (experiment_id, diag, c_section_lat), bbox_inches='tight')
plt.close()
| peterwilletts24/Python-Scripts | vertical_cross_sections/pressure_heights_plot_multiple.py | Python | mit | 4,160 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring
"""
This sample "listen to directory". move the new file and print it,
using docker-containers.
The following operators are being used: DockerOperator,
BashOperator & ShortCircuitOperator.
TODO: Review the workflow, change it accordingly to
your environment & enable the code.
"""
from datetime import timedelta
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import ShortCircuitOperator
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.utils.dates import days_ago
# Common task defaults applied to every task in this DAG.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": days_ago(2),
    "email": ["[email protected]"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}

# Sample pipeline: watch a directory, move the newest file with a docker
# container, then print it from another container.  Runs every 10 minutes.
dag = DAG("docker_sample_copy_data", default_args=default_args, schedule_interval=timedelta(minutes=10))

# Shell snippet that waits briefly, then prints the name of the first
# regular file found in the source directory (empty output when none).
locate_file_cmd = """
sleep 10
find {{params.source_location}} -type f -printf "%f\n" | head -1
"""

# Pushes the located file name to XCom (do_xcom_push=True) for the
# downstream availability check and move tasks.
t_view = BashOperator(
    task_id="view_file",
    bash_command=locate_file_cmd,
    do_xcom_push=True,
    params={"source_location": "/your/input_dir/path"},
    dag=dag,
)
def is_data_available(*args, **kwargs):
    """Return True when the 'view_file' task found a file name.

    Used by the ShortCircuitOperator: a falsy return (empty XCom value,
    i.e. no file found) skips the remaining tasks in the DAG.
    """
    ti = kwargs["ti"]
    data = ti.xcom_pull(key=None, task_ids="view_file")
    # Idiomatic comparison; equivalent to the original `not data == ""`.
    return data != ""
# Short-circuits the DAG (skips all downstream tasks) when no file name
# was found by the 'view_file' task.
t_is_data_available = ShortCircuitOperator(
    task_id="check_if_data_available", python_callable=is_data_available, dag=dag
)

# Moves the located file from the input to the output directory inside a
# centos container; both host directories are bind-mounted into the
# container.  The echoed target path is pushed to XCom for the print task.
t_move = DockerOperator(
    api_version="1.19",
    docker_url="tcp://localhost:2375",  # replace it with swarm/docker endpoint
    image="centos:latest",
    network_mode="bridge",
    volumes=[
        "/your/host/input_dir/path:/your/input_dir/path",
        "/your/host/output_dir/path:/your/output_dir/path",
    ],
    command=[
        "/bin/bash",
        "-c",
        "/bin/sleep 30; "
        "/bin/mv {{params.source_location}}/{{ ti.xcom_pull('view_file') }} {{params.target_location}};"
        "/bin/echo '{{params.target_location}}/{{ ti.xcom_pull('view_file') }}';",
    ],
    task_id="move_data",
    do_xcom_push=True,
    params={"source_location": "/your/input_dir/path", "target_location": "/your/output_dir/path"},
    dag=dag,
)

# Prints the moved file's contents; the path comes from the 'move_data'
# task's XCom value.
print_templated_cmd = """
cat {{ ti.xcom_pull('move_data') }}
"""

t_print = DockerOperator(
    api_version="1.19",
    docker_url="tcp://localhost:2375",
    image="centos:latest",
    volumes=["/your/host/output_dir/path:/your/output_dir/path"],
    command=print_templated_cmd,
    task_id="print",
    dag=dag,
)

# Task ordering: view_file -> check_if_data_available -> move_data -> print
t_view.set_downstream(t_is_data_available)
t_is_data_available.set_downstream(t_move)
t_move.set_downstream(t_print)
| spektom/incubator-airflow | airflow/providers/docker/example_dags/example_docker_copy_data.py | Python | apache-2.0 | 3,520 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
    """Register the Imperial Nova Trooper cadet spawn template."""
    template = MobileTemplate()
    template.setCreatureName('stormtrooper_novatrooper_cadet')
    template.setLevel(80)
    template.setDifficulty(Difficulty.ELITE)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(True)
    template.setScale(1)
    template.setSocialGroup("imperial")
    template.setAssistRange(8)
    template.setStalker(False)
    template.setFaction("imperial")
    template.setFactionStatus(FactionStatus.Combatant)

    # Appearance variants: black stormtrooper armour with coloured trim.
    appearances = Vector()
    for colour in ('black', 'blue', 'gold', 'green', 'grey', 'red', 'white'):
        appearances.add('object/mobile/shared_dressed_stormtrooper_black_%s.iff' % colour)
    template.setTemplates(appearances)

    # Standard-issue E-11 carbine.
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy'))
    template.setWeaponTemplateVector(weapons)

    # No special attacks; falls back to the default ranged shot.
    attacks = Vector()
    template.setDefaultAttack('rangedShot')
    template.setAttacks(attacks)
    core.spawnService.addMobileTemplate('novatrooper_cadet', template)
    return
from kaneda import Metrics
from . import mark_benchmark
@mark_benchmark
class TestBenchmarksBackends(object):
    """Benchmark Metrics.gauge() against each storage backend."""

    def test_benchmark_elasticsearch(self, elasticsearch_backend, benchmark):
        metrics = Metrics(backend=elasticsearch_backend)
        benchmark(metrics.gauge, 'benchmark_elasticsearch', 1)

    def test_benchmark_mongo(self, mongo_backend, benchmark):
        metrics = Metrics(backend=mongo_backend)
        benchmark(metrics.gauge, 'benchmark_mongo', 1)

    def test_benchmark_rethink(self, rethink_backend, benchmark):
        metrics = Metrics(backend=rethink_backend)
        # Bug fix: this benchmark previously reported itself under the
        # copy-pasted metric name 'benchmark_mongo'.
        benchmark(metrics.gauge, 'benchmark_rethink', 1)
| APSL/kaneda | tests/integration/benchmarks/test_backends.py | Python | mit | 652 |
#==============================================================================
# post_client_model_content_example.py
# Python script that tests creating and posting content files under a client model
# using Exosite's provision device management system.
#
#==============================================================================
## Tested with python 2.6.5
##
## Copyright (c) 2014, Exosite LLC
## All rights reserved.
##
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of Exosite LLC nor the names of its contributors may
## be used to endorse or promote products derived from this software
## without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
"""
Directions:
1) Have an Exosite Whitelabel / Vendor account (<vendor>.exosite.com)
2) Add a Client Model using whitelabel admin tools or API
3) Add the Vendor Token below where it says 'VENDOR_TOKEN_HERE'.
4) Add Client Model id where is says 'CLIENT_MODEL_ID_HERE'.
5) Run this python script in Python 2.6.5 or higher.
6) Assuming your computer has an active network connection, you should see new
content posted under your client model.
"""
import socket
import sys
import ssl
import urllib
import time
CLIENT_MODEL = 'CLIENT_MODEL_ID_HERE'
VENDOR_TOKEN = 'VENDOR_TOKEN_HERE'
CONTENT_ID = 'testfile' + str(int(time.time())) #create new file each time for debug purposes
CONTENT_META = 'Test File' #a description of the file, has no specific use or function
CONTENT_BLOB = 'content of testfile.\r\n' #actual content
print '========================================================================'
print 'POST CONTENT TO EXOSITE CLIENT MODEL - VENDOR DEVICE MANAGEMENT DEMO'
print '========================================================================'
print '\r\n'
print '=================='
print 'GET CONTENT FOR THIS CLIENT MODEL: ' + CLIENT_MODEL
print '=================='
request_packet = ''
request_packet += 'GET /provision/manage/content/' + CLIENT_MODEL +'/' + ' HTTP/1.1\r\n'
request_packet += 'Host: m2.exosite.com\r\n'
request_packet += 'X-Exosite-Token: '+VENDOR_TOKEN+'\r\n'
request_packet += 'Connection: Close \r\n'
request_packet += '\r\n' # Must have blank line here
print '--REQUEST:-----------------------'
print str(request_packet)
print '---------------------------------\r\n'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_s = ssl.wrap_socket(s)
ssl_s.connect(('m2.exosite.com', 443))
ssl_s.send(request_packet)
data = ssl_s.recv(1024)
ssl_s.close()
# URL DECODE - If required
data = urllib.unquote_plus(data) # library specific to python
print '--RESPONSE:----------------------'
print str(data),
print '---------------------------------'
print '=================='
print 'CREATE CONTENT ID: ' + CONTENT_ID
print '=================='
content = 'id='+ urllib.quote_plus(CONTENT_ID) +'&meta=' + urllib.quote_plus(CONTENT_META) + '&protected=false'
request_packet = ''
request_packet += 'POST /provision/manage/content/' + CLIENT_MODEL +'/' + ' HTTP/1.1\r\n'
request_packet += 'Host: m2.exosite.com\r\n'
request_packet += 'X-Exosite-Token: '+VENDOR_TOKEN+'\r\n'
request_packet += 'Connection: Close \r\n'
request_packet += 'Content-Type: application/x-www-form-urlencoded; charset=utf-8\r\n'
request_packet += 'Content-Length: '+ str(len(content)) +'\r\n'
request_packet += '\r\n' # Must have blank line here
request_packet += content
print '--REQUEST:-----------------------'
print str(request_packet)
print '---------------------------------\r\n'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_s = ssl.wrap_socket(s)
ssl_s.connect(('m2.exosite.com', 443))
ssl_s.send(request_packet)
data = ssl_s.recv(1024)
ssl_s.close()
# URL DECODE - If required
data = urllib.unquote_plus(data) # library specific to python
print '--RESPONSE:----------------------'
print str(data),
print '---------------------------------'
print '(Note: You should see a response of "HTTP/1.1 205 Reset Content" if this works correctly)'
print '=================='
print 'UPLOAD CONTENT BLOB for ' + CONTENT_ID
print '=================='
content = (CONTENT_BLOB)
request_packet = ''
request_packet += 'POST /provision/manage/content/' + CLIENT_MODEL + '/'+ CONTENT_ID + ' HTTP/1.1\r\n'
request_packet += 'Host: m2.exosite.com\r\n'
request_packet += 'X-Exosite-Token: '+VENDOR_TOKEN+'\r\n'
request_packet += 'Connection: Close \r\n'
request_packet += 'Content-Type: text/plain\r\n'
request_packet += 'Content-Length: '+ str(len(content)) +'\r\n'
request_packet += '\r\n' # Must have blank line here
request_packet += content # Must be same size as Content-Length specified
print '--REQUEST:-----------------------'
print str(request_packet)
print '---------------------------------\r\n'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_s = ssl.wrap_socket(s)
ssl_s.connect(('m2.exosite.com', 443))
ssl_s.send(request_packet)
data = ssl_s.recv(1024)
ssl_s.close()
# URL DECODE - If required
data = urllib.unquote_plus(data) # library specific to python
print '--RESPONSE:----------------------'
print str(data),
print '---------------------------------'
print '(Note: You should see a response of "HTTP/1.1 205 Reset Content" if this works correctly)'
print '\r\n\r\n'
print '=================='
print 'GET CONTENT AGAIN FOR THIS CLIENT MODEL: ' + CLIENT_MODEL
print '=================='
request_packet = ''
request_packet += 'GET /provision/manage/content/' + CLIENT_MODEL +'/' + ' HTTP/1.1\r\n'
request_packet += 'Host: m2.exosite.com\r\n'
request_packet += 'X-Exosite-Token: '+VENDOR_TOKEN+'\r\n'
request_packet += 'Connection: Close \r\n'
request_packet += '\r\n' # Must have blank line here
print '--REQUEST:-----------------------'
print str(request_packet)
print '---------------------------------\r\n'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_s = ssl.wrap_socket(s)
ssl_s.connect(('m2.exosite.com', 443))
ssl_s.send(request_packet)
data = ssl_s.recv(1024)
ssl_s.close()
# URL DECODE - If required
data = urllib.unquote_plus(data) # library specific to python
print '--RESPONSE:----------------------'
print str(data),
print '---------------------------------'
| exosite-garage/utility_scripts | upload_content_for_client_model_example.py | Python | bsd-3-clause | 7,436 |
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_labels
version_added: '2.4'
short_description: Create, Update or Destroy GCE Labels.
description:
    - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
      When specifying the GCE resource, users may specify the full URL for
      the resource (its 'self_link'), or the individual parameters of the
      resource (type, location, name). Examples for the two options can be
      seen in the documentation.
See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
more information about GCE Labels. Labels are gradually being added to
more GCE resources, so this module will need to be updated as new
resources are added to the GCE (v1) API.
requirements:
- 'python >= 2.6'
- 'google-api-python-client >= 1.6.2'
- 'google-auth >= 1.0.0'
- 'google-auth-httplib2 >= 0.0.2'
notes:
- Labels support resources such as instances, disks, images, etc. See
U(https://cloud.google.com/compute/docs/labeling-resources) for the list
of resources available in the GCE v1 API (not alpha or beta).
author:
- 'Eric Johnson (@erjohnso) <[email protected]>'
options:
labels:
description:
- A list of labels (key/value pairs) to add or remove for the resource.
required: false
resource_url:
description:
- The 'self_link' for the resource (instance, disk, snapshot, etc)
required: false
resource_type:
description:
- The type of resource (instances, disks, snapshots, images)
required: false
resource_location:
description:
- The location of resource (global, us-central1-f, etc.)
required: false
resource_name:
description:
- The name of resource.
required: false
'''
EXAMPLES = '''
- name: Add labels on an existing instance (using resource_url)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: present
- name: Add labels on an image (using resource params)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_type: images
resource_location: global
resource_name: my-custom-image
state: present
- name: Remove specified labels from the GCE instance
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
environment: prod
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: absent
'''
RETURN = '''
labels:
description: List of labels that exist on the resource.
returned: Always.
type: dict
sample: [ { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' } ]
resource_url:
description: The 'self_link' of the GCE resource.
returned: Always.
type: str
sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
resource_type:
description: The type of the GCE resource.
returned: Always.
type: str
sample: instances
resource_location:
description: The location of the GCE resource.
returned: Always.
type: str
sample: us-central1-f
resource_name:
description: The name of the GCE resource.
returned: Always.
type: str
sample: my-happy-little-instance
state:
description: state of the labels
returned: Always.
type: str
sample: present
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_params, get_google_api_client, GCPUtils
UA_PRODUCT = 'ansible-gce_labels'  # user-agent product string sent to GCE
UA_VERSION = '0.0.1'  # module version reported in the user-agent
GCE_API_VERSION = 'v1'  # this module targets the stable v1 API only

# TODO(all): As Labels are added to more GCE resources, this list will need to
# be updated (along with some code changes below). The list can *only* include
# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
def _fetch_resource(client, module):
    """Look up the target GCE resource and return its current label state.

    Returns a tuple ``(label_fingerprint, info_dict)`` where *info_dict*
    carries the resource's name, self-link URL, type, location and current
    labels.  Fails the module on a malformed ``resource_url``, missing
    resource params, or an unsupported resource type.
    """
    params = module.params
    if params['resource_url']:
        # Derive type/location/name from the self_link URL.  Global
        # resources (e.g. images) have only two trailing path components;
        # zonal ones have three (location/type/name).
        if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
            module.fail_json(
                msg='Invalid self_link url: %s' % params['resource_url'])
        else:
            parts = params['resource_url'].split('/')[8:]
            if len(parts) == 2:
                resource_type, resource_name = parts
                resource_location = 'global'
            else:
                resource_location, resource_type, resource_name = parts
    else:
        # Individual params path: all three must be supplied together.
        if not params['resource_type'] or not params['resource_location'] \
                or not params['resource_name']:
            module.fail_json(msg='Missing required resource params.')
        resource_type = params['resource_type'].lower()
        resource_name = params['resource_name'].lower()
        resource_location = params['resource_location'].lower()
        if resource_type not in KNOWN_RESOURCES:
            module.fail_json(msg='Unsupported resource_type: %s' % resource_type)

    # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
    # added to the v1 GCE API for more resources, some minor code work will
    # need to be added here.
    # Each resource type uses a differently-named keyword argument for the
    # resource name, so the calls cannot be collapsed into a single one.
    if resource_type == 'instances':
        resource = client.instances().get(project=params['project_id'],
                                          zone=resource_location,
                                          instance=resource_name).execute()
    elif resource_type == 'disks':
        resource = client.disks().get(project=params['project_id'],
                                      zone=resource_location,
                                      disk=resource_name).execute()
    elif resource_type == 'snapshots':
        resource = client.snapshots().get(project=params['project_id'],
                                          snapshot=resource_name).execute()
    elif resource_type == 'images':
        resource = client.images().get(project=params['project_id'],
                                       image=resource_name).execute()
    else:
        module.fail_json(msg='Unsupported resource type: %s' % resource_type)

    # labelFingerprint is required by setLabels for optimistic concurrency
    # control; empty string when the resource reports none.
    return resource.get('labelFingerprint', ''), {
        'resource_name': resource.get('name'),
        'resource_url': resource.get('selfLink'),
        'resource_type': resource_type,
        'resource_location': resource_location,
        'labels': resource.get('labels', {})
    }
def _set_labels(client, new_labels, module, ri, fingerprint):
    """Apply *new_labels* to the resource described by *ri*.

    *fingerprint* is the labelFingerprint from the preceding read; the API
    uses it to detect concurrent label modifications.  Returns a tuple
    ``(True, None)`` after issuing the request; fails the module for
    unsupported resource types.
    """
    params = module.params
    result = err = None
    labels = {
        'labels': new_labels,
        'labelFingerprint': fingerprint
    }

    # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
    # added to the v1 GCE API for more resources, some minor code work will
    # need to be added here.
    # As in _fetch_resource, each resource type takes differently-named
    # keyword arguments, so the branches cannot be merged.
    if ri['resource_type'] == 'instances':
        req = client.instances().setLabels(project=params['project_id'],
                                           instance=ri['resource_name'],
                                           zone=ri['resource_location'],
                                           body=labels)
    elif ri['resource_type'] == 'disks':
        req = client.disks().setLabels(project=params['project_id'],
                                       zone=ri['resource_location'],
                                       resource=ri['resource_name'],
                                       body=labels)
    elif ri['resource_type'] == 'snapshots':
        req = client.snapshots().setLabels(project=params['project_id'],
                                           resource=ri['resource_name'],
                                           body=labels)
    elif ri['resource_type'] == 'images':
        req = client.images().setLabels(project=params['project_id'],
                                        resource=ri['resource_name'],
                                        body=labels)
    else:
        module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])

    # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
    # method to poll for the async request/operation to complete before
    # returning. However, during 'beta', we are in an odd state where
    # API requests must be sent to the 'compute/beta' API, but the python
    # client library only allows for *Operations.get() requests to be
    # sent to 'compute/v1' API. The response operation is in the 'beta'
    # API-scope, but the client library cannot find the operation (404).
    # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
    # return result, err
    result = req.execute()
    return True, err
def main():
    """Ansible entry point: reconcile a GCE resource's labels with the
    desired state and exit with the module's JSON result."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['absent', 'present'], default='present'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(),
            credentials_file=dict(),
            labels=dict(required=False, type='dict', default={}),
            resource_url=dict(required=False, type='str'),
            resource_name=dict(required=False, type='str'),
            resource_location=dict(required=False, type='str'),
            resource_type=dict(required=False, type='str'),
            project_id=dict()
        ),
        required_together=[
            ['resource_name', 'resource_location', 'resource_type']
        ],
        mutually_exclusive=[
            ['resource_url', 'resource_name'],
            ['resource_url', 'resource_location'],
            ['resource_url', 'resource_type']
        ]
    )
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    client, cparams = get_google_api_client(module, 'compute',
                                            user_agent_product=UA_PRODUCT,
                                            user_agent_version=UA_VERSION,
                                            api_version=GCE_API_VERSION)
    # Current resource state, including the labelFingerprint required by
    # the setLabels API for optimistic concurrency.
    fingerprint, resource_info = _fetch_resource(client, module)
    desired = module.params['labels']
    labels = resource_info['labels'].copy()
    needs_update = False
    if module.params['state'] == 'absent':
        # Remove only exactly-matching key/value pairs; a present key with
        # a different value is treated as an error.
        for key, value in desired.items():
            if key not in labels:
                continue
            if labels[key] != value:
                module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (key, value))
            needs_update = True
            labels.pop(key, None)
    else:
        # NOTE(review): only missing keys are added here; an existing key
        # whose value differs is left untouched -- confirm this matches the
        # intended 'present' semantics.
        for key, value in desired.items():
            if key not in labels:
                needs_update = True
                labels[key] = value
    changed = False
    json_output = {'state': module.params['state']}
    if needs_update:
        changed, err = _set_labels(client, labels, module, resource_info,
                                   fingerprint)
    json_output['changed'] = changed
    # TODO(erjohnso): probably want to re-fetch the resource to return the
    # new labelFingerprint, check that desired labels match updated labels.
    # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
    # GCPUtils feature to poll for the operation to be complete. For now,
    # we'll just update the output with what we have from the original
    # state of the resource.
    json_output.update(resource_info)
    json_output.update(module.params)
    module.exit_json(**json_output)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| bearstech/ansible | lib/ansible/modules/cloud/google/gce_labels.py | Python | gpl-3.0 | 12,673 |
#!/usr/bin/env python
"""
Test module for fixed time stepping (serial)
This module solves equations of the form
.. _math::
u_t + \nabla \cdot \left( u \mathbf{v} - a(x) \nabla u \right) = 0
"""
from __future__ import absolute_import
import pytest
from proteus.iproteus import *
from proteus import Comm
from proteus.defaults import (load_physics as load_p,
load_numerics as load_n,
System_base as So)
# Global test configuration: initialize the (serial) communicator and set
# profiling verbosity used by all tests in this module.
comm = Comm.get()
Profiling.logLevel=7
Profiling.verbose=False
from petsc4py import PETSc
import numpy as np
import numpy.testing as npt
# Support running both as a package module (pytest) and as a plain script.
try:
    from . import ladr_2d_p
    from . import ladr_2d_n
except:
    import ladr_2d_p
    import ladr_2d_n
import os
# Directory containing this file; used to locate the p/n problem modules.
modulepath = os.path.dirname(os.path.abspath(__file__))
def test_minModelStep_stepExactTrue():
    """MinModelStep controller with systemStepExact=True.

    The solver must land exactly on every output level in tnList (40 solver
    steps for this CFL), and the archived times must equal tnList itself.
    """
    pList = [load_p('ladr_2d_p', modulepath)]
    nList = [load_n('ladr_2d_n', modulepath)]
    so = So()
    so.name = pList[0].name = "ladr"
    so.tnList = nList[0].tnList
    so.systemStepControllerType = SplitOperator.Sequential_MinModelStep
    so.systemStepExact = True
    so.sList = [default_s]
    opts.logLevel = 7
    opts.verbose = True
    opts.profile = True
    opts.gatherArchive = True
    nList[0].runCFL = 0.33
    nList[0].linearSolver = default_n.LU
    nList[0].multilevelLinearSolver = default_n.LU
    ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
    ns.calculateSolution('ladr_minModelStep_stepExactTrue')
    # bug fix: corrected typo in the failure message ("archvie" -> "archive")
    assert ns.tCount + 1 == len(so.tnList), "wrong number of archive steps " + repr(ns.tCount)
    assert ns.modelList[0].solver.solverList[0].solveCalls == 40, "wrong number of steps " + repr(ns.modelList[0].solver.solverList[0].solveCalls)
    archiveTimes = []
    for t in ns.ar[0].treeGlobal.iter('Time'):
        archiveTimes.append(t.attrib['Value'])
    # With exact stepping the archived times must match tnList exactly.
    archiveTimesCorrect = so.tnList
    npt.assert_almost_equal(np.array(archiveTimes, 'd'), np.array(archiveTimesCorrect, 'd'))
    del ns
def test_minModelStep_stepExactFalse():
    """MinModelStep controller with systemStepExact=False.

    The solver steps freely past output levels (34 steps here); the
    archived times are the known, regression-pinned step times.
    """
    pList = [load_p('ladr_2d_p', modulepath)]
    nList = [load_n('ladr_2d_n', modulepath)]
    so = So()
    so.name = pList[0].name = "ladr"
    so.tnList = nList[0].tnList
    so.systemStepControllerType = SplitOperator.Sequential_MinModelStep
    so.systemStepExact = False
    so.sList = [default_s]
    opts.logLevel = 7
    opts.verbose = True
    opts.profile = True
    opts.gatherArchive = True
    nList[0].runCFL = 0.33
    nList[0].linearSolver = default_n.LU
    nList[0].multilevelLinearSolver = default_n.LU
    ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
    ns.calculateSolution('ladr_minModelStep_stepExactFalse')
    # bug fix: corrected typo in the failure message ("archvie" -> "archive")
    assert ns.tCount + 1 == len(so.tnList), "wrong number of archive steps " + repr(ns.tCount)
    assert ns.modelList[0].solver.solverList[0].solveCalls == 34, "wrong number of steps " + repr(ns.modelList[0].solver.solverList[0].solveCalls)
    archiveTimes = []
    for t in ns.ar[0].treeGlobal.iter('Time'):
        archiveTimes.append(t.attrib['Value'])
    # Regression values: the first CFL-limited step time at or past each
    # requested output level.
    archiveTimesCorrect = ['0.0', '0.029516097303', '0.0516531702802', '0.0811692675832', '0.10330634056', '0.125443413538', '0.154959510841', '0.177096583818', '0.206612681121', '0.228749754098', '0.250886827075']
    npt.assert_almost_equal(np.array(archiveTimes, 'd'), np.array(archiveTimesCorrect, 'd'))
    del ns
def test_fixedStep_stepExactFalse():
    """FixedStep controller with systemStepExact=False.

    The system marches in constant dt_system_fixed increments (25 solver
    steps here) and archives the first step time at or past each tnList
    entry.
    """
    pList = [load_p('ladr_2d_p', modulepath)]
    nList = [load_n('ladr_2d_n', modulepath)]
    so = So()
    so.name = pList[0].name = "ladr"
    so.tnList = nList[0].tnList
    so.systemStepControllerType = SplitOperator.Sequential_FixedStep
    so.systemStepExact = False
    so.dt_system_fixed = 0.01
    so.sList = [default_s]
    opts.logLevel = 7
    opts.verbose = True
    opts.profile = True
    opts.gatherArchive = True
    nList[0].runCFL = 0.33
    nList[0].linearSolver = default_n.LU
    nList[0].multilevelLinearSolver = default_n.LU
    ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
    # bug fix: corrected copy-pasted simulation name
    # (was 'ladr_minModelStep_stepExactFalse')
    ns.calculateSolution('ladr_fixedStep_stepExactFalse')
    # bug fix: corrected typo in the failure message ("archvie" -> "archive")
    assert ns.tCount + 1 == len(so.tnList), "wrong number of archive steps " + repr(ns.tCount)
    assert ns.modelList[0].solver.solverList[0].solveCalls == 25, "wrong number of steps " + repr(ns.modelList[0].solver.solverList[0].solveCalls)
    archiveTimes = []
    for t in ns.ar[0].treeGlobal.iter('Time'):
        archiveTimes.append(t.attrib['Value'])
    # Reconstruct the expected archive times: march in fixed increments and
    # record the first time at or past each requested output level.
    archiveTimesCorrect = [so.tnList[0]]
    interval = 0
    t = so.tnList[0]
    while interval < len(so.tnList) - 1:
        t += so.dt_system_fixed
        if t >= so.tnList[interval + 1]:
            archiveTimesCorrect.append(t)
            interval += 1
    npt.assert_almost_equal(np.array(archiveTimes, 'd'), np.array(archiveTimesCorrect, 'd'))
    del ns
def test_fixedStep_stepExactTrue():
    """FixedStep controller with systemStepExact=True.

    Fixed increments are truncated to hit every tnList level exactly
    (30 solver steps here); archived times must equal tnList.
    """
    pList = [load_p('ladr_2d_p', modulepath)]
    nList = [load_n('ladr_2d_n', modulepath)]
    so = So()
    so.name = pList[0].name = "ladr"
    so.tnList = nList[0].tnList
    so.systemStepControllerType = SplitOperator.Sequential_FixedStep
    so.systemStepExact = True
    so.dt_system_fixed = 0.01
    so.sList = [default_s]
    opts.logLevel = 7
    opts.verbose = True
    opts.profile = True
    opts.gatherArchive = True
    nList[0].runCFL = 0.33
    nList[0].linearSolver = default_n.LU
    nList[0].multilevelLinearSolver = default_n.LU
    ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
    # bug fix: corrected copy-pasted simulation name
    # (was 'ladr_minModelStep_stepExactFalse')
    ns.calculateSolution('ladr_fixedStep_stepExactTrue')
    # bug fix: corrected typo in the failure message ("archvie" -> "archive")
    assert ns.tCount + 1 == len(so.tnList), "wrong number of archive steps " + repr(ns.tCount)
    assert ns.modelList[0].solver.solverList[0].solveCalls == 30, "wrong number of steps " + repr(ns.modelList[0].solver.solverList[0].solveCalls)
    archiveTimes = []
    for t in ns.ar[0].treeGlobal.iter('Time'):
        archiveTimes.append(t.attrib['Value'])
    archiveTimesCorrect = so.tnList
    npt.assert_almost_equal(np.array(archiveTimes, 'd'), np.array(archiveTimesCorrect, 'd'))
    del ns
def test_fixedStep_stepSimple():
    """FixedStep_Simple controller: exactly one system step per tnList
    interval; systemStepExact and dt_system_fixed are ignored."""
    pList = [load_p('ladr_2d_p', modulepath)]
    nList = [load_n('ladr_2d_n', modulepath)]
    so = So()
    so.name = pList[0].name = "ladr"
    so.tnList = nList[0].tnList
    so.systemStepControllerType = SplitOperator.Sequential_FixedStep_Simple
    so.systemStepExact = True  # should be ignored by this controller
    so.dt_system_fixed = 0.01  # should be ignored by this controller
    so.sList = [default_s]
    opts.logLevel = 7
    opts.verbose = True
    opts.profile = True
    opts.gatherArchive = True
    nList[0].runCFL = 0.33
    nList[0].linearSolver = default_n.LU
    nList[0].multilevelLinearSolver = default_n.LU
    ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
    # bug fix: corrected copy-pasted simulation name
    # (was 'ladr_minModelStep_stepExactFalse')
    ns.calculateSolution('ladr_fixedStep_stepSimple')
    # bug fix: corrected typo in the failure message ("archvie" -> "archive")
    assert ns.tCount + 1 == len(so.tnList), "wrong number of archive steps " + repr(ns.tCount)
    assert ns.modelList[0].solver.solverList[0].solveCalls == len(so.tnList) - 1, "wrong number of steps " + repr(ns.modelList[0].solver.solverList[0].solveCalls)
    archiveTimes = []
    for t in ns.ar[0].treeGlobal.iter('Time'):
        archiveTimes.append(t.attrib['Value'])
    archiveTimesCorrect = so.tnList
    npt.assert_almost_equal(np.array(archiveTimes, 'd'), np.array(archiveTimesCorrect, 'd'))
    del ns
if __name__ == '__main__':
    # Run the step-controller tests directly (outside pytest).
    # NOTE(review): test_fixedStep_stepSimple() is defined above but never
    # invoked here -- confirm whether the omission is intentional.
    test_minModelStep_stepExactTrue()
    test_minModelStep_stepExactFalse()
    test_fixedStep_stepExactFalse()
    test_fixedStep_stepExactTrue()
    Profiling.logEvent("Closing Log")
    # closeLog() may fail if logging was never fully initialized; best-effort.
    try:
        Profiling.closeLog()
    except:
        pass
| erdc/proteus | proteus/tests/ci/test_systemStep.py | Python | mit | 7,424 |
# Aliex Cardona and Josep Casanovas
# Realitat aumentada practica 1
import cv2
import numpy as np
from matplotlib import pyplot as plt
from PyFiles.convolutionKernel import getMatchingMap
# Template matching demo: compare OpenCV's matchTemplate against the custom
# convolution-based matching map and mark the best match in the source image.
IMAGES_PATH = "../Images/"

#imageName = IMAGES_PATH + input("Source image: ")
#targetName = IMAGES_PATH + input("Target to search: ")
#detectionThreshold = input("Detection threshold: ")
imageName = IMAGES_PATH + 'img1.png'
targetName = IMAGES_PATH + 't1-img1.png'

img = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
template = cv2.imread(targetName, cv2.IMREAD_GRAYSCALE)

# Reference matching map from OpenCV (method 0 == squared difference).
res = cv2.matchTemplate(img, template, 0)
# Matching map from the custom convolution-based implementation.
matching_map = getMatchingMap(img, template)

# Locate the global minimum of the matching map (best match position).
# Bug fix: the previous manual scan initialized min_value = 255 and therefore
# missed the minimum whenever every map value exceeded 255; np.argmin scans
# the full map unconditionally.
min_value_Y, min_value_X = np.unravel_index(np.argmin(matching_map), matching_map.shape)

# Mark the detected location with a 12x12 rectangle in the source image.
cv2.rectangle(img, (min_value_X - 6, min_value_Y - 6), (min_value_X + 6, min_value_Y + 6), 0, 2)

print(img.shape)
print(template.shape)
print(res.shape)
print(matching_map.shape)

# Bug fix: the three subplots were all titled 'Matching map'; label each
# panel with what it actually shows.
plt.subplot(1, 3, 1), plt.imshow(res, cmap='gray')
plt.title('OpenCV matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2), plt.imshow(matching_map, cmap='gray')
plt.title('Custom matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3), plt.imshow(img, cmap='gray')
plt.title('Detection result'), plt.xticks([]), plt.yticks([])
plt.show()
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for BiSearch, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.run import run_cmd_qa
class EB_BiSearch(EasyBlock):
    """
    Support for building and installing BiSearch.

    Upstream only ships an interactive install.sh script, so the configure
    and build steps are no-ops and everything happens during install by
    driving that script's prompts.
    """

    def configure_step(self):
        """No dedicated configure step for BiSearch."""
        pass

    def build_step(self):
        """No dedicated build step; install.sh compiles during install."""
        pass

    def install_step(self):
        """Run the interactive install.sh script, answering its prompts."""
        qanda = {
            'Please enter the BiSearch root directory: ': self.installdir,
            'Please enter the path of c++ compiler [/usr/bin/g++]: ': os.getenv('CXX'),
        }
        # progress output from the compile phase must not be mistaken for a prompt
        no_qa = [r'Compiling components\s*\.*']
        run_cmd_qa("./install.sh", qanda, no_qa=no_qa, log_all=True, simple=True)

    def sanity_check_step(self):
        """Custom sanity check for BiSearch: all four binaries must exist."""
        binaries = ["fpcr", "indexing_cdna", "indexing_genome", "makecomp"]
        custom_paths = {
            'files': ["bin/%s" % binary for binary in binaries],
            'dirs': [],
        }
        super(EB_BiSearch, self).sanity_check_step(custom_paths=custom_paths)
| omula/easybuild-easyblocks | easybuild/easyblocks/b/bisearch.py | Python | gpl-2.0 | 2,586 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The BibRecord regression test suite.
"""
import unittest
from invenio.config import CFG_SITE_URL, \
CFG_SITE_RECORD
from invenio import bibrecord
from invenio.testutils import make_test_suite, run_test_suite
from invenio.search_engine import get_record
class BibRecordFilterBibrecordTest(unittest.TestCase):
    """bibrecord - regression tests for filtering records by MARC tag patterns.

    All tests run bibrecord.get_filtered_record() against demo record #10.
    Expected values are field lists whose tuples have the shape
    (subfields, indicator1, indicator2, controlfield_value, field_position).
    """

    def setUp(self):
        """Load the demo record used as the filtering fixture."""
        self.rec = get_record(10)

    def _filter(self, tags):
        """Shorthand: filter the fixture record by the given tag list."""
        return bibrecord.get_filtered_record(self.rec, tags)

    def _file_url(self, filename):
        """Shorthand: full URL of a fulltext file attached to record 10."""
        return '%s/%s/10/files/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, filename)

    def test_empty_filter(self):
        """bibrecord - empty filter"""
        # no tags means no filtering: the full record comes back
        self.assertEqual(self._filter([]), self.rec)

    def test_filter_tag_only(self):
        """bibrecord - filtering only by MARC tag"""
        field_001 = {'001': [([], ' ', ' ', '10', 1)]}
        # existing tag
        self.assertEqual(self._filter(['001']), field_001)
        # tags that do not match anything yield an empty record
        for tag in ('037', '856', '999'):
            self.assertEqual(self._filter([tag]), {})
        # mixed sequences keep only the matching part
        self.assertEqual(self._filter(['001', '999']), field_001)
        self.assertEqual(self._filter(['001', '260', '856', '400', '500', '999']), field_001)

    def test_filter_subfields(self):
        """bibrecord - filtering subfields"""
        ps_url = self._file_url('ep-2001-094.ps.gz')
        pdf_url = self._file_url('ep-2001-094.pdf')
        self.assertEqual(self._filter(['65017a']),
                         {'650': [([('a', 'Particle Physics - Experimental Results')], '1', '7', '', 1)]})
        self.assertEqual(self._filter(['65017a', '650172']),
                         {'650': [([('a', 'Particle Physics - Experimental Results')], '1', '7', '', 1),
                                  ([('2', 'SzGeCERN')], '1', '7', '', 2)]})
        self.assertEqual(self._filter(['8560_f']),
                         {'856': [([('f', '[email protected]')], '0', ' ', '', 1)]})
        self.assertEqual(self._filter(['260__a']),
                         {'260': [([('a', 'Geneva')], ' ', ' ', '', 1)]})
        self.assertEqual(self._filter(['595__a']),
                         {'595': [([('a', 'CERN EDS')], ' ', ' ', '', 1),
                                  ([('a', '20011220SLAC')], ' ', ' ', '', 2),
                                  ([('a', 'giva')], ' ', ' ', '', 3),
                                  ([('a', 'LANL EDS')], ' ', ' ', '', 4)]})
        self.assertEqual(self._filter(['8564_u']),
                         {'856': [([('u', ps_url)], '4', ' ', '', 1),
                                  ([('u', pdf_url)], '4', ' ', '', 2)]})
        self.assertEqual(self._filter(['595__a', '8564_u']),
                         {'595': [([('a', 'CERN EDS')], ' ', ' ', '', 1),
                                  ([('a', '20011220SLAC')], ' ', ' ', '', 2),
                                  ([('a', 'giva')], ' ', ' ', '', 3),
                                  ([('a', 'LANL EDS')], ' ', ' ', '', 4)],
                          '856': [([('u', ps_url)], '4', ' ', '', 5),
                                  ([('u', pdf_url)], '4', ' ', '', 6)]})

    def test_filter_comprehensive(self):
        """bibrecord - comprehensive filtering"""
        tags = ['001', '035', '037__a', '65017a', '650']
        expected = {
            '001': [([], ' ', ' ', '10', 1)],
            '037': [([('a', 'hep-ex/0201013')], ' ', ' ', '', 2)],
            '650': [([('a', 'Particle Physics - Experimental Results')], '1', '7', '', 3)],
        }
        self.assertEqual(self._filter(tags), expected)

    def test_filter_wildcards(self):
        """bibrecord - wildcards filtering"""
        ps_url = self._file_url('ep-2001-094.ps.gz')
        pdf_url = self._file_url('ep-2001-094.pdf')
        self.assertEqual(self._filter(['595__%']),
                         {'595': [([('a', 'CERN EDS')], ' ', ' ', '', 1),
                                  ([('a', '20011220SLAC')], ' ', ' ', '', 2),
                                  ([('a', 'giva')], ' ', ' ', '', 3),
                                  ([('a', 'LANL EDS')], ' ', ' ', '', 4)]})
        self.assertEqual(self._filter(['909CS%']),
                         {'909': [([('s', 'n'), ('w', '200231')], 'C', 'S', '', 1)]})
        self.assertEqual(self._filter(['856%']), {})
        self.assertEqual(self._filter(['856%_u']),
                         {'856': [([('u', ps_url)], '4', ' ', '', 1),
                                  ([('u', pdf_url)], '4', ' ', '', 2)]})
        self.assertEqual(self._filter(['909%5v']), {})
        self.assertEqual(self._filter(['909%5b']),
                         {'909': [([('b', 'CER')], 'C', '5', '', 1)]})

    def test_filter_multi_wildcards(self):
        """bibrecord - multi wildcards filtering"""
        ps_url = self._file_url('ep-2001-094.ps.gz')
        pdf_url = self._file_url('ep-2001-094.pdf')
        self.assertEqual(self._filter(['909%%_']), {})
        self.assertEqual(self._filter(['856%_%']),
                         {'856': [([('f', '[email protected]')], '0', ' ', '', 1),
                                  ([('s', '217223'), ('u', ps_url)], '4', ' ', '', 2),
                                  ([('s', '383040'), ('u', pdf_url)], '4', ' ', '', 3)]})
        self.assertEqual(self._filter(['909%%b']),
                         {'909': [([('b', '11')], 'C', '0', '', 1),
                                  ([('b', 'CER')], 'C', '5', '', 2)]})
        self.assertEqual(self._filter(['909%%%']),
                         {'909': [([('y', '2002')], 'C', '0', '', 1),
                                  ([('e', 'ALEPH')], 'C', '0', '', 2),
                                  ([('b', '11')], 'C', '0', '', 3),
                                  ([('p', 'EP')], 'C', '0', '', 4),
                                  ([('a', 'CERN LEP')], 'C', '0', '', 5),
                                  ([('c', '2001-12-19'), ('l', '50'), ('m', '2002-02-19'), ('o', 'BATCH')], 'C', '1', '', 6),
                                  ([('u', 'CERN')], 'C', '1', '', 7),
                                  ([('p', 'Eur. Phys. J., C')], 'C', '4', '', 8),
                                  ([('b', 'CER')], 'C', '5', '', 9),
                                  ([('s', 'n'), ('w', '200231')], 'C', 'S', '', 10),
                                  ([('o', 'oai:cds.cern.ch:CERN-EP-2001-094'), ('p', 'cern:experiment')], 'C', 'O', '', 11)]})
        # different wildcard spellings over the same tag must be equivalent
        self.assertEqual(self._filter(['980%%%']), self._filter(['980_%%']))
        self.assertEqual(self._filter(['980_%%']), self._filter(['980%_%']))
        self.assertEqual(self._filter(['980__%']), self._filter(['980%%%']))

    def test_filter_wildcard_comprehensive(self):
        """bibrecord - comprehensive wildcard filtering"""
        ps_url = self._file_url('ep-2001-094.ps.gz')
        pdf_url = self._file_url('ep-2001-094.pdf')
        tags = ['595__%', '909CS%', '856%', '856%_%', '909%5b', '980%%%']
        expected = {
            '595': [([('a', 'CERN EDS')], ' ', ' ', '', 1),
                    ([('a', '20011220SLAC')], ' ', ' ', '', 2),
                    ([('a', 'giva')], ' ', ' ', '', 3),
                    ([('a', 'LANL EDS')], ' ', ' ', '', 4)],
            '856': [([('f', '[email protected]')], '0', ' ', '', 5),
                    ([('s', '217223'), ('u', ps_url)], '4', ' ', '', 6),
                    ([('s', '383040'), ('u', pdf_url)], '4', ' ', '', 7)],
            '909': [([('s', 'n'), ('w', '200231')], 'C', 'S', '', 8),
                    ([('b', 'CER')], 'C', '5', '', 9)],
            '980': [([('a', 'PREPRINT')], ' ', ' ', '', 10),
                    ([('a', 'ALEPHPAPER')], ' ', ' ', '', 11)],
        }
        self.assertEqual(self._filter(tags), expected)
# Aggregate all test cases of this module into the suite picked up by the
# Invenio test runner.
TEST_SUITE = make_test_suite(
    BibRecordFilterBibrecordTest,
)

if __name__ == '__main__':
    # Run the regression suite directly when executed as a script.
    run_test_suite(TEST_SUITE, warn_user=True)
| CERNDocumentServer/invenio | modules/bibrecord/lib/bibrecord_regression_tests.py | Python | gpl-2.0 | 10,828 |
from Screen import Screen
import ChannelSelection
from ServiceReference import ServiceReference
from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Tools.BoundFunction import boundFunction
from Components.MenuList import MenuList
from Components.Sources.StaticText import StaticText
from Components.Label import Label
from Components.NimManager import nimmanager
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import defaultMoviePath
from Screens.MovieSelection import getPreferredTagEditor
from Screens.LocationBox import MovieLocationBox
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Tools.Alternatives import GetWithAlternative
from Tools.FallbackTimer import FallbackTimerDirs
from RecordTimer import AFTEREVENT
from enigma import eEPGCache, iRecordableServicePtr
from time import localtime, mktime, time, strftime
from datetime import datetime
import urllib
class TimerEntry(Screen, ConfigListScreen):
	def __init__(self, session, timer):
		"""Edit screen for a single timer entry.

		session -- the active enigma2 session
		timer   -- the timer object being created or edited
		"""
		Screen.__init__(self, session)
		self.timer = timer
		# Snapshot the fields that can be changed here so they can be
		# restored / compared against later (e.g. on cancel or when moving
		# the timer between local and fallback receiver).
		self.timer.service_ref_prev = self.timer.service_ref
		self.timer.begin_prev = self.timer.begin
		self.timer.end_prev = self.timer.end
		self.timer.external_prev = self.timer.external
		self.timer.dirname_prev = self.timer.dirname
		self.entryDate = None
		self.entryService = None
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Save"))
		self["key_yellow"] = StaticText(_("Timer type"))
		self["key_blue"] = StaticText("")
		# Map remote-control keys to screen actions; priority -2 so these
		# take precedence over default handlers.
		self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions", "ColorActions"],
		{
			"ok": self.keySelect,
			"save": self.keyGo,
			"cancel": self.keyCancel,
			"volumeUp": self.incrementStart,
			"volumeDown": self.decrementStart,
			"size+": self.incrementEnd,
			"size-": self.decrementEnd,
			"red": self.keyCancel,
			"green": self.keyGo,
			"yellow": self.changeTimerType,
			"blue": self.changeZapWakeupType
		}, -2)
		self.list = []
		ConfigListScreen.__init__(self, self.list, session = session)
		self.setTitle(_("Timer entry"))
		# FallbackTimerDirs fetches the fallback receiver's recording
		# locations and then invokes createConfig() with them.
		FallbackTimerDirs(self, self.createConfig)
def createConfig(self, currlocation=None, locations=[]):
justplay = self.timer.justplay
always_zap = self.timer.always_zap
zap_wakeup = self.timer.zap_wakeup
pipzap = self.timer.pipzap
rename_repeat = self.timer.rename_repeat
conflict_detection = self.timer.conflict_detection
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.AUTO: "auto"
}[self.timer.afterEvent]
if self.timer.record_ecm and self.timer.descramble:
recordingtype = "descrambled+ecm"
elif self.timer.record_ecm:
recordingtype = "scrambled+ecm"
elif self.timer.descramble:
recordingtype = "normal"
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
day = list([int(x) for x in reversed('{0:07b}'.format(self.timer.repeated))])
weekday = 0
if self.timer.repeated: # repeated
type = "repeated"
if (self.timer.repeated == 31): # Mon-Fri
repeated = "weekdays"
elif (self.timer.repeated == 127): # daily
repeated = "daily"
else:
repeated = "user"
if day.count(1) == 1:
repeated = "weekly"
weekday = day.index(1)
else: # once
type = "once"
repeated = None
weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
day[weekday] = 1
self.timerentry_fallback = ConfigYesNo(default=self.timer.external_prev or config.usage.remote_fallback_external_timer.value and config.usage.remote_fallback.value and not nimmanager.somethingConnected())
self.timerentry_justplay = ConfigSelection(choices = [
("zap", _("zap")), ("record", _("record")), ("zap+record", _("zap and record"))],
default = {0: "record", 1: "zap", 2: "zap+record"}[justplay + 2*always_zap])
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
choicelist = [("always", _("always")), ("from_standby", _("only from standby")), ("from_deep_standby", _("only from deep standby")), ("never", _("never"))]
else:
shutdownString = _("shut down")
choicelist = [("always", _("always")), ("never", _("never"))]
self.timerentry_zapwakeup = ConfigSelection(choices = choicelist, default = zap_wakeup)
self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("auto", _("auto"))], default = afterevent)
self.timerentry_recordingtype = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = recordingtype)
self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
self.timerentry_name = ConfigText(default = self.timer.name, visible_width = 50, fixed_size = False)
self.timerentry_description = ConfigText(default = self.timer.description, visible_width = 50, fixed_size = False)
self.timerentry_tags = self.timer.tags[:]
self.timerentry_tagsset = ConfigSelection(choices = [not self.timerentry_tags and "None" or " ".join(self.timerentry_tags)])
self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("weekly", _("weekly")), ("daily", _("daily")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
self.timerentry_renamerepeat = ConfigYesNo(default = rename_repeat)
self.timerentry_pipzap = ConfigYesNo(default = pipzap)
self.timerentry_conflictdetection = ConfigYesNo(default = conflict_detection)
self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_starttime = ConfigClock(default = self.timer.begin)
self.timerentry_endtime = ConfigClock(default = self.timer.end)
self.timerentry_showendtime = ConfigSelection(default = ((self.timer.end - self.timer.begin) > 4), choices = [(True, _("yes")), (False, _("no"))])
default = not self.timer.external_prev and self.timer.dirname or defaultMoviePath()
tmp = config.movielist.videodirs.value
if default not in tmp:
tmp.append(default)
self.timerentry_dirname = ConfigSelection(default = default, choices = tmp)
default = self.timer.external_prev and self.timer.dirname or currlocation
if default not in locations:
locations.append(default)
self.timerentry_fallbackdirname = ConfigSelection(default=default, choices=locations)
self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default = day[x]))
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
servicename = str(self.timer.service_ref.getServiceName())
except:
pass
self.timerentry_service_ref = self.timer.service_ref
self.timerentry_service = ConfigSelection([servicename])
self.createSetup("config")
	def createSetup(self, widget):
		"""(Re)build the visible config list for *widget* ("config").

		Which entries are shown depends on the current values of several
		Config* elements (timer type, repeat type, fallback, ...), so this
		is re-invoked by newConfig() whenever one of those changes.
		"""
		self.list = []
		self.entryFallbackTimer = getConfigListEntry(_("Fallback Timer"), self.timerentry_fallback)
		# Fallback option only when fallback timers are enabled and this
		# screen is not the dedicated remote-timer variant.
		if config.usage.remote_fallback_external_timer.value and config.usage.remote_fallback.value and not hasattr(self, "timerentry_remote"):
			self.list.append(self.entryFallbackTimer)
		self.entryName = getConfigListEntry(_("Name"), self.timerentry_name)
		self.list.append(self.entryName)
		self.entryDescription = getConfigListEntry(_("Description"), self.timerentry_description)
		self.list.append(self.entryDescription)
		self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay)
		self.list.append(self.timerJustplayEntry)
		self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type)
		self.list.append(self.timerTypeEntry)
		if self.timerentry_type.value == "once":
			self.frequencyEntry = None
		else: # repeated
			self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated)
			self.list.append(self.frequencyEntry)
			self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate)
			self.list.append(self.repeatedbegindateEntry)
			# "daily" and "weekdays" need no extra entries.
			if self.timerentry_repeated.value == "daily":
				pass
			if self.timerentry_repeated.value == "weekdays":
				pass
			if self.timerentry_repeated.value == "weekly":
				self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
			if self.timerentry_repeated.value == "user":
				# one yes/no entry per weekday
				self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
				self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
				self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
				self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
				self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
				self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
				self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
			if self.timerentry_justplay.value != "zap":
				self.list.append(getConfigListEntry(_("Rename name and description for new events"), self.timerentry_renamerepeat))
		self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date)
		if self.timerentry_type.value == "once":
			self.list.append(self.entryDate)
		self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime)
		self.list.append(self.entryStartTime)
		self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime)
		self.entryZapWakeup = getConfigListEntry(_("Wakeup receiver for start timer"), self.timerentry_zapwakeup)
		# Zap timers get wakeup/PiP options; only they can hide the end time.
		if self.timerentry_justplay.value == "zap":
			self.list.append(self.entryZapWakeup)
			if SystemInfo["PIPAvailable"]:
				self.list.append(getConfigListEntry(_("Use as PiP if possible"), self.timerentry_pipzap))
			self.list.append(self.entryShowEndTime)
			self["key_blue"].setText(_("Wakeup type"))
		else:
			self["key_blue"].setText("")
		self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime)
		if self.timerentry_justplay.value != "zap" or self.timerentry_showendtime.value:
			self.list.append(self.entryEndTime)
		self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service)
		self.list.append(self.channelEntry)
		# Pick the location element matching the selected receiver (fallback
		# vs. local).
		self.dirname = getConfigListEntry(_("Location"), self.timerentry_fallbackdirname) if self.timerentry_fallback.value and self.timerentry_fallbackdirname.value else getConfigListEntry(_("Location"), self.timerentry_dirname)
		if config.usage.setup_level.index >= 2 and (self.timerentry_fallback.value and self.timerentry_fallbackdirname.value or self.timerentry_dirname.value): # expert+
			self.list.append(self.dirname)
		self.conflictDetectionEntry = getConfigListEntry(_("Enable timer conflict detection"), self.timerentry_conflictdetection)
		if not self.timerentry_fallback.value:
			self.list.append(self.conflictDetectionEntry)
		self.tagsSet = getConfigListEntry(_("Tags"), self.timerentry_tagsset)
		# Recording-only options are hidden for zap and fallback timers.
		if self.timerentry_justplay.value != "zap" and not self.timerentry_fallback.value:
			if getPreferredTagEditor():
				self.list.append(self.tagsSet)
			self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
			self.list.append(getConfigListEntry(_("Recording type"), self.timerentry_recordingtype))
		self[widget].list = self.list
		self[widget].l.setList(self.list)
def newConfig(self):
print "[TimerEdit] newConfig", self["config"].getCurrent()
if self["config"].getCurrent() in (self.timerTypeEntry, self.timerJustplayEntry, self.frequencyEntry, self.entryShowEndTime, self.entryFallbackTimer):
self.createSetup("config")
def keyLeft(self):
    """LEFT key: open the picker for selector rows, the keyboard for text rows, else step the value."""
    entry = self["config"].getCurrent()
    if entry in (self.channelEntry, self.tagsSet):
        self.keySelect()
        return
    if entry in (self.entryName, self.entryDescription):
        self.renameEntry()
        return
    ConfigListScreen.keyLeft(self)
    self.newConfig()
def keyRight(self):
    """RIGHT key: open the picker for selector rows, the keyboard for text rows, else step the value."""
    entry = self["config"].getCurrent()
    if entry in (self.channelEntry, self.tagsSet):
        self.keySelect()
        return
    if entry in (self.entryName, self.entryDescription):
        self.renameEntry()
        return
    ConfigListScreen.keyRight(self)
    self.newConfig()
def renameEntry(self):
    """Open a virtual keyboard pre-filled with the current timer name or description."""
    if self["config"].getCurrent() == self.entryName:
        prompt, preset = _("Please enter new name:"), self.timerentry_name.value
    else:
        prompt, preset = _("Please enter new description:"), self.timerentry_description.value
    self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=prompt, text=preset)
def renameEntryCallback(self, answer):
    """Store the edited text (if any) and refresh the corresponding config row."""
    if not answer:
        return
    if self["config"].getCurrent() == self.entryName:
        self.timerentry_name.value = answer
        self["config"].invalidate(self.entryName)
    else:
        self.timerentry_description.value = answer
        self["config"].invalidate(self.entryDescription)
def handleKeyFileCallback(self, answer):
    """FILE key: treat selector rows like OK, otherwise defer to the base-class handler."""
    current = self["config"].getCurrent()
    if current in (self.channelEntry, self.tagsSet):
        self.keySelect()
        return
    ConfigListScreen.handleKeyFileCallback(self, answer)
    self.newConfig()
def openMovieLocationBox(self, answer=""):
    """Let the user pick the recording folder; ``answer`` pre-fills a sub-folder name."""
    self.session.openWithCallback(
        self.pathSelected,
        MovieLocationBox,
        _("Select target folder"),
        self.timerentry_dirname.value,
        filename=answer,
        minFree=100,  # require at least 100 MB free space
    )
def keySelect(self):
    # OK pressed: open the picker that matches the currently selected row,
    # falling through to saving the whole timer for plain value rows.
    cur = self["config"].getCurrent()
    if cur == self.channelEntry:
        # Channel row: choose the service to record from.
        self.session.openWithCallback(
            self.finishedChannelSelection,
            ChannelSelection.SimpleChannelSelection,
            _("Select channel to record from"),
            currentBouquet=True
        )
    elif cur == self.dirname:
        # Location row: open the location box directly, or offer seeding the
        # folder name with the timer name for named repeating timers.
        menu = [(_("Open select location"), "empty")]
        if self.timerentry_type.value == "repeated" and self.timerentry_name.value:
            menu.append((_("Open select location as timer name"), "timername"))
        if len(menu) == 1:
            self.openMovieLocationBox()
        elif len(menu) == 2:
            text = _("Select action")
            def selectAction(choice):
                # ChoiceBox callback; choice is None when the user cancels.
                if choice:
                    if choice[1] == "timername":
                        self.openMovieLocationBox(self.timerentry_name.value)
                    elif choice[1] == "empty":
                        self.openMovieLocationBox()
            self.session.openWithCallback(selectAction, ChoiceBox, title=text, list=menu)
    elif getPreferredTagEditor() and cur == self.tagsSet:
        # Tags row: delegate to the configured tag editor plugin.
        self.session.openWithCallback(
            self.tagEditFinished,
            getPreferredTagEditor(),
            self.timerentry_tags
        )
    else:
        # Any other row: OK confirms and saves the timer.
        self.keyGo()
def finishedChannelSelection(self, *args):
    """Channel-selector callback: adopt the chosen service and refresh its row."""
    if not args:
        return
    self.timerentry_service_ref = ServiceReference(args[0])
    self.timerentry_service.setCurrentText(self.timerentry_service_ref.getServiceName())
    self["config"].invalidate(self.channelEntry)
def getTimestamp(self, date, mytime):
    """Combine a date (epoch seconds, local time) with an (hour, minute) pair
    into a Unix timestamp for that day."""
    day = localtime(date)
    combined = datetime(day.tm_year, day.tm_mon, day.tm_mday, mytime[0], mytime[1])
    return int(mktime(combined.timetuple()))
def getBeginEnd(self):
    """Return (begin, end) Unix timestamps for the configured date and times.

    An end time earlier than the start time means "ends the next day".
    """
    day = self.timerentry_date.value
    begin = self.getTimestamp(day, self.timerentry_starttime.value)
    end = self.getTimestamp(day, self.timerentry_endtime.value)
    if end < begin:
        end += 86400  # roll the end over midnight
    return begin, end
def selectChannelSelector(self, *args):
    """Re-open the channel selector after the "no recordable channel" warning."""
    self.session.openWithCallback(
        self.finishedChannelSelectionCorrection,
        ChannelSelection.SimpleChannelSelection,
        _("Select channel to record from"))
def finishedChannelSelectionCorrection(self, *args):
    """Adopt the corrected channel choice, then retry saving the timer."""
    if not args:
        return
    self.finishedChannelSelection(*args)
    self.keyGo()
def RemoteSubserviceSelected(self, service):
    """Record to the chosen remote subservice.

    Temporarily swaps in the selected subservice reference (and clears the
    timer's EIT so no EPG lookup happens), saves via keyGo(), then restores
    the original service reference and EIT.
    """
    if service:
        # Fix: the original read the bare name 'timerentry_service_ref',
        # which raised NameError -- the instance attribute was intended.
        service_ref = self.timerentry_service_ref
        self.timerentry_service_ref = ServiceReference(service[1])
        eit = self.timer.eit
        self.timer.eit = None
        self.keyGo()
        # Restore the state that was swapped out above.
        self.timerentry_service_ref = service_ref
        self.timer.eit = eit
def keyGo(self, result = None):
    # Validate the form and copy every field into self.timer, resolve EPG
    # subservices if needed, then save and close.  'result' is unused; it
    # exists because this method also serves as a dialog callback.
    if not self.timerentry_service_ref.isRecordable():
        self.session.openWithCallback(self.selectChannelSelector, MessageBox, _("You didn't select a channel to record from."), MessageBox.TYPE_ERROR)
    else:
        self.timer.external = self.timerentry_fallback.value
        self.timer.name = self.timerentry_name.value
        self.timer.description = self.timerentry_description.value
        self.timer.justplay = self.timerentry_justplay.value == "zap"
        self.timer.always_zap = self.timerentry_justplay.value == "zap+record"
        self.timer.zap_wakeup = self.timerentry_zapwakeup.value
        self.timer.pipzap = self.timerentry_pipzap.value
        self.timer.rename_repeat = self.timerentry_renamerepeat.value
        self.timer.conflict_detection = self.timerentry_conflictdetection.value
        if self.timerentry_justplay.value == "zap":
            if not self.timerentry_showendtime.value:
                # A zap timer without a visible end time "ends" at its start
                # time and must not trigger an after-event action.
                self.timerentry_endtime.value = self.timerentry_starttime.value
                self.timerentry_afterevent.value = "nothing"
        self.timer.resetRepeated()
        self.timer.afterEvent = {
            "nothing": AFTEREVENT.NONE,
            "deepstandby": AFTEREVENT.DEEPSTANDBY,
            "standby": AFTEREVENT.STANDBY,
            "auto": AFTEREVENT.AUTO
            }[self.timerentry_afterevent.value]
        # Map the recording-type choice onto the descramble/record_ecm pair.
        self.timer.descramble = {
            "normal": True,
            "descrambled+ecm": True,
            "scrambled+ecm": False,
            }[self.timerentry_recordingtype.value]
        self.timer.record_ecm = {
            "normal": False,
            "descrambled+ecm": True,
            "scrambled+ecm": True,
            }[self.timerentry_recordingtype.value]
        self.timer.service_ref = self.timerentry_service_ref
        self.timer.tags = self.timerentry_tags
        # reset state when edit timer type
        # NOTE(review): self.timer.justplay was just set to a *boolean* above,
        # so 'self.timer.justplay != "zap"' is always True; the comparison was
        # presumably meant against self.timerentry_justplay.value -- confirm
        # before changing, as current behaviour effectively ignores the test.
        if not self.timer.external and self.timer.justplay != "zap" and self.timer.isRunning():
            if self.timer in self.session.nav.RecordTimer.timer_list and (not self.timer.record_service or not isinstance(self.timer.record_service, iRecordableServicePtr)):
                self.timer.resetState()
        if self.timerentry_fallback.value:
            # Fallback (remote) timers record on the other box's location.
            self.timer.dirname = self.timerentry_fallbackdirname.value
        else:
            if self.timer.dirname or self.timerentry_dirname.value != defaultMoviePath():
                self.timer.dirname = self.timerentry_dirname.value
                # Remember the last explicitly chosen timer directory.
                config.movielist.last_timer_videodir.value = self.timer.dirname
                config.movielist.last_timer_videodir.save()
        if self.timerentry_type.value == "once":
            self.timer.begin, self.timer.end = self.getBeginEnd()
        if self.timerentry_type.value == "repeated":
            if self.timerentry_repeated.value == "daily":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    self.timer.setRepeated(x)
            if self.timerentry_repeated.value == "weekly":
                self.timer.setRepeated(self.timerentry_weekday.index)
            if self.timerentry_repeated.value == "weekdays":
                for x in (0, 1, 2, 3, 4):
                    self.timer.setRepeated(x)
            if self.timerentry_repeated.value == "user":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    if self.timerentry_day[x].value:
                        self.timer.setRepeated(x)
            self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
            if self.timer.repeated:
                self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_endtime.value)
            else:
                self.timer.begin = self.getTimestamp(time(), self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(time(), self.timerentry_endtime.value)
            # when a timer end is set before the start, add 1 day
            if self.timer.end < self.timer.begin:
                self.timer.end += 86400
        if self.timer.eit is not None:
            # Timer created from an EPG event: offer linked subservices, if any.
            event = eEPGCache.getInstance().lookupEventId(self.timer.service_ref.ref, self.timer.eit)
            if event:
                n = event.getNumOfLinkageServices()
                if n > 1:
                    # Several subservices: ask the user; pre-select the one
                    # currently being played, if it is in the list.
                    tlist = []
                    ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
                    parent = self.timer.service_ref.ref
                    selection = 0
                    for x in range(n):
                        i = event.getLinkageService(parent, x)
                        if i.toString() == ref.toString():
                            selection = x
                        tlist.append((i.getName(), i))
                    self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a subservice to record..."), list = tlist, selection = selection)
                    return
                elif n > 0:
                    # Exactly one subservice: take it without asking.
                    parent = self.timer.service_ref.ref
                    self.timer.service_ref = ServiceReference(event.getLinkageService(parent, 0))
        self.saveTimer()
        self.close((True, self.timer))
def changeTimerType(self):
    """Cycle the timer type (record / zap / zap+record) and rebuild the setup list."""
    self.timerentry_justplay.selectNext()
    entry = getConfigListEntry(_("Timer type"), self.timerentry_justplay)
    self.timerJustplayEntry = entry
    self["config"].invalidate(entry)
    self.createSetup("config")
def changeZapWakeupType(self):
    """Cycle the wakeup behaviour -- only meaningful for zap timers."""
    if self.timerentry_justplay.value != "zap":
        return
    self.timerentry_zapwakeup.selectNext()
    self["config"].invalidate(self.entryZapWakeup)
def incrementStart(self):
    """Bump the start time by one step, rolling the date forward past midnight."""
    self.timerentry_starttime.increment()
    self["config"].invalidate(self.entryStartTime)
    wrapped_to_next_day = (self.timerentry_type.value == "once"
                           and self.timerentry_starttime.value == [0, 0])
    if wrapped_to_next_day:
        self.timerentry_date.value += 86400
        self["config"].invalidate(self.entryDate)
def decrementStart(self):
    """Lower the start time by one step, rolling the date back past midnight."""
    self.timerentry_starttime.decrement()
    self["config"].invalidate(self.entryStartTime)
    wrapped_to_prev_day = (self.timerentry_type.value == "once"
                           and self.timerentry_starttime.value == [23, 59])
    if wrapped_to_prev_day:
        self.timerentry_date.value -= 86400
        self["config"].invalidate(self.entryDate)
def incrementEnd(self):
    """Bump the end time by one step (only when an end-time row exists)."""
    if self.entryEndTime is None:
        return
    self.timerentry_endtime.increment()
    self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
    """Lower the end time by one step (only when an end-time row exists)."""
    if self.entryEndTime is None:
        return
    self.timerentry_endtime.decrement()
    self["config"].invalidate(self.entryEndTime)
def subserviceSelected(self, service):
    """Subservice ChoiceBox callback: adopt the pick (if any), then save and close."""
    if service is not None:
        self.timer.service_ref = ServiceReference(service[1])
    # Save and close regardless -- cancelling the choice keeps the parent service.
    self.saveTimer()
    self.close((True, self.timer))
def saveTimer(self):
    # Persist all timers to disk via the navigation's record-timer service.
    self.session.nav.RecordTimer.saveTimer()
def keyCancel(self):
    # Abort editing; the (False,) tuple tells the caller nothing changed.
    self.close((False,))
def pathSelected(self, res):
    """MovieLocationBox callback: adopt the chosen recording directory."""
    if res is None:
        return
    # Refresh the choice list first if the configured video dirs changed meanwhile.
    if config.movielist.videodirs.value != self.timerentry_dirname.choices:
        self.timerentry_dirname.setChoices(config.movielist.videodirs.value, default=res)
    self.timerentry_dirname.value = res
def tagEditFinished(self, ret):
    """Tag-editor callback: store the new tag list and refresh its row."""
    if ret is None:
        return
    self.timerentry_tags = ret
    self.timerentry_tagsset.setChoices([" ".join(ret) if ret else "None"])
    self["config"].invalidate(self.tagsSet)
class TimerLog(Screen):
    """Screen showing a timer's log entries, with delete/clear support."""

    def __init__(self, session, timer):
        Screen.__init__(self, session)
        self.timer = timer
        # Work on a copy so cancelling leaves the timer's log untouched.
        self.log_entries = self.timer.log_entries[:]
        self.fillLogList()
        self["loglist"] = MenuList(self.list)
        self["logentry"] = Label()
        self["key_red"] = StaticText(_("Delete entry"))
        self["key_green"] = StaticText("")
        self["key_yellow"] = StaticText("")
        self["key_blue"] = StaticText(_("Clear log"))
        self.onShown.append(self.updateText)
        self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
        {
            "ok": self.keyClose,
            "cancel": self.keyClose,
            "up": self.up,
            "down": self.down,
            "left": self.left,
            "right": self.right,
            "red": self.deleteEntry,
            "blue": self.clearLog
        }, -1)
        self.setTitle(_("Timer log"))

    def deleteEntry(self):
        """Remove the selected log entry and refresh the screen."""
        selected = self["loglist"].getCurrent()
        if selected is None:
            return
        self.log_entries.remove(selected[1])
        self._refreshList()

    def fillLogList(self):
        """Build self.list: one ("timestamp - message", raw entry) tuple per log entry."""
        rows = []
        for entry in self.log_entries:
            stamp = strftime("%Y-%m-%d %H-%M", localtime(entry[0]))
            rows.append((str(stamp + " - " + entry[2]), entry))
        self.list = rows

    def _refreshList(self):
        # Rebuild the display list, push it into the widget, update the label.
        self.fillLogList()
        self["loglist"].l.setList(self.list)
        self.updateText()

    def clearLog(self):
        """Drop all log entries and refresh the screen."""
        self.log_entries = []
        self._refreshList()

    def keyClose(self):
        """Close; report the timer as modified only if the log actually changed."""
        if self.timer.log_entries != self.log_entries:
            self.timer.log_entries = self.log_entries
            self.close((True, self.timer))
        else:
            self.close((False,))

    def _move(self, direction):
        # Move the list selection, then refresh the detail label.
        self["loglist"].instance.moveSelection(direction)
        self.updateText()

    def up(self):
        self._move(self["loglist"].instance.moveUp)

    def down(self):
        self._move(self["loglist"].instance.moveDown)

    def left(self):
        self._move(self["loglist"].instance.pageUp)

    def right(self):
        self._move(self["loglist"].instance.pageDown)

    def updateText(self):
        """Show the selected entry's message, or nothing when the log is empty."""
        if self.list:
            self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
        else:
            self["logentry"].setText("")
| IanSav/enigma2 | lib/python/Screens/TimerEntry.py | Python | gpl-2.0 | 25,248 |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.comments import Comment
from openpyxl.workbook import Workbook
from openpyxl.worksheet import Worksheet
def test_init():
    """A comment attached to a cell records its parent, text and author."""
    workbook = Workbook()
    sheet = Worksheet(workbook)
    comment = Comment("text", "author")
    sheet.cell(coordinate="A1").comment = comment
    assert comment._parent == sheet.cell(coordinate="A1")
    assert comment.text == "text"
    assert comment.author == "author"
| Darthkpo/xtt | openpyxl/comments/tests/test_comment.py | Python | mit | 438 |
# -*- coding: utf-8 -*-
# © 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
from datetime import datetime
from openerp import models
class TestHrExpense(TransactionCase):
    """Tests for the expense product-policy rules: which products an
    employee may put on an expense, and whether a product is mandatory
    on an expense line.  Policies can come from the employee, the
    department, or the job position."""

    def setUp(self, *args, **kwargs):
        # Shared fixtures: demo employee/department/job plus four product
        # categories with their expensable products.
        result = super(TestHrExpense, self).setUp(*args, **kwargs)
        self.obj_expense = self.env["hr.expense.expense"]
        self.obj_product = self.env["product.product"]
        self.department = self.env.ref("hr.dep_management")
        self.job = self.env.ref("hr.job_ceo")
        self.categ1 = self.env.ref("product.product_category_6")
        # NOTE(review): each search already filters hr_expense_ok == True and
        # the following write re-sets the same flag, which is a no-op --
        # possibly the searches were meant to be unfiltered; verify intent.
        self.categ1_products = self.obj_product.search([
            ("categ_id", "=", self.categ1.id),
            ("hr_expense_ok", "=", True),
        ])
        self.categ1_products.write({"hr_expense_ok": True})
        self.categ2 = self.env.ref("product.product_category_7")
        self.categ2_products = self.obj_product.search([
            ("categ_id", "=", self.categ2.id),
            ("hr_expense_ok", "=", True),
        ])
        self.categ2_products.write({"hr_expense_ok": True})
        self.categ3 = self.env.ref("product.product_category_8")
        self.categ3_products = self.obj_product.search([
            ("categ_id", "=", self.categ3.id),
            ("hr_expense_ok", "=", True),
        ])
        self.categ3_products.write({"hr_expense_ok": True})
        # NOTE(review): categ4 references the same category as categ3
        # (product_category_8) -- confirm whether a distinct category was
        # intended.
        self.categ4 = self.env.ref("product.product_category_8")
        self.categ4_products = self.obj_product.search([
            ("categ_id", "=", self.categ4.id),
            ("hr_expense_ok", "=", True),
        ])
        self.categ4_products.write({"hr_expense_ok": True})
        self.product1 = self.env.ref("hr_expense.car_travel")
        self.product2 = self.env.ref("hr_expense.air_ticket")
        self.product3 = self.env.ref("hr_expense.hotel_rent")
        self.product4 = self.env.ref("product.product_product_4")
        self.employee = self.env.ref("hr.employee")
        self.employee.write({
            "department_id": self.department.id,
            "job_id": self.job.id})
        # Base values for creating an expense in the tests below.
        self.expense_data = {
            "name": "Daily expense",
            "date": datetime.today().strftime("%Y-%m-%d"),
            "employee_id": self.employee.id,
        }
        self.uom1 = self.env.ref("product.product_uom_unit")
        return result

    def test_onchange_required_product(self):
        # The expense mirrors the employee's required_expense_product flag
        # through the onchange.
        self.employee.write({"required_expense_product": True})
        expense = self.obj_expense.create(self.expense_data)
        expense.onchange_employee()
        self.assertEqual(
            expense.required_expense_product,
            True)
        self.employee.write({"required_expense_product": False})
        expense.onchange_employee()
        self.assertEqual(
            expense.required_expense_product,
            False)

    def test_employee_only_product_policy(self):
        # Allowed products = employee's allowed categories plus explicitly
        # allowed products.
        self.employee.write({
            "allowed_expense_product_categ_ids": [(6, 0, [self.categ1.id])]
        })
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            self.categ1_products.sorted(key=lambda x: x.id))
        self.categ1_products += self.product1
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])]})
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            self.categ1_products.sorted(key=lambda x: x.id))

    def test_employee_department_product_policy(self):
        # Department-level categories/products are added to the employee's
        # own allowed set.
        self.employee.write({
            "allowed_expense_product_categ_ids": [(6, 0, [self.categ1.id])]
        })
        expected_products = self.categ1_products
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])]})
        expected_products += self.product1
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.department.write({
            "allowed_expense_product_categ_ids": [(6, 0, [self.categ2.id])],
        })
        expected_products += self.categ2_products
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.department.write({
            "allowed_expense_product_ids": [(6, 0, [self.product2.id])],
        })
        expected_products += self.product2
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))

    def test_employee_job_product_policy(self):
        # Job-level categories/products are added to the employee's own set.
        # NOTE(review): categ3 is assigned to the employee here but the first
        # expected set is categ1_products -- this only matches if the two
        # categories' products coincide; verify the intended fixture.
        self.employee.write({
            "allowed_expense_product_categ_ids": [(6, 0, [self.categ3.id])]
        })
        expected_products = self.categ1_products
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])]})
        expected_products += self.product1
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.job.write({
            "allowed_expense_product_categ_ids": [(6, 0, [self.categ3.id])],
        })
        expected_products += self.categ3_products
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))
        self.department.write({
            "allowed_expense_product_ids": [(6, 0, [self.product3.id])],
        })
        expected_products += self.product3
        self.assertEqual(
            self.employee.all_allowed_expense_product_ids.sorted(
                key=lambda x: x.id),
            expected_products.sorted(key=lambda x: x.id))

    def test_create_expense_1(self):
        # A line without a product is rejected when a product is required.
        self.employee.write({"required_expense_product": True})
        line1 = {
            "name": "tes line",
            "date_value": self.expense_data["date"],
            "unit_amount": 100.00,
            "uom_id": self.uom1.id,
        }
        self.expense_data.update({
            "required_expense_product": True,
            "line_ids": [
                (0, 0, line1)]})
        with self.assertRaises(models.ValidationError):
            self.obj_expense.create(self.expense_data)
        self.expense_data.update({
            "required_expense_product": False})
        self.obj_expense.create(self.expense_data)

    def test_create_expense_2(self):
        # A product outside the allowed selection is rejected.
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])]
        })
        line1 = {
            "name": "tes line",
            "date_value": self.expense_data["date"],
            "unit_amount": 100.00,
            "product_id": self.product2.id,
            "uom_id": self.product2.uom_id.id,
        }
        self.expense_data.update({
            "limit_product_selection": True,
            "line_ids": [
                (0, 0, line1)]})
        with self.assertRaises(models.ValidationError):
            self.obj_expense.create(self.expense_data)

    def test_create_expense_3(self):
        # An allowed product passes validation.
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])]
        })
        line1 = {
            "name": "tes line",
            "date_value": self.expense_data["date"],
            "unit_amount": 100.00,
            "product_id": self.product1.id,
            "uom_id": self.product1.uom_id.id,
        }
        self.expense_data.update({
            "line_ids": [
                (0, 0, line1)]})
        self.obj_expense.create(self.expense_data)

    def test_write_expense_1(self):
        # Writes that clear the product or switch to a disallowed one are
        # rejected under the policy flags.
        self.employee.write({
            "allowed_expense_product_ids": [(6, 0, [self.product1.id])],
            "required_expense_product": True,
        })
        line1 = {
            "name": "tes line",
            "date_value": self.expense_data["date"],
            "unit_amount": 100.00,
            "uom_id": self.uom1.id,
            "product_id": self.product1.id,
        }
        self.expense_data.update({
            "required_expense_product": True,
            "limit_product_selection": True,
            "line_ids": [
                (0, 0, line1)]})
        expense = self.obj_expense.create(self.expense_data)
        line_id = expense.line_ids[0].id
        res = {
            "line_ids": [(1, line_id, {"product_id": False})],
        }
        with self.assertRaises(models.ValidationError):
            expense.write(res)
        res = {
            "line_ids": [(1, line_id, {"product_id": self.product2.id})],
        }
        with self.assertRaises(models.ValidationError):
            expense.write(res)
| VitalPet/hr | hr_expense_product_policy/tests/test_hr_expense.py | Python | agpl-3.0 | 9,394 |
#This script is an example for the nxt.server module. You need to run
#nxt.server.serve_forever() in another window. Or, if you want to use
#this across a network, pass the IP of the computer running the server
#as an argument in the command line.
import socket, sys
try:
server = sys.argv[1]
bindto = ''
except:
server = 'localhost'
bindto = 'localhost'
insock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
outsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
insock.bind((bindto, 54374))
while 1:
command = raw_input('nxt> ')
outsock.sendto(command, (server, 54174))
retvals, addr = insock.recvfrom(1024)
retcode = retvals[0]
retmsg = retvals[1:len(retvals)]
print 'Return code: '+retcode
print 'Return message: '+retmsg
| skorokithakis/nxt-python | examples/server_prompt.py | Python | gpl-3.0 | 808 |
from environment.SensoryEncoded import SensoryEncoded
import nltk
class LanguageEncoded(SensoryEncoded):
    """Sensory encoding for natural-language input.

    Keeps the raw text and its NLTK word tokenisation.
    """

    def __init__(self, data):
        # Preserve the untouched input alongside its token list.
        self.rawData = data
        self.tokens = nltk.word_tokenize(data)
| randyhook/knynet | environment/LanguageEncoded.py | Python | mit | 231 |
# Copyright (C) 2015 Alexandre Teyar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import logging
import os
import sys
from tqdm import tqdm
from metadata import metadata
from reverse_engineer import reverse_engineer
from utils import config, file_operation, process_info
class Payload(object):
    """Base class that injects a smali payload into a disassembled Android app.

    Subclasses supply the concrete per-file injection via inject() and any
    payload configuration via set_payload_settings().
    """

    def __init__(self, args):
        """Unpack the parsed command-line arguments into instance fields."""
        self.args = args
        self.payload_name = args.payload_name
        self.app_name = os.path.basename(args.app.name)
        self.app_path = os.path.abspath(args.app.name)
        # Location inside the disassembled app where calls are injected.
        self.destination = os.path.join(
            config.TMP_FOLDER,
            os.path.splitext(self.app_name)[0],
            args.destination
        )
        self.keywords = args.keywords.split(',')

    def run(self):
        """Full pipeline: disassemble, export payload, inject, reassemble, sign."""
        logging.info("disassembling {}...".format(self.app_path))
        logging.warning("this operation might take some time")
        reverse_engineer.disassemble(self)

        logging.info(
            "exporting the {} smali files into the malware source code".format(
                self.payload_name
            )
        )
        self.export_payload()
        self.set_payload_settings()

        logging.info(
            "injecting the call to the {} "
            "main method within all methods in {}...".format(
                self.payload_name, self.destination
            )
        )
        if os.path.exists(self.destination):
            if os.path.isdir(self.destination):
                # Inject into every smali file below the destination folder.
                dir_metadata = metadata.generate_dir_metadata(self.destination)
                self.inject_in_dir(dir_metadata)
                methods, edited_method, log_path = process_info.get_dir_info(
                    self, dir_metadata
                )
                logging.info(
                    "the {} main method call has been injected "
                    "into {}/{} methods".format(
                        self.payload_name,
                        edited_method,
                        methods
                    )
                )
                logging.info(
                    "log file created at {}".format(log_path)
                )
            elif os.path.isfile(self.destination):
                # Inject into exactly one smali file.
                file_metadata = metadata.generate_file_metadata(
                    self.destination
                )
                self.inject(self.destination, file_metadata)
                methods, edited_method, log_path = process_info.get_file_info(
                    self, file_metadata
                )
                logging.info(
                    "the {} main method call has been injected "
                    "into {}/{} methods".format(
                        self.payload_name,
                        edited_method,
                        methods
                    )
                )
                logging.info(
                    "log file created at {}".format(log_path)
                )
        else:
            logging.error(
                "{} does not exist, chose a valid destination...".format(
                    self.destination
                )
            )
            sys.exit(1)

        logging.info("reassembling the malware app...")
        logging.warning("this operation might take some time")
        reverse_engineer.reassemble(self)

        logging.info("signing the malware app...")
        reverse_engineer.sign(self)

    def inject(self, file_path, file_metadata):
        """Hook: inject the payload call into one smali file.

        Overridden by concrete payload subclasses; the base implementation
        does nothing.  Fix: the stub previously accepted no arguments even
        though run() and inject_in_dir() call it with a file path and its
        metadata, which would raise TypeError on any non-overriding subclass.
        """
        pass

    def inject_in_dir(self, dir_metadata):
        """
        Recursively inject the payload within the files contained in the
        destination folder.
        """
        for root, dirs, files in tqdm(
            list(os.walk(self.destination)),
            unit='dir',
            unit_scale=True,
            dynamic_ncols=True
        ):
            for file in fnmatch.filter(files, "*.smali"):
                file_path = os.path.join(root, file)
                # Skip the payload directory
                if (self.payload_name in file_path):
                    continue
                else:
                    file_metadata = dir_metadata[file_path]
                    self.inject(file_path, file_metadata)

    # TODO: Got really messy - improve this if possible
    def export_payload(self):
        """
        Copy the smali payload files into the app android folder.
        """
        payload_dest_folder = os.path.join(
            config.TMP_FOLDER, os.path.splitext(self.app_name)[0], "android"
        )
        file_operation.copy(
            os.path.join(config.PAYLOAD_FOLDER, self.payload_name),
            os.path.join(payload_dest_folder, self.payload_name)
        )
        # Copy the minimum 'android' libraries needed
        # to run the payload
        lib_v4 = os.walk(
            os.path.join(config.PAYLOAD_FOLDER, "support", "v4")
        )
        for root, dirs, files in lib_v4:
            lib_v4_dest_folder = root.replace(
                config.PAYLOAD_FOLDER,
                os.path.join(
                    config.TMP_FOLDER,
                    os.path.splitext(self.app_name)[0],
                    "android"
                )
            )
            for file in files:
                # Only copy files that are not already present.
                if not os.path.exists(
                    os.path.join(lib_v4_dest_folder, file)
                ):
                    file_operation.copy(
                        os.path.join(root, file),
                        os.path.join(lib_v4_dest_folder, file)
                    )

    def set_payload_settings(self, payload_path=None):
        """
        Add the rhost and ppg to the AndroidManifest.

        Hook overridden by subclasses.  Fix: run() calls this method without
        arguments, so payload_path now defaults to None instead of being a
        required positional parameter (which raised TypeError).
        """
        pass
| AresS31/SCI | src/payloads/payload.py | Python | apache-2.0 | 6,079 |
from setup import codeopts, git_version, setup

if __name__ == '__main__':
    # Stamp the package version from git metadata, then hand off to setup().
    __version__ = git_version()
    codeopts['version'] = __version__
    setup(**codeopts)
"""
YANK is a testbed for experimenting with algorithms for the efficient computation of small molecule binding free energies to biomolecular targets using alchemical methods.
YANK is built on OpenMM, the API for molecular simulation, and uses its GPU-accelerated library implementation for hardware acceleration.
"""
from __future__ import print_function
import os
import sys
import distutils.extension
from setuptools import setup, Extension, find_packages
import numpy
import glob
import os
from os.path import relpath, join
import subprocess
from Cython.Build import cythonize
# First docstring line becomes the short description; the rest the long one.
DOCLINES = __doc__.split("\n")

########################
VERSION = "0.9.1dev"
ISRELEASED = False
__version__ = VERSION
########################

# Trove classifiers advertised on PyPI (one per line).
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: Lesser GNU Public License (LGPL)
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
    """Return the current git HEAD revision as a string, or 'Unknown' when
    git cannot be run.  Adapted from numpy's setup.py."""
    def _minimal_ext_cmd(cmd):
        # Run *cmd* under a stripped-down C-locale environment so the
        # output of git is predictable across platforms.
        env = {}
        for name in ['SYSTEMROOT', 'PATH']:
            value = os.environ.get(name)
            if value is not None:
                env[name] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        return subprocess.Popen(
            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        revision = out.strip().decode('ascii')
    except OSError:
        revision = 'Unknown'
    return revision
def write_version_py(filename='Yank/version.py'):
    """Generate the version module consumed by the package at runtime.

    Embeds the static version plus the current git revision so that
    development builds are traceable back to a commit.
    """
    cnt = """
# This file is automatically generated by setup.py
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    # Adding the git rev number needs to be done inside write_version_py(),
    # otherwise the import of numpy.version messes up the build under Python 3.
    FULLVERSION = VERSION
    if os.path.exists('.git'):
        GIT_REVISION = git_version()
    else:
        GIT_REVISION = 'Unknown'

    if not ISRELEASED:
        FULLVERSION += '.dev-' + GIT_REVISION[:7]

    # Idiom fix: a context manager replaces the manual open/try/finally so the
    # file handle is closed even if the string formatting raises.
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
################################################################################
# USEFUL SUBROUTINES
################################################################################
def find_package_data(data_root, package_root):
files = []
for root, dirnames, filenames in os.walk(data_root):
for fn in filenames:
files.append(relpath(join(root, fn), package_root))
return files
################################################################################
# SETUP
################################################################################
# Cython extension implementing the replica-mixing kernel.
mixing_ext = distutils.extension.Extension("yank.mixing._mix_replicas", ['./Yank/mixing/_mix_replicas.pyx'])

# Regenerate Yank/version.py before packaging so the build embeds git info.
write_version_py()
setup(
    name='yank',
    author='John Chodera',
    author_email='[email protected]',
    description=DOCLINES[0],
    long_description="\n".join(DOCLINES[2:]),
    version=__version__,
    license='LGPL',
    url='https://github.com/choderalab/yank',
    platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
    classifiers=CLASSIFIERS.splitlines(),
    package_dir={'yank': 'Yank'},
    packages=['yank', "yank.tests", "yank.commands", "yank.mixing"] + ['yank.%s' % package for package in find_packages('yank')],
    package_data={'yank': find_package_data('examples', 'yank')}, # NOTE: examples installs to yank.egg/examples/, NOT yank.egg/yank/examples/. You need to do utils.get_data_filename("../examples/*/setup/").
    zip_safe=False,
    install_requires=[
        'numpy',
        'scipy',
        'cython',
        'openmm',
        'pymbar',
        'openmmtools',
        'docopt>=0.6.1',
        'netcdf4',
        'alchemy',
        'schema',
        ],
    ext_modules=cythonize(mixing_ext),
    entry_points={'console_scripts': ['yank = yank.cli:main']})
| jchodera/yank | setup.py | Python | lgpl-3.0 | 4,913 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_.
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = Doc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents. http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import os
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, random, sum as np_sum, add as np_add, concatenate, \
repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, \
sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.models.word2vec import Word2Vec, Vocab, train_cbow_pair, train_sg_pair, train_batch_sg
from six.moves import xrange, zip
from six import string_types, integer_types, itervalues
logger = logging.getLogger(__name__)
try:
from gensim.models.doc2vec_inner import train_document_dbow, train_document_dm, train_document_dm_concat
from gensim.models.word2vec_inner import FAST_VERSION # blas-adaptation shared from word2vec
except:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
def train_document_dbow(model, doc_words, doctag_indexes, alpha, work=None,
                        train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
                        word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """
    Update distributed bag of words model ("PV-DBOW") by training on a single document.

    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.

    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.

    If `train_words` is True, simultaneously train word-to-word (not just doc-to-word)
    examples, exactly as per Word2Vec skip-gram training. (Without this option,
    word vectors are neither consulted nor updated during DBOW doc vector training.)

    Any of `learn_doctags`, `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.

    This is the non-optimized, Python version. If you have cython installed, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    docvecs = model.docvecs
    if doctag_vectors is None:
        doctag_vectors = docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = docvecs.doctag_syn0_lockf
    # optionally also run plain skip-gram word training over this document
    if train_words and learn_words:
        train_batch_sg(model, [doc_words], alpha, work)
    # every (tag, word) pair is a skip-gram-style example, with the doc-tag
    # vector playing the role of the "context" vector
    for tag_idx in doctag_indexes:
        for token in doc_words:
            train_sg_pair(
                model, token, tag_idx, alpha,
                learn_vectors=learn_doctags, learn_hidden=learn_hidden,
                context_vectors=doctag_vectors, context_locks=doctag_locks)
    return len(doc_words)
def train_document_dm(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
                      learn_doctags=True, learn_words=True, learn_hidden=True,
                      word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document.

    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
    method implements the DM model with a projection (input) layer that is
    either the sum or mean of the context vectors, depending on the model's
    `dm_mean` configuration field. See `train_document_dm_concat()` for the DM
    model with a concatenated input layer.

    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.

    Any of `learn_doctags`, `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.

    Returns the number of (down-sampled) in-vocabulary words trained on.

    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    if word_vectors is None:
        word_vectors = model.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # keep only known words, randomly dropping high-frequency ones per their
    # precomputed `sample_int` down-sampling threshold
    word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
                   model.vocab[w].sample_int > model.random.rand() * 2**32]
    for pos, word in enumerate(word_vocabs):
        reduced_window = model.random.randint(model.window)  # `b` in the original doc2vec code
        # effective context window is symmetric and randomly shrunk by `reduced_window`
        start = max(0, pos - model.window + reduced_window)
        window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
        word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
        # input layer: sum of context word vectors plus doc-tag vectors
        l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
        count = len(word2_indexes) + len(doctag_indexes)
        if model.cbow_mean and count > 1 :
            l1 /= count  # mean rather than sum, when dm_mean is configured
        # learn_vectors=False: input-vector updates are applied manually below,
        # so doctag/word locks can be honored separately
        neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
                                learn_vectors=False, learn_hidden=learn_hidden)
        if not model.cbow_mean and count > 1:
            neu1e /= count  # in sum mode, spread the error across contributors
        if learn_doctags:
            for i in doctag_indexes:
                doctag_vectors[i] += neu1e * doctag_locks[i]
        if learn_words:
            for i in word2_indexes:
                word_vectors[i] += neu1e * word_locks[i]
    return len(word_vocabs)
def train_document_dm_concat(model, doc_words, doctag_indexes, alpha, work=None, neu1=None,
                             learn_doctags=True, learn_words=True, learn_hidden=True,
                             word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None):
    """
    Update distributed memory model ("PV-DM") by training on a single document, using a
    concatenation of the context window word vectors (rather than a sum or average).

    Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.

    The document is provided as `doc_words`, a list of word tokens which are looked up
    in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
    into the doctag_vectors array.

    Any of `learn_doctags`, `learn_words`, and `learn_hidden` may be set False to
    prevent learning-updates to those respective model weights, as if using the
    (partially-)frozen model to infer other compatible vectors.

    Returns the number of prediction targets trained (0 if the document does
    not carry exactly `model.dm_tag_count` tags).

    This is the non-optimized, Python version. If you have a C compiler, gensim
    will use the optimized version from doc2vec_inner instead.
    """
    if word_vectors is None:
        word_vectors = model.syn0
    if word_locks is None:
        word_locks = model.syn0_lockf
    if doctag_vectors is None:
        doctag_vectors = model.docvecs.doctag_syn0
    if doctag_locks is None:
        doctag_locks = model.docvecs.doctag_syn0_lockf
    # keep only known words, randomly dropping high-frequency ones per their
    # precomputed `sample_int` down-sampling threshold
    word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
                   model.vocab[w].sample_int > model.random.rand() * 2**32]
    doctag_len = len(doctag_indexes)
    if doctag_len != model.dm_tag_count:
        # concat mode needs a fixed-width input layer, hence a fixed tag count
        return 0  # skip doc without expected number of doctag(s) (TODO: warn/pad?)
    null_word = model.vocab['\0']
    pre_pad_count = model.window
    post_pad_count = model.window
    # pad with the null word so every position has a full-width context
    padded_document_indexes = (
        (pre_pad_count * [null_word.index])  # pre-padding
        + [word.index for word in word_vocabs if word is not None]  # elide out-of-Vocabulary words
        + (post_pad_count * [null_word.index])  # post-padding
    )
    for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
        word_context_indexes = (
            padded_document_indexes[(pos - pre_pad_count): pos]  # preceding words
            + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)]  # following words
        )
        word_context_len = len(word_context_indexes)
        predict_word = model.vocab[model.index2word[padded_document_indexes[pos]]]
        # numpy advanced-indexing copies; concatenate, flatten to 1d
        l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
        # learn_vectors=False: input-vector updates are applied manually below
        neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
                                learn_hidden=learn_hidden, learn_vectors=False)
        # filter by locks and shape for addition to source vectors
        e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
        neu1e_r = (neu1e.reshape(-1, model.vector_size)
                   * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
        if learn_doctags:
            # first doctag_len rows of the error correspond to the tag vectors
            np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
        if learn_words:
            np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
    return len(padded_document_indexes) - pre_pad_count - post_pad_count
class TaggedDocument(namedtuple('TaggedDocument', 'words tags')):
    """
    One training document: `words` (a list of unicode string tokens) plus
    `tags` (a list of tokens identifying the document).

    Tags may be one or more unicode string tokens, but the typical (and most
    memory-efficient) practice is a single unique integer id as the only tag.

    This replaces Word2Vec's "sentence as a list of words" input unit.
    """

    def __str__(self):
        # e.g. "TaggedDocument(['hello'], [0])"
        cls_name = self.__class__.__name__
        return '%s(%s, %s)' % (cls_name, self.words, self.tags)
# for compatibility
class LabeledSentence(TaggedDocument):
    """Deprecated alias for `TaggedDocument`; warns on construction."""
    def __init__(self, *args, **kwargs):
        # namedtuple construction happens in __new__, so no super().__init__
        # call is needed; this override exists solely to emit the warning
        warnings.warn('LabeledSentence has been replaced by TaggedDocument', DeprecationWarning)
class DocvecsArray(utils.SaveLoad):
    """
    Default storage of doc vectors during/after training, in a numpy array.

    As the 'docvecs' property of a Doc2Vec model, allows access and
    comparison of document vectors.

    >>> docvec = d2v_model.docvecs[99]
    >>> docvec = d2v_model.docvecs['SENT_99']  # if string tag used in training
    >>> sims = d2v_model.docvecs.most_similar(99)
    >>> sims = d2v_model.docvecs.most_similar('SENT_99')
    >>> sims = d2v_model.docvecs.most_similar(docvec)

    If only plain int tags are presented during training, the dict (of
    string tag -> index) and list (of index -> string tag) stay empty,
    saving memory.

    Supplying a mapfile_path (as by initializing a Doc2Vec model with a
    'docvecs_mapfile' value) will use a pair of memory-mapped
    files as the array backing for doctag_syn0/doctag_syn0_lockf values.

    The Doc2Vec model automatically uses this class, but a future alternative
    implementation, based on another persistence mechanism like LMDB, LevelDB,
    or SQLite, should also be possible.
    """
    def __init__(self, mapfile_path=None):
        self.doctags = {}  # string -> Doctag (only filled if necessary)
        self.max_rawint = -1  # highest rawint-indexed doctag
        self.offset2doctag = []  # int offset-past-(max_rawint+1) -> String (only filled if necessary)
        self.count = 0
        self.mapfile_path = mapfile_path

    def note_doctag(self, key, document_no, document_length):
        """Note a document tag during initial corpus scan, for structure sizing."""
        if isinstance(key, int):
            # raw-int tags index the vector array directly; just track the max
            self.max_rawint = max(self.max_rawint, key)
        else:
            if key in self.doctags:
                self.doctags[key] = self.doctags[key].repeat(document_length)
            else:
                self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
                self.offset2doctag.append(key)
        # string-tagged vectors are stored after all raw-int-indexed slots
        self.count = self.max_rawint + 1 + len(self.offset2doctag)

    def indexed_doctags(self, doctag_tokens):
        """Return indexes and backing-arrays used in training examples."""
        return ([self._int_index(index) for index in doctag_tokens if index in self],
                self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)

    def trained_item(self, indexed_tuple):
        """Persist any changes made to the given indexes (matching tuple previously
        returned by indexed_doctags()); a no-op for this implementation"""
        pass

    def _int_index(self, index):
        """Return int index for either string or int index."""
        if isinstance(index, int):
            return index
        else:
            return self.max_rawint + 1 + self.doctags[index].offset

    def _key_index(self, i_index, missing=None):
        """Deprecated: use `index_to_doctag()`. (`missing` was never honored.)"""
        warnings.warn("use DocvecsArray.index_to_doctag", DeprecationWarning)
        return self.index_to_doctag(i_index)

    def index_to_doctag(self, i_index):
        """Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
        candidate_offset = i_index - self.max_rawint - 1
        if 0 <= candidate_offset < len(self.offset2doctag):
            return self.offset2doctag[candidate_offset]
        else:
            return i_index

    def __getitem__(self, index):
        """
        Accept a single key (int or string tag) or list of keys as input.

        If a single string or int, return designated tag's vector
        representation, as a 1D numpy array.

        If a list, return designated tags' vector representations as a
        2D numpy array: #tags x #vector_size.
        """
        if isinstance(index, string_types + (int,)):
            return self.doctag_syn0[self._int_index(index)]

        return vstack([self[i] for i in index])

    def __len__(self):
        return self.count

    def __contains__(self, index):
        if isinstance(index, int):
            return index < self.count
        else:
            return index in self.doctags

    def borrow_from(self, other_docvecs):
        """Share another DocvecsArray's tag-lookup structures (saves memory)."""
        self.count = other_docvecs.count
        self.doctags = other_docvecs.doctags
        self.offset2doctag = other_docvecs.offset2doctag

    def clear_sims(self):
        """Discard any precomputed L2-normalized vectors."""
        self.doctag_syn0norm = None

    def estimated_lookup_memory(self):
        """Estimated memory for tag lookup; 0 if using pure int tags."""
        return 60 * len(self.offset2doctag) + 140 * len(self.doctags)

    def reset_weights(self, model):
        """(Re)initialize doc vectors to deterministic pseudo-random values."""
        length = max(len(self.doctags), self.count)
        if self.mapfile_path:
            self.doctag_syn0 = np_memmap(self.mapfile_path+'.doctag_syn0', dtype=REAL,
                                         mode='w+', shape=(length, model.vector_size))
            self.doctag_syn0_lockf = np_memmap(self.mapfile_path+'.doctag_syn0_lockf', dtype=REAL,
                                               mode='w+', shape=(length,))
            self.doctag_syn0_lockf.fill(1.0)
        else:
            self.doctag_syn0 = empty((length, model.vector_size), dtype=REAL)
            self.doctag_syn0_lockf = ones((length,), dtype=REAL)  # zeros suppress learning

        for i in xrange(length):
            # construct deterministic seed from index AND model seed
            seed = "%d %s" % (model.seed, self.index_to_doctag(i))
            self.doctag_syn0[i] = model.seeded_vector(seed)

    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training or inference** after doing a replace.
        The model becomes effectively read-only = you can call `most_similar`, `similarity`
        etc., but not `train` or `infer_vector`.
        """
        if getattr(self, 'doctag_syn0norm', None) is None or replace:
            logger.info("precomputing L2-norms of doc weight vectors")
            if replace:
                # normalize in place, row by row, then alias the same array
                for i in xrange(self.doctag_syn0.shape[0]):
                    self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum(-1))
                self.doctag_syn0norm = self.doctag_syn0
            else:
                if self.mapfile_path:
                    self.doctag_syn0norm = np_memmap(
                        self.mapfile_path+'.doctag_syn0norm', dtype=REAL,
                        mode='w+', shape=self.doctag_syn0.shape)
                else:
                    self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
                np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum(-1))[..., newaxis], self.doctag_syn0norm)

    def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None):
        """
        Find the top-N most similar docvecs known from training. Positive docs contribute
        positively towards the similarity, negative docs negatively.

        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given docs. Docs may be specified as vectors, integer indexes
        of trained docvecs, or if the documents were originally presented with string tags,
        by the corresponding tags.

        The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
        range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
        there was chosen to be significant, such as more popular tag IDs in lower indexes.)
        """
        # avoid shared mutable default arguments; None means "no docs supplied"
        positive = [] if positive is None else positive
        negative = [] if negative is None else negative
        self.init_sims()
        # NOTE: a falsy clip_end (including 0) means "full range", as before
        clip_end = clip_end or len(self.doctag_syn0norm)

        if isinstance(positive, string_types + integer_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]

        # add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
        positive = [
            (doc, 1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
            else doc for doc in positive
        ]
        negative = [
            (doc, -1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
            else doc for doc in negative
        ]

        # compute the weighted average of all docs
        all_docs, mean = set(), []
        for doc, weight in positive + negative:
            if isinstance(doc, ndarray):
                mean.append(weight * doc)
            elif doc in self.doctags or doc < self.count:
                mean.append(weight * self.doctag_syn0norm[self._int_index(doc)])
                all_docs.add(self._int_index(doc))
            else:
                raise KeyError("doc '%s' not in trained set" % doc)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

        dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
        if not topn:
            return dists
        # over-fetch by len(all_docs) so input docs can be dropped from results
        best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
        # ignore (don't return) docs from the input
        result = [(self.index_to_doctag(sim), float(dists[sim])) for sim in best if sim not in all_docs]
        return result[:topn]

    def doesnt_match(self, docs):
        """
        Which doc from the given list doesn't go with the others?

        (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        self.init_sims()

        docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count]  # filter out unknowns
        logger.debug("using docs %s", docs)  # lazy %-args: only formatted if emitted
        if not docs:
            raise ValueError("cannot select a doc from an empty list")
        # pass a list, not a generator: numpy's vstack requires a sequence
        vectors = vstack([self.doctag_syn0norm[self._int_index(doc)] for doc in docs]).astype(REAL)
        mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
        dists = dot(vectors, mean)
        return sorted(zip(dists, docs))[0][1]

    def similarity(self, d1, d2):
        """
        Compute cosine similarity between two docvecs in the trained set, specified by int index or
        string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))

    def n_similarity(self, ds1, ds2):
        """
        Compute cosine similarity between two sets of docvecs from the trained set, specified by int
        index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        v1 = [self[doc] for doc in ds1]
        v2 = [self[doc] for doc in ds2]
        return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
    """A string document tag discovered during the initial vocabulary
    scan. (The document-vector equivalent of a Vocab object.)

    Will not be used if all presented document tags are ints.

    The offset is only the true index into the doctags_syn0/doctags_syn0_lockf
    if-and-only-if no raw-int tags were used. If any raw-int tags were used,
    string Doctag vectors begin at index (max_rawint + 1), so the true index is
    (rawint_index + 1 + offset). See also DocvecsArray.index_to_doctag().
    """
    __slots__ = ()

    def repeat(self, word_count):
        # fold one more document's stats into this (immutable) record
        new_words = self.word_count + word_count
        new_docs = self.doc_count + 1
        return self._replace(word_count=new_words, doc_count=new_docs)
class Doc2Vec(Word2Vec):
    """Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""
    def __init__(self, documents=None, size=300, alpha=0.025, window=8, min_count=5,
                 max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
                 dm=1, hs=1, negative=0, dbow_words=0, dm_mean=0, dm_concat=0, dm_tag_count=1,
                 docvecs=None, docvecs_mapfile=None, comment=None, trim_rule=None, **kwargs):
        """
        Initialize the model from an iterable of `documents`. Each document is a
        TaggedDocument object that will be used for training.

        The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
        consider an iterable that streams the documents directly from disk/network.

        If you don't supply `documents`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.

        `dm` defines the training algorithm. By default (`dm=1`), 'distributed memory' (PV-DM) is used.
        Otherwise, `distributed bag of words` (PV-DBOW) is employed.

        `size` is the dimensionality of the feature vectors.

        `window` is the maximum distance between the predicted word and context words used for prediction
        within a document.

        `alpha` is the initial learning rate (will linearly drop to zero as training progresses).

        `seed` = for the random number generator.
        Note that for a fully deterministically-reproducible run, you must also limit the model to
        a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
        3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
        environment variable to control hash randomization.)

        `min_count` = ignore all words with total frequency lower than this.

        `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types
        need about 1GB of RAM. Set to `None` for no limit (default).

        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
        default is 0 (off), useful value is 1e-5.

        `workers` = use this many worker threads to train the model (=faster training with multicore machines).

        `iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
        but values of 10 or 20 are common in published 'Paragraph Vector' experiments.

        `hs` = if 1 (default), hierarchical sampling will be used for model training (else set to 0).

        `negative` = if > 0, negative sampling will be used, the int for negative
        specifies how many "noise words" should be drawn (usually between 5-20).

        `dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
        Only applies when dm is used in non-concatenative mode.

        `dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
        default is 0 (off). Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmetically combined) word vector, but the
        size of the tag(s) and all words in the context strung together.

        `dm_tag_count` = expected constant number of document tags per document, when using
        dm_concat mode; default is 1.

        `dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
        doc-vector training; default is 0 (faster training of doc-vectors only).

        `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
        in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
        Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
        returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
        of the model.
        """
        super(Doc2Vec, self).__init__(
            size=size, alpha=alpha, window=window, min_count=min_count, max_vocab_size=max_vocab_size,
            sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
            sg=(1+dm) % 2, hs=hs, negative=negative, cbow_mean=dm_mean,
            null_word=dm_concat, **kwargs)  # sg=(1+dm)%2: dm=1 -> sg=0 (CBOW-like), dm=0 -> sg=1
        self.dbow_words = dbow_words
        self.dm_concat = dm_concat
        self.dm_tag_count = dm_tag_count
        if self.dm and self.dm_concat:
            # concat mode: input layer holds tag vectors plus the full window of word vectors
            self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
        else:
            self.layer1_size = size
        self.docvecs = docvecs or DocvecsArray(docvecs_mapfile)
        self.comment = comment
        if documents is not None:
            self.build_vocab(documents, trim_rule=trim_rule)
            self.train(documents)

    @property
    def dm(self):
        # PV-DM is active when sg is off
        return not self.sg  # opposite of SG

    @property
    def dbow(self):
        # PV-DBOW is active when sg is on
        return self.sg  # same as SG

    def clear_sims(self):
        # drop word-vector norms (superclass) and doc-vector norms
        super(Doc2Vec, self).clear_sims()
        self.docvecs.clear_sims()

    def reset_weights(self):
        if self.dm and self.dm_concat:
            # expand l1 size to match concatenated tags+words length
            self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
            logger.info("using concatenative %d-dimensional layer1" % (self.layer1_size))
        super(Doc2Vec, self).reset_weights()
        self.docvecs.reset_weights(self)

    def reset_from(self, other_model):
        """Reuse shareable structures from other_model."""
        self.docvecs.borrow_from(other_model.docvecs)
        super(Doc2Vec, self).reset_from(other_model)

    def scan_vocab(self, documents, progress_per=10000, trim_rule=None):
        """Single pass over `documents`, counting words and noting doc tags.

        Sets `self.raw_vocab` (word -> count) and `self.corpus_count`.
        """
        logger.info("collecting all words and their counts")
        document_no = -1  # stays -1 for an empty corpus, giving corpus_count 0
        total_words = 0
        min_reduce = 1
        interval_start = default_timer() - 0.00001  # guard against next sample being identical
        interval_count = 0
        vocab = defaultdict(int)
        for document_no, document in enumerate(documents):
            if document_no % progress_per == 0:
                interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
                logger.info("PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags",
                            document_no, total_words, interval_rate, len(vocab), len(self.docvecs))
                interval_start = default_timer()
                interval_count = total_words
            document_length = len(document.words)

            for tag in document.tags:
                self.docvecs.note_doctag(tag, document_no, document_length)

            for word in document.words:
                vocab[word] += 1
            total_words += len(document.words)

            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                # prune infrequent words to bound memory; threshold grows each prune
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1

        logger.info("collected %i word types and %i unique tags from a corpus of %i examples and %i words",
                    len(vocab), len(self.docvecs), document_no + 1, total_words)
        self.corpus_count = document_no + 1
        self.raw_vocab = vocab

    def _do_train_job(self, job, alpha, inits):
        """Train on a batch of documents; returns (examples trained, raw word count)."""
        work, neu1 = inits
        tally = 0
        for doc in job:
            indexed_doctags = self.docvecs.indexed_doctags(doc.tags)
            doctag_indexes, doctag_vectors, doctag_locks, ignored = indexed_doctags
            # dispatch to the training routine matching the configured mode
            if self.sg:
                tally += train_document_dbow(self, doc.words, doctag_indexes, alpha, work,
                                             train_words=self.dbow_words,
                                             doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            elif self.dm_concat:
                tally += train_document_dm_concat(self, doc.words, doctag_indexes, alpha, work, neu1,
                                                  doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            else:
                tally += train_document_dm(self, doc.words, doctag_indexes, alpha, work, neu1,
                                           doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            self.docvecs.trained_item(indexed_doctags)
        return tally, self._raw_word_count(job)

    def _raw_word_count(self, job):
        """Return the number of words in a given job."""
        return sum(len(sentence.words) for sentence in job)

    def infer_vector(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
        """
        Infer a vector for given post-bulk training document.

        Document should be a list of (word) tokens.

        Trains only a fresh single doc vector (model weights frozen) for
        `steps` passes, decaying `alpha` towards `min_alpha`, and returns it.
        """
        doctag_vectors = empty((1, self.vector_size), dtype=REAL)
        # deterministic start vector derived from the document's own words
        doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
        doctag_locks = ones(1, dtype=REAL)
        doctag_indexes = [0]
        work = zeros(self.layer1_size, dtype=REAL)
        if not self.sg:
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)

        for i in range(steps):
            # learn_words/learn_hidden are False: only the new doc vector moves
            if self.sg:
                train_document_dbow(self, doc_words, doctag_indexes, alpha, work,
                                    learn_words=False, learn_hidden=False,
                                    doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            elif self.dm_concat:
                train_document_dm_concat(self, doc_words, doctag_indexes, alpha, work, neu1,
                                         learn_words=False, learn_hidden=False,
                                         doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            else:
                train_document_dm(self, doc_words, doctag_indexes, alpha, work, neu1,
                                  learn_words=False, learn_hidden=False,
                                  doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
            # shrink alpha towards min_alpha over the remaining steps
            alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha

        return doctag_vectors[0]

    def estimate_memory(self, vocab_size=None, report=None):
        """Estimate required memory for a model using current settings."""
        report = report or {}
        report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
        report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
        return super(Doc2Vec, self).estimate_memory(vocab_size, report=report)

    def __str__(self):
        """Abbreviated name reflecting major configuration parameters."""
        segments = []
        if self.comment:
            segments.append('"%s"' % self.comment)
        if self.sg:
            if self.dbow_words:
                segments.append('dbow+w')  # also training words
            else:
                segments.append('dbow')  # PV-DBOW (skip-gram-style)
        else:  # PV-DM...
            if self.dm_concat:
                segments.append('dm/c')  # ...with concatenative context layer
            else:
                if self.cbow_mean:
                    segments.append('dm/m')
                else:
                    segments.append('dm/s')
        segments.append('d%d' % self.vector_size)  # dimensions
        if self.negative:
            segments.append('n%d' % self.negative)  # negative samples
        if self.hs:
            segments.append('hs')
        if not self.sg or (self.sg and self.dbow_words):
            segments.append('w%d' % self.window)  # window size, when relevant
        if self.min_count > 1:
            segments.append('mc%d' % self.min_count)
        if self.sample > 0:
            segments.append('s%g' % self.sample)
        if self.workers > 1:
            segments.append('t%d' % self.workers)
        return '%s(%s)' % (self.__class__.__name__, ','.join(segments))
class TaggedBrownCorpus(object):
    """Stream the Brown corpus (part of NLTK data) from a directory, yielding
    each corpus line as a TaggedDocument object."""

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            for line_no, raw_line in enumerate(utils.smart_open(path)):
                text = utils.to_unicode(raw_line)
                # each file line is a single document; tokens look like WORD/POS_TAG,
                # so keep only the well-formed two-part tokens
                pairs = [tok.split('/') for tok in text.split() if len(tok.split('/')) == 2]
                # ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
                doc_words = ["%s/%s" % (w.lower(), pos[:2]) for w, pos in pairs if pos[:2].isalpha()]
                if doc_words:  # don't bother sending out empty documents
                    yield TaggedDocument(doc_words, ['%s_SENT_%s' % (path, line_no)])
class TaggedLineDocument(object):
    """Simple format: one document = one line = one TaggedDocument object.

    Words are expected to be already preprocessed and separated by whitespace;
    tags are constructed automatically from the document line number."""

    def __init__(self, source):
        """
        `source` can be either a string (filename) or a file object.

        Example::

            documents = TaggedLineDocument('myfile.txt')

        Or for compressed files::

            documents = TaggedLineDocument('compressed_text.txt.bz2')
            documents = TaggedLineDocument('compressed_text.txt.gz')
        """
        self.source = source

    def __iter__(self):
        """Iterate through the lines in the source."""
        try:
            # A file-like object has seek(); rewind so repeated iteration
            # always starts from the beginning. Anything without seek()
            # raises AttributeError and is handled as a filename below.
            self.source.seek(0)
            for doc_no, raw in enumerate(self.source):
                yield TaggedDocument(utils.to_unicode(raw).split(), [doc_no])
        except AttributeError:
            # Not file-like: treat `source` as a path and open it ourselves.
            with utils.smart_open(self.source) as handle:
                for doc_no, raw in enumerate(handle):
                    yield TaggedDocument(utils.to_unicode(raw).split(), [doc_no])
| ziky90/gensim | gensim/models/doc2vec.py | Python | lgpl-2.1 | 38,510 |
#!/usr/bin/env python
'''
# Inbound Proxy Module developed by Bharadwaj Machiraju (blog.tunnelshade.in)
# as a part of Google Summer of Code 2013
'''
from tornado import ioloop
import ssl
from gen_cert import gen_signed_cert
def wrap_socket(socket, domain, ca_crt, ca_key, ca_pass, certs_folder, success=None, failure=None, io=None, **options):
    """Wrap an active socket in an SSL socket.

    A certificate for `domain` is generated (signed by the supplied CA) and
    the TLS handshake is driven asynchronously via the given tornado IOLoop
    (`io`, defaulting to the singleton instance). `success(wrapped)` is called
    once the handshake completes; `failure(wrapped)` is called if it fails
    (by default the socket is simply closed). Returns the wrapped SSLSocket
    immediately, before the handshake has finished.
    """
    # # Default Options
    options.setdefault('do_handshake_on_connect', False)  # handshake is driven manually below
    options.setdefault('ssl_version', ssl.PROTOCOL_SSLv23)
    options.setdefault('server_side', True)

    # The idea is to handle domains with greater than 3 dots using wildcard certs
    if domain.count(".") >= 3:
        key, cert = gen_signed_cert("*." + ".".join(domain.split(".")[-3:]), ca_crt, ca_key, ca_pass, certs_folder)
    else:
        key, cert = gen_signed_cert(domain, ca_crt, ca_key, ca_pass, certs_folder)
    options.setdefault('certfile', cert)
    options.setdefault('keyfile', key)

    # # Handlers

    def done():
        """Handshake finished successfully."""
        io.remove_handler(wrapped.fileno())
        success and success(wrapped)

    def error():
        """The handshake failed."""
        if failure:
            return failure(wrapped)
        # # By default, just close the socket.
        io.remove_handler(wrapped.fileno())
        wrapped.close()

    def handshake(fd, events):
        """Handler for SSL handshake negotiation.
        See Python docs for ssl.do_handshake()."""
        if events & io.ERROR:
            error()
            return
        try:
            new_state = io.ERROR
            wrapped.do_handshake()
            return done()
        except ssl.SSLError as exc:
            # non-blocking handshake: re-register for whichever I/O
            # direction the TLS layer needs next
            if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
                new_state |= io.READ
            elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                new_state |= io.WRITE
            else:
                raise

        if new_state != state[0]:
            state[0] = new_state
            io.update_handler(fd, new_state)

    # # set up handshake state; use a list as a mutable cell.
    io = io or ioloop.IOLoop.instance()
    state = [io.ERROR]

    # # Wrap the socket; swap out handlers.
    io.remove_handler(socket.fileno())
    wrapped = ssl.SSLSocket(socket, **options)
    wrapped.setblocking(0)
    io.add_handler(wrapped.fileno(), handshake, state[0])

    # # Begin the handshake.
    handshake(wrapped.fileno(), 0)
    return wrapped
| sharad1126/owtf | framework/http/proxy/socket_wrapper.py | Python | bsd-3-clause | 2,606 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from bindings import revisionstore
from edenscm.mercurial.node import hex
from . import shallowutil
class mutablebasestore(object):
    """Context-manager base for mutable pack stores.

    Subclasses populate ``self._store``; attribute access is delegated to
    that underlying store.  On a clean ``with`` exit the store is flushed
    and released; when an exception is propagating it is left untouched.
    """

    def __init__(self):
        # Concrete subclasses replace this with a real store object.
        self._store = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            # Let the exception propagate without flushing partial data.
            return
        self._store.flush()
        self._store = None

    def __getattr__(self, name):
        # Fall through to the wrapped store for anything not defined here.
        return getattr(self._store, name)
class mutabledatastore(mutablebasestore):
    """Mutable data (delta) pack store rooted at *path* for *repo*."""

    def __init__(self, repo, path):
        super(mutabledatastore, self).__init__()
        self._store = self.makestore(repo, path)

    @staticmethod
    def makestore(repo, path):
        """Create the backing delta store, ensuring the pack dir exists."""
        shallowutil.mkstickygroupdir(repo.ui, path)
        rcfg = repo.ui._uiconfig._rcfg._rcfg
        return revisionstore.mutabledeltastore(packfilepath=path, config=rcfg)
class mutablehistorystore(mutablebasestore):
    """Mutable history pack store rooted at *path* for *repo*."""

    def __init__(self, repo, path):
        super(mutablehistorystore, self).__init__()
        self._store = self.makestore(repo, path)

    @staticmethod
    def makestore(repo, path):
        """Create the backing history store, ensuring the pack dir exists."""
        shallowutil.mkstickygroupdir(repo.ui, path)
        return revisionstore.mutablehistorystore(packfilepath=path)
class pendingmutablepack(object):
    """Lazily-created pair of mutable data/history packs.

    Packs are materialized on first non-read access, written under the
    directory supplied by ``pathcb``, and live until ``commit`` (flush)
    or ``abort`` (discard).
    """

    def __init__(self, repo, pathcb):
        self._mutabledpack = None
        self._mutablehpack = None
        self._repo = repo
        self._pathcb = pathcb

    def getmutabledpack(self, read=False):
        """Return the data pack; create it unless this is a read-only probe."""
        if not read and self._mutabledpack is None:
            self._mutabledpack = mutabledatastore.makestore(
                self._repo, self._pathcb()
            )
        return self._mutabledpack

    def getmutablehpack(self, read=False):
        """Return the history pack; create it unless this is a read-only probe."""
        if not read and self._mutablehpack is None:
            self._mutablehpack = mutablehistorystore.makestore(
                self._repo, self._pathcb()
            )
        return self._mutablehpack

    def getmutablepack(self):
        """Return ``(datapack, historypack)``, creating both as needed."""
        return self.getmutabledpack(), self.getmutablehpack()

    def commit(self):
        """Flush any open packs and return ``(dpackpath, hpackpath)``.

        Each pack reference is dropped even if its flush raises.
        """
        dpackpath = None
        hpackpath = None
        if self._mutabledpack is not None:
            try:
                dpackpath = self._mutabledpack.flush()
            finally:
                self._mutabledpack = None
        if self._mutablehpack is not None:
            try:
                hpackpath = self._mutablehpack.flush()
            finally:
                self._mutablehpack = None
        return dpackpath, hpackpath

    def abort(self):
        """Discard both packs without flushing anything to disk."""
        self._mutabledpack = None
        self._mutablehpack = None
class mutabledatahistorystore(object):
    """A proxy class that gets added to the union store and knows how to answer
    requests by inspecting the current mutable data and history packs. We can't
    insert the mutable packs themselves into the union store because they can be
    created and destroyed over time."""

    def __init__(self, getpendingpacks):
        self.getpendingpacks = getpendingpacks

    def _dpack(self):
        # Read-only probe: never forces creation of the data pack.
        return self.getpendingpacks().getmutabledpack(True)

    def _hpack(self):
        # Read-only probe: never forces creation of the history pack.
        return self.getpendingpacks().getmutablehpack(True)

    def getmissing(self, keys):
        dpack = self._dpack()
        if dpack is None:
            # No pack yet: everything is missing.
            return keys
        return dpack.getmissing(keys)

    def get(self, name, node):
        dpack = self._dpack()
        if dpack is None:
            raise KeyError(name, hex(node))
        return dpack.get(name, node)

    def getdelta(self, name, node):
        dpack = self._dpack()
        if dpack is None:
            raise KeyError(name, hex(node))
        return dpack.getdelta(name, node)

    def getdeltachain(self, name, node):
        dpack = self._dpack()
        if dpack is None:
            raise KeyError(name, hex(node))
        return dpack.getdeltachain(name, node)

    def getmeta(self, name, node):
        dpack = self._dpack()
        if dpack is None:
            raise KeyError(name, hex(node))
        return dpack.getmeta(name, node)

    def getnodeinfo(self, name, node):
        hpack = self._hpack()
        if hpack is None:
            raise KeyError(name, hex(node))
        return hpack.getnodeinfo(name, node)

    def getmetrics(self):
        return {}
| facebookexperimental/eden | eden/scm/edenscm/hgext/remotefilelog/mutablestores.py | Python | gpl-2.0 | 4,466 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.config import ConfigErrors
from buildbot.process.properties import Property
from buildbot.process.results import SUCCESS
from buildbot.steps.cmake import CMake
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util.misc import TestReactorMixin
from buildbot.test.util.steps import BuildStepMixin
class TestCMake(BuildStepMixin, TestReactorMixin, unittest.TestCase):
    """Unit tests for the CMake build step.

    Each test configures a CMake step, declares the exact remote shell
    command the step is expected to issue (via the ExpectShell DSL, where
    ``+ 0`` attaches a zero exit status), and runs the step against the
    fake remote.
    """
    def setUp(self):
        self.setUpTestReactor()
        self.setUpBuildStep()
    def tearDown(self):
        self.tearDownBuildStep()
    def expect_and_run_command(self, *params):
        """Expect one successful default-cmake run with *params* appended."""
        command = [CMake.DEFAULT_CMAKE] + list(params)
        self.expectCommands(
            ExpectShell(command=command, workdir='wkdir') + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
    def test_definitions_type(self):
        """A plain string for `definitions` is rejected at config time."""
        with self.assertRaises(ConfigErrors):
            CMake(definitions='hello')
    def test_options_type(self):
        """A plain string for `options` is rejected at config time."""
        with self.assertRaises(ConfigErrors):
            CMake(options='hello')
    def test_plain(self):
        """With no arguments the step runs the bare default cmake binary."""
        self.setupStep(CMake())
        self.expectCommands(
            ExpectShell(command=[CMake.DEFAULT_CMAKE], workdir='wkdir') + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
    def test_cmake(self):
        """The `cmake` argument overrides the binary that is invoked."""
        cmake_bin = 'something/else/cmake'
        self.setupStep(CMake(cmake=cmake_bin))
        self.expectCommands(
            ExpectShell(command=[cmake_bin], workdir='wkdir') + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
    def test_cmake_interpolation(self):
        """A Property-valued `cmake` is rendered before the command runs."""
        prop = 'CMAKE'
        value = 'Real_CMAKE'
        self.setupStep(CMake(cmake=Property(prop)))
        self.properties.setProperty(prop, value, source='test')
        self.expectCommands(
            ExpectShell(command=[value], workdir='wkdir') + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
    def test_definitions(self):
        """Each definitions entry becomes a -D<key>=<value> argument."""
        definition = {
            'a': 'b'
        }
        self.setupStep(CMake(definitions=definition))
        self.expect_and_run_command('-D%s=%s' % list(definition.items())[0])
    def test_environment(self):
        """The `env` argument is forwarded to the remote shell command."""
        command = [CMake.DEFAULT_CMAKE]
        environment = {'a': 'b'}
        self.setupStep(CMake(env=environment))
        self.expectCommands(
            ExpectShell(
                command=command, workdir='wkdir', env={'a': 'b'}) + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
    def test_definitions_interpolation(self):
        """Property values inside the definitions dict are rendered."""
        b_value = 'real_b'
        definitions = {
            'a': Property('b')
        }
        self.setupStep(CMake(definitions=definitions))
        self.properties.setProperty('b', b_value, source='test')
        self.expect_and_run_command('-D%s=%s' % ('a', b_value))
    def test_definitions_renderable(self):
        """The whole definitions dict may itself be a renderable."""
        b_value = 'real_b'
        definitions = Property('b')
        self.setupStep(CMake(definitions=definitions))
        self.properties.setProperty('b', {'a': b_value}, source='test')
        self.expect_and_run_command('-D%s=%s' % ('a', b_value))
    def test_generator(self):
        """The `generator` argument maps to `-G <generator>`."""
        generator = 'Ninja'
        self.setupStep(CMake(generator=generator))
        self.expect_and_run_command('-G', generator)
    def test_generator_interpolation(self):
        """A Property-valued generator is rendered into `-G <value>`."""
        value = 'Our_GENERATOR'
        self.setupStep(CMake(generator=Property('GENERATOR')))
        self.properties.setProperty('GENERATOR', value, source='test')
        self.expect_and_run_command('-G', value)
    def test_options(self):
        """Options are appended verbatim, in order."""
        options = ('A', 'B')
        self.setupStep(CMake(options=options))
        self.expect_and_run_command(*options)
    def test_options_interpolation(self):
        """Property values inside the options tuple are rendered."""
        prop = 'option'
        value = 'value'
        self.setupStep(CMake(options=(Property(prop),)))
        self.properties.setProperty(prop, value, source='test')
        self.expect_and_run_command(value)
    def test_path(self):
        """The `path` argument is passed as the source-tree argument."""
        path = 'some/path'
        self.setupStep(CMake(path=path))
        self.expect_and_run_command(path)
    def test_path_interpolation(self):
        """A Property-valued path is rendered before being passed."""
        prop = 'path'
        value = 'some/path'
        self.setupStep(CMake(path=Property(prop)))
        self.properties.setProperty(prop, value, source='test')
        self.expect_and_run_command(value)
| anish/buildbot | master/buildbot/test/unit/test_steps_cmake.py | Python | gpl-2.0 | 5,113 |
# -*- coding: utf-8 -*-
#
# Bauble documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 19 13:24:19 2008.
# Updated by sphinx-quickstart on Sun Jul 26 21:04:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
#import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the project root importable so Sphinx autodoc can find the package.
p = os.path.abspath('..')
sys.path.insert(0, p)
if 'VIRTUAL_ENV' in os.environ:
    # Inside a virtualenv, also expose its site-packages (layout hard-coded
    # to python2.7 -- NOTE(review): verify against the interpreter in use).
    q = os.sep.join([os.environ['VIRTUAL_ENV'],
                     'lib', 'python2.7', 'site-packages'])
    sys.path.insert(0, q)
    p = p + ":" + q
# Export the combined path (":"-separated, POSIX-style) for subprocesses.
os.environ['PYTHONPATH'] = p
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bauble'
copyright = u'2004-2012, Brett Adams; 2012-2015, Mario Frasca'
author = u'Brett Adams; Mario Frasca'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.56' # :bump
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/icons/bauble-32.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Baubledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Bauble.tex', u'Bauble Documentation',
u'Brett Adams', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bauble', u'Bauble Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Bauble', u'Bauble Documentation',
author, 'Bauble', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| mfrasca/bauble.classic | doc/conf.py | Python | gpl-2.0 | 11,723 |
from server import models
from . import TestCase
class TestBiobrick(TestCase):
    """Integration tests for the /biobrick REST endpoints."""

    # Every brick category exposed by the API.
    _TYPES = ('input', 'receptor', 'promoter', 'output', 'RBS',
              'terminator', 'logic')

    def _expect_search(self, url, model, ids):
        """Assert that GET *url* returns exactly rows *ids* of *model*."""
        payload = self.client.get(url).json
        self.assertItemsEqual(payload['result'],
                              [model.query.get(i).to_dict() for i in ids])

    def test_get_biobrick_list(self):
        """Listing each category is non-empty; an unknown category is 400."""
        for brick_type in self._TYPES:
            payload = self.client.get('/biobrick/' + brick_type).json
            self.assert_(len(payload['result']) > 0)
        self.assert400(self.client.get('/biobrick/XXX'))

    def test_get_one_biobrick(self):
        """Fetching id=1 of each category matches the database row."""
        for brick_type in self._TYPES:
            got = self.client.get('/biobrick/%s?id=1' % brick_type).json['result']
            model_name = brick_type if brick_type.isupper() else brick_type.capitalize()
            expected = getattr(models, model_name).query.get(1).to_dict()
            self.assertEqual(got, expected)

    def test_suggest_promoters(self):
        payload = self.client.get('/biobrick/suggest/promoters?input_id=1').json
        self.assertEqual(payload['result'],
                         [models.Promoter.query.get(17).to_dict()])

    def test_suggest_receptors(self):
        payload = self.client.get(
            '/biobrick/suggest/receptors?input_id=1&promoter_id=17').json
        self.assertEqual(payload['result'],
                         [models.Receptor.query.get(1).to_dict()])

    def test_search_by_description(self):
        self._expect_search('/biobrick/search/promoter/LasR',
                            models.Promoter, [1, 2])
        self._expect_search('/biobrick/search/receptor/AraC',
                            models.Receptor, [11])
        self._expect_search('/biobrick/search/output/CFP',
                            models.Output, [28, 33])
        self.assert400(self.client.get('/biobrick/search/XXX/yyy'))

    def test_search_by_id(self):
        self._expect_search('/biobrick/search/input/ions',
                            models.Input, [1, 4, 9])
        self._expect_search('/biobrick/search/promoter/BBa_K0',
                            models.Promoter, [8, 16])
        self._expect_search('/biobrick/search/receptor/BBa_K2',
                            models.Receptor, [19, 16])
        self._expect_search('/biobrick/search/output/BBa_K18',
                            models.Output, [30, 31, 32])
| igemsoftware/SYSU-Software_2014 | tests/test_biobrick.py | Python | lgpl-3.0 | 3,363 |
from flask import request, Response
from base64 import b64decode
from routersploit.modules.exploits.cameras.brickcom.users_cgi_creds_disclosure import Exploit
# Canned body for /cgi-bin/users.cgi: the credential disclosure the exploit
# is expected to retrieve and parse (four accounts with plaintext passwords).
response = (
    """
size=4
User1.index=1
User1.username=admin
User1.password=test1234
User1.privilege=1
User2.index=2
User2.username=viewer
User2.password=viewer
User2.privilege=0
User3.index=3
User3.username=rviewer
User3.password=rviewer
User3.privilege=2
User4.index=0
User4.username=visual
User4.password=visual1234
User4.privilege=0
"""
)
def apply_response(*args, **kwargs):
    """Mock camera endpoint with HTTP basic auth.

    Serves the canned credentials dump only to the ``rviewer:rviewer``
    account; any other request gets a 401 with a Basic-auth challenge.
    """
    auth_header = request.headers.get("Authorization")
    if auth_header is not None:
        decoded = str(b64decode(auth_header.replace("Basic ", "")), "utf-8")
        if decoded == "rviewer:rviewer":
            return response, 200
    denied = Response("Unauthorized")
    denied.headers["WWW-Authenticate"] = "Basic ABC"
    return denied, 401
def test_check_success(target):
    """Exploit check()/run() succeed against the mocked vulnerable target."""
    users_cgi_mock = target.get_route_mock("/cgi-bin/users.cgi",
                                           methods=["GET", "POST"])
    users_cgi_mock.side_effect = apply_response
    exploit = Exploit()
    # Defaults before any configuration.
    assert exploit.target == ""
    assert exploit.port == 80
    # Point the module at the mock server and exercise it.
    exploit.target = target.host
    exploit.port = target.port
    assert exploit.check() is True
    assert exploit.run() is None
| dasseclab/dasseclab | clones/routersploit/tests/exploits/cameras/brickcom/test_users_cgi_creds_disclosure.py | Python | gpl-2.0 | 1,382 |
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
import types
import signal
from PyQt4 import QtGui
from PyQt4 import QtCore
from .qt4_gui import _GUI, _PropertiesDialog, _BasicNodeActions
from . import layouts
#from ete3 import Tree, PhyloTree, ClusterTree
from .main import save
from .qt4_render import _TreeScene, render, get_tree_img_map, init_tree_style
__all__ = ["show_tree", "render_tree"]
_QApp = None
GUI_TIMEOUT = None
def exit_gui(a,b):
    # Signal-handler signature (signum, frame); closes the Qt event loop
    # when the GUI_TIMEOUT alarm fires (registered in show_tree).
    _QApp.exit(0)
def init_scene(t, layout, ts):
    """Prepare the tree style and an empty scene for tree *t*.

    Ensures the singleton QApplication exists, installs *layout* as the
    style's layout function when given, and returns ``(scene, tree_style)``.
    """
    global _QApp
    tree_style = init_tree_style(t, ts)
    if layout:
        tree_style.layout_fn = layout
    if not _QApp:
        # Lazily create the one shared Qt application.
        _QApp = QtGui.QApplication(["ETE"])
    scene = _TreeScene()
    return scene, tree_style
def show_tree(t, layout=None, tree_style=None, win_name=None):
    """Open an interactive viewer window for tree *t*.

    Blocks in the Qt event loop until the window closes (or, when
    GUI_TIMEOUT is set, until the alarm fires).
    """
    scene, img = init_scene(t, layout, tree_style)
    root_item, n2i, n2f = render(t, img)
    scene.init_values(t, img, n2i, n2f)
    root_item.setParentItem(scene.master_item)
    scene.addItem(scene.master_item)
    gui = _GUI(scene)
    if win_name:
        gui.setObjectName(win_name)
    gui.show()
    gui.on_actionFit2tree_triggered()
    # Restore default Ctrl-C so the terminal can interrupt the Qt loop.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    if GUI_TIMEOUT is not None:
        # Auto-close the window after GUI_TIMEOUT seconds.
        signal.signal(signal.SIGALRM, exit_gui)
        signal.alarm(GUI_TIMEOUT)
    _QApp.exec_()
def render_tree(t, imgName, w=None, h=None, layout=None,
                tree_style=None, header=None, units="px",
                dpi=90):
    """Render tree *t* to the file *imgName* and return its HTML image map.

    A name starting with "%%inline" renders to an in-memory image instead
    of a file; in that case the raw save() result is returned directly.
    """
    global _QApp
    # Tag every node with a stable preorder id; the image map refers to it.
    for node_id, node in enumerate(t.traverse("preorder")):
        node.add_feature("_nid", node_id)
    scene, img = init_scene(t, layout, tree_style)
    root_item, n2i, n2f = render(t, img)
    scene.init_values(t, img, n2i, n2f)
    root_item.setParentItem(scene.master_item)
    scene.master_item.setPos(0, 0)
    scene.addItem(scene.master_item)
    if imgName.startswith("%%inline"):
        imgmap = save(scene, imgName, w=w, h=h, units=units, dpi=dpi)
    else:
        x_scale, y_scale = save(scene, imgName, w=w, h=h, units=units, dpi=dpi)
        imgmap = get_tree_img_map(n2i, x_scale, y_scale)
    return imgmap
def get_img(t, w=None, h=None, layout=None, tree_style=None,
            header=None, units="px", dpi=90):
    """Render tree *t* in memory and return ``(imgdata, {})``.

    Tears down the shared QApplication afterwards so the next call creates
    a fresh one.
    """
    global _QApp
    scene, img = init_scene(t, layout, tree_style)
    root_item, n2i, n2f = render(t, img)
    scene.init_values(t, img, n2i, n2f)
    root_item.setParentItem(scene.master_item)
    scene.master_item.setPos(0, 0)
    scene.addItem(scene.master_item)
    # "%%return" asks save() to hand back the image bytes instead of writing.
    x_scale, y_scale, imgdata = save(scene, "%%return", w=w, h=h, units=units, dpi=dpi)
    _QApp.quit()
    _QApp = None
    return imgdata, {}
| fmaguire/ete | ete3/treeview/drawer.py | Python | gpl-3.0 | 4,189 |
#!/usr/bin/env python
# Copyright (c) 2009 Technische Universitaet Muenchen, Informatik Lehrstuhl IX.
# Author: Federico Ruiz-Ugalde <memeruiz at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import sqrt
from math import sin
from math import pow
from numpy import dot
from numpy import array
from numpy import matrix
from numpy import inner
from numpy import concatenate
import PyKDL as kdl
import time
from vectorFieldClass import VectorField
from vectorFields import decayRepeller1, vectorF_Angle, simpleAttractor1
printcounter = 0
epsilon = 0.0000001
class vectorFieldLibrary:
    """Registry mapping integer ids to vector-field classes."""

    def __init__(self):
        '''Registers all vectorfield types'''
        print("Registering vector fields")
        self.vfs = []
        self.vfs.append(nullAttractor)
        self.vfs.append(pointAttractor)
        self.vfs.append(decayAttractor)
        self.vfs.append(planeAttractor)
        self.vfs.append(hemisphereAttractor)
        self.vfs.append(funnelAttractor)
        # add here more vector field functions

    def __getitem__(self, number):
        """Return the vector field class registered at index *number*.

        Negative indices follow normal Python list semantics.
        """
        return self.vfs[number]

    def __contains__(self, item):
        """Return True when *item* is a valid index for __getitem__.

        Bug fix: the old check (``item < len``) reported ANY negative
        value as contained, even ones for which __getitem__ raises
        IndexError; now the exact list-index range is accepted.
        """
        return -len(self.vfs) <= item < len(self.vfs)
# all vectorfield classes must have a constructor a setParams a getScalar and
# a getVector function, with parameters, nothing and position as arguments
# respectively
class nullAttractor:
    """Vector field that attracts nowhere: zero twist, unit scaling."""

    def __init__(self):
        pass

    def setParams(self, param):
        """Nothing to configure; accepted for interface symmetry."""
        pass

    def getScalar(self, pos):
        # Unit weight for both translation and rotation.
        return array([1, 1])

    def getVector(self, pos):
        # Null 6-D twist: no translation, no rotation.
        return array([0.0] * 6)
class pointAttractor:
    '''Unit pointAttractor with rotation'''
    # Attracts a 6-DOF pose toward a goal homogeneous transform: the
    # translational part is a unit vector toward the goal point, the
    # rotational part is the KDL twist between current and goal frames.
    def __init__(self):
        self.frame = kdl.Frame()    # current orientation frame
        self.gframe = kdl.Frame()   # goal orientation frame
        self.gp = array([0, 0, 0])  # goal position
        self.p = array([0, 0, 0])   # last position seen by getVector()
        self.dToAttractor = 0       # distance from self.p to self.gp
        self.slowDownDistance = 0.05 # normal value 0.03
        self.tw = kdl.diff(self.frame, self.gframe)  # orientation-error twist
        self.rot_factor = 1.0
        self.min_rot = 0.09
        # NOTE(review): the attributes below are initialized but never read
        # by this class's methods -- presumably kept for external consumers
        # or legacy smoothing code; confirm before removing.
        self.stopDistance = 0.005
        self.r_stopDistance = 0.03
        self.slowDownTime = 0.1
        self.slowDowninitTime = time.time()
        self.time = time.time()
        self.filter = [1.0] * 5
        self.filterC = array([0.05, 0.3, 0.3, 0.3, 0.05])
    def setParams(self, param):
        '''
        0-15: homogeneous goal matrix
        16: slow down distance
        '''
        # Row-major 4x4 goal transform.
        gpos = matrix([param[0:4], param[4:8], param[8:12], param[12:16]])
        self.gp = gpos[0:3, 3].T.A[0]  # goal translation column as 1-D array
        self.dToAttractor = length(self.p - self.gp)
        self.slowDownDistance = param[16]
        # Copy the 3x3 rotation part into the goal KDL frame.
        for i in range(3):
            for j in range(3):
                self.gframe.M[i, j] = gpos[i, j]
        self.tw = kdl.diff(self.frame, self.gframe)
    def getScalar(self, pos):
        # Linear slow-down ramp once inside slowDownDistance of the goal.
        if self.dToAttractor < self.slowDownDistance:
            ss = self.dToAttractor / self.slowDownDistance
        else:
            ss = 1
        d_rot = sqrt(
            self.tw.rot[0] ** 2 + self.tw.rot[1] ** 2 +
            self.tw.rot[2] ** 2) # we can run into Euler singularities
        if self.dToAttractor > 0:
            r_weight = ((self.rot_factor * ss) / self.dToAttractor
                        ) # (ap/dToAttractor) always smaller than 1
        else:
            # Already at the goal point: scale rotation to min_rot magnitude.
            if d_rot > 0:
                r_weight = self.min_rot / (d_rot)
            else:
                r_weight = 0
        st = r_weight
        return array([ss, st]) # scalar speed, scalar twist
    def getVector(self, pos):
        ''' 0-15: position'''
        # Row-major 4x4 current pose; also updates the cached state used by
        # getScalar() (dToAttractor and the orientation-error twist).
        pos = matrix([pos[0:4], pos[4:8], pos[8:12], pos[12:16]])
        self.p = pos[0:3, 3].T.A[0]
        r = pos[0:3, 0:3]
        # Cartesian vector
        self.dToAttractor = length(self.p - self.gp)
        if self.dToAttractor == 0:
            P = array([0, 0, 0])
        else:
            # Unit vector pointing from the current position to the goal.
            P = -(self.p - self.gp) / self.dToAttractor
        # TODO: decay near the attraction point
        # rotational twist
        for i in range(3):
            for j in range(3):
                self.frame.M[i, j] = r[i, j]
        self.tw = kdl.diff(self.frame, self.gframe)
        return concatenate((P,
                            array([self.tw.rot[0],
                                   self.tw.rot[1],
                                   self.tw.rot[2]])),
                           0)
class decayAttractor:
    """Repulsive field around a spherical obstacle with bell-shaped decay."""

    def __init__(self):
        self.apos = array([0, 0, 0])  # obstacle center
        self.aradius = 0.1            # obstacle radius
        self.safeD = 0.05             # extra clearance beyond the radius
        self.cutD = self.aradius + self.safeD  # distance where decay cuts off
        self.order = 5                # steepness of the bell-shaped decay

    def setParams(self, param):
        '''Repeller with bellshape decay
        Parameters:
        0-2: position of obstacle
        3: obstacle radius
        4: extra safe distance
        5: order of the decay
        '''
        self.apos = array(param[0:3])
        self.aradius = param[3]
        self.safeD = param[4]
        self.cutD = self.aradius + self.safeD
        self.order = param[5]

    def getScalar(self, pos):
        # No speed scaling: full translational and rotational weight.
        return array([1, 1])

    def getVector(self, pos):
        """Return the 6-D repulsive twist for homogeneous pose *pos*."""
        pos = matrix([pos[0:4], pos[4:8], pos[8:12], pos[12:16]])
        p = pos[0:3, 3].T.A[0]
        P = p - self.apos
        lengthp = length(P)
        if lengthp == 0:
            # Exactly at the obstacle center: push out along an arbitrary axis.
            P = array([0, 0, 1])
        else:
            P = P / lengthp
        k = bellshape(self.cutD, self.order, lengthp)
        P = -P * k
        # Bug fix: 1-D arrays concatenate along axis 0 (axis 1 raises
        # AxisError on numpy >= 1.13); now consistent with
        # pointAttractor.getVector().
        return concatenate((P, array([0, 0, 0])), 0)
class planeAttractor:
    """Push along -normal with bell-shaped decay above the plane; zero below."""
    def __init__(self):
        self.epsilon = 0.0000001  # guards bellshape's x < epsilon branch
        self.planePoint = array([0, 0, 0])
        self.normalAxis = array([0, 0, 1])
        self.order = 1.2  # decay order
        self.safeDistance = 0.05  # decay cut distance
        # Offset d of the implicit plane equation n.p + d = 0.
        self.d = dot(-self.normalAxis, self.planePoint)
    def setParams(self, param):
        '''parameters:
        0-2: plane point
        3-5: Normal Axis
        6: decay order
        7: decay cut distance
        '''
        self.planePoint = array(param[0:3])
        normalAxis = array(param[3:6])
        self.normalAxis = normalAxis / length(normalAxis)
        self.order = param[6]
        self.safeDistance = param[7]
        self.d = dot(-self.normalAxis, self.planePoint)
    def getScalar(self, pos):
        # No speed shaping.
        return array([1, 1])
    def getVector(self, pos):
        pos = matrix([pos[0:4], pos[4:8], pos[8:12], pos[12:16]])
        p = pos[0:3, 3].T.A[0]
        # Signed distance to the plane (positive on the normal side).
        distancetoPlane = dot(self.normalAxis, p) + self.d
        if (distancetoPlane >= 0):
            k = bellshape(self.epsilon + self.safeDistance, self.order,
                          distancetoPlane)
            P = -k * self.normalAxis
        else:
            # Behind the plane: no field.
            P = array([0, 0, 0])
        # NOTE(review): axis=1 on 1-D arrays is legacy numpy behaviour.
        return concatenate((P, array([0, 0, 0])), 1)
class hemisphereAttractor:
    """Bell-decayed push along -normal above the plane; constant -normal below."""
    def __init__(self):
        self.epsilon = 0.0000001  # guards bellshape's x < epsilon branch
        self.planePoint = array([0, 0, 0])
        self.normalAxis = array([0, 0, 1])
        self.order = 1.2  # decay order
        self.safeDistance = 0.05  # decay cut distance
        # Offset d of the implicit plane equation n.p + d = 0.
        self.d = dot(-self.normalAxis, self.planePoint)
    def setParams(self, param):
        '''parameters:
        0-2: plane point
        3-5: Normal Axis
        6: decay cut distance
        7: decay order
        '''
        self.planePoint = array(param[0:3])
        normalAxis = array(param[3:6])
        self.normalAxis = normalAxis / length(normalAxis)
        # Note: parameter order differs from planeAttractor (6/7 swapped).
        self.order = param[7]
        self.safeDistance = param[6]
        self.d = dot(-self.normalAxis, self.planePoint)
    def getScalar(self, pos):
        # No speed shaping.
        return array([1, 1])
    def getVector(self, pos):
        pos = matrix([pos[0:4], pos[4:8], pos[8:12], pos[12:16]])
        p = pos[0:3, 3].T.A[0]
        distancetoPlane = dot(self.normalAxis, p) + self.d
        if (distancetoPlane >= 0):
            k = bellshape(self.epsilon + self.safeDistance, self.order,
                          distancetoPlane)
            P = -k * self.normalAxis
        else:
            # Behind the plane: full-strength push along -normal.
            P = -self.normalAxis
        # NOTE(review): axis=1 on 1-D arrays is legacy numpy behaviour.
        return concatenate((P, array([0, 0, 0])), 1)
class funnelAttractor:
    """Attractor gated by a cone ("funnel") around an approach axis.

    The pull toward ``point`` decays exponentially both with the angular
    deviation from ``axis`` and with the distance to the point.
    """
    def __init__(self):
        self.point = array([0, 0, 0])  # funnel apex / goal point
        self.axis = array([0, 0, 1])  # approach axis (unit vector)
        self.aT = 0.2  # angle decay threshold
        self.ar = 10  # angle decay base
        self.dT = 0.1  # distance decay threshold
        self.dr = 2  # distance decay base
    def setParams(self, param):
        '''Params:
        0-2: position
        3-5: axis
        6: cut angle
        7: angle decay order
        8: cut distance
        9: distance decay order
        '''
        self.point = array(param[0:3])
        self.axis = array(param[3:6]) / length(array(param[3:6]))
        print("Axis: ", self.axis)
        self.aT = param[6]
        self.ar = param[7]
        self.dT = param[8]
        self.dr = param[9]
    def getScalar(self, pos):
        # No speed shaping.
        return array([1, 1])
    def getVector(self, pos):
        # goal position and approach vector
        pos = matrix([pos[0:4], pos[4:8], pos[8:12], pos[12:16]])
        p = pos[0:3, 3].T.A[0]
        if length(p - self.point) == 0:
            P = array([0, 0, 0])
        else:
            P = -(p - self.point) / length(p - self.point)
        # Cosine between the approach axis and the point->p direction,
        # clamped at zero for the back half-space.
        angle = dot(-P, -self.axis)
        if angle < 0:
            angle = 0.0
        # angle exponential decay
        k = expDecay(1 - angle, self.ar, self.aT)
        # distance exponential decay
        k2 = expDecay(length(p - self.point), self.dr, self.dT)
        P = P * k * k2
        # NOTE(review): axis=1 on 1-D arrays is legacy numpy behaviour.
        return concatenate((P, array([0, 0, 0])), 1)
def goalObstacleVField2(goal, goalObstacle, approach, obstacle1, table):
    """Compose the complete vector field for reaching ``goal``.

    Deals with the approach direction by using a vector of approach;
    ``approach`` has to be unitary.  The field sums an attractor at the
    goal, decay repellers plus hyperbola "shadow" repellers for the goal
    obstacle and a second obstacle, a planar table repeller and an
    approach-angle field (weighted x10), then normalizes the result.

    :param goal: goal position (list of 3 floats).
    :param goalObstacle: position of the obstacle at/near the goal.
    :param approach: unit approach direction, appended to the goal params.
    :param obstacle1: position of the free-standing obstacle.
    :param table: table height wrapped in a one-element list.
    :return: the normalized composite ``VectorField``.
    """
    g0 = goal
    o0 = goalObstacle
    # Parameters for the decay repeller around the goal obstacle.
    # (The previously dead assignments order=1.4 / force=2.0 were removed;
    # only the final values below ever took effect.)
    obstacleRadius = [0.07]
    obstacleSafeDistance = [0.005]
    obstacleOrder = [2.5]
    initialForce = [5.0]
    goalObstacleParams = (obstacleRadius + obstacleSafeDistance +
                          obstacleOrder + initialForce)
    # Parameters for the goal obstacle's hyperbola shadow repeller.
    # bPercentage must not exceed 1; it sets the shadow width
    # (bigger value -> wider shadow).
    bPercentage = [0.9]
    yOrder = [1.1]
    yCutDistance = [0.15]
    xOrder = [2.5]
    xT = [0.5]
    goalObstacleHypParams = bPercentage + yOrder + yCutDistance + xOrder + xT
    # Parameters for the free-standing obstacle's decay repeller.
    obstacleRadius = [0.12]
    obstacleSafeDistance = [0.005]
    obstacleOrder = [4]
    initialForce = [5.0]
    obstacle1Params = (obstacleRadius + obstacleSafeDistance +
                      obstacleOrder + initialForce)
    # Parameters for obstacle1's hyperbola shadow repeller.
    bPercentage = [0.9]
    yOrder = [1.1]
    yCutDistance = [0.12]
    xOrder = [2.5]
    xT = [0.5]
    obstacle1HypParams = bPercentage + yOrder + yCutDistance + xOrder + xT
    # Table parameters; the height comes from the caller (previously a dead
    # hard-coded [0.82] was immediately overwritten here).
    tableInitForce = [10.0]
    tableHeight = table
    tableOrder = [100.0]
    tableSafeDistance = [0.00]
    tableParams = tableInitForce + tableHeight + tableOrder + tableSafeDistance
    vectorFieldObstacle1 = VectorField(decayRepeller1,
                                       obstacle1 + obstacle1Params)
    vectorFieldObstacle1Hyp = VectorField(vectorF_hyperbola,
                                          g0 + obstacle1 + obstacle1HypParams)
    vectorFieldtable = VectorField(XYplanarRepeller, tableParams)
    vectorFieldgoal = VectorField(simpleAttractor1, g0)
    vectorFieldgoalObstacle = VectorField(decayRepeller1,
                                          o0 + goalObstacleParams)
    vectorFieldgoalObstacleHyp = VectorField(vectorF_hyperbola,
                                             g0 + o0 + goalObstacleHypParams)
    vectorFieldAngle = VectorField(vectorF_Angle, g0 + approach)
    # Weighted sum of all component fields; approach-angle term dominates.
    vectorField3 = (vectorFieldgoal + vectorFieldgoalObstacle +
                    vectorFieldgoalObstacleHyp + vectorFieldAngle * 10 +
                    vectorFieldtable + vectorFieldObstacle1 +
                    vectorFieldObstacle1Hyp)
    vectorField = vectorField3.norm()
    return vectorField
def trapezoidWeight(startpos, goal, pos):
    """Trapezoidal speed profile along the start->goal segment.

    Returns a weight: ramps up near the start, holds 1 in the middle,
    ramps down toward the goal, and 0.17 outside the segment.
    Note: ``len`` here is this module's Euclidean-norm override of the
    builtin (see the module-level ``def len``).
    """
    maxlen = len(startpos - goal)
    curlen = len(pos - goal)
    risingEdge = 0.75
    fallingEdge = 0.70
    printnow = False
    # Throttle debug printing to every 10th call via a module global.
    global printcounter
    printcounter = printcounter + 1
    if (printcounter >= 10):
        printcounter = 0
        printnow = True
    if (printnow):
        print("Distance to the attractor: ", curlen)
    # Fraction of the total start->goal distance still remaining.
    perlen = curlen / maxlen
    if ((perlen > risingEdge) and (perlen <= 1)):
        # rising edge
        # NOTE(review): the slope divisor is fallingEdge; possibly
        # (1 - risingEdge) was intended -- confirm the profile shape.
        k = (-1 / fallingEdge) * (perlen - risingEdge) + 1
        if (printnow):
            print("Accelerating! ", k)
    elif perlen < fallingEdge:
        # falling edge
        k = (1 / fallingEdge) * perlen
    elif ((perlen >= fallingEdge) and (perlen <= risingEdge)):
        # flat center
        k = 1
    else:
        k = 0.17  # outside of the attractor
        if (printnow):
            print("Outside of the attractor ", perlen)
    return k
def simpleAttractor2(pos, param):
    """Linear attractor toward ``param`` whose pull decays with distance.

    :param pos: current position (3-vector).
    :param param: attractor position (3-vector).
    :return: numpy array -- the unit vector toward the attractor scaled by
        ``1 - 0.05 * distance``; the zero vector when already at the
        attractor (the previous version divided by zero there).
    """
    p0 = array(param)
    p = array(pos)
    delta = p - p0
    # Compute the norm once (the original evaluated sqrt(inner(...)) twice).
    dist = sqrt(inner(delta, delta))
    if dist == 0:
        # At the attractor no direction is defined: return a null field.
        return array([0.0, 0.0, 0.0])
    P = -delta / dist  # unit direction toward the attractor
    k = 1 - 0.05 * dist  # pull decays linearly with distance
    return P * k
def simpleRepeller1(pos, param):
    """Unit vector pointing away from the repeller located at ``param``."""
    repeller = array(param)
    position = array(pos)
    offset = position - repeller
    return offset / sqrt(inner(offset, offset))
def simpleRepeller1a(pos, param):
    """Repeller with a bell-shaped cut-off (radius 1, order 10) and gain 2."""
    offset = array(pos) - array(param)
    dist = sqrt(inner(offset, offset))
    direction = offset / dist
    cut_distance = 1
    order = 10
    # Butterworth-style bell: ~1 inside cut_distance, decaying outside.
    gain = 1.0 / sqrt(1.0 + pow(dist / cut_distance, 2 * order))
    return 2 * gain * direction
def simpleRepeller2(pos, param):
    """Unit repeller active only within radius ``param[3]`` of ``param[:3]``."""
    center = array(param[:3])
    radius = param[3]
    offset = array(pos) - center
    dist = sqrt(inner(offset, offset))
    if dist > radius:
        # Outside the influence radius: no field.
        return array([0, 0, 0])
    return offset / sqrt(inner(offset, offset))
def simpleRepeller3(pos, param):
    """Repel away from the fixed focus (5, 0, 0), but only inside one
    hyperbola branch (focal-distance difference > 9).  ``param`` is unused.
    """
    position = array(pos)
    focus = array([5, 0, 0])
    mirror_focus = -focus
    to_focus = position - focus
    to_mirror = position - mirror_focus
    branch = sqrt(inner(to_mirror, to_mirror)) - sqrt(inner(to_focus, to_focus))
    if branch <= 9:
        return array([0, 0, 0])
    return to_focus / sqrt(inner(to_focus, to_focus))
def lateralRepeller1(pos, param):
    """Push sideways off the goal->obstacle "shadow" line.

    ``param`` is the obstacle position; the goal is assumed at the origin.
    The push is perpendicular to the goal->obstacle axis and fades with a
    bell shape (cut 3, order 10) of the distance to the obstacle.
    """
    p = array(pos)
    obs = array(param)
    goal = array([0, 0, 0])
    shadowVector = obs - goal
    normShadowVector = sqrt(dot(shadowVector, shadowVector))
    if normShadowVector == 0:
        # NOTE(review): only warns; the division below still executes.
        print("obstacle and goal can't be in the same place")
    shadowVector = shadowVector / normShadowVector
    # Component of p perpendicular to the shadow axis.
    shadowVectorScaled = dot(p, shadowVector) * shadowVector
    shadowRepel = p - shadowVectorScaled
    normShadowVector = sqrt(dot(shadowRepel, shadowRepel))
    if normShadowVector == 0:
        shadowRepel = shadowRepel * 0
    else:
        shadowRepel = shadowRepel / normShadowVector
    distToObstacle = sqrt(dot((p - obs), (p - obs)))
    k = bellshape(3, 10, distToObstacle)
    shadowRepel = shadowRepel * k
    return shadowRepel
def XYplanarRepeller(pos, param):
    """Push straight up with force ``param[0]`` when z is below the table
    height ``param[1]``; zero field otherwise.
    """
    z = array(pos)[2]
    force = param[0]
    table_height = param[1]
    if z >= table_height:
        return array([0, 0, 0])
    return array([0, 0, force])
def bellshape(cutDistance, order, x):
    """Butterworth-style bell: ~1 below ``cutDistance``, decaying with
    sharpness ``order`` beyond it.

    Uses the module-level ``epsilon`` to short-circuit the near-zero case.
    """
    if x < epsilon:
        return 1
    return 1.0 / sqrt(1.0 + pow(x / cutDistance, 2 * order))
def hyperbola(x, focus, a):
    """Evaluate y on the hyperbola x^2/a^2 - y^2/b^2 = 1.

    ``a`` is the distance from the goal to the first nearer point on the
    curve; a smaller gap between ``focus`` and ``a`` gives a less open
    hyperbola.  Returns ``[x, y, y]``.
    """
    semi_minor = sqrt(focus * focus - a * a)
    y = semi_minor * sqrt(x * x / (a * a) - 1.0)
    return [x, y, y]
def hyperbolaDisplacedRotated(pos, goal, obstacle, b):
    """Evaluate the obstacle-shadow hyperbola in the goal frame.

    The hyperbola's x axis runs from ``goal`` through ``obstacle``; ``b``
    is the semi-minor axis.  Returns ``[hyp, hyp2temp]`` where ``hyp`` is
    the component of ``pos`` perpendicular to that axis (global frame) and
    ``hyp2temp`` the [x, y, y] point of the hyperbola at the projection.
    Note: ``len`` is this module's Euclidean norm, not the builtin.
    """
    obstacle2temp = obstacle - goal
    normobstacle2 = sqrt(dot(obstacle2temp, obstacle2temp))
    pos2 = pos - goal
    if normobstacle2 == 0:
        # NOTE(review): only warns; the divisions below still execute.
        print("Goal can't be in the same position as the obstacle")
    # Component of pos2 perpendicular to the goal->obstacle axis.
    hyp = (pos2) - (dot(pos2, obstacle2temp / normobstacle2) *
                    (obstacle2temp / normobstacle2))
    # Coordinate of pos2 along the axis.
    x2 = dot(pos2, (obstacle2temp / normobstacle2))
    lenFocus1 = len(obstacle2temp)
    a = sqrt(pow(lenFocus1, 2) - pow(b, 2))
    hyp2temp = hyperbola(
        x2, normobstacle2,
        a)
    # x2 and hyp2temp[1] are x and y in a non rotated hyperbola
    # returns vector perpendicular to the x of the hyperbola in the
    # global frame and the x and y pair of the hyperbola in that x
    return [
        hyp, hyp2temp
    ]
def hyperbolaInside(goal, obstacle, pos, b):
    """Return 1 when ``pos`` lies inside the obstacle-side hyperbola branch,
    else 0.

    Foci sit at the obstacle and at its mirror through the goal; ``b`` is
    the semi-minor axis.  ``len`` is this module's Euclidean norm.
    """
    # b can not be bigger than len(obstacle-goal)
    focus1 = obstacle - goal
    focus2 = -focus1
    posrel = pos - goal
    dist1 = sqrt(dot(posrel - focus1, posrel - focus1))
    dist2 = sqrt(dot(posrel - focus2, posrel - focus2))
    lenFocus1 = len(focus1)
    a = sqrt(pow(lenFocus1, 2) - pow(b, 2))
    diff0 = a * 2.0
    # Hyperbola branch test: constant difference of focal distances (2a).
    diff = dist2 - dist1
    if diff > diff0:
        k = 1
    else:
        k = 0
    return k
def len(x):
    # WARNING: shadows the builtin ``len`` throughout this module and
    # returns the Euclidean norm of ``x`` instead of an element count.
    return sqrt(dot(x, x))
def length(x):
    """Euclidean norm of ``x`` (duplicate of this module's ``len``)."""
    return sqrt(dot(x, x))
def vectorF_hyperbola(pos, param):
    """Shadow repeller: push sideways off the goal->obstacle axis.

    Active only inside the hyperbola branch behind the obstacle; the push
    decays bell-shaped with the hyperbola-frame y and exponentially with x.
    param: 0-2 goal, 3-5 obstacle, 6 b as fraction of |obstacle-goal|,
    7 yOrder, 8 yCutDistance, 9 xOrder, 10 xT.
    ``len`` is this module's Euclidean norm.
    """
    goal = array(param[:3])
    obstacle = array(param[3:6])
    b = param[6] * len(obstacle - goal)
    yOrder = param[7]
    yCutDistance = param[8]
    xOrder = param[9]
    xT = param[10]
    k = hyperbolaInside(goal, obstacle, pos, b)
    if k == 1:
        temp = hyperbolaDisplacedRotated(pos, goal, obstacle, b)
        perpHyptemp = temp[0]
        if len(perpHyptemp) == 0:
            perpHyp = array([0, 0, 0])
        else:
            perpHyp = perpHyptemp / len(perpHyptemp)
        P = array([perpHyp[0], perpHyp[1], perpHyp[2]])
        Hyp = temp[1]
        y = Hyp[1]
        if y == 0:
            y2 = 0
        else:
            # Percentage of y position in hyperbola
            y2 = len(perpHyptemp) / y
        # bellshape in Y of the hyperbola
        k = bellshape(yCutDistance, yOrder, y2)
        # Exponential decay along x, measured past the obstacle distance.
        decayX = len(Hyp[0]) - len(obstacle - goal)
        kexp = expDecay(len(decayX), xOrder, xT)
        P = P * kexp * k
    else:
        P = array([0, 0, 0])
    return P
def expDecay(x, r, T):
    """Exponential decay with base ``r`` and characteristic length ``T``:
    returns r**(-x/T), i.e. 1 at x=0 and 1/r at x=T.
    """
    return pow(r, -x / T)
def simpleRepeller4(pos, param):
    """Experimental hyperbola-gated repeller around the focus (5, 0, 0).

    ``param`` is unused.  Several intermediates (``y``, the first two
    ``k`` assignments, ``P2``) are computed but cannot affect the result
    due to overwrites / zero weighting -- looks like leftover tuning code.
    """
    p = array(pos)
    f1 = array([5, 0, 0])  # focus 1
    f2 = -f1
    dist1 = sqrt(inner(p - f1, p - f1))
    dist2 = sqrt(inner(p - f2, p - f2))
    diff = dist2 - dist1
    diff0 = 9
    # NOTE(review): diff0/2 floor-divides under Python 2 -- confirm target.
    a = diff0 / 2
    c = 5
    x = p[0]
    temp1 = ((pow(x, 2) / pow(a, 2)) - 1)
    temp2 = (pow(c, 2) - pow(a, 2))
    if temp1 * temp2 >= 0:
        y = sqrt(temp1 * temp2)
    else:
        y = 1
    if dist1 != 0:
        k = 10 / dist1
        k = 1 - 0.1 * dist1  # overwrites the previous k immediately
    else:
        k = 1
    if diff > 9:
        P = (p - f1) / sqrt(inner((p - f1), (p - f1)))
        P = P * 2.5 * pow(10, -dist1 / 0.8)
        if (p[0] > a):
            P2 = array([0, P[1], P[2]])
            k = sqrt(inner(P2, P2))
            P2 = P2 / k
            print("hiperbola", y)
            yper = pos[1] / y
            print(yper)
            P2 = P2 * 2.5 * pow(10, -(x - a) / 7)
            P2 = P2 * 1.2 * pow(10, -(abs(yper)) / 0.3)
            P = P * 1 + P2 * 0  # P2 weighted by 0: lateral term disabled
    else:
        P = array([0, 0, 0])
    return P
def attractorSinHib(pos, param):
    """Sinusoidal attractor toward (x0, y0, z0), sized by the start point.

    param: 0-2 attractor position, 3-5 start ("maximum") position.  Speed
    follows a half sine over the start-goal distance, falls back to a
    fixed-speed linear pull outside that range, and stops within 2 cm.
    Returns a numpy array [X, Y, Z].
    """
    x = pos[0]
    y = pos[1]
    z = pos[2]
    x0 = param[0]
    y0 = param[1]
    z0 = param[2]
    xm = param[3]
    ym = param[4]
    zm = param[5]
    k2 = 0.00001
    # NOTE(review): this k is immediately overwritten below -- dead code.
    k = 1.0 / sqrt(1 + (k2 * sqrt(x**2 + y**2 + z**2)) /
                   (sqrt(xm**2 + ym**2 + zm**2)))
    distance = sqrt((x - x0) * (x - x0) + (y - y0) * (y - y0) +
                    (z - z0) * (z - z0))
    print("Current position: ", x, y, z)
    print("Distance to the attractor: ", distance)
    distancem = sqrt((x0 - xm) * (x0 - xm) + (y0 - ym) * (y0 - ym) +
                     (z0 - zm) * (z0 - zm))
    # print distancem
    # Map [0, distancem] roughly onto [0, pi] for the sine speed profile.
    k = 3.1416 / (distancem + (distancem / 50.0))
    if distance != 0:
        X = -sin(distance * k) * (x - x0) / distance
        Y = -sin(distance * k) * (y - y0) / (distance)
        Z = -sin(distance * k) * (z - z0) / distance
    else:
        X = 0
        Y = 0
        Z = 0
    if distance > (distancem - (distancem / 100.0)):
        print("Special Point: Outside of the sinusoidal attractor")
        # Outside the profile: constant-speed linear pull toward the goal.
        X = -(x - x0)
        Y = -(y - y0)
        Z = -(z - z0)
        vectorSize = sqrt(X * X + Y * Y + Z * Z)
        outsideVectorSpeed = 0.5
        X = outsideVectorSpeed * X / vectorSize
        Y = outsideVectorSpeed * Y / vectorSize
        Z = outsideVectorSpeed * Z / vectorSize
    scale = 0.5
    X = X * scale
    Y = Y * scale
    Z = Z * scale
    # Dead zone: stop within 2 cm of the attractor.
    if distance < 0.02:
        X = 0
        Y = 0
        Z = 0
    vector = [X, Y, Z]
    return array(vector)
def attractor3(x, y, z, x0, y0, z0, xm, ym, zm):
    """Sinusoidal attractor (see attractorSinHib); returns [X, Y, Z, done].

    ``done`` is always False here; the speed scale is 0.3 ("for b21")
    instead of attractorSinHib's 0.5.
    """
    done = False
    k2 = 0.00001
    # NOTE(review): this k is immediately overwritten below -- dead code.
    k = 1.0 / sqrt(1 + (k2 * sqrt(x**2 + y**2 + z**2)) /
                   (sqrt(xm**2 + ym**2 + zm**2)))
    distance = sqrt((x - x0) * (x - x0) + (y - y0) * (y - y0) +
                    (z - z0) * (z - z0))
    print("Current position: ", x, y, z)
    print("Distance to the attractor: ", distance)
    distancem = sqrt((x0 - xm) * (x0 - xm) + (y0 - ym) * (y0 - ym) +
                     (z0 - zm) * (z0 - zm))
    # Map [0, distancem] roughly onto [0, pi] for the sine speed profile.
    k = 3.1416 / (distancem + (distancem / 50.0))
    if distance != 0:
        X = -sin(distance * k) * (x - x0) / distance
        Y = -sin(distance * k) * (y - y0) / (distance)
        Z = -sin(distance * k) * (z - z0) / distance
    else:
        X = 0
        Y = 0
        Z = 0
    if distance > (distancem - (distancem / 100.0)):
        print("Special Point: Outside of the sinusoidal attractor")
        # Outside the profile: constant-speed linear pull toward the goal.
        X = -(x - x0)
        Y = -(y - y0)
        Z = -(z - z0)
        vectorSize = sqrt(X * X + Y * Y + Z * Z)
        outsideVectorSpeed = 0.5
        X = outsideVectorSpeed * X / vectorSize
        Y = outsideVectorSpeed * Y / vectorSize
        Z = outsideVectorSpeed * Z / vectorSize
    scale = 0.3  # for b21
    X = X * scale
    Y = Y * scale
    Z = Z * scale
    # Dead zone: stop within 2 cm of the attractor.
    if distance < 0.02:
        X = 0
        Y = 0
        Z = 0
    vector = [X, Y, Z, done]
    return vector
def attractor2(x, y, z, x0, y0, z0, xm, ym, zm):
    """Sinusoidal planar attractor: returns [X, Y]; z affects only the gain."""
    k2 = 0.00001
    norm_pos = sqrt(x ** 2 + y ** 2 + z ** 2)
    norm_start = sqrt(xm ** 2 + ym ** 2 + zm ** 2)
    gain = 1.0 / sqrt(1 + (k2 * norm_pos) / norm_start)
    return [-sin(gain * (x - x0)) / 2, -sin(gain * (y - y0)) / 2]
def attractor1(x, y, z, x0, y0, z0, xm, ym, zm):
    """Linear planar attractor toward (x0, y0): returns [X, Y];
    z only influences the gain.
    """
    k2 = 0.000001
    norm_pos = sqrt(x ** 2 + y ** 2 + z ** 2)
    norm_start = sqrt(xm ** 2 + ym ** 2 + zm ** 2)
    gain = 0.10 / sqrt(1 + (k2 * norm_pos) / norm_start)
    return [-(x - x0) * gain, -(y - y0) * gain]
| arcoslab/vfl | vfl/vfl.py | Python | gpl-3.0 | 23,622 |
print("HelloWorld")
text="HelloWorld_Text"
print(text) | KuChanTung/Python | HelloWorld.py | Python | epl-1.0 | 55 |
from sympy import *
from sympy.integrals.quadrature import gauss_legendre
import numpy as np
import numpy.linalg as linalg
import math
# Build order-2 (trilinear) shape-function polynomials on [-1, 1]^3 and the
# Gauss-Legendre quadrature data for integrating the element Jacobian.
# Python 2 script (print statements, xrange).
Order = 2
CarVals = ['x','y','z']
Vals = ['r' , 's' , 't']
Coeffs = ['a','b','c']
Range = np.linspace(-1,1,Order)
Dims = len(Vals)
N = Order ** Dims # number of coeffs
Size = []
for i in xrange(Dims):
    Size.append(Order)
# Tensor-product grid of the element's natural-coordinate node positions.
RangePts = np.zeros((Order ** len(Vals),len(Vals)))
CoeffMatrix = np.ones((RangePts.shape[0],RangePts.shape[0]),dtype = np.double)
for i in xrange(RangePts.shape[0]):
    Sub = np.unravel_index(i,Size)
    for j in xrange(len(Size)):
        RangePts[i,j] = Range[Sub[j]];
# Assemble the symbolic polynomials x, y, z as strings, then exec them so
# sympy can differentiate them below.
for k in xrange(len(Coeffs)):
    Poly = ""
    for i in xrange(N):
        if i != 0:
            Poly = Poly + " + " + Coeffs[k] + repr(i) + "*"
        Sub = np.unravel_index(i,Size)
        for j in xrange(Dims):
            if j != 0:
                Poly = Poly + "*"
            Poly = Poly + Vals[j] + "**" + repr(Sub[j])
    exec(CarVals[k] + " = Poly")
# Matrix of monomial values at the nodes; its inverse is printed below.
for i in xrange(CoeffMatrix.shape[0]):
    for j in xrange(CoeffMatrix.shape[1]):
        Sub = np.unravel_index(j,Size)
        for k in xrange(RangePts.shape[1]):
            CoeffMatrix[i,j] = CoeffMatrix[i,j] * RangePts[i,k] ** Sub[k]
CoeffMatrix = np.matrix(CoeffMatrix)
print linalg.inv(CoeffMatrix)
# Symbolic Jacobian d(x,y,z)/d(r,s,t), filled entry by entry via exec.
Jacobian = MatrixSymbol('J',len(CarVals),len(Vals))
Jacobian = Matrix(Jacobian)
for i in xrange(len(CarVals)):
    for j in xrange(len(Vals)):
        exec(("Jacobian[i,j] = diff(" + CarVals[i] + ",'" + Vals[j] + "')"))
#print Jacobian
GOrder = int(math.ceil((Order + 1) / 2.0))
# now that we have the jacobian, we can use gauss quadrature to evaluate the determinant at discrete points to get the exact answer for up polynomials of order 2n-1, so if we have an element of order n we must have quadrature of at lease (n + 1) / 2
Locations, Weights = gauss_legendre(GOrder, 15)
GSize = []
for i in xrange(Dims):
    GSize.append(GOrder)
# Tensor-product quadrature points and their combined weights.
EvalPts = np.zeros((len(Locations) ** len(Vals),len(Vals)))
EvalWeights = np.ones(EvalPts.shape[0])
for i in xrange(EvalPts.shape[0]):
    Sub = np.unravel_index(i,GSize)
    for j in xrange(len(Vals)):
        EvalPts[i,j] = Locations[Sub[j]]
        EvalWeights[i] = EvalWeights[i] * Weights[Sub[j]]
print Jacobian
JacobianList = []
#for k in xrange(len(EvalWeights)):
#    myJacobian = Jacobian.copy()
#    for l in xrange(len(Vals)):
#        myJacobian = myJacobian.subs(Vals[l], EvalPts[k,l])
#    JacobianList.append(myJacobian)
| JonHoy/FEA-Code | FEA/Python Scripts/ShapeFunction.py | Python | gpl-2.0 | 2,359 |
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
| chienlieu2017/it_management | odoo/addons/l10n_be_hr_payroll/__init__.py | Python | gpl-3.0 | 113 |
#!/usr/bin/env python
import argparse
import Image, ImageDraw, ImageChops
import os
import sys
import util
# stitches a bunch of images together into a grid
# Python 2 script: each input directory becomes one cell of an X-by-Y grid;
# frame i of every directory is pasted side by side and saved as one PNG.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--x', type=int, default=3, help="output columns")
parser.add_argument('--y', type=int, default=2, help="output rows")
parser.add_argument('--nth', type=int, default=1, help="only process every nth frame")
parser.add_argument('--max-frame', type=int, default=0,
                    help="if >0 ignore frames past this")
parser.add_argument('--output-dir', type=str, default="/tmp/stitch",
                    help="where to output stitched imgs")
parser.add_argument('dirs', nargs='+')
opts = parser.parse_args()
print opts
X, Y = opts.x, opts.y
W, H = 160, 120  # per-cell image size in pixels
pixel_buffer = 3  # gap between grid cells
util.make_dir(opts.output_dir)
# Collect the (sorted) frame listing of every input directory.
imgs_per_directory = {}
max_imgs = 0
assert len(opts.dirs) == X * Y, opts.dirs
for directory in opts.dirs:
    i = sorted(os.listdir(directory))
    if opts.max_frame > 0:
        i = i[:opts.max_frame]
    imgs_per_directory[directory] = i
    print "imgs per dir", directory, len(i)
    max_imgs = max(max_imgs, len(i))
i = 0
while i <= max_imgs:
    print i, "/", max_imgs
    background = Image.new('RGB',
                           ((W*X)+(X*pixel_buffer), (H*Y)+(Y*pixel_buffer)),
                           (0, 0, 0))
    for n, directory in enumerate(opts.dirs):
        imgs = imgs_per_directory[directory]
        # Shorter sequences repeat their last frame.
        img_file = imgs[min(len(imgs)-1, i)]
        img = Image.open("%s/%s" % (directory, img_file))
        gx, gy = n%X, n/X
        x_offset = (gx*W)+(gx*pixel_buffer)
        y_offset = (gy*H)+(gy*pixel_buffer)
        background.paste(img, (x_offset, y_offset))
    background.save("%s/stitched_%03d.png" % (opts.output_dir, i))
    i += opts.nth
print "mencoder mf://%s/ -ovc lavc -mf fps=10 -o stitched.avi" % opts.output_dir
| matpalm/malmomo | stitch_imgs.py | Python | mit | 1,879 |
import functools
import html
import itertools
import pprint
from mondrian import term
from bonobo import settings
from bonobo.config import Configurable, Method, Option, use_context, use_no_input, use_raw_input
from bonobo.config.functools import transformation_factory
from bonobo.config.processors import ContextProcessor, use_context_processor
from bonobo.constants import NOT_MODIFIED
from bonobo.errors import UnrecoverableAttributeError
from bonobo.util.objects import ValueHolder
from bonobo.util.term import CLEAR_EOL
__all__ = [
"FixedWindow",
"Format",
"Limit",
"OrderFields",
"MapFields",
"PrettyPrinter",
"Rename",
"SetFields",
"Tee",
"UnpackItems",
"count",
"identity",
"noop",
]
def identity(x):
    """Pass-through: return the input unchanged."""
    return x
class Limit(Configurable):
    """
    Creates a Limit() node, that will only let go through the first n rows (defined by the `limit` option), unmodified.
    .. attribute:: limit
        Number of rows to let go through.
    TODO: simplify into a closure building factory?
    """
    limit = Option(positional=True, default=10)
    @ContextProcessor
    def counter(self, context):
        # Per-execution mutable counter, injected as the first call argument.
        yield ValueHolder(0)
    def __call__(self, counter, *args, **kwargs):
        counter += 1
        # Forward the row untouched until the limit is hit; afterwards
        # yield nothing, silently dropping the remaining rows.
        if counter <= self.limit:
            yield NOT_MODIFIED
def Tee(f):
    """Decorator: call ``f`` for its side effects, then forward the input.

    The wrapped callable always returns NOT_MODIFIED, so the stream passes
    through unchanged downstream.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        f(*args, **kwargs)
        return NOT_MODIFIED
    return wrapped
def _shorten(s, w):
if w and len(s) > w:
s = s[0 : w - 3] + "..."
return s
class PrettyPrinter(Configurable):
    """Print each row to the console, a plain stream, or a Jupyter table;
    rows always pass through unmodified (returns NOT_MODIFIED).
    """
    max_width = Option(
        int,
        default=term.get_size()[0],
        required=False,
        __doc__="""
        If set, truncates the output values longer than this to this width.
        """,
    )
    filter = Method(
        default=(
            lambda self, index, key, value: (value is not None)
            and (not isinstance(key, str) or not key.startswith("_"))
        ),
        __doc__="""
        A filter that determine what to print.
        Default is to ignore any key starting with an underscore and none values.
        """,
    )
    @ContextProcessor
    def context(self, context):
        # Collect Jupyter table rows during the run; render once on teardown.
        context.setdefault("_jupyter_html", None)
        yield context
        if context._jupyter_html is not None:
            from IPython.display import display, HTML
            display(HTML("\n".join(["<table>"] + context._jupyter_html + ["</table>"])))
    def __call__(self, context, *args, **kwargs):
        # Pick the output flavour from the runtime environment.
        if not settings.QUIET:
            if term.isjupyter:
                self.print_jupyter(context, *args, **kwargs)
                return NOT_MODIFIED
            if term.istty:
                self.print_console(context, *args, **kwargs)
                return NOT_MODIFIED
            self.print_quiet(context, *args, **kwargs)
        return NOT_MODIFIED
    def print_quiet(self, context, *args, **kwargs):
        # Plain one-line-per-value output for non-tty streams.
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_quiet(index, key, value, fields=context.get_input_fields()))
    def format_quiet(self, index, key, value, *, fields=None):
        # XXX should we implement argnames here ?
        return " ".join(((" " if index else "-"), str(key), ":", str(value).strip()))
    def print_console(self, context, *args, **kwargs):
        # Frame the row between box-drawing corner characters.
        print("\u250c")
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_console(index, key, value, fields=context.get_input_fields()))
        print("\u2514")
    def format_console(self, index, key, value, *, fields=None):
        fields = fields or []
        if not isinstance(key, str):
            # Positional value: show the field name plus its dimmed index.
            if len(fields) > key and str(key) != str(fields[key]):
                key = "{}{}".format(fields[key], term.lightblack("[{}]".format(key)))
            else:
                key = str(index)
        prefix = "\u2502 {} = ".format(key)
        prefix_length = len(prefix)
        def indent(text, prefix):
            # Prefix continuation lines so multi-line reprs stay boxed.
            for i, line in enumerate(text.splitlines()):
                yield (prefix if i else "") + line + CLEAR_EOL + "\n"
        repr_of_value = "".join(
            indent(pprint.pformat(value, width=self.max_width - prefix_length), "\u2502" + " " * (len(prefix) - 1))
        ).strip()
        return "{}{}{}".format(prefix, repr_of_value.replace("\n", CLEAR_EOL + "\n"), CLEAR_EOL)
    def print_jupyter(self, context, *args):
        # First row: emit the table header from input fields (or indices).
        if not context._jupyter_html:
            context._jupyter_html = [
                "<thead><tr>",
                *map("<th>{}</th>".format, map(html.escape, map(str, context.get_input_fields() or range(len(args))))),
                "</tr></thead>",
            ]
        context._jupyter_html += ["<tr>", *map("<td>{}</td>".format, map(html.escape, map(repr, args))), "</tr>"]
@use_no_input
def noop(*args, **kwargs):
    # Placeholder node: ignore everything and forward the stream untouched.
    return NOT_MODIFIED
class FixedWindow(Configurable):
    """
    Transformation factory to create fixed windows of inputs, as lists.
    For example, if the input is successively 1, 2, 3, 4, etc. and you pass it through a ``FixedWindow(2)``, you'll get
    lists of elements 2 by 2: [1, 2], [3, 4], ...
    """
    length = Option(int, positional=True)  # type: int
    @ContextProcessor
    def buffer(self, context):
        # Shared buffer for the node's lifetime; on teardown, flush a final
        # None-padded window if any rows remain.
        buffer = yield ValueHolder([])
        if len(buffer):
            last_value = buffer.get()
            last_value += [None] * (self.length - len(last_value))
            context.send(*last_value)
    @use_raw_input
    def __call__(self, buffer, bag):
        buffer.append(bag)
        # Emit the window and reset once it is full.
        if len(buffer) >= self.length:
            yield tuple(buffer.get())
            buffer.set([])
@transformation_factory
def OrderFields(fields):
    """
    Transformation factory to reorder fields in a data stream.
    The given fields come first; any remaining input fields follow, sorted
    by name.
    :param fields: iterable of field names to put first, in order.
    :return: callable
    """
    fields = list(fields)
    @use_context
    @use_raw_input
    def _OrderFields(context, row):
        nonlocal fields
        context.setdefault("remaining", None)
        # First row only: compute and fix the output field layout.
        if not context.output_type:
            context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
            context.set_output_fields(fields + context.remaining)
        yield tuple(row.get(field) for field in context.get_output_fields())
    return _OrderFields
@transformation_factory
def SetFields(fields):
    """
    Transformation factory that sets the field names on first iteration, without touching the values.
    :param fields: the output field names to declare.
    :return: callable
    """
    @use_context
    @use_no_input
    def _SetFields(context):
        nonlocal fields
        # First call only: declare the output schema; rows pass through.
        if not context.output_type:
            context.set_output_fields(fields)
        return NOT_MODIFIED
    return _SetFields
@transformation_factory
def UnpackItems(*items, fields=None, defaults=None):
    """
    Transformation factory that flattens mapping-valued row items into
    top-level output fields, e.g. ``UnpackItems(0)``.
    :param items: row indices/keys whose mapping values get unpacked.
    :param fields: explicit output field names; inferred from the first
        row's mapping keys when None.
    :param defaults: fallback values for keys missing from a row.
    :return: callable
    """
    defaults = defaults or {}
    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        if fields is None:
            # First row: derive the output schema from the mappings' keys.
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        for item in items:
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values
    return _UnpackItems
@transformation_factory
def Rename(**translations):
    """
    Transformation factory that renames fields (``Rename(new="old")``)
    without touching the values.
    :param translations: new field name -> old field name.
    :return: callable
    """
    # XXX todo handle duplicated
    fields = None
    # Invert to old -> new for the lookup below.
    translations = {v: k for k, v in translations.items()}
    @use_context
    @use_raw_input
    def _Rename(context, bag):
        nonlocal fields, translations
        # First row only: declare the renamed output schema.
        if not fields:
            fields = tuple(translations.get(field, field) for field in context.get_input_fields())
            context.set_output_fields(fields)
        return NOT_MODIFIED
    return _Rename
@transformation_factory
def Format(**formats):
    """
    Transformation factory that adds or overrides fields with ``str.format``
    templates evaluated against the whole row.
    :param formats: field name -> format string.
    :return: callable
    """
    fields, newfields = None, None
    @use_context
    @use_raw_input
    def _Format(context, bag):
        nonlocal fields, newfields, formats
        if not context.output_type:
            fields = context.input_type._fields
            # Templated names not already present become new output fields.
            newfields = tuple(field for field in formats if not field in fields)
            context.set_output_fields(fields + newfields)
        return tuple(
            formats[field].format(**bag._asdict()) if field in formats else bag.get(field)
            for field in fields + newfields
        )
    return _Format
@transformation_factory
def MapFields(function, key=True):
    """
    Transformation factory that maps `function` on the values of a row.
    It can be applied either to
    1. all columns (`key=True`),
    2. no column (`key=False`), or
    3. a subset of columns by passing a callable, which takes column name and returns `bool`
    (same as the parameter `function` in `filter`).
    :param function: callable
    :param key: bool or callable
    :return: callable
    """
    @use_raw_input
    def _MapFields(bag):
        # Rebuild rows with the same concrete type (namedtuple-aware).
        try:
            factory = type(bag)._make
        except AttributeError:
            factory = type(bag)
        if callable(key):
            try:
                fields = bag._fields
            except AttributeError as e:
                raise UnrecoverableAttributeError(
                    "This transformation works only on objects with named" " fields (namedtuple, BagType, ...)."
                ) from e
            # Apply the function only where the key predicate matches.
            return factory(function(value) if key(key_) else value for key_, value in zip(fields, bag))
        elif key:
            return factory(function(value) for value in bag)
        else:
            return NOT_MODIFIED
    return _MapFields
def _count(self, context):
    # ContextProcessor: inject a shared counter, then emit the final total
    # downstream when the stream ends.
    counter = yield ValueHolder(0)
    context.send(counter.get())
@use_no_input
@use_context_processor(_count)
def count(counter):
    # Count input rows; the total is sent once on teardown (see _count).
    counter += 1
| python-bonobo/bonobo | bonobo/nodes/basics.py | Python | apache-2.0 | 10,187 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
def set_log_level(level):
    """
    Convenience function to set the log level of all hyperspy modules.

    Other modules' log levels are left untouched.  Calls
    ``logging.basicConfig()`` first, which is a no-op when logging is
    already configured.

    Parameters
    ----------
    level : int or str
        Any value accepted by `logging.Logger.setLevel()`: 'CRITICAL',
        'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET' or the matching ints.

    Example
    -------
    >>> import hyperspy.api as hs
    >>> hs.set_log_level('INFO')

    To capture log output emitted during hyperspy's own import, set the
    level before importing:

    >>> from hyperspy.logger import set_log_level
    >>> set_log_level('DEBUG')
    >>> import hyperspy.api as hs
    """
    from logging import basicConfig, getLogger
    basicConfig()  # Does nothing if already configured
    getLogger("hyperspy").setLevel(level)
| thomasaarholt/hyperspy | hyperspy/logger.py | Python | gpl-3.0 | 2,319 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import ctc
from tensorflow.contrib import distributions
from tensorflow.contrib import layers
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import testing
from tensorflow.contrib import util
| panmari/tensorflow | tensorflow/contrib/__init__.py | Python | apache-2.0 | 1,187 |
#! /usr/bin/python
'''
Created on Jul 27, 2013
@author: Marcin Czupryniak
@license: GPLv3
@copyright: ESOS Project - Marcin Czupryniak
This module is used for compacting the statistics in the database, computing averages and deleting the 5 seconds samples.
It uses native database drivers and not SQLAlchemy for optimal speed and memory utilization
Dictionaries are returned from queries
cpass - NULL = standard sample
cpass - 1 = 15 min avg for samples older than 24 hours
cpass - 2 = hourly avg for samples older than 7 days
cpass - 3 = daily avg for samples older than 1 month
Keep hourly records for 31 days
Keep daily records for 1 year
Minimal version of Postgresql due to database uri is 9.2
'''
import ConfigParser
import datetime
version = 'ESOS perf-agent 0.0.1'
configfile = '/etc/perf-agent.conf'
def initconfig(configfile):
    # Parse the agent configuration file; exits(1) when unreadable.
    # (Python 2; note the bare except also hides unrelated errors.)
    cfg = ConfigParser.ConfigParser()
    try:
        cfg.read(configfile)
    except:
        print "Error unable to read a valid configuration file"
        exit(1)
    return cfg
###############################
#
# module globals
# mvars is used as a plain namespace holding the shared DB connection
# and parsed configuration; it is never instantiated.
class mvars: pass
mvars.dbconn = None
mvars.cfg = initconfig(configfile)
# scheme prefix of the DBURI selects the driver: 'postgres' or 'mysql'
dbtype = mvars.cfg.get('Database','DBURI').split(':')[0]
###############################
# Generic insert query
# Shared by all compaction passes; every %(name)s key below must be
# present in the dict passed to cursor.execute().
iquery = '''
INSERT INTO perfdatablock (host, timestamp, device, readscompleted, readsmerged, sectorsread, writescompleted,
sectorswritten, kbwritten, kbread, averagereadtime, averagewritetime, iotime, interval, writespeed, readspeed, devicerate, cpass)
VALUES (%(host)s, %(timestamp)s, %(device)s, %(readscompleted)s, %(readsmerged)s,
%(sectorsread)s, %(writescompleted)s, %(sectorswritten)s, %(kbwritten)s, %(kbread)s, %(averagereadtime)s,
%(averagewritetime)s, %(iotime)s, %(interval)s, %(writespeed)s, %(readspeed)s, %(devicerate)s, %(cpass)s)
'''
# Universal cursor function
def getcur(**kwargs):
    """Return a cursor from the shared connection.

    Keyword arguments (e.g. cursor_factory=DictCursor) are forwarded
    only for PostgreSQL; the MySQL driver already gets its DictCursor
    class at connect time, so no args are needed there.
    """
    if dbtype == 'postgres':
        return mvars.dbconn.cursor(**kwargs)
    elif dbtype == 'mysql':
        return mvars.dbconn.cursor()
def connectDB():
    """Open the database connection selected by the DBURI scheme and
    store it in mvars.dbconn.  Exits with status 1 on failure."""
    if dbtype == 'postgres':
        global psycopg2
        global DictCursor # Make them a global import in case have to be used in other functions
        import psycopg2
        from psycopg2.extras import DictCursor
        try:
            mvars.dbconn = psycopg2.connect(mvars.cfg.get('Database','DBURI'))
        except Exception as err:
            print 'Unable to connect to DB'
            print err
            exit(1)
    if dbtype == 'mysql':
        # NOTE(review): declares `global mysql` but the module imported is
        # MySQLdb - the declaration appears to be a leftover; confirm.
        global mysql
        global DictCursor
        import MySQLdb
        import MySQLdb.cursors
        # MySQL path passes DictCursor at connect time, so the factory
        # placeholder is set to None (see getcur()).
        DictCursor = None
        dburi = mvars.cfg.get('Database','DBURI')
        import urlparse
        # not very nice but it does the job
        # hand-splits user:pass@host out of the URI netloc
        dbuser = urlparse.urlparse(dburi)[1].split(':')[0]
        dbpass = urlparse.urlparse(dburi)[1].split(':')[1].split('@')[0]
        dbhost = urlparse.urlparse(dburi)[1].split('@')[1]
        dbname = urlparse.urlparse(dburi)[2].split('/')[1]
        try:
            mvars.dbconn = MySQLdb.connect(host=dbhost,user=dbuser,passwd=dbpass,db=dbname, cursorclass=MySQLdb.cursors.DictCursor)
        except Exception as err:
            print 'Unable to connect to DB'
            print err
            exit(1)
def getmyhostid():
    """Look up this host's primary key in the ``hosts`` table.

    The host name is taken from the [Database] System config entry.
    Exits with status 1 when the host is not registered in the DB.
    """
    myhost = mvars.cfg.get('Database','System')
    query = "select id from hosts where host = %s"
    cur = getcur(cursor_factory=DictCursor)
    cur.execute(query,(myhost,))
    res = cur.fetchone()
    cur.close()
    # If my system is not in the DB exit with error status
    # (identity comparison: fetchone() returns None when no row matches)
    if res is None: exit(1)
    return res['id']
def getmydevices(myhostid):
    """Return the list of device ids registered for host *myhostid*."""
    query = 'select id from devices where host = %s'
    cur = getcur(cursor_factory=DictCursor)
    cur.execute(query,(myhostid,))
    rows = cur.fetchall()
    cur.close()
    # each row is a dict-like record; collect just the id column
    return [row['id'] for row in rows]
def interavg(host,device):
    # comuptes the 15 minute avg of the last day and store into DB
    # compresses records of the previous day (run it before midnight!)
    # Raw samples (cpass IS NULL) for yesterday are aggregated into 96
    # quarter-hour records tagged cpass = 1; the raw rows are deleted in
    # the same transaction as the insert.
    # Delete query
    dquery = '''
    delete from perfdatablock where
    perfdatablock.timestamp BETWEEN %s and %s and device = %s and cpass IS NULL;
    '''
    # Select query
    squery = '''
    select SUM(readscompleted) as readscompleted,SUM(readsmerged) as readsmerged,SUM(sectorsread) as sectorsread,
    SUM(sectorswritten) as sectorswritten,SUM(kbwritten) as kbwritten,SUM(kbread) as kbread,AVG(averagereadtime) as averagereadtime,
    AVG(averagewritetime) as averagewritetime,AVG(iotime) as iotime,AVG(writespeed) as writespeed,AVG(readspeed) as readspeed,
    AVG(devicerate) as devicerate, SUM(writescompleted) as writescompleted, COUNT(*) as nrecords
    from perfdatablock where
    perfdatablock.timestamp BETWEEN %s and %s and device = %s and cpass IS NULL;
    '''
    # midnight at the start of yesterday
    startdate = (datetime.datetime.now()-datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0,microsecond=0)
    # boundaries of the 96 fifteen-minute windows covering the day
    dates = []
    dates.append(startdate)
    for i in xrange(1,96):
        d = startdate + datetime.timedelta(minutes=(i*15))
        dates.append(d)
    enddate = startdate.replace(hour=23, minute=59, second=59,microsecond=999999)
    dates.append(enddate)
    for i in xrange(0,96):
        # Select Query Block
        cur = getcur(cursor_factory=DictCursor)
        cur.execute(squery,(dates[i],dates[i+1],device))
        rec = cur.fetchone()
        # NOTE(review): when the window is empty the cursor is never
        # closed before `continue` - possible cursor leak; confirm.
        if rec['nrecords'] == 0: continue
        # Transform into a normal dict
        nrec = rec.copy()
        cur.close()
        nrec['host'] = host
        nrec['device'] = device
        # shifting the timestamp by 7 min and 30 seconds since first sample (middle of the 15 min window)
        nrec['timestamp'] = dates[i] + datetime.timedelta(minutes = 7, seconds = 30)
        nrec['cpass'] = 1
        # NOTE(review): a 15-minute window is 900 seconds but interval is
        # recorded as 300 - looks like a 5-minute leftover; confirm.
        nrec['interval'] = 300
        cur = getcur(cursor_factory=DictCursor)
        # rollback any transaction pending and start a new one
        mvars.dbconn.rollback()
        try:
            # Insert the record
            cur.execute(iquery,nrec)
            # Delete all computed records
            cur.execute(dquery,(dates[i],dates[i+1],device))
            mvars.dbconn.commit()
        except Exception as err:
            mvars.dbconn.rollback()
            print err
def hourlyavg(host,device):
    # This will be executed 24 times in a row until the whole day gets processed
    # Processes samples older than 7 days
    # Quarter-hour records (cpass = 1) from the day exactly one week ago
    # are aggregated into 24 hourly records tagged cpass = 2.
    # Delete query
    dquery = '''
    delete from perfdatablock where
    perfdatablock.timestamp BETWEEN %s and %s and device = %s and cpass = 1;
    '''
    # Select query
    squery = '''
    select SUM(readscompleted) as readscompleted,SUM(readsmerged) as readsmerged,SUM(sectorsread) as sectorsread,
    SUM(sectorswritten) as sectorswritten,SUM(kbwritten) as kbwritten,SUM(kbread) as kbread,AVG(averagereadtime) as averagereadtime,
    AVG(averagewritetime) as averagewritetime,AVG(iotime) as iotime,AVG(writespeed) as writespeed,AVG(readspeed) as readspeed,
    AVG(devicerate) as devicerate, SUM(writescompleted) as writescompleted, COUNT(*) as nrecords
    from perfdatablock where
    perfdatablock.timestamp BETWEEN %s and %s and device = %s and cpass = 1;
    '''
    # starts at 00:00:00 of the previous week
    startdate = (datetime.datetime.now()-datetime.timedelta(days=7)).replace(hour=0, minute=0, second=0,microsecond=0)
    # build the dates list - hours within a day
    dates = []
    dates.append(startdate)
    for i in xrange(1,24):
        d = startdate + datetime.timedelta(hours=i)
        dates.append(d)
    enddate = startdate.replace(hour=23, minute=59, second=59,microsecond=999999)
    dates.append(enddate)
    for i in xrange(0,24):
        # Select Query Block
        cur = getcur(cursor_factory=DictCursor)
        cur.execute(squery,(dates[i],dates[i+1],device))
        rec = cur.fetchone()
        # NOTE(review): empty window skips without closing the cursor
        if rec['nrecords'] == 0: continue
        # cast into a normal dict
        nrec = rec.copy()
        cur.close()
        nrec['host'] = host
        nrec['device'] = device
        # shifting the timestamp by 30 min since first sample
        nrec['timestamp'] = dates[i] + datetime.timedelta(minutes = 30)
        nrec['cpass'] = 2
        nrec['interval'] = 3600
        # Insert Query Block
        cur = getcur(cursor_factory=DictCursor)
        # rollback any transaction pending and start a new one
        mvars.dbconn.rollback()
        try:
            # Insert the record
            cur.execute(iquery,nrec)
            # Delete all computed records
            cur.execute(dquery,(dates[i],dates[i+1],device))
            mvars.dbconn.commit()
        except Exception as err:
            mvars.dbconn.rollback()
            print err
        cur.close()
def dailyavg(host,device):
    """Compact month-old hourly averages (cpass = 2) into a single
    daily average record (cpass = 3) for *device*.

    Processes the whole day exactly 31 days ago; must be executed
    daily by the cron script, otherwise days are skipped.
    """
    # window: the full day 31 days ago
    startdate = (datetime.datetime.now()-datetime.timedelta(days=31)).replace(hour=0, minute=0, second=0,microsecond=0)
    enddate = startdate.replace(hour=23, minute=59, second=59,microsecond=999999)
    # Select query.  Must yield every column the shared insert query
    # (iquery) expects - BUG FIX: SUM(writescompleted) was missing,
    # which made the later INSERT fail with a KeyError every time.
    squery = '''
    select SUM(readscompleted) as readscompleted,SUM(readsmerged) as readsmerged,SUM(sectorsread) as sectorsread,
    SUM(sectorswritten) as sectorswritten,SUM(kbwritten) as kbwritten,SUM(kbread) as kbread,AVG(averagereadtime) as averagereadtime,
    AVG(averagewritetime) as averagewritetime,AVG(iotime) as iotime,AVG(writespeed) as writespeed,AVG(readspeed) as readspeed,
    AVG(devicerate) as devicerate, SUM(writescompleted) as writescompleted, COUNT(*) as nrecords
    from perfdatablock where
    perfdatablock.timestamp >= %s and perfdatablock.timestamp <= %s and device = %s and cpass = 2
    '''
    # Delete query
    dquery = '''
    delete from perfdatablock where
    perfdatablock.timestamp BETWEEN %s and %s and device = %s and cpass = 2;
    '''
    cur = getcur(cursor_factory=DictCursor)
    # BUG FIX: the select previously filtered on device = 0 instead of
    # the device actually being processed.
    cur.execute(squery,(startdate,enddate,device))
    rec = cur.fetchone()
    # make sure that we got some records from the query otherwise
    # don't store anything in the database and exit this function
    if rec['nrecords'] == 0: return
    # cast into a normal dict
    nrec = rec.copy()
    cur.close()
    nrec['host'] = host
    nrec['device'] = device
    # shifting the timestamp by 12 hours since first sample
    nrec['timestamp'] = startdate + datetime.timedelta(hours=12)
    nrec['cpass'] = 3
    nrec['interval'] = 86400 # 24 hours
    # Insert Query Block
    cur = getcur(cursor_factory=DictCursor)
    # rollback any transaction pending and start a new one
    mvars.dbconn.rollback()
    try:
        # Insert the record
        cur.execute(iquery,nrec)
        # Delete all computed records
        cur.execute(dquery,(startdate,enddate,device))
        mvars.dbconn.commit()
    except Exception as err:
        mvars.dbconn.rollback()
        print(err)
    # close the cursor (was leaked before; hourlyavg already does this)
    cur.close()
def start():
    """Entry point: run all three compaction passes for every device
    registered to this host, then close the DB connection."""
    connectDB()
    host_id = getmyhostid()
    for device_id in getmydevices(host_id):
        # 15-minute averages for yesterday's raw samples
        interavg(host_id, device_id)
        # hourly averages for samples older than 7 days
        hourlyavg(host_id, device_id)
        # daily averages for records older than a month
        dailyavg(host_id, device_id)
    # close the dbconnection
    mvars.dbconn.close()
if __name__ == '__main__':
start()
| feldsam/esos | scripts/db_compact.py | Python | gpl-3.0 | 11,805 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# This file is part of MAVlinkplug.
# MAVlinkplug is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MAVlinkplug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MAVlinkplug. If not, see <http://www.gnu.org/licenses/>.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import struct
from json import dumps
# Managing Mavlink dialect
import mavlinkplug
mavlink = __import__('pymavlink.dialects.v10.' + mavlinkplug._MAVLINKPLUG_DIALECT, globals(), locals(), [''], -1)
#import pymavlink.dialects.v10.pixhawk as mavlink
from pymavlink.generator.mavcrc import x25crc
from mavlinkplug.Exception import Exception
from collections import namedtuple
#Data classes
class RawData(object):
    """Base payload class: holds an opaque byte string.

    Subclasses override ``_type`` (and possibly packing/decoding) but
    keep the same value/packed/type interface.
    """
    _type = 'RawData'
    def __init__(self, value = None):
        # the setter stores the value in self._value unconditionally,
        # so no separate pre-initialisation is needed
        self.value = value
    @property
    def packed(self):
        """Wire form of the payload (the raw bytes themselves)."""
        return self.value
    @property
    def value(self):
        """Stored payload; raises when no value has been set."""
        if(self._value is None):
            raise Exception('Invalid value : value not define')
        else:
            return self._value
    @value.setter
    def value(self,value):
        #self._value contains raw value
        self._value = value
    @property
    def type(self):
        """Human-readable payload type tag."""
        return self._type
    def build_from(self, value): #For signature uniformity
        self.value = value
        return self
    @classmethod
    def build_full_message_from(cls, destination, source, timestamp, data):
        """Wrap *data* in a complete plug Message with a RAW-typed header."""
        header = Header().build_from(destination, source, TYPE.RAW.value,timestamp)
        msg_data = cls().build_from(data)
        return Message().build_from(header, msg_data)
class MAVLinkData(RawData):
    """Payload wrapping a decoded MAVLink message object.

    ``value`` holds the message instance; assigning a raw buffer
    triggers decoding via :meth:`_decode`.
    """
    _type = 'MAVLinkData'
    @property
    def packed(self):
        """Wire form: the original message buffer."""
        return self.value.get_msgbuf()
    @property
    def value(self):
        """Decoded MAVLink message; raises when nothing has been set."""
        if(self._value is None):
            raise Exception('Invalid value : value not define')
        else:
            return self._value
    @value.setter
    def value(self,value):
        #self._value has to contain a MAVlink message class instance
        if(value is not None):
            # NOTE(review): checks against mavlink.MAVLink (the protocol
            # object), not a message base class - confirm intent.
            if(isinstance(value,mavlink.MAVLink)): #Is this a mavlink message object ?
                self._value = value
            else: #Try to decode into a mavlink message object
                self._value = self._decode(value)
        else:
            self._value = None
    @property
    def json(self):
        """JSON string {msg_type: {field: value, ...}} for the message."""
        d_type = self.value.get_type()
        data = {}
        data[d_type] = {}
        if (d_type != 'BAD DATA' and d_type != 'BAD_DATA'): #BAD DATA message ignored
            # BUG FIX: was self.data.get_fieldnames(); no `data`
            # attribute exists on this class - the field list lives on
            # the decoded message held in self.value.
            for field in self.value.get_fieldnames():
                data[d_type][field]=self.value.__dict__[field]
        json_data = dumps(data)
        return json_data
    @property
    def fields(self):
        """Dict of {field: value} for the message (empty for BAD DATA)."""
        d_type = self.value.get_type()
        data = {}
        if (d_type != 'BAD DATA' and d_type != 'BAD_DATA'): #BAD DATA message ignored
            # BUG FIX: same self.data -> self.value correction as above
            for field in self.value.get_fieldnames():
                data[field]=self.value.__dict__[field]
        return data
    #Decode buffer into MAVlink message class instance
    def _decode(self, msgbuf):
        '''
        Decode a buffer as a MAVLink message
        '''
        # unpack the 6-byte MAVLink v1 header
        try:
            magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
        except struct.error as emsg:
            raise Exception('Unable to unpack MAVLink header: %s' % emsg)
        if ord(magic) != 254:
            raise Exception("invalid MAVLink prefix '%s'" % magic)
        if mlen != len(msgbuf)-8:
            raise Exception('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf) - 8, mlen, msgId))
        if not msgId in mavlink.mavlink_map:
            raise Exception('unknown MAVLink message ID %u' % msgId)
        # decode the payload
        type = mavlink.mavlink_map[msgId]
        fmt = type.format
        order_map = type.orders
        len_map = type.lengths
        crc_extra = type.crc_extra
        # decode the checksum
        try:
            crc, = struct.unpack('<H', msgbuf[-2:])
        except struct.error as emsg:
            raise Exception('Unable to unpack MAVLink CRC: %s' % emsg)
        # CRC covers everything after the magic byte, plus crc_extra
        crcbuf = msgbuf[1:-2]
        crcbuf = crcbuf + struct.pack('B',crc_extra)
        crc2 = x25crc(crcbuf)
        if crc != crc2.crc:
            raise Exception('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
        try:
            t = struct.unpack(fmt, msgbuf[6:-2])
        except struct.error as emsg:
            raise Exception('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
                type, fmt, len(msgbuf[6:-2]), emsg))
        tlist = list(t)
        # handle sorted fields: the wire order differs from the
        # constructor order, so re-sort using the generated order map
        if True:
            t = tlist[:]
            if sum(len_map) == len(len_map):
                # message has no arrays in it
                for i in range(0, len(tlist)):
                    tlist[i] = t[order_map[i]]
            else:
                # message has some arrays
                tlist = []
                for i in range(0, len(order_map)):
                    order = order_map[i]
                    L = len_map[order]
                    tip = sum(len_map[:order])
                    field = t[tip]
                    if L == 1 or isinstance(field, str):
                        tlist.append(field)
                    else:
                        tlist.append(t[tip:(tip + L)])
        # terminate any strings
        for i in range(0, len(tlist)):
            if isinstance(tlist[i], str):
                tlist[i] = str(mavlink.MAVString(tlist[i]))
        t = tuple(tlist)
        # construct the message object
        try:
            m = type(*t)
        except Exception as emsg:
            raise Exception('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
        m._msgbuf = msgbuf
        m._payload = msgbuf[6:-2]
        m._crc = crc
        m._header = mavlink.MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
        self._value = m
        return m
# The three payload classes below behave exactly like RawData; only the
# type tag differs (used by TypeContainer to map type codes to classes).
class MavCommandData(RawData):
    _type = 'MavCommandData'
    pass
class KillData(RawData):
    _type = 'KillData'
    pass
class LogData(RawData):
    _type = 'LogData'
    pass
class TypeContainer(object):
    """Registry mapping message-type names to (value, packed value,
    payload class) triples, exposed as attributes (e.g. TYPE.MAV_MSG)."""
    #Class attributes
    # name, numeric code (bit-flag style), payload class
    _type_description = [
        ['MAV_MSG', 1, MAVLinkData],
        ['MAV_COMMAND', 2, MavCommandData],
        ['KILL', 4, KillData],
        ['RAW',8, RawData],
        ['LOG_DATA',16, LogData]
    ]
    TypeItem = namedtuple('TypeItem', ['value','p_value','m_class'])
    def __init__(self):
        self._PACK_FORMAT = '!B'
        # parallel lists, all indexed the same way
        self._names = []
        self._values = []
        self._p_values = []
        self._m_classes = []
        for value in self._type_description :
            self._names.append(value[0])
            self._values.append(value[1])
            self._p_values.append(struct.pack(self._PACK_FORMAT,value[1]))
            self._m_classes.append(value[2])
    @property
    def values(self):
        # all numeric type codes
        return self._values
    @property
    def p_values(self):
        # all packed (single network-order byte) type codes
        return self._p_values
    def __getattr__(self, name):
        # attribute access by type name, e.g. TYPE.KILL
        if(name in self._names):
            return self.TypeItem(self._values[self._names.index(name)], self._p_values[self._names.index(name)], self._m_classes[self._names.index(name)])
        else:
            raise Exception('Message Type {0} not defined'.format(name))
    def get_class_from_value(self, p_value):
        # NOTE(review): parameter is a numeric value despite its name
        return self._get_X_from_Y(p_value, self._m_classes, self._values)
    def get_class_from_p_value(self, p_value):
        return self._get_X_from_Y(p_value, self._m_classes, self._p_values)
    def get_value_from_class(self, m_class):
        return self._get_X_from_Y(m_class,self._values, self._m_classes)
    def _get_X_from_Y(self,value, X_table, Y_table):
        # generic parallel-list lookup
        if(value in Y_table):
            return X_table[Y_table.index(value)]
        else:
            # NOTE(review): .format(value) has no placeholder in the
            # message, so the offending value is never shown - confirm.
            raise Exception('Message Type search item not defined'.format(value))
class DestinationContainer(object):
    """Registry of plug message destinations.

    Each named destination is exposed as an attribute (e.g.
    DESTINATION.ALL) returning a DestinationItem carrying both the
    integer value and its one-byte network-order packed form.
    """
    # name, numeric destination code
    _destination_description = [
        ['ALL', 255],
    ]
    DestinationItem = namedtuple('DestinationItem',['value','p_value'])
    def __init__(self):
        self._PACK_FORMAT = '!B'
        # parallel lists, all indexed the same way
        self._names = [entry[0] for entry in self._destination_description]
        self._values = [entry[1] for entry in self._destination_description]
        self._p_values = [struct.pack(self._PACK_FORMAT, code) for code in self._values]
    def __getattr__(self, name):
        # attribute access by destination name, e.g. DESTINATION.ALL
        try:
            index = self._names.index(name)
        except ValueError:
            raise Exception('Message Destination {0} not defined'.format(name))
        return self.DestinationItem(self._values[index], self._p_values[index])
# Module-level singletons used throughout the plug for message typing
# and addressing.
TYPE = TypeContainer()
DESTINATION = DestinationContainer()
# Header
class Header(object):
    """Fixed-size plug message header.

    Wire layout ('!BBBQ', network order): destination byte, source
    byte, type byte, 64-bit unsigned timestamp.  All setters validate
    their input; all getters raise when the field is still unset.
    """
    def __init__(self):
        self._pack = '!BBBQ'
        self._destination = None
        self._source = None
        self._type = None
        self._timestamp = None
    #Destination property
    @property
    def destination(self):
        if(self._destination is None):
            raise Exception('Invalid header data : destination not define')
        else:
            return self._destination
    @destination.setter
    def destination(self, destination):
        # must fit in one unsigned byte
        if( not isinstance(destination, int) or destination < 0 or destination > 255):
            raise Exception('Invalid header destination set value: {0}'.format(destination))
        else:
            self._destination = destination
    #Source property
    @property
    def source(self):
        if(self._source is None):
            raise Exception('Invalid header data : source not define')
        else:
            return self._source
    @source.setter
    def source(self, source):
        # must fit in one unsigned byte
        if( not isinstance(source, int) or source < 0 or source > 255):
            raise Exception('Invalid header source set value: {0}'.format(source))
        else:
            self._source = source
    #Type property
    @property
    def type(self):
        if(self._type is None):
            raise Exception('Invalid header data : type not define')
        else:
            return self._type
    @type.setter
    def type(self, type):
        # only codes registered in the TYPE container are accepted
        if( not type in TYPE.values):
            raise Exception('Invalid header type set value: {0}'.format(type))
        else:
            self._type = type
    #Timestamp property
    @property
    def timestamp(self):
        if(self._timestamp is None):
            raise Exception('Invalid header data : timestamp not define')
        else:
            return self._timestamp
    @timestamp.setter
    def timestamp(self, timestamp):
        if(timestamp < 0):
            raise Exception('Invalid header timestamp set value: {0}'.format(timestamp))
        else:
            self._timestamp = timestamp
    #Packed property
    @property
    def packed(self):
        """Serialized header bytes."""
        return struct.pack(self._pack, self.destination, self.source, self.type, self.timestamp)
    #Build header from parameter
    def build_from(self, destination, source, type, timestamp):
        """Set all four fields (validated by the setters); returns self."""
        self.destination = destination
        self.source = source
        self.type = type
        self.timestamp = timestamp
        return self
    #Unpack header from message
    def unpack_from(self, message):
        """Fill this header from the start of *message* and return the
        remaining payload wrapped in the data class for the type byte."""
        p_size = struct.Struct(self._pack).size
        self.destination, self.source, self.type, self.timestamp = struct.unpack(self._pack ,message[:p_size])
        return TYPE.get_class_from_value(self.type)(message[p_size:])
#Message
class Message(object):
    """A complete plug message: a Header plus a typed data payload."""
    def __init__(self):
        self._header = None
        self._data = None
    @property
    def data(self):
        """Payload instance; raises when no data has been set."""
        if(self._data is None):
            raise Exception('Invalid Data : data not define')
        else:
            return self._data
    @data.setter
    def data(self,value):
        # the header must exist first because assigning data also
        # stamps the matching type code into the header
        if(self._header is None):
            raise Exception('Invalid Header : header has to be define ahead of data')
        else:
            #self._data need to contain data class instance
            #header type must match data type
            self.header.type = TYPE.get_value_from_class(type(value))
            self._data = value
    @property
    def header(self):
        """Header instance; raises when no header has been set."""
        if(self._header is None):
            raise Exception('Invalid Header : header not define')
        else:
            return self._header
    @header.setter
    def header(self,value):
        #self._header need to contain header class instance
        # NOTE(review): exact-type comparison deliberately rejects
        # Header subclasses; kept as-is to preserve behavior.
        if(type(value) == type(Header())):
            self._header = value
        else:
            raise Exception('Invalid Header : object is not an instance of Header class')
    @property
    def packed(self):
        """Wire form: packed header followed by packed payload."""
        return self.header.packed + self.data.packed
    def unpack_from(self, byte_message):
        """Parse *byte_message* into header and data; returns self."""
        self.header = Header()
        self.data = self.header.unpack_from(byte_message) #Check included !!!!
        return self
    def build_from(self, header_instance, data_instance):
        """Populate from existing header and data instances; returns self."""
        self.header = header_instance
        self.data = data_instance
        return self
def integer_pack(_integer):
    """Pack *_integer* as a single network-order unsigned byte."""
    return struct.Struct('!B').pack(_integer)
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustnessPage(page.Page):
  """Page driving extra/lots-of-polys-example.html with the robustness
  harness injected at commit time; navigation completes once the
  harness reports webglTestHarness._finished."""
  def __init__(self, page_set, base_dir):
    super(WebglRobustnessPage, self).__init__(
      url='file://extra/lots-of-polys-example.html',
      page_set=page_set,
      base_dir=base_dir)
    # the harness loops the test until the GL context is lost
    self.script_to_evaluate_on_commit = robustness_harness_script
  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # wait until the harness signals completion (set after recovery
    # from the context loss)
    action_runner.WaitForJavaScriptCondition('webglTestHarness._finished')
class WebglRobustness(test.Test):
  """GPU test wiring the robustness page to the WebGL conformance
  validator."""
  test = WebglConformanceValidator
  def CreatePageSet(self, options):
    # serve the whole conformance directory so the extra/ sample and
    # its resources resolve
    ps = page_set.PageSet(
      file_path=conformance_path,
      user_agent_type='desktop',
      serving_dirs=[''])
    ps.AddPage(WebglRobustnessPage(ps, ps.base_dir))
    return ps
| TeamEOS/external_chromium_org | content/test/gpu/gpu_tests/webgl_robustness.py | Python | bsd-3-clause | 2,603 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-20 15:45
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; keep structural edits out
    # of this file and change the models instead.
    dependencies = [
        ('vtn', '0035_auto_20171116_1838'),
    ]
    operations = [
        # Drop the Report model's two site-related fields before the
        # model itself is deleted below.
        migrations.RemoveField(
            model_name='report',
            name='site',
        ),
        migrations.RemoveField(
            model_name='report',
            name='site_event',
        ),
        # The datetime literal was captured at generation time;
        # preserve_default=False means it is only used to fill existing
        # rows and is not kept as the field default.
        migrations.AlterField(
            model_name='drevent',
            name='scheduled_notification_time',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 20, 15, 45, 40, 261036), verbose_name='Scheduled Notification Time'),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Report',
        ),
    ]
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web._auth}.
"""
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionDone
from twisted.internet.address import IPv4Address
from twisted.cred import error, portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess
from twisted.cred.credentials import IUsernamePassword
from twisted.web.iweb import ICredentialFactory
from twisted.web.resource import IResource, Resource, getChildForRequest
from twisted.web._auth import basic, digest
from twisted.web._auth.wrapper import HTTPAuthSessionWrapper, UnauthorizedResource
from twisted.web._auth.basic import BasicCredentialFactory
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import Data
from twisted.web.test.test_web import DummyRequest
def b64encode(s):
    """Base64-encode *s* as a single unbroken line.

    The previous codec-based s.encode('base64') wraps its output with
    newlines every 76 characters, which would corrupt long
    I{Authorization} header values; the base64 module never inserts
    line breaks.  strip() keeps the original trailing-whitespace
    behavior.
    """
    import base64
    return base64.b64encode(s).strip()
class BasicAuthTestsMixin:
    """
    L{TestCase} mixin class which defines a number of tests for
    L{basic.BasicCredentialFactory}. Because this mixin defines C{setUp}, it
    must be inherited before L{TestCase}.
    """
    def setUp(self):
        # fixture values shared by every test in the mixin
        self.request = self.makeRequest()
        self.realm = 'foo'
        self.username = 'dreid'
        self.password = 'S3CuR1Ty'
        self.credentialFactory = basic.BasicCredentialFactory(self.realm)
    def makeRequest(self, method='GET', clientAddress=None):
        """
        Create a request object to be passed to
        L{basic.BasicCredentialFactory.decode} along with a response value.
        Override this in a subclass.
        """
        raise NotImplementedError("%r did not implement makeRequest" % (
            self.__class__,))
    def test_interface(self):
        """
        L{BasicCredentialFactory} implements L{ICredentialFactory}.
        """
        self.assertTrue(
            verifyObject(ICredentialFactory, self.credentialFactory))
    def test_usernamePassword(self):
        """
        L{basic.BasicCredentialFactory.decode} turns a base64-encoded response
        into a L{UsernamePassword} object with a password which reflects the
        one which was encoded in the response.
        """
        response = b64encode('%s:%s' % (self.username, self.password))
        creds = self.credentialFactory.decode(response, self.request)
        self.assertTrue(IUsernamePassword.providedBy(creds))
        self.assertTrue(creds.checkPassword(self.password))
        self.assertFalse(creds.checkPassword(self.password + 'wrong'))
    def test_incorrectPadding(self):
        """
        L{basic.BasicCredentialFactory.decode} decodes a base64-encoded
        response with incorrect padding.
        """
        # removing the trailing '=' padding must not break decoding
        response = b64encode('%s:%s' % (self.username, self.password))
        response = response.strip('=')
        creds = self.credentialFactory.decode(response, self.request)
        self.assertTrue(verifyObject(IUsernamePassword, creds))
        self.assertTrue(creds.checkPassword(self.password))
    def test_invalidEncoding(self):
        """
        L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} if passed
        a response which is not base64-encoded.
        """
        response = 'x' # one byte cannot be valid base64 text
        self.assertRaises(
            error.LoginFailed,
            self.credentialFactory.decode, response, self.makeRequest())
    def test_invalidCredentials(self):
        """
        L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} when
        passed a response which is not valid base64-encoded text.
        """
        # decodes as base64 but contains no ':' separator, so the
        # credentials cannot be split into username and password
        response = b64encode('123abc+/')
        self.assertRaises(
            error.LoginFailed,
            self.credentialFactory.decode,
            response, self.makeRequest())
class RequestMixin:
    # Supplies the makeRequest() hook required by the auth test mixins.
    def makeRequest(self, method='GET', clientAddress=None):
        """
        Create a L{DummyRequest} (change me to create a
        L{twisted.web.http.Request} instead).
        """
        request = DummyRequest('/')
        request.method = method
        request.client = clientAddress
        return request
class BasicAuthTestCase(RequestMixin, BasicAuthTestsMixin, unittest.TestCase):
    """
    Basic authentication tests which use L{twisted.web.http.Request}.

    NOTE(review): RequestMixin.makeRequest currently builds a
    L{DummyRequest}, not an HTTP request - see its docstring.
    """
class DigestAuthTestCase(RequestMixin, unittest.TestCase):
    """
    Digest authentication tests which use L{twisted.web.http.Request}.
    """
    def setUp(self):
        """
        Create a DigestCredentialFactory for testing
        """
        self.realm = "test realm"
        self.algorithm = "md5"
        self.credentialFactory = digest.DigestCredentialFactory(
            self.algorithm, self.realm)
        self.request = self.makeRequest()
    def test_decode(self):
        """
        L{digest.DigestCredentialFactory.decode} calls the C{decode} method on
        L{twisted.cred.digest.DigestCredentialFactory} with the HTTP method and
        host of the request.
        """
        host = '169.254.0.1'
        method = 'GET'
        done = [False]
        response = object()
        def check(_response, _method, _host):
            # verify the wrapped decode receives exactly what was passed in
            self.assertEqual(response, _response)
            self.assertEqual(method, _method)
            self.assertEqual(host, _host)
            done[0] = True
        self.patch(self.credentialFactory.digest, 'decode', check)
        req = self.makeRequest(method, IPv4Address('TCP', host, 81))
        self.credentialFactory.decode(response, req)
        self.assertTrue(done[0])
    def test_interface(self):
        """
        L{DigestCredentialFactory} implements L{ICredentialFactory}.
        """
        self.assertTrue(
            verifyObject(ICredentialFactory, self.credentialFactory))
    def test_getChallenge(self):
        """
        The challenge issued by L{DigestCredentialFactory.getChallenge} must
        include C{'qop'}, C{'realm'}, C{'algorithm'}, C{'nonce'}, and
        C{'opaque'} keys.  The values for the C{'realm'} and C{'algorithm'}
        keys must match the values supplied to the factory's initializer.
        None of the values may have newlines in them.
        """
        challenge = self.credentialFactory.getChallenge(self.request)
        self.assertEqual(challenge['qop'], 'auth')
        self.assertEqual(challenge['realm'], 'test realm')
        self.assertEqual(challenge['algorithm'], 'md5')
        self.assertIn('nonce', challenge)
        self.assertIn('opaque', challenge)
        # newlines would allow header-splitting in the challenge
        for v in challenge.values():
            self.assertNotIn('\n', v)
    def test_getChallengeWithoutClientIP(self):
        """
        L{DigestCredentialFactory.getChallenge} can issue a challenge even if
        the L{Request} it is passed returns C{None} from C{getClientIP}.
        """
        request = self.makeRequest('GET', None)
        challenge = self.credentialFactory.getChallenge(request)
        self.assertEqual(challenge['qop'], 'auth')
        self.assertEqual(challenge['realm'], 'test realm')
        self.assertEqual(challenge['algorithm'], 'md5')
        self.assertIn('nonce', challenge)
        self.assertIn('opaque', challenge)
class UnauthorizedResourceTests(unittest.TestCase):
    """
    Tests for L{UnauthorizedResource}.
    """
    def test_getChildWithDefault(self):
        """
        An L{UnauthorizedResource} is every child of itself.
        """
        resource = UnauthorizedResource([])
        self.assertIdentical(
            resource.getChildWithDefault("foo", None), resource)
        self.assertIdentical(
            resource.getChildWithDefault("bar", None), resource)
    def test_render(self):
        """
        L{UnauthorizedResource} renders with a 401 response code and a
        I{WWW-Authenticate} header and puts a simple unauthorized message
        into the response body.
        """
        resource = UnauthorizedResource([
            BasicCredentialFactory('example.com')])
        request = DummyRequest([''])
        request.render(resource)
        self.assertEqual(request.responseCode, 401)
        self.assertEqual(
            request.responseHeaders.getRawHeaders('www-authenticate'),
            ['basic realm="example.com"'])
        self.assertEqual(request.written, ['Unauthorized'])
    def test_renderQuotesRealm(self):
        """
        The realm value included in the I{WWW-Authenticate} header set in
        the response when L{UnauthorizedResource} is rendered has quotes
        and backslashes escaped.
        """
        resource = UnauthorizedResource([
            BasicCredentialFactory('example\\"foo')])
        request = DummyRequest([''])
        request.render(resource)
        # expected header contains \\ and \" escapes for the realm text
        self.assertEqual(
            request.responseHeaders.getRawHeaders('www-authenticate'),
            ['basic realm="example\\\\\\"foo"'])
class Realm(object):
    """
    A simple L{IRealm} implementation which gives out L{WebAvatar} for any
    avatarId.
    @ivar avatarFactory: A one-argument callable mapping an avatarId to
        the avatar resource handed back from C{requestAvatar}.
    @type loggedIn: C{int}
    @ivar loggedIn: The number of times C{requestAvatar} has been invoked for
        L{IResource}.
    @type loggedOut: C{int}
    @ivar loggedOut: The number of times the logout callback has been invoked.
    """
    implements(portal.IRealm)
    def __init__(self, avatarFactory):
        self.loggedOut = 0
        self.loggedIn = 0
        self.avatarFactory = avatarFactory
    def requestAvatar(self, avatarId, mind, *interfaces):
        # only web resources are supported by this test realm
        if IResource in interfaces:
            self.loggedIn += 1
            return IResource, self.avatarFactory(avatarId), self.logout
        raise NotImplementedError()
    def logout(self):
        self.loggedOut += 1
class HTTPAuthHeaderTests(unittest.TestCase):
    """
    Tests for L{HTTPAuthSessionWrapper}.
    """
    # Factory used to create requests; subclasses may override it.
    makeRequest = DummyRequest

    def setUp(self):
        """
        Create a realm, portal, and L{HTTPAuthSessionWrapper} to use in the tests.
        """
        self.username = 'foo bar'
        self.password = 'bar baz'
        self.avatarContent = "contents of the avatar resource itself"
        self.childName = "foo-child"
        self.childContent = "contents of the foo child of the avatar"
        self.checker = InMemoryUsernamePasswordDatabaseDontUse()
        self.checker.addUser(self.username, self.password)
        self.avatar = Data(self.avatarContent, 'text/plain')
        self.avatar.putChild(
            self.childName, Data(self.childContent, 'text/plain'))
        self.avatars = {self.username: self.avatar}
        self.realm = Realm(self.avatars.get)
        self.portal = portal.Portal(self.realm, [self.checker])
        # Individual tests append factories here to enable auth schemes; the
        # wrapper holds a reference to this same list.
        self.credentialFactories = []
        self.wrapper = HTTPAuthSessionWrapper(
            self.portal, self.credentialFactories)

    def _authorizedBasicLogin(self, request):
        """
        Add an I{basic authorization} header to the given request and then
        dispatch it, starting from C{self.wrapper} and returning the resulting
        L{IResource}.
        """
        authorization = b64encode(self.username + ':' + self.password)
        request.headers['authorization'] = 'Basic ' + authorization
        return getChildForRequest(self.wrapper, request)

    def test_getChildWithDefault(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} instance when the request does
        not have the required I{Authorization} headers.
        """
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(result):
            self.assertEqual(request.responseCode, 401)
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def _invalidAuthorizationTest(self, response):
        """
        Create a request with the given value as the value of an
        I{Authorization} header and perform resource traversal with it,
        starting at C{self.wrapper}.  Assert that the result is a 401 response
        code.  Return a L{Deferred} which fires when this is all done.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = response
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(result):
            self.assertEqual(request.responseCode, 401)
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_getChildWithDefaultUnauthorizedUser(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with a user which does not exist.
        """
        return self._invalidAuthorizationTest('Basic ' + b64encode('foo:bar'))

    def test_getChildWithDefaultUnauthorizedPassword(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with a user which exists and the wrong
        password.
        """
        return self._invalidAuthorizationTest(
            'Basic ' + b64encode(self.username + ':bar'))

    def test_getChildWithDefaultUnrecognizedScheme(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with an unrecognized scheme.
        """
        return self._invalidAuthorizationTest('Quux foo bar baz')

    def test_getChildWithDefaultAuthorized(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{IResource} which renders the L{IResource} avatar
        retrieved from the portal when the request has a valid I{Authorization}
        header.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [self.childContent])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_renderAuthorized(self):
        """
        Resource traversal which terminates at an L{HTTPAuthSessionWrapper}
        and includes correct authentication headers results in the
        L{IResource} avatar (not one of its children) retrieved from the
        portal being rendered.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        # Request it exactly, not any of its children.
        request = self.makeRequest([])
        child = self._authorizedBasicLogin(request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [self.avatarContent])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_getChallengeCalledWithRequest(self):
        """
        When L{HTTPAuthSessionWrapper} finds an L{ICredentialFactory} to issue
        a challenge, it calls the C{getChallenge} method with the request as an
        argument.
        """
        class DumbCredentialFactory(object):
            implements(ICredentialFactory)
            scheme = 'dumb'

            def __init__(self):
                # Records every request passed to getChallenge.
                self.requests = []

            def getChallenge(self, request):
                self.requests.append(request)
                return {}

        factory = DumbCredentialFactory()
        self.credentialFactories.append(factory)
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(factory.requests, [request])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def _logoutTest(self):
        """
        Issue a request for an authentication-protected resource using valid
        credentials and then return the C{DummyRequest} instance which was
        used.

        This is a helper for tests about the behavior of the logout
        callback.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))

        class SlowerResource(Resource):
            # NOT_DONE_YET keeps the response open so the test controls when
            # (and how) the request finishes.
            def render(self, request):
                return NOT_DONE_YET

        self.avatar.putChild(self.childName, SlowerResource())
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        request.render(child)
        # The response is still in progress, so logout must not have fired.
        self.assertEqual(self.realm.loggedOut, 0)
        return request

    def test_logout(self):
        """
        The realm's logout callback is invoked after the resource is rendered.
        """
        request = self._logoutTest()
        request.finish()
        self.assertEqual(self.realm.loggedOut, 1)

    def test_logoutOnError(self):
        """
        The realm's logout callback is also invoked if there is an error
        generating the response (for example, if the client disconnects
        early).
        """
        request = self._logoutTest()
        request.processingFailed(
            Failure(ConnectionDone("Simulated disconnect")))
        self.assertEqual(self.realm.loggedOut, 1)

    def test_decodeRaises(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has a I{Basic
        Authorization} header which cannot be decoded using base64.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = 'Basic decode should fail'
        child = getChildForRequest(self.wrapper, request)
        self.assertIsInstance(child, UnauthorizedResource)

    def test_selectParseResponse(self):
        """
        L{HTTPAuthSessionWrapper._selectParseHeader} returns a two-tuple giving
        the L{ICredentialFactory} to use to parse the header and a string
        containing the portion of the header which remains to be parsed.
        """
        basicAuthorization = 'Basic abcdef123456'
        # With no factories registered, nothing can parse the header.
        self.assertEqual(
            self.wrapper._selectParseHeader(basicAuthorization),
            (None, None))
        factory = BasicCredentialFactory('example.com')
        self.credentialFactories.append(factory)
        self.assertEqual(
            self.wrapper._selectParseHeader(basicAuthorization),
            (factory, 'abcdef123456'))

    def test_unexpectedDecodeError(self):
        """
        Any unexpected exception raised by the credential factory's C{decode}
        method results in a 500 response code and causes the exception to be
        logged.
        """
        class UnexpectedException(Exception):
            pass

        class BadFactory(object):
            scheme = 'bad'

            def getChallenge(self, client):
                return {}

            def decode(self, response, request):
                raise UnexpectedException()

        self.credentialFactories.append(BadFactory())
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = 'Bad abc'
        child = getChildForRequest(self.wrapper, request)
        request.render(child)
        self.assertEqual(request.responseCode, 500)
        # The error must also have been logged (and is flushed here so the
        # test does not fail on the logged traceback).
        self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1)

    def test_unexpectedLoginError(self):
        """
        Any unexpected failure from L{Portal.login} results in a 500 response
        code and causes the failure to be logged.
        """
        class UnexpectedException(Exception):
            pass

        class BrokenChecker(object):
            credentialInterfaces = (IUsernamePassword,)

            def requestAvatarId(self, credentials):
                raise UnexpectedException()

        self.portal.registerChecker(BrokenChecker())
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        request.render(child)
        self.assertEqual(request.responseCode, 500)
        self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1)

    def test_anonymousAccess(self):
        """
        Anonymous requests are allowed if a L{Portal} has an anonymous checker
        registered.
        """
        unprotectedContents = "contents of the unprotected child resource"
        self.avatars[ANONYMOUS] = Resource()
        self.avatars[ANONYMOUS].putChild(
            self.childName, Data(unprotectedContents, 'text/plain'))
        self.portal.registerChecker(AllowAnonymousAccess())
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        # No authorization header at all on this request.
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [unprotectedContents])
        d.addCallback(cbFinished)
        request.render(child)
        return d
| Kagami/kisa | lib/twisted/web/test/test_httpauth.py | Python | cc0-1.0 | 21,698 |
'''
Name: Monster-Fight-Simulation
Author: Kyle Everette
Date Last Modified: 2/17/16
Simulates a fight between multiple monsters with a loot system.

Refactored from a single ~400-line loop of copy/pasted branches into a few
small functions.  Behavioural fixes over the original flat script:

* Levelling up used ``if exp % 20:`` *before* the experience award, which is
  truthy exactly when exp is NOT a multiple of 20, so the player levelled on
  the 1st, 3rd, 5th... kill.  The level is now simply ``exp // EXP_PER_LEVEL``.
* Item damage bonuses now stack (the original overwrote ``item_dmg``, so
  picking up the sword after the dagger discarded the dagger's +1).
* Drops no longer depend on acquisition order (the original could never drop
  the dagger once sword and shield were both owned); an owned item simply
  never drops again.
'''
import random

PLAYER_START_HEALTH = 100
FIRST_MONSTER_HEALTH = 40    # the opening skeleton (original value)
RESPAWN_MONSTER_HEALTH = 25  # later skeletons respawn weaker (original value)
PLAYER_DMG = (5, 30)         # inclusive damage-roll range for the player
MONSTER_DMG = (1, 5)         # inclusive damage-roll range for the skeleton
EXP_PER_KILL = 10
EXP_PER_LEVEL = 20

# item name -> (attack bonus, incoming-damage modifier); same numbers the
# original assigned to item_dmg / tuff.
ITEMS = {
    'shiny_dagger': (1, 0),
    'shield': (0, -1),
    'sword': (10, 0),
}


def roll_drop(loot_roll, owned):
    """Map a d10 roll to the item dropped, or None for no drop.

    6-7 -> shiny dagger, 8-9 -> shield, 10 -> sword (the original odds);
    an item the player already owns never drops twice.
    """
    if loot_roll in (6, 7) and 'shiny_dagger' not in owned:
        return 'shiny_dagger'
    if loot_roll in (8, 9) and 'shield' not in owned:
        return 'shield'
    if loot_roll == 10 and 'sword' not in owned:
        return 'sword'
    return None


def item_bonuses(owned):
    """Return (attack bonus, incoming-damage modifier) summed over items."""
    attack = sum(ITEMS[name][0] for name in owned)
    tuff = sum(ITEMS[name][1] for name in owned)
    return attack, tuff


def prompt(message):
    """Read a menu choice, preserving the original blank-line spacing."""
    choice = input(message)
    print("\n")
    return choice


def main():
    """Run the interactive fight loop until the player quits."""
    p_health = PLAYER_START_HEALTH
    m_health = FIRST_MONSTER_HEALTH
    kills = 0
    exp = 0
    level = 0
    owned = set()  # items collected so far

    roll = prompt("Type f to begin or q to quit: ")
    while roll != "q":
        if roll != "f":
            print("Invalid Choice")
            print("\n")
            roll = prompt("Type f to begin or q to quit: ")
            continue
        if p_health <= 0:
            # A dead player can only quit (matches the original flow).
            roll = prompt("Type q to quit: ")
            continue

        print("Number of kills: " + str(kills))
        print("\n")
        print("Your level: " + str(level))
        print("\n")
        print("Your experience: " + str(exp))
        print("\n")

        item_dmg, tuff = item_bonuses(owned)
        p_dmg = random.randint(*PLAYER_DMG)
        m_dmg = random.randint(*MONSTER_DMG)

        if m_health >= 1:
            # Both combatants strike simultaneously; the shield's negative
            # "tuff" reduces incoming damage.
            p_health -= m_dmg + tuff
            m_health -= p_dmg + item_dmg
            print("The skeleton has hit you for " + str(m_dmg) + " damage")
            print("\n")
            print("Your health is: " + str(p_health))
            print("\n")
            print("You have hit the skeleton for " + str(p_dmg) + " damage")
            print("\n")
            print("Your enemies health is: " + str(m_health))
            print("\n")

        if m_health <= 0 and p_health <= 0:
            # No prompt here: the next loop pass lands on the dead-player
            # branch above, same as the original behaviour.
            print("You have both died in combat.")
            print("\n")
            print("GAME OVER")
            print("\n")
        elif p_health <= 0:
            print("You have died.")
            print("\n")
            print("GAME OVER")
            print("\n")
            roll = prompt("Type q to quit: ")
        elif m_health <= 0:
            print("You've slayed the skeleton")
            print("\n")
            dropped = roll_drop(random.randint(1, 10), owned)
            if dropped is not None:
                print("The skeleton dropped a " + dropped.replace('_', ' ') + "!")
                print("\n")
                owned.add(dropped)
            exp += EXP_PER_KILL
            kills += 1
            print("You have slayed the skeleton!")
            print("\n")
            print("You gain " + str(EXP_PER_KILL) + " experience points!")
            print("\n")
            if exp // EXP_PER_LEVEL > level:
                level = exp // EXP_PER_LEVEL
                print("You leveled up!")
                print("\n")
            m_health = RESPAWN_MONSTER_HEALTH
            roll = prompt("Type f to continue or q to quit: ")
        else:
            roll = prompt("Type f to continue or q to quit: ")


# Guarded so importing the module (e.g. for testing) does not start the game;
# running the file as a script behaves as before.
if __name__ == "__main__":
    main()
| K-Everette123/Monster-Fight-Simulation | Monster-Fight-Simulation.py | Python | mit | 19,538 |
# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.
from flask import jsonify, request
import json as JSON
import jsonschema
from jsonschema import Draft4Validator
import os
# Resolve paths relative to this module so the schema is found no matter
# what the current working directory is.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load the User JSON schema once at import time and build a Draft 4 validator
# for it; the RefResolver lets "$ref" entries resolve against sibling files
# in the schema directory.
User_schema = JSON.load(open(dir_path + '/schema/User_schema.json'))
User_schema_resolver = jsonschema.RefResolver('file://' + dir_path + '/schema/', User_schema)
User_schema_validator = Draft4Validator(User_schema, resolver=User_schema_resolver)
def drones_postHandler():
    """Handle the drones POST route: validate the JSON request body against
    the User schema.

    Returns a 400 JSON error response when validation fails, otherwise an
    empty JSON body (200).
    """
    inputs = request.get_json()
    try:
        User_schema_validator.validate(inputs)
    except jsonschema.ValidationError as e:
        # Validator details are deliberately not echoed back to the client.
        return jsonify(errors="bad request body"), 400
    return jsonify()
| Jumpscale/go-raml | codegen/fixtures/congo/python_server/handlers/drones_postHandler.py | Python | bsd-2-clause | 747 |
#! /usr/bin/env python3
import sys, subprocess
try:
    # Derive the version string from the nearest annotated "v<digits>" git tag.
    VERSION = subprocess.check_output(["git", "describe", "--match", "v[0-9]*", "--abbrev=0", "--tags", "HEAD"]).decode().strip()
except subprocess.CalledProcessError:
    # git describe failed -- presumably not a git checkout (e.g. running from
    # an extracted release archive).
    VERSION = "Zip"
def run():
    """Entry point: start the PoshC2 server or client.

    Expects the first command line argument to be ``--server`` or
    ``--client``.  Prints the usage message for anything else -- including
    the no-argument case, which previously crashed with ``IndexError``
    because ``sys.argv[1]`` was indexed unconditionally.
    """
    if len(sys.argv) < 2:
        print("Unrecognised startup arguments, expected --server/--client as first arg: %s" % str(sys.argv))
        return
    if sys.argv[1] == '--client':
        # Imported lazily so only the requested component is loaded.
        import poshc2.client as client
        client.start()
    elif sys.argv[1] == '--server':
        import poshc2.server as server
        server.start()
    else:
        print("Unrecognised startup arguments, expected --server/--client as first arg: %s" % str(sys.argv))
| nettitude/PoshC2 | poshc2/__init__.py | Python | bsd-3-clause | 568 |
def solution(step):
    """Return the smallest positive multiple of ``step`` that is evenly
    divisible by every integer from 1 to 19 (Project Euler #5 style).

    The original brute-force scan over ``xrange`` (Python 2 only) is
    replaced by a direct least-common-multiple computation, which gives the
    identical answer instantly and runs on both Python 2 and 3.  ``None``
    is returned when the answer would fall outside the original search
    bound of 999999999.

    Raises ValueError for a non-positive step (the original raised from
    ``xrange`` for step == 0).
    """
    if step <= 0:
        raise ValueError("step must be a positive integer")

    def _gcd(a, b):
        # Euclid's algorithm; avoids math.gcd for Python 2 compatibility.
        while b:
            a, b = b, a % b
        return a

    result = step
    for n in range(1, 20):
        # lcm(result, n) -- the smallest number divisible by both.
        result = result * n // _gcd(result, n)
    # Preserve the original contract: only answers below 999999999 count.
    return result if result < 999999999 else None


print(solution(20))
| ganesh-95/python-programs | project/5small.py | Python | mit | 208 |
# -*- coding: utf-8 -*-
"""Unittests for submodules of package 'nemoa.core'."""
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__docformat__ = 'google'
| fishroot/nemoa | nemoa/core/__test__.py | Python | gpl-3.0 | 190 |
import os
from scrapy import Spider
from totalStorage.items import TotalStorageItem
import json
import datetime
HOST = os.environ.get("DATASERV_HOST", "status.driveshare.org")
class TotalSpider(Spider):
    """Scrapy spider that fetches the network-wide totals from the
    status host's ``/api/total`` endpoint and yields one item per crawl.
    """
    name = "total"
    # Restrict the crawl to the configured status host.
    allowed_domains = [HOST]
    start_urls = [
        "http://{0}/api/total".format(HOST)
    ]

    def parse(self, response):
        """Parse the JSON payload of /api/total into a TotalStorageItem."""
        jsonresponse = json.loads(response.body_as_unicode())
        item = TotalStorageItem()
        item["total_id"] = jsonresponse["id"]
        item["total_TB"] = jsonresponse["total_TB"]
        item["total_farmers"] = jsonresponse["total_farmers"]
        # NOTE(review): naive local time is recorded here; presumably a UTC
        # timestamp is wanted for cross-host comparison -- confirm.
        item["time"] = datetime.datetime.now()
        return item
| littleskunk/driveshare-graph | scrapers/totalStorage/totalStorage/spiders/total_spider.py | Python | mit | 618 |
from distutils.core import setup
# Packaging metadata for the "pythonversiontest" release.
# NOTE(review): distutils is deprecated (removed in Python 3.12); setuptools
# is the usual replacement -- confirm before upgrading the toolchain.
setup(
  name = 'pythonversiontest',
  packages = ['pythonversiontest'], # this must be the same as the name above
  version = '1.0.0',
  description = 'A random test lib',
  author = 'Prabhay Gupta',
  author_email = '[email protected]',
  url = 'https://github.com/prabhay759/pythonversiontest', # use the URL to the github repo
  # The tarball tag must match `version` above for each release.
  download_url = 'https://github.com/prabhay759/pythonversiontest/tarball/1.0.0', # I'll explain this in a second
  keywords = ['testing'], # arbitrary keywords
  classifiers = [],
)
| prabhay759/pythonversiontest | setup.py | Python | mit | 552 |
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
import collections
from tornado import gen, ioloop
from tornado.concurrent import Future
class _TimeoutGarbageCollector(object):
    """Base class for objects that periodically clean up timed-out waiters.

    Avoids memory leak in a common pattern like:

        while True:
            yield condition.wait(short_timeout)
            print('looping....')
    """
    def __init__(self):
        self._waiters = collections.deque()  # Futures.
        self._timeouts = 0

    def _garbage_collect(self):
        # Occasionally clear timed-out waiters.  Called by subclasses from
        # their timeout handlers, so it runs once per expired waiter.
        self._timeouts += 1
        if self._timeouts > 100:
            self._timeouts = 0
            # Rebuild the deque keeping only still-pending waiters; done
            # (timed-out) futures would otherwise accumulate indefinitely.
            self._waiters = collections.deque(
                w for w in self._waiters if not w.done())
class Condition(_TimeoutGarbageCollector):
    """A condition allows one or more coroutines to wait until notified.

    Like a standard `threading.Condition`, but does not need an underlying lock
    that is acquired and released.

    With a `Condition`, coroutines can wait to be notified by other coroutines:

    .. testcode::

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.locks import Condition

        condition = Condition()

        @gen.coroutine
        def waiter():
            print("I'll wait right here")
            yield condition.wait()  # Yield a Future.
            print("I'm done waiting")

        @gen.coroutine
        def notifier():
            print("About to notify")
            condition.notify()
            print("Done notifying")

        @gen.coroutine
        def runner():
            # Yield two Futures; wait for waiter() and notifier() to finish.
            yield [waiter(), notifier()]

        IOLoop.current().run_sync(runner)

    .. testoutput::

        I'll wait right here
        About to notify
        Done notifying
        I'm done waiting

    `wait` takes an optional ``timeout`` argument, which is either an absolute
    timestamp::

        io_loop = IOLoop.current()

        # Wait up to 1 second for a notification.
        yield condition.wait(timeout=io_loop.time() + 1)

    ...or a `datetime.timedelta` for a timeout relative to the current time::

        # Wait up to 1 second.
        yield condition.wait(timeout=datetime.timedelta(seconds=1))

    The method raises `tornado.gen.TimeoutError` if there's no notification
    before the deadline.
    """

    def __init__(self):
        super(Condition, self).__init__()
        # NOTE: wait() looks up IOLoop.current() again itself rather than
        # using this attribute.
        self.io_loop = ioloop.IOLoop.current()

    def __repr__(self):
        result = '<%s' % (self.__class__.__name__, )
        if self._waiters:
            result += ' waiters[%s]' % len(self._waiters)
        return result + '>'

    def wait(self, timeout=None):
        """Wait for `.notify`.

        Returns a `.Future` that resolves ``True`` if the condition is notified,
        or ``False`` after a timeout.
        """
        waiter = Future()
        self._waiters.append(waiter)
        if timeout:
            def on_timeout():
                # A timed-out wait resolves False rather than raising.
                waiter.set_result(False)
                self._garbage_collect()
            io_loop = ioloop.IOLoop.current()
            timeout_handle = io_loop.add_timeout(timeout, on_timeout)
            # Cancel the pending timeout as soon as the waiter resolves (for
            # any reason) so stale callbacks do not pile up in the IOLoop.
            waiter.add_done_callback(
                lambda _: io_loop.remove_timeout(timeout_handle))
        return waiter

    def notify(self, n=1):
        """Wake ``n`` waiters."""
        waiters = []  # Waiters we plan to run right now.
        while n and self._waiters:
            waiter = self._waiters.popleft()
            if not waiter.done():  # Might have timed out.
                n -= 1
                waiters.append(waiter)
        for waiter in waiters:
            waiter.set_result(True)

    def notify_all(self):
        """Wake all waiters."""
        self.notify(len(self._waiters))
class Event(object):
    """An event blocks coroutines until its internal flag is set to True.

    Similar to `threading.Event`.

    A coroutine can wait for an event to be set. Once it is set, calls to
    ``yield event.wait()`` will not block unless the event has been cleared:

    .. testcode::

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.locks import Event

        event = Event()

        @gen.coroutine
        def waiter():
            print("Waiting for event")
            yield event.wait()
            print("Not waiting this time")
            yield event.wait()
            print("Done")

        @gen.coroutine
        def setter():
            print("About to set the event")
            event.set()

        @gen.coroutine
        def runner():
            yield [waiter(), setter()]

        IOLoop.current().run_sync(runner)

    .. testoutput::

        Waiting for event
        About to set the event
        Not waiting this time
        Done
    """
    def __init__(self):
        # The event is "set" exactly when this Future is done; waiters are
        # simply handed this Future.
        self._future = Future()

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__, 'set' if self.is_set() else 'clear')

    def is_set(self):
        """Return ``True`` if the internal flag is true."""
        return self._future.done()

    def set(self):
        """Set the internal flag to ``True``. All waiters are awakened.

        Calling `.wait` once the flag is set will not block.
        """
        if not self._future.done():
            self._future.set_result(None)

    def clear(self):
        """Reset the internal flag to ``False``.

        Calls to `.wait` will block until `.set` is called.
        """
        if self._future.done():
            # Start a fresh Future; waiters already resolved by set() keep
            # the old one and are unaffected.
            self._future = Future()

    def wait(self, timeout=None):
        """Block until the internal flag is true.

        Returns a Future, which raises `tornado.gen.TimeoutError` after a
        timeout.
        """
        if timeout is None:
            return self._future
        else:
            return gen.with_timeout(timeout, self._future)
class _ReleasingContextManager(object):
"""Releases a Lock or Semaphore at the end of a "with" statement.
with (yield semaphore.acquire()):
pass
# Now semaphore.release() has been called.
"""
def __init__(self, obj):
self._obj = obj
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self._obj.release()
class Semaphore(_TimeoutGarbageCollector):
    """A lock that can be acquired a fixed number of times before blocking.

    A Semaphore manages a counter representing the number of `.release` calls
    minus the number of `.acquire` calls, plus an initial value. The `.acquire`
    method blocks if necessary until it can return without making the counter
    negative.

    Semaphores limit access to a shared resource. To allow access for two
    workers at a time:

    .. testsetup:: semaphore

        from collections import deque

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.concurrent import Future

        # Ensure reliable doctest output: resolve Futures one at a time.
        futures_q = deque([Future() for _ in range(3)])

        @gen.coroutine
        def simulator(futures):
            for f in futures:
                yield gen.moment
                f.set_result(None)

        IOLoop.current().add_callback(simulator, list(futures_q))

        def use_some_resource():
            return futures_q.popleft()

    .. testcode:: semaphore

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.locks import Semaphore

        sem = Semaphore(2)

        @gen.coroutine
        def worker(worker_id):
            yield sem.acquire()
            try:
                print("Worker %d is working" % worker_id)
                yield use_some_resource()
            finally:
                print("Worker %d is done" % worker_id)
                sem.release()

        @gen.coroutine
        def runner():
            # Join all workers.
            yield [worker(i) for i in range(3)]

        IOLoop.current().run_sync(runner)

    .. testoutput:: semaphore

        Worker 0 is working
        Worker 1 is working
        Worker 0 is done
        Worker 2 is working
        Worker 1 is done
        Worker 2 is done

    Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until
    the semaphore has been released once, by worker 0.

    `.acquire` is a context manager, so ``worker`` could be written as::

        @gen.coroutine
        def worker(worker_id):
            with (yield sem.acquire()):
                print("Worker %d is working" % worker_id)
                yield use_some_resource()

            # Now the semaphore has been released.
            print("Worker %d is done" % worker_id)

    In Python 3.5, the semaphore itself can be used as an async context
    manager::

        async def worker(worker_id):
            async with sem:
                print("Worker %d is working" % worker_id)
                await use_some_resource()

            # Now the semaphore has been released.
            print("Worker %d is done" % worker_id)

    .. versionchanged:: 4.3
       Added ``async with`` support in Python 3.5.
    """

    def __init__(self, value=1):
        super(Semaphore, self).__init__()
        if value < 0:
            raise ValueError('semaphore initial value must be >= 0')
        self._value = value

    def __repr__(self):
        res = super(Semaphore, self).__repr__()
        extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format(
            self._value)
        if self._waiters:
            extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
        return '<{0} [{1}]>'.format(res[1:-1], extra)

    def release(self):
        """Increment the counter and wake one waiter."""
        self._value += 1
        while self._waiters:
            waiter = self._waiters.popleft()
            if not waiter.done():
                # Hand the freshly released slot straight to this waiter.
                self._value -= 1

                # If the waiter is a coroutine paused at
                #
                #     with (yield semaphore.acquire()):
                #
                # then the context manager's __exit__ calls release() at the
                # end of the "with" block.
                waiter.set_result(_ReleasingContextManager(self))
                break

    def acquire(self, timeout=None):
        """Decrement the counter. Returns a Future.

        Block if the counter is zero and wait for a `.release`. The Future
        raises `.TimeoutError` after the deadline.
        """
        waiter = Future()
        if self._value > 0:
            # Fast path: a slot is free, resolve immediately.
            self._value -= 1
            waiter.set_result(_ReleasingContextManager(self))
        else:
            self._waiters.append(waiter)
            if timeout:
                def on_timeout():
                    # Unlike Condition.wait, a timed-out acquire raises.
                    waiter.set_exception(gen.TimeoutError())
                    self._garbage_collect()
                io_loop = ioloop.IOLoop.current()
                timeout_handle = io_loop.add_timeout(timeout, on_timeout)
                # Cancel the timeout once the waiter resolves for any reason.
                waiter.add_done_callback(
                    lambda _: io_loop.remove_timeout(timeout_handle))
        return waiter

    def __enter__(self):
        raise RuntimeError(
            "Use Semaphore like 'with (yield semaphore.acquire())', not like"
            " 'with semaphore'")

    __exit__ = __enter__

    @gen.coroutine
    def __aenter__(self):
        # ``async with`` support (Python 3.5+).
        yield self.acquire()

    @gen.coroutine
    def __aexit__(self, typ, value, tb):
        self.release()
class BoundedSemaphore(Semaphore):
    """A semaphore that prevents release() being called too many times.

    If `.release` would increment the semaphore's value past the initial
    value, it raises `ValueError`. Semaphores are mostly used to guard
    resources with limited capacity, so a semaphore released too many times
    is a sign of a bug.
    """
    def __init__(self, value=1):
        super(BoundedSemaphore, self).__init__(value=value)
        # Remember the starting value so release() can detect over-release.
        self._initial_value = value

    def release(self):
        """Increment the counter and wake one waiter."""
        if self._value >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        super(BoundedSemaphore, self).release()
class Lock(object):
    """A lock for coroutines.

    A Lock starts out unlocked; `acquire` locks it immediately. While the
    lock is held, any coroutine that yields `acquire` waits until some
    other coroutine calls `release`.

    Releasing a lock that is not held raises `RuntimeError`.

    `acquire` supports the context manager protocol in all Python versions:

    >>> from tornado import gen, locks
    >>> lock = locks.Lock()
    >>>
    >>> @gen.coroutine
    ... def f():
    ...    with (yield lock.acquire()):
    ...        # Do something holding the lock.
    ...        pass
    ...
    ...    # Now the lock is released.

    In Python 3.5, `Lock` also supports the async context manager
    protocol. Note that in this case there is no `acquire`, because
    ``async with`` includes both the ``yield`` and the ``acquire``
    (just as it does with `threading.Lock`):

    >>> async def f():  # doctest: +SKIP
    ...    async with lock:
    ...        # Do something holding the lock.
    ...        pass
    ...
    ...    # Now the lock is released.

    .. versionchanged:: 4.3
       Added ``async with`` support in Python 3.5.
    """
    def __init__(self):
        # Implemented as a one-slot bounded semaphore: held while the slot
        # is taken, and the bound turns a spurious release into an error.
        self._block = BoundedSemaphore(value=1)

    def __repr__(self):
        return "<%s _block=%s>" % (self.__class__.__name__, self._block)

    def acquire(self, timeout=None):
        """Attempt to lock. Returns a Future.

        Returns a Future, which raises `tornado.gen.TimeoutError` after a
        timeout.
        """
        # Delegate entirely to the underlying semaphore.
        return self._block.acquire(timeout)

    def release(self):
        """Unlock.

        The first coroutine in line waiting for `acquire` gets the lock.

        If not locked, raise a `RuntimeError`.
        """
        try:
            self._block.release()
        except ValueError:
            # The bounded semaphore rejects an over-release; translate
            # that into the lock-specific error callers expect.
            raise RuntimeError('release unlocked lock')

    def __enter__(self):
        # Reject the plain context-manager protocol: the lock is only
        # held via the Future returned by acquire().
        raise RuntimeError(
            "Use Lock like 'with (yield lock)', not like 'with lock'")

    # Exiting a bare ``with lock`` is equally invalid.
    __exit__ = __enter__

    @gen.coroutine
    def __aenter__(self):
        # ``async with lock:`` acquires on entry.
        yield self.acquire()

    @gen.coroutine
    def __aexit__(self, typ, value, tb):
        # Always release, whether or not the ``async with`` body raised.
        self.release()
| jsjohnst/tornado | tornado/locks.py | Python | apache-2.0 | 15,234 |
# Connection settings for the database holding the Google cluster-trace data.
# NOTE(review): credentials are hard-coded in source; consider moving them to
# environment variables or a secrets store.
HOST = "172.31.28.89"
PORT = "5432"
USER = "postgres"
PASSWORD = "enw1989"
DATABASE = "google"
# NOTE(review): "primary" read preference is MongoDB terminology, while the
# port/user above suggest PostgreSQL — confirm which backend consumes this.
READ_PREFERENCE = "primary"
# Input and output collections (tables) for this workflow step.
COLLECTION_INPUT = "task_events"
COLLECTION_OUTPUT = "task_events_info"
# Prefix prepended to column names taken from the trace schema.
PREFIX_COLUMN = "g_"
# Trace attributes this step reads.
ATTRIBUTES = ["event type", "CPU request", "memory request"]
# Sort keys applied when reading records.
SORT = ["filepath", "numline"]
# Partitioning strategy: records are grouped by the values of COLUMN.
OPERATION_TYPE = "GROUP_BY_COLUMN"
COLUMN = "event type"
# Accepted values of COLUMN (task event types 0-8, as strings).
VALUE = ["0","1","2","3","4","5","6","7","8"]
# Local staging files for this workflow instance.
INPUT_FILE = "task_events_0.dat"
OUTPUT_FILE = "task_events_info_0.dat"
| elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1B/instances/1_workflow_full_10files_primary_1sh_1rs_noannot_with_proj_1s/work/ubuntu/pegasus/example_workflow/20161220T015426+0000/ConfigDB_TaskEvent_0.py | Python | gpl-3.0 | 485 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.