code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
from builtins import zip
from builtins import range
import unittest
import math
import numpy as np
import singa.tensor as tensor
import singa.optimizer as opt
import singa.device as device
from singa import singa_wrap
# Create a single CUDA device at import time so that all CUDA test cases
# share one context (device creation is expensive, and the *_cuda tests
# reference this module-level `cuda` object).
if singa_wrap.USE_CUDA:
    cuda = device.create_cuda_gpu()
def np_adam(plist, glist, mlist, vlist, lr, t, b1=0.9, b2=0.999):
    """Reference Adam update, mutating each (param, moment, variance) in place.

    All list entries must be numpy arrays; `t` is the 1-based step index
    used for bias correction.
    """
    # Bias-corrected step size is identical for every tensor, so hoist it.
    alpha = lr * math.sqrt(1.0 - math.pow(b2, t)) / (1.0 - math.pow(b1, t))
    for param, grad, moment, variance in zip(plist, glist, mlist, vlist):
        moment *= b1
        moment += (1.0 - b1) * grad
        variance *= b2
        variance += (1.0 - b2) * grad * grad
        param -= alpha * moment / (np.sqrt(variance) + 1e-8)
def np_rmsprop(plist, glist, vlist, lr, t, rho=0.9):
    """Reference RMSProp update, mutating each (param, variance) in place."""
    for param, grad, variance in zip(plist, glist, vlist):
        variance *= rho
        variance += (1.0 - rho) * grad * grad
        # NB: epsilon sits inside the sqrt, matching singa's RMSProp.
        param -= lr * grad / np.sqrt(variance + 1e-8)
def np_momentum(plist, glist, vlist, lr, t, momentum=0.9):
    """Reference SGD-with-momentum update, mutating (param, velocity) in place."""
    for param, grad, velocity in zip(plist, glist, vlist):
        velocity *= momentum
        velocity += lr * grad
        param -= velocity
def np_adagrad(plist, glist, vlist, lr, t):
    """Reference AdaGrad update, mutating (param, accumulator) in place."""
    for param, grad, accum in zip(plist, glist, vlist):
        accum += grad * grad
        # NB: epsilon sits inside the sqrt, matching singa's AdaGrad.
        param -= lr * grad / np.sqrt(accum + 1e-8)
class TestOptimizer(unittest.TestCase):
    """Compares singa's optimizers against the NumPy reference updates above.

    Each optimizer has a CPU test plus a CUDA twin that is skipped when the
    singa wrapper was built without CUDA support.  The CUDA tests rely on the
    module-level `cuda` device created at import time.
    """

    def setUp(self):
        # Shared parameter W and gradient g used by the SGD/constraint/
        # regularizer tests; the np_* copies keep the expected host values.
        self.np_W = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
        self.W = tensor.from_numpy(self.np_W)
        self.np_g = np.array([0.1, 0.3, 0.1, 0.2], dtype=np.float32)
        self.g = tensor.from_numpy(self.np_g)

    def to_cuda(self):
        """Move the shared tensors onto the module-level CUDA device."""
        self.W.to_device(cuda)
        self.g.to_device(cuda)

    def test_sgd(self):
        """Plain SGD must compute p -= lr * g elementwise."""
        lr = 0.1
        sgd = opt.SGD(lr)
        sgd.apply(0, self.g, self.W, 'w')
        w = tensor.to_numpy(self.W)
        for i in range(self.W.size()):
            self.assertAlmostEqual(w[i], self.np_W[i] - lr * self.np_g[i])

    def test_adam(self):
        """Adam must track np_adam over 9 steps to 6 decimal places."""
        lr = 0.1
        n, m = 4, 6
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        m1 = np.zeros((n, m))
        m2 = np.zeros((n, m))
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        # Run the NumPy reference for steps 1..9 (mutates p1/p2 in place).
        for t in range(1, 10):
            np_adam([p1, p2], [g1, g2], [m1, m2], [v1, v2], lr, t)
        adam = opt.Adam(lr=lr)
        # Run singa's Adam over the same steps.
        for t in range(1, 10):
            adam.apply(0, tg1, t1, 'p1', t)
            adam.apply(0, tg2, t2, 'p2', t)
        # Rebind t1/t2 to host arrays for elementwise comparison.
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 6)

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_sgd_cuda(self):
        """Same as test_sgd but with the tensors on the CUDA device."""
        lr = 0.1
        sgd = opt.SGD(lr)
        self.to_cuda()
        sgd.apply(0, self.g, self.W, 'w')
        self.W.to_host()
        w = tensor.to_numpy(self.W)
        for i in range(self.W.size()):
            self.assertAlmostEqual(w[i], self.np_W[i] - lr * self.np_g[i])

    def test_constraint(self):
        """L2Constraint must rescale g by threshold / (||g|| / size)."""
        threshold = 0.02
        cons = opt.L2Constraint(threshold)
        cons.apply(0, self.W, self.g)
        g = tensor.to_numpy(self.g)
        # singa normalizes by the norm divided by the element count.
        nrm = np.linalg.norm(self.np_g) / self.np_g.size
        for i in range(g.size):
            self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_constraint_cuda(self):
        """Same as test_constraint but with the tensors on the CUDA device."""
        threshold = 0.02
        self.to_cuda()
        cons = opt.L2Constraint(threshold)
        cons.apply(0, self.W, self.g)
        self.g.to_host()
        g = tensor.to_numpy(self.g)
        nrm = np.linalg.norm(self.np_g) / self.np_g.size
        for i in range(g.size):
            self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)

    def test_regularizer(self):
        """L2Regularizer must add coefficient * W to the gradient."""
        coefficient = 0.0001
        reg = opt.L2Regularizer(coefficient)
        reg.apply(0, self.W, self.g)
        g = tensor.to_numpy(self.g)
        for i in range(g.size):
            self.assertAlmostEqual(g[i],
                                   self.np_g[i] + coefficient * self.np_W[i])

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_regularizer_cuda(self):
        """Same as test_regularizer but with the tensors on the CUDA device."""
        coefficient = 0.0001
        reg = opt.L2Regularizer(coefficient)
        self.to_cuda()
        reg.apply(0, self.W, self.g)
        self.g.to_host()
        g = tensor.to_numpy(self.g)
        for i in range(g.size):
            self.assertAlmostEqual(g[i],
                                   self.np_g[i] + coefficient * self.np_W[i])

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_adam_cuda(self):
        """Same as test_adam but with the tensors on the CUDA device."""
        lr = 0.1
        n, m = 4, 6
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        m1 = np.zeros((n, m))
        m2 = np.zeros((n, m))
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 10):
            np_adam([p1, p2], [g1, g2], [m1, m2], [v1, v2], lr, t)
        adam = opt.Adam(lr=lr)
        self.to_cuda()
        for t in range(1, 10):
            adam.apply(0, tg1, t1, 'p1', t)
            adam.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 6)

    def test_rmsprop(self):
        """RMSProp must track np_rmsprop over 3 steps (2 decimal places)."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_rmsprop([p1, p2], [g1, g2], [v1, v2], lr, t)
        rsmprop = opt.RMSProp(lr=lr)
        for t in range(1, 4):
            rsmprop.apply(0, tg1, t1, 'p1', t)
            rsmprop.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_rmsprop_cuda(self):
        """Same as test_rmsprop but with the tensors on the CUDA device."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_rmsprop([p1, p2], [g1, g2], [v1, v2], lr, t)
        rsmprop = opt.RMSProp(lr=lr)
        self.to_cuda()
        for t in range(1, 4):
            rsmprop.apply(0, tg1, t1, 'p1', t)
            rsmprop.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)

    def test_momentum(self):
        """SGD with momentum must track np_momentum over 3 steps."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_momentum([p1, p2], [g1, g2], [v1, v2], lr, t)
        momentum = opt.SGD(lr, momentum=0.9)
        for t in range(1, 4):
            momentum.apply(0, tg1, t1, 'p1', t)
            momentum.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_momentum_cuda(self):
        """Same as test_momentum but with the tensors on the CUDA device."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_momentum([p1, p2], [g1, g2], [v1, v2], lr, t)
        momentum = opt.SGD(lr, momentum=0.9)
        self.to_cuda()
        for t in range(1, 4):
            momentum.apply(0, tg1, t1, 'p1', t)
            momentum.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)

    def test_adagrad(self):
        """AdaGrad must track np_adagrad over 3 steps."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_adagrad([p1, p2], [g1, g2], [v1, v2], lr, t)
        adagrad = opt.AdaGrad(lr=lr)
        for t in range(1, 4):
            adagrad.apply(0, tg1, t1, 'p1', t)
            adagrad.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)

    @unittest.skipIf(not singa_wrap.USE_CUDA, 'CUDA is not enabled')
    def test_adagrad_cuda(self):
        """Same as test_adagrad but with the tensors on the CUDA device."""
        lr = 0.1
        n, m = 2, 2
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)
        for t in range(1, 4):
            np_adagrad([p1, p2], [g1, g2], [v1, v2], lr, t)
        adagrad = opt.AdaGrad(lr=lr)
        self.to_cuda()
        for t in range(1, 4):
            adagrad.apply(0, tg1, t1, 'p1', t)
            adagrad.apply(0, tg2, t2, 'p2', t)
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 2)
# Allow running this file directly: python test_optimizer.py
if __name__ == '__main__':
    unittest.main()
| nusdbsystem/incubator-singa | test/python/test_optimizer.py | Python | apache-2.0 | 12,498 |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from os.path import dirname
import platform
import sys
import tempfile
import unittest
import driver_env
import driver_log
import driver_temps
import driver_tools
def CanRunHost():
    """Return True if the host toolchain binaries can run on this machine.

    ARM bots cannot execute the x86 host binaries, and on Windows the
    binaries need cygwin DLLs that are only on PATH for the installed
    drivers bin, not for these test binaries.
    """
    on_arm = platform.machine().startswith('arm')
    on_windows = sys.platform == 'win32'
    return not (on_arm or on_windows)
def _SetupLinuxHostDir(env, nacl_dir):
    """Register the Linux host-binary directory with the driver.

    Prefers the 32-bit host dir and falls back to the 64-bit one when
    the 32-bit directory does not exist.
    """
    host_template = os.path.join(nacl_dir, 'toolchain', 'linux_x86',
                                 'pnacl_newlib', 'host_%s')
    host_32 = host_template % 'x86_32'
    host_64 = host_template % 'x86_64'
    chosen = host_32 if os.path.exists(host_32) else host_64
    driver_tools.AddHostBinarySearchPath(chosen)
def SetupNaClDir(env):
    """Set BASE_NACL to the native_client checkout root (three dirs up)."""
    this_dir = os.path.abspath(dirname(__file__))
    env.set('BASE_NACL', dirname(dirname(dirname(this_dir))))
def SetupToolchainDir(env):
    """Set BASE_TOOLCHAIN to the pnacl_newlib toolchain for this OS."""
    this_dir = os.path.abspath(dirname(__file__))
    nacl_dir = dirname(dirname(dirname(this_dir)))
    os_name = driver_tools.GetOSName()
    toolchain_dir = os.path.join(nacl_dir, 'toolchain',
                                 '%s_x86' % os_name, 'pnacl_newlib')
    env.set('BASE_TOOLCHAIN', toolchain_dir)
def SetupHostDir(env):
    """Inject the host-binary directory into the driver's -B search path.

    Some of the tools end up running one of the host binaries, so locate
    the host dir on this test system and register it via the driver's -B
    implementation.

    Raises:
      RuntimeError: on an unrecognized host platform.  (Previously an
          unknown platform fell through to a NameError on host_arch.)
    """
    test_dir = os.path.abspath(dirname(__file__))
    nacl_dir = dirname(dirname(dirname(test_dir)))
    if sys.platform == 'darwin':
        host_arch = 'x86_64'
    elif sys.platform.startswith('linux'):
        # Linux chooses between the 32- and 64-bit host dirs itself.
        _SetupLinuxHostDir(env, nacl_dir)
        return
    elif sys.platform in ('cygwin', 'win32'):
        host_arch = 'x86_32'
    else:
        raise RuntimeError('Unsupported host platform: %r' % sys.platform)
    os_shortname = driver_tools.GetOSName()
    host_dir = os.path.join(nacl_dir, 'toolchain',
                            '%s_x86' % os_shortname,
                            'pnacl_newlib',
                            'host_%s' % host_arch)
    driver_tools.AddHostBinarySearchPath(host_dir)
# A collection of override methods that mock driver_env.Environment.
# One thing is we prevent having to read a driver.conf file,
# so instead we have a base group of variables set for testing.
def TestEnvReset(self, more_overrides={}):
    """reset() override that seeds driver_env.Environment for unit tests.

    NOTE: more_overrides defaults to a shared mutable dict but is only
    read here, never mutated, so the shared default is safe.
    """
    # Call to "super" class method (assumed to be driver_env.Environment).
    # TODO(jvoung): We may want a different way of overriding things.
    driver_env.Environment.reset(self)
    # The overrides.
    self.set('PNACL_RUNNING_UNITTESTS', '1')
    SetupNaClDir(self)
    SetupToolchainDir(self)
    SetupHostDir(self)
    for k, v in more_overrides.iteritems():
        self.set(k, v)
def ApplyTestEnvOverrides(env, more_overrides={}):
    """Register all the override methods and reset the env to a testable state.
    """
    def resetter(self):
        return TestEnvReset(self, more_overrides)
    driver_env.override_env('reset', resetter)
    env.reset()
# Utils to prevent driver exit.
class DriverExitException(Exception):
    """Raised by FakeExit in place of actually terminating the process."""
    pass
def FakeExit(i):
    # Stand-in for DriverExit: raise so tests can observe the exit instead
    # of having the interpreter terminate.  `i` mirrors the exit code.
    raise DriverExitException('Stubbed out DriverExit!')
# Basic argument parsing.
def GetPlatformToTest():
    """Return the value of the --platform=<name> command-line flag.

    Raises:
      Exception: when no --platform flag is present.  (The old message
          dereferenced the last loop variable, which was an unrelated
          argument -- or a NameError when argv was empty.)
    """
    for arg in sys.argv:
        if arg.startswith('--platform='):
            # maxsplit=1 keeps any '=' inside the platform value intact.
            return arg.split('=', 1)[1]
    raise Exception('No --platform=<name> argument was supplied')
# We would like to be able to use a temp file whether it is open or closed.
# However File's __enter__ method requires it to be open. So we override it
# to just return the fd regardless.
class TempWrapper(object):
    """Proxy around a temp file usable whether the file is open or closed.

    File's own __enter__ requires the file to be open; this wrapper's
    __enter__ simply hands back the wrapped object regardless of state.
    All other attribute access is delegated to the underlying file.
    """

    def __init__(self, fd, close=True):
        self._fd = fd
        if close:
            fd.close()

    def __enter__(self):
        return self._fd

    def __exit__(self, exc_type, exc_value, traceback):
        return self._fd.__exit__(exc_type, exc_value, traceback)

    def __getattr__(self, name):
        # Forward everything else (name, closed, read, ...) to the file.
        return getattr(self._fd, name)
class DriverTesterCommon(unittest.TestCase):
    """Base TestCase that tracks temp files and wipes driver temps."""

    def setUp(self):
        super(DriverTesterCommon, self).setUp()
        self._tempfiles = []

    def tearDown(self):
        for handle in self._tempfiles:
            if not handle.closed:
                handle.close()
            os.remove(handle.name)
        driver_temps.TempFiles.wipe()
        super(DriverTesterCommon, self).tearDown()

    def getTemp(self, close=True, **kwargs):
        """ Get a temporary named file object.
        """
        # delete=False so that files can be closed and re-opened later;
        # Windows sometimes refuses to re-open an already-open temp file.
        handle = tempfile.NamedTemporaryFile(delete=False, **kwargs)
        self._tempfiles.append(handle)
        return TempWrapper(handle, close=close)
| davidbrazdil/nacl | pnacl/driver/tests/driver_test_utils.py | Python | bsd-3-clause | 4,978 |
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
import logging
from django.core import urlresolvers
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.utils import simplejson
from django.utils.translation import ugettext as _
from tinymce.compressor import gzip_compressor
from tinymce.widgets import get_language_config
from django.views.decorators.csrf import csrf_exempt
def textareas_js(request, name, lang=None):
    """
    Returns a HttpResponse whose content is a Javscript file. The template
    is loaded from 'tinymce/<name>_textareas.js' or
    '<name>/tinymce_textareas.js'. Optionally, the lang argument sets the
    content language.
    """
    template = loader.select_template((
        'tinymce/%s_textareas.js' % name,
        '%s/tinymce_textareas.js' % name,
    ))
    # `config` avoids shadowing the builtin vars().
    config = get_language_config(lang)
    config['content_language'] = lang
    context = RequestContext(request, config)
    return HttpResponse(template.render(context),
                        content_type="application/x-javascript")
@csrf_exempt
def spell_check(request):
    """
    Returns a HttpResponse that implements the TinyMCE spellchecker protocol.

    The request body is a JSON-RPC style payload:
    {"id": ..., "method": "checkWords"|"getSuggestions", "params": [lang, arg]}
    """
    try:
        import enchant

        payload = simplejson.loads(request.raw_post_data)
        request_id = payload['id']          # renamed: `id` shadowed the builtin
        method = payload['method']
        params = payload['params']
        lang = params[0]
        arg = params[1]                     # word list or single word

        if not enchant.dict_exists(str(lang)):
            raise RuntimeError("dictionary not found for language '%s'" % lang)

        checker = enchant.Dict(str(lang))
        if method == 'checkWords':
            # Return only the misspelled words.
            result = [word for word in arg if not checker.check(word)]
        elif method == 'getSuggestions':
            result = checker.suggest(arg)
        else:
            # Fixed typo: the message used to read "Unkown".
            raise RuntimeError("Unknown spellcheck method: '%s'" % method)

        output = {
            'id': request_id,
            'result': result,
            'error': None,
        }
    except Exception:
        logging.exception("Error running spellchecker")
        return HttpResponse(_("Error running spellchecker"))
    return HttpResponse(simplejson.dumps(output),
                        content_type='application/json')
def preview(request, name):
    """
    Returns a HttpResponse whose content is an HTML file that is used
    by the TinyMCE preview plugin. The template is loaded from
    'tinymce/<name>_preview.html' or '<name>/tinymce_preview.html'.
    """
    candidates = (
        'tinymce/%s_preview.html' % name,
        '%s/tinymce_preview.html' % name,
    )
    chosen = loader.select_template(candidates)
    return HttpResponse(chosen.render(RequestContext(request)),
                        content_type="text/html")
def flatpages_link_list(request):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of links to flatpages.
    """
    from django.contrib.flatpages.models import FlatPage
    pages = FlatPage.objects.all()
    return render_to_link_list([(page.title, page.url) for page in pages])
def compressor(request):
    """
    Returns a GZip-compressed response.
    """
    # Thin view wrapper so the compressor can be wired into urls.py.
    return gzip_compressor(request)
def render_to_link_list(link_list):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of links suitable for use wit the TinyMCE external_link_list_url
    configuration option. The link_list parameter must be a list of 2-tuples.
    """
    # TinyMCE expects the list in a global named tinyMCELinkList.
    return render_to_js_vardef('tinyMCELinkList', link_list)
def render_to_image_list(image_list):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of images suitable for use wit the TinyMCE external_image_list_url
    configuration option. The image_list parameter must be a list of 2-tuples.
    """
    # TinyMCE expects the list in a global named tinyMCEImageList.
    return render_to_js_vardef('tinyMCEImageList', image_list)
def render_to_js_vardef(var_name, var_value):
    """Serve `var <var_name> = <json>` as a JavaScript response."""
    script = "var %s = %s" % (var_name, simplejson.dumps(var_value))
    return HttpResponse(script, content_type='application/x-javascript')
def filebrowser(request):
    """Render the JS glue that points TinyMCE at django-filebrowser."""
    browse_url = urlresolvers.reverse('filebrowser.views.browse')
    return render_to_response('tinymce/filebrowser.js',
                              {'fb_url': browse_url},
                              context_instance=RequestContext(request))
| django-wodnas/django-tinymce | tinymce/views.py | Python | mit | 4,440 |
import unittest
from backtesting.Order import Order
from backtesting.Parser.CSVOrderParser import CSVOrderParser
class CSVOrderParserTest(unittest.TestCase):
    """Validates CSVOrderParser against the SimpleOrdersFile fixture."""

    def test_parse_file(self):
        parser = CSVOrderParser("data/SimpleOrdersFile.csv")
        parser.parse()

        symbols = parser.get_symbols_traded()
        self.assertEqual(len(symbols), 2)
        for expected_symbol in ("AAPL", "IBM"):
            self.assertTrue(expected_symbol in symbols)

        first_trade, last_trade = parser.get_dates_range()
        self.assertEqual("2011-01-10", first_trade.strftime("%Y-%m-%d"))
        self.assertEqual("2011-01-13", last_trade.strftime("%Y-%m-%d"))

        orders = parser.get_parsed_orders()
        self.assertEqual(len(orders), 3)
        # (symbol, type, quantity, timestamp) for each order, in file order.
        expected_orders = [
            ("AAPL", Order.TYPE_BUY, 1500, "2011-01-10 16:00:00"),
            ("AAPL", Order.TYPE_SELL, 1500, "2011-01-13 16:00:00"),
            ("IBM", Order.TYPE_BUY, 4000, "2011-01-13 16:00:00"),
        ]
        for order, (symbol, order_type, quantity, stamp) in zip(orders, expected_orders):
            self.assertEqual(order.symbol, symbol)
            self.assertEqual(order.type, order_type)
            self.assertEqual(order.quantity, quantity)
            self.assertEqual(order.timestamp.strftime("%Y-%m-%d %H:%M:%S"), stamp)
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.six import iteritems
from ansible.plugins import connection_loader, module_loader
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.path import unfrackpath
try:
    # Reuse Ansible's global display object when running under the CLI;
    # fall back to a fresh Display for other entry points (e.g. tests).
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class ActionModule(_ActionModule):
    """Action handler for junos_* modules.

    Rewrites the (required) connection=local play context into a persistent
    ``netconf`` or ``network_cli`` connection, driven by the legacy
    ``provider`` dict, before delegating to the standard action module.
    """

    def run(self, tmp=None, task_vars=None):
        # junos modules only work over connection=local; fail fast otherwise.
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        module = module_loader._load_module_source(self._task.action, module_loader.find_plugin(self._task.action))
        # Modules that do not opt into persistent connections run normally.
        if not getattr(module, 'USE_PERSISTENT_CONNECTION', False):
            return super(ActionModule, self).run(tmp, task_vars)

        provider = self.load_provider()

        # Copy the play context and retarget it at the network device;
        # provider values win over play-level defaults.
        pc = copy.deepcopy(self._play_context)
        pc.network_os = 'junos'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr

        if self._task.action == 'junos_netconf':
            # junos_netconf itself talks over the CLI (ssh, default port 22).
            pc.connection = 'network_cli'
            pc.port = provider['port'] or self._play_context.port or 22
        else:
            # Everything else uses netconf (default port 830).
            pc.connection = 'netconf'
            pc.port = provider['port'] or self._play_context.port or 830

        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = provider['timeout'] or self._play_context.timeout

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = self._get_socket_path(pc)
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)

        if not os.path.exists(socket_path):
            # start the connection if it isn't started
            if pc.connection == 'netconf':
                rc, out, err = connection.exec_command('open_session()')
                display.vvvv('open_session() returned %s %s %s' % (rc, out, err))
            else:
                rc, out, err = connection.exec_command('open_shell()')
                display.vvvv('open_shell() returned %s %s %s' % (rc, out, err))
                if rc != 0:
                    return {'failed': True,
                            'msg': 'unable to open shell. Please see: ' +
                                   'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell',
                            'rc': rc}
        elif pc.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config module
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

        # Let the module find the persistent connection via its socket.
        task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(tmp, task_vars)
        return result

    def _get_socket_path(self, play_context):
        """Compute the control-socket path for this device connection."""
        ssh = connection_loader.get('ssh', class_only=True)
        path = unfrackpath("$HOME/.ansible/pc")
        # use play_context.connection instea of play_context.port to avoid
        # collision if netconf is listening on port 22
        # cp = ssh._create_control_path(play_context.remote_addr, play_context.connection, play_context.remote_user)
        cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user)
        return cp % dict(directory=path)

    def load_provider(self):
        """Build the provider dict from task args, filling gaps via fallbacks."""
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(junos_argument_spec):
            if key != 'provider' and key not in provider:
                if key in self._task.args:
                    provider[key] = self._task.args[key]
                elif 'fallback' in value:
                    provider[key] = self._fallback(value['fallback'])
                elif key not in provider:
                    provider[key] = None
        return provider

    def _fallback(self, fallback):
        """Evaluate an argument-spec fallback tuple: (strategy, *args/kwargs)."""
        strategy = fallback[0]
        args = []
        kwargs = {}
        for item in fallback[1:]:
            if isinstance(item, dict):
                kwargs = item
            else:
                args = item
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            # No fallback source available; caller treats this as "unset".
            pass
| jmehnle/ansible | lib/ansible/plugins/action/junos.py | Python | gpl-3.0 | 5,819 |
import uuid, os, requests, re
from django.conf import settings
from panapp.constants import NAME_INVALID, DOB_INVALID, PAN_INVALID
def get_upload_file_path(instance, filename):
    """Return a collision-free path under MEDIA uploads for *filename*.

    The original extension is preserved; the basename is replaced with a
    random UUID.
    """
    extension = filename.split('.')[-1]
    unique_name = "%s.%s" % (uuid.uuid4(), extension)
    upload_root = "%s%suploads/" % (settings.BASE_DIR, settings.MEDIA_URL)
    return os.path.join(upload_root, unique_name)
def extract_text(data):
    """POST the stored image to the OCR.space API and return the response."""
    image_path = str(data.image)
    payload = {'apikey': settings.OCR_SPACE_API_KEY}
    with open(image_path, 'rb') as image_file:
        response = requests.post(settings.OCR_SPACE_API,
                                 files={'file': image_file},
                                 data=payload)
    return response
def check_if_pan_card_pic(parsed_text):
    """Return True when the OCR lines look like the front of a PAN card.

    A genuine PAN card shows both header lines: 'INCOME TAX DEPARTMENT'
    and 'GOVT. OF INDIA'.  With well-formed OCR output (>= 6 lines) these
    appear at fixed positions 0 and 5; otherwise every line is scanned.

    Fixes vs the previous version: the fallback scan accepted EITHER
    marker alone and compared case-sensitively; both markers are now
    required and the comparison is case-insensitive throughout.
    """
    if len(parsed_text) >= 6:
        # Well-formed output: the two header lines sit at fixed positions.
        if 'INCOME TAX DEPARTMENT' in str(parsed_text[0]).strip().upper():
            return 'GOVT. OF INDIA' in str(parsed_text[5]).strip().upper()
        return False
    # Fallback for short/garbled output: scan every line for both markers.
    cleaned = [str(text).strip().upper() for text in parsed_text]
    has_dept = any('INCOME TAX DEPARTMENT' in text for text in cleaned)
    has_govt = any('GOVT. OF INDIA' in text for text in cleaned)
    return has_dept and has_govt
def detect_if_not_forged(img_url):
    # TODO: forgery detection is not implemented; every image is currently
    # accepted as genuine.  `img_url` is unused until then.
    return True
def match_with_user_data(parsed_text, data):
    """Compare OCR lines against the user-supplied name/dob/pan.

    Returns {'status': bool, 'reason': [...]} where `reason` lists the
    *_INVALID constants for any field that could not be matched.  Fields
    are first checked at their fixed positions (1, 3, 5) when the OCR
    output has more than 6 lines; otherwise (or on a partial positional
    match) every line is scanned.
    """
    result = {'status': False, 'reason': []}
    expected_name = str(data.name).strip().upper()
    expected_dob = str(data.dob).strip().upper()
    expected_pan = str(data.pan).strip().upper()

    name_ok = dob_ok = pan_ok = False

    if len(parsed_text) > 6:
        # Fast path: fields sit at fixed positions in well-formed output.
        name_ok = str(parsed_text[1]).strip().upper() == expected_name
        dob_ok = str(parsed_text[3]).strip().upper() == expected_dob
        pan_ok = str(parsed_text[5]).strip().upper() == expected_pan
        if name_ok and dob_ok and pan_ok:
            result['status'] = True
            return result

    # Generic scan: any line may carry any of the three fields.
    for text in parsed_text:
        cleaned = str(text).strip().upper()
        name_ok = name_ok or cleaned == expected_name
        dob_ok = dob_ok or cleaned == expected_dob
        pan_ok = pan_ok or cleaned == expected_pan
        if name_ok and dob_ok and pan_ok:
            result['status'] = True
            return result

    if not name_ok:
        result['reason'].append(NAME_INVALID)
    if not dob_ok:
        result['reason'].append(DOB_INVALID)
    if not pan_ok:
        result['reason'].append(PAN_INVALID)
    return result
def get_data(parsed_text):
    """Extract (name, dob, pan) from positional OCR lines.

    Returns uppercase, stripped values from lines 1, 3 and 5; when fewer
    than 6 lines are available, returns (False, False, False).
    """
    if len(parsed_text) < 6:
        return (False, False, False)
    name, dob, pan = (str(parsed_text[i]).strip().upper() for i in (1, 3, 5))
    return (name, dob, pan)
def verify_pan_number(parsed_text):
    """Validate the PAN on line 5 against its format and the name on line 1.

    A PAN is 5 letters + 4 digits + 1 letter, where the 4th character
    encodes the holder category (one of ABCFGHLJPTK) and the 5th character
    is the initial of a name part.

    Fix: the regex is now end-anchored (\\Z); `re.match` alone accepted a
    PAN with trailing garbage (e.g. 'ABCPP1234KX').
    """
    if len(parsed_text) < 6:
        return False
    pan = str(parsed_text[5]).strip().upper()
    name = str(parsed_text[1]).strip().upper()

    if not re.match(r'[A-Z]{5}[0-9]{4}[A-Z]\Z', pan):
        return False

    # 4th character: holder category code.
    if pan[3] not in frozenset('ABCFGHLJPTK'):
        return False

    # 5th character: must be the initial of one of the name parts.
    initials = {part[0] for part in name.split() if part}
    return pan[4] in initials
def check_image_size(path):
    """Return True if the file at *path* is smaller than ~1 MB.

    Keeps the original threshold (size/1000 < 1024).  Missing/unreadable
    files and bad path values yield False; the previous bare `except`
    is narrowed to the exceptions getsize actually raises.
    """
    try:
        return os.path.getsize(path) / 1000 < 1024
    except (OSError, TypeError, ValueError):
        return False
| sheeshmohsin/insta_hack | panverification/panapp/utils.py | Python | mit | 3,344 |
######################################################################
# Python Open Dynamics Engine Wrapper
# Copyright (C) 2004 PyODE developers (see file AUTHORS)
# All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of EITHER:
# (1) The GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at
# your option) any later version. The text of the GNU Lesser
# General Public License is included with this library in the
# file LICENSE.
# (2) The BSD-style license that is included with this library in
# the file LICENSE-BSD.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files
# LICENSE and LICENSE-BSD for more details.
######################################################################
# PyODE Example: Transforms
# This example demonstrates the way object transforms are calculated relative
# to the parent element's transform in XODE.
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from cgkit.cgtypes import *
import pygame
import math
import ode
import xode.parser
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode>
<world name="world1">
<space name="space1">
<body>
<transform>
<position x="0" y="0" z="0"/>
<rotation>
<euler x="0" y="0" z="0" aformat="degrees"/>
</rotation>
</transform>
<!-- Y-axis Rotations -->
<body>
<transform scale="1.25">
<position x="0" y="1" z="0"/>
<rotation>
<euler x="0" y="30" z="0" aformat="degrees"/>
</rotation>
</transform>
<body>
<transform scale="1.25">
<position x="0" y="1" z="0"/>
<rotation>
<euler x="0" y="30" z="0" aformat="degrees"/>
</rotation>
</transform>
</body>
</body>
<!-- X-axis Rotations -->
<body>
<transform scale="1.25">
<position x="1" y="0" z="0"/>
<rotation>
<euler x="30" y="0" z="0" aformat="degrees"/>
</rotation>
</transform>
<body>
<transform scale="1.25">
<position x="1" y="0" z="0"/>
<rotation>
<euler x="30" y="0" z="0" aformat="degrees"/>
</rotation>
</transform>
</body>
</body>
<!-- Z-axis Rotations -->
<body>
<transform scale="1.25">
<position x="0" y="0" z="-1"/>
<rotation>
<euler x="0" y="0" z="30" aformat="degrees"/>
</rotation>
</transform>
<body>
<transform scale="1.25">
<position x="0" y="0" z="-1"/>
<rotation>
<euler x="0" y="0" z="30" aformat="degrees"/>
</rotation>
</transform>
</body>
</body>
</body>
</space>
</world>
</xode>
'''
def prepare_GL(c):
"""Prepare drawing.
"""
# Viewport
glViewport(0, 0, 640, 480)
# Initialize
glClearColor(0.8, 0.8, 0.9, 0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST)
glDisable(GL_LIGHTING)
glEnable(GL_LIGHTING)
glEnable(GL_NORMALIZE)
glShadeModel(GL_FLAT)
# Projection
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
P = mat4(1).perspective(45,1.3333,0.2,20)
glMultMatrixd(P.toList())
# Initialize ModelView matrix
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# Light source
glLightfv(GL_LIGHT0,GL_POSITION,[0,0,1,0])
glLightfv(GL_LIGHT0,GL_DIFFUSE,[1,1,1,1])
glLightfv(GL_LIGHT0,GL_SPECULAR,[1,1,1,1])
glEnable(GL_LIGHT0)
# View transformation
V = mat4(1).lookAt(1.2*vec3(0.5*c,0.7*c,c),(1.0,1.0,0), up=(0,1,0))
V.rotate(math.pi,vec3(0,1,0))
V = V.inverse()
glMultMatrixd(V.toList())
def draw_body(body):
"""Draw an ODE body.
"""
x,y,z = body.getPosition()
R = body.getRotation()
T = mat4()
T[0,0] = R[0]
T[0,1] = R[1]
T[0,2] = R[2]
T[1,0] = R[3]
T[1,1] = R[4]
T[1,2] = R[5]
T[2,0] = R[6]
T[2,1] = R[7]
T[2,2] = R[8]
T[3] = (x,y,z,1.0)
glPushMatrix()
glMultMatrixd(T.toList())
if body.shape=="box":
sx,sy,sz = body.boxsize
glScale(sx, sy, sz)
glutSolidCube(1)
glPopMatrix()
######################################################################
# Initialize pygame
# pygame.init() returns (number of modules initialized, number failed).
passed, failed = pygame.init()
# Open a window
srf = pygame.display.set_mode((640,480), pygame.OPENGL | pygame.DOUBLEBUF)
glutInit()
# Parse the embedded XODE document and pull out the ODE world.
root = xode.parser.Parser().parseString(doc)
world = root.namedChild('world1').getODEObject()
world.setGravity( (0, 0, 0) )
# Add all ODE bodies from the XODE document into bodies.
# (Name is presumably a typo for "traverse".)
def transverse(node):
    obj = node.getODEObject()
    if (isinstance(obj, ode.Body)):
        # Set attributes for draw_body()
        obj.shape = 'box'
        obj.boxsize = (0.4, 0.4, 0.4)
        bodies.append(obj)
    # NOTE: the loop variable shadows the `node` parameter; harmless here
    # because the parameter is not used after this point.
    for node in node.getChildren():
        transverse(node)
bodies = []
transverse(root)
# Some variables used inside the simulation loop
fps = 50
dt = 1.0/fps
counter = 0.0
running = True
clk = pygame.time.Clock()
while running:
    events = pygame.event.get()
    for e in events:
        if e.type==pygame.QUIT:
            running=False
    # Ramp the camera parameter up to 5 so the view pulls back at start-up.
    if (counter < 5):
        counter = counter + 0.1
    # Draw the scene
    prepare_GL(counter)
    for b in bodies:
        draw_body(b)
    pygame.display.flip()
    # Simulate: take n sub-steps per frame for better integration stability.
    n = 2
    for i in range(n):
        world.step(dt/n)
    # Cap the loop at the target frame rate.
    clk.tick(fps)
| coiax/pyode | examples/transforms.py | Python | lgpl-2.1 | 6,011 |
from django.core.exceptions import PermissionDenied
from django.views.decorators.cache import never_cache
from django.views.generic.base import RedirectView

from C4CApplication.views.utils import create_user
class MemberDetailsRedirectView(RedirectView):
    """Add the member identified by the URL's ``pk`` (apparently an email
    address) to the connected user's favorites, then redirect back to that
    member's detail page.
    """
    # Redirect target; computed per-request in get().
    url = ""
    # Business-layer object for the logged-in user; set in dispatch().
    connected_member = None

    def dispatch(self, request, *args, **kwargs):
        # Create the object representing the user
        # Anonymous sessions (no 'email' key) are rejected outright.
        if 'email' not in self.request.session:
            raise PermissionDenied # HTTP 403
        self.connected_member = create_user(self.request.session['email'])

        return super(MemberDetailsRedirectView, self).dispatch(request, *args, **kwargs)

    @never_cache
    def get(self, request, *args, **kwargs):
        # 'pk' identifies the member being added as a favorite.
        member_to_ad_as_a_friend_mail = kwargs['pk']
        self.url = "/memberdetails/"+str(member_to_ad_as_a_friend_mail)
        # Side effect before redirecting: record the favorite.
        self.connected_member.add_favorite( member_to_ad_as_a_friend_mail)

        return super(MemberDetailsRedirectView, self).get(request, *args, **kwargs)
import re
# Names exported by `import *`: every validator class plus the lowercase
# aliases defined at the bottom of this module.
__all__ = (
    'Email', 'email', 'EqualTo', 'equal_to', 'IPAddress', 'ip_address',
    'Length', 'length', 'NumberRange', 'number_range', 'Optional', 'optional',
    'Required', 'required', 'Regexp', 'regexp', 'URL', 'url', 'AnyOf',
    'any_of', 'NoneOf', 'none_of'
)
class ValidationError(ValueError):
    """
    Raised when a validator fails to validate its input.
    """

    def __init__(self, message=u'', *args, **kwargs):
        super(ValidationError, self).__init__(message, *args, **kwargs)
class StopValidation(Exception):
    """
    Causes the validation chain to stop.

    If StopValidation is raised, no more validators in the validation chain are
    called. If raised with a message, the message will be added to the errors
    list.
    """

    def __init__(self, message=u'', *args, **kwargs):
        super(StopValidation, self).__init__(message, *args, **kwargs)
class EqualTo(object):
    """
    Compares the values of two fields.

    :param fieldname:
        The name of the other field to compare to.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated with `%(other_label)s` and `%(other_name)s` to provide a
        more helpful error.
    """
    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        self.message = message

    def __call__(self, form, field):
        try:
            other = form[self.fieldname]
        except KeyError:
            raise ValidationError(field.gettext(u"Invalid field name '%s'.") % self.fieldname)
        if field.data != other.data:
            d = {
                'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
                'other_name': self.fieldname
            }
            # Resolve the message into a local: the previous code assigned the
            # derived default onto self.message, permanently caching the first
            # translation on this (shared) validator instance.
            message = self.message
            if message is None:
                message = field.gettext(u'Field must be equal to %(other_name)s.')
            raise ValidationError(message % d)
class Length(object):
    """
    Validates the length of a string.

    :param min:
        The minimum required length of the string. If not provided, minimum
        length will not be checked.
    :param max:
        The maximum length of the string. If not provided, maximum length
        will not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)d` and `%(max)d` if desired. Useful defaults
        are provided depending on the existence of min and max.
    """
    def __init__(self, min=-1, max=-1, message=None):
        assert min != -1 or max!=-1, 'At least one of `min` or `max` must be specified.'
        assert max == -1 or min <= max, '`min` cannot be more than `max`.'
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        l = field.data and len(field.data) or 0
        if l < self.min or self.max != -1 and l > self.max:
            # Build the message in a local: the previous code cached the first
            # derived message (including its ngettext pluralization) on
            # self.message, so later failures on this shared validator reused
            # a potentially wrong message.
            message = self.message
            if message is None:
                if self.max == -1:
                    message = field.ngettext(u'Field must be at least %(min)d character long.',
                                             u'Field must be at least %(min)d characters long.', self.min)
                elif self.min == -1:
                    message = field.ngettext(u'Field cannot be longer than %(max)d character.',
                                             u'Field cannot be longer than %(max)d characters.', self.max)
                else:
                    message = field.gettext(u'Field must be between %(min)d and %(max)d characters long.')
            raise ValidationError(message % dict(min=self.min, max=self.max))
class NumberRange(object):
    """
    Validates that a number is of a minimum and/or maximum value, inclusive.
    This will work with any comparable number type, such as floats and
    decimals, not just integers.

    :param min:
        The minimum required value of the number. If not provided, minimum
        value will not be checked.
    :param max:
        The maximum value of the number. If not provided, maximum value
        will not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)s` and `%(max)s` if desired. Useful defaults
        are provided depending on the existence of min and max.
    """
    def __init__(self, min=None, max=None, message=None):
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        data = field.data
        if data is None or (self.min is not None and data < self.min) or \
            (self.max is not None and data > self.max):
            # Resolve the message into a local so the derived default is not
            # cached on this (shared) validator instance.
            message = self.message
            if message is None:
                # we use %(min)s interpolation to support floats, None, and
                # Decimals without throwing a formatting exception.
                if self.max is None:
                    message = field.gettext(u'Number must be greater than %(min)s.')
                elif self.min is None:
                    message = field.gettext(u'Number must be less than %(max)s.')
                else:
                    message = field.gettext(u'Number must be between %(min)s and %(max)s.')
            raise ValidationError(message % dict(min=self.min, max=self.max))
class Optional(object):
    """
    Allows empty input and stops the validation chain from continuing.

    If input is empty, also removes prior errors (such as processing errors)
    from the field.
    """
    field_flags = ('optional', )

    def __call__(self, form, field):
        raw = field.raw_data
        # Treat "no raw data" or a whitespace-only first value as empty.
        if not raw or (isinstance(raw[0], basestring) and not raw[0].strip()):
            field.errors[:] = []
            raise StopValidation()
class Required(object):
    """
    Validates that the field contains data. This validator will stop the
    validation chain on error.

    :param message:
        Error message to raise in case of a validation error.
    """
    field_flags = ('required', )

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        if not field.data or isinstance(field.data, basestring) and not field.data.strip():
            # Resolve the message into a local: the previous code assigned the
            # derived default onto self.message, caching the first translation
            # on this (shared) validator instance.
            message = self.message
            if message is None:
                message = field.gettext(u'This field is required.')
            field.errors[:] = []
            raise StopValidation(message)
class Regexp(object):
    """
    Validates the field against a user provided regexp.

    :param regex:
        The regular expression string to use. Can also be a compiled regular
        expression pattern.
    :param flags:
        The regexp flags to use, for example re.IGNORECASE. Ignored if
        `regex` is not a string.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, regex, flags=0, message=None):
        if isinstance(regex, basestring):
            regex = re.compile(regex, flags)
        self.regex = regex
        self.message = message

    def __call__(self, form, field):
        # Missing data is matched against the empty string.
        if not self.regex.match(field.data or u''):
            # Resolve the message into a local so the derived default is not
            # cached on this (shared) validator instance.
            message = self.message
            if message is None:
                message = field.gettext(u'Invalid input.')
            raise ValidationError(message)
class Email(Regexp):
    """
    Validates an email address. Note that this uses a very primitive regular
    expression and should only be used in instances where you later verify by
    other means, such as email activation or lookups.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, message)

    def __call__(self, form, field):
        # NOTE(review): assigning the derived default onto self.message caches
        # the first translation on this shared validator instance; the
        # superclass __call__ then reads self.message.
        if self.message is None:
            self.message = field.gettext(u'Invalid email address.')

        super(Email, self).__call__(form, field)
class IPAddress(Regexp):
    """
    Validates an IP(v4) address.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        # Purely syntactic check: octets are 1-3 digits, values above 255 are
        # not rejected.
        super(IPAddress, self).__init__(r'^([0-9]{1,3}\.){3}[0-9]{1,3}$', message=message)

    def __call__(self, form, field):
        # NOTE(review): derived default message is cached on this shared
        # validator instance after the first failing call.
        if self.message is None:
            self.message = field.gettext(u'Invalid IP address.')

        super(IPAddress, self).__call__(form, field)
class URL(Regexp):
    """
    Simple regexp based url validation. Much like the email validator, you
    probably want to validate the url later by other means if the url must
    resolve.

    :param require_tld:
        If true, then the domain-name portion of the URL must contain a .tld
        suffix. Set this to false if you want to allow domains like
        `localhost`.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, require_tld=True, message=None):
        # NOTE: the ur'' prefix (raw unicode string) is Python 2-only syntax;
        # this module targets Python 2.
        tld_part = (require_tld and ur'\.[a-z]{2,10}' or u'')
        regex = ur'^[a-z]+://([^/:]+%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$' % tld_part
        super(URL, self).__init__(regex, re.IGNORECASE, message)

    def __call__(self, form, field):
        # NOTE(review): derived default message is cached on this shared
        # validator instance after the first failing call.
        if self.message is None:
            self.message = field.gettext(u'Invalid URL.')

        super(URL, self).__call__(form, field)
class AnyOf(object):
    """
    Compares the incoming data to a sequence of valid inputs.

    :param values:
        A sequence of valid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if values_formatter is None:
            # Default formatter only works for sequences of strings.
            values_formatter = lambda v: u', '.join(v)
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        if field.data not in self.values:
            # Resolve the message into a local so the derived default is not
            # cached on this (shared) validator instance.
            message = self.message
            if message is None:
                message = field.gettext(u'Invalid value, must be one of: %(values)s.')
            # Raise ValidationError (a ValueError subclass, so existing
            # callers catching ValueError still work) for consistency with
            # the other validators in this module.
            raise ValidationError(message % dict(values=self.values_formatter(self.values)))
class NoneOf(object):
    """
    Compares the incoming data to a sequence of invalid inputs.

    :param values:
        A sequence of invalid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if values_formatter is None:
            # Default formatter only works for sequences of strings.
            values_formatter = lambda v: u', '.join(v)
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        if field.data in self.values:
            # Resolve the message into a local so the derived default is not
            # cached on this (shared) validator instance.
            message = self.message
            if message is None:
                message = field.gettext(u'Invalid value, can\'t be any of: %(values)s.')
            # Raise ValidationError (a ValueError subclass, so existing
            # callers catching ValueError still work) for consistency with
            # the other validators in this module.
            raise ValidationError(message % dict(values=self.values_formatter(self.values)))
# Lowercase aliases kept for backwards compatibility with code that imports
# the validators by their historic snake_case names.
email = Email
equal_to = EqualTo
ip_address = IPAddress
length = Length
number_range = NumberRange
optional = Optional
required = Required
regexp = Regexp
url = URL
any_of = AnyOf
none_of = NoneOf
| ForrestSutton/foodsafely | wtforms/validators.py | Python | gpl-2.0 | 11,527 |
#! /usr/bin/env python
## Simple Python routine for creating a new AVR project
## Feel free to extend this to meet your needs
## In particular, main.h includes nearly every AVR library you'll ever need
## which is no problem, b/c the linker will just ignore the unused ones.
## But if you're not using them, it might cause confusion later.
## Trim them down to fit?
## Or, if you're feeling DIY, you can just copy the Makefile, main.c and main.h
## into a new directory yourself. The other files are optional, but handy.
import os
import shutil
import sys
## Get command-line input
class UsageError(Exception):
    """Raised when the script is invoked without a project name argument."""
    pass
# The new project's name is the first (and only) command-line argument.
try:
    newProjectName = sys.argv[1]
except IndexError:
    raise(UsageError("Please specify a project name on the command-line.\n"))
## Create new project directory...
## ... in parent directory
## relativeDirectory = os.path.join(os.path.pardir, newProjectName)
## ... or in this directory, and you get to move it yourself.
relativeDirectory = newProjectName
# Fails with OSError if a directory of that name already exists.
os.mkdir(relativeDirectory)
## Files copied directly over...
def copyToNewDirectory(whichFile, newDirectory):
    """Copy a single template file into the new project directory."""
    shutil.copy(whichFile, newDirectory)
## ... these ones.
# Template files expected to sit next to this script.
for filename in ["Makefile", "main.c", "main.h", "USART.h", "USART.c", "macros.h"]:
    copyToNewDirectory(filename, relativeDirectory)
# Python 2 print statements; this script targets Python 2.
print "Copied Makefile, main.c, and main.h into %s." % relativeDirectory
print "Time to start coding."
| hexagon5un/AVR-Programming | setupProject/setupProject.py | Python | mit | 1,408 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for network_units."""
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from dragnn.protos import spec_pb2
from dragnn.python import network_units
import dragnn.python.load_dragnn_cc_impl
import syntaxnet.load_parser_ops
FLAGS = tf.app.flags.FLAGS
class NetworkUnitsConverterTest(test_util.TensorFlowTestCase):

  def testConvertNetworkStateTensorarray(self):
    """convert_network_state_tensorarray drops step 0 and flattens strides."""
    with self.test_session() as session:
      ta = tf.TensorArray(
          dtype=tf.float32,
          size=0,
          dynamic_size=True,
          clear_after_read=False,
          infer_shape=False)
      # Create a 3-step x 2-stride x 2-feature-dim source array.
      ta = ta.write(0, [[0., 0.]] * 2)  # The zeroth step will be removed.
      ta = ta.write(1, [[1., 10.]] * 2)
      ta = ta.write(2, [[2., 20.]] * 2)
      ta = ta.write(3, [[3., 30.]] * 2)
      tensor = network_units.convert_network_state_tensorarray(ta)
      actual = session.run(tensor)
      # 2 strides x 3 remaining steps = 6 rows of 2 features.
      self.assertEqual(actual.shape, (6, 2))

      # The arrangement of the values is expected to be stride * steps.
      expected = [[1., 10.], [2., 20.], [3., 30.], [1., 10.], [2., 20.],
                  [3., 30.]]
      self.assertAllEqual(actual, expected)
class MockComponent(object):
  """Lightweight stand-in for a DRAGNN component used by these tests."""

  def __init__(self, master, component_spec):
    # Test-injected attribute store; filled directly by test code.
    self._attrs = {}
    self.beam_size = 1
    self.master = master
    self.spec = component_spec
    self.name = component_spec.name

  def attr(self, name):
    """Return the attribute value previously injected under |name|."""
    return self._attrs[name]
class MockMaster(object):
  """Minimal stand-in for the DRAGNN master used by these unit tests."""

  def __init__(self):
    self.spec = spec_pb2.MasterSpec()
    self.hyperparams = spec_pb2.GridPoint()
    # Only a 'previous' component is provided; sufficient for the linked
    # feature lookups exercised below.
    self.lookup_component = {
        'previous': MockComponent(self, spec_pb2.ComponentSpec())
    }
class NetworkUnitsLookupTest(test_util.TensorFlowTestCase):
  """Tests for fixed/linked feature handling in network units."""

  def setUp(self):
    # Clear the graph and all existing variables. Otherwise, variables created
    # in different tests may collide with each other.
    tf.reset_default_graph()

    self._master = MockMaster()
    self._master.spec = spec_pb2.MasterSpec()

    # Add a component with a linked feature.
    component_spec = self._master.spec.component.add()
    component_spec.name = 'fake_linked'
    component_spec.backend.registered_name = 'FakeComponent'
    linked_feature = component_spec.linked_feature.add()
    linked_feature.source_component = 'fake_linked'
    linked_feature.source_translator = 'identity'
    linked_feature.embedding_dim = -1
    linked_feature.size = 2
    self._linked_component = MockComponent(self._master, component_spec)

    # Add a feature with a fixed feature.
    component_spec = self._master.spec.component.add()
    component_spec.name = 'fake_fixed'
    component_spec.backend.registered_name = 'FakeComponent'
    fixed_feature = component_spec.fixed_feature.add()
    fixed_feature.fml = 'input.word'
    fixed_feature.embedding_dim = 1
    fixed_feature.size = 1
    self._fixed_component = MockComponent(self._master, component_spec)

  def testExportFixedFeaturesNetworkWithEnabledEmbeddingMatrix(self):
    # embedding_dim == 1 -> one embedding matrix parameter is created.
    network = network_units.ExportFixedFeaturesNetwork(self._fixed_component)
    self.assertEqual(1, len(network.params))

  def testExportFixedFeaturesNetworkWithDisabledEmbeddingMatrix(self):
    # embedding_dim == -1 disables the embedding matrix entirely.
    self._fixed_component.spec.fixed_feature[0].embedding_dim = -1
    network = network_units.ExportFixedFeaturesNetwork(self._fixed_component)
    self.assertEqual(0, len(network.params))
class GetAttrsWithDefaultsTest(test_util.TensorFlowTestCase):
  """Tests boolean parsing in network_units.get_attrs_with_defaults."""

  def MakeAttrs(self, defaults, key=None, value=None):
    """Returns attrs based on the |defaults| and one |key|,|value| override."""
    spec = spec_pb2.RegisteredModuleSpec()
    if key and value:
      spec.parameters[key] = value
    return network_units.get_attrs_with_defaults(spec.parameters, defaults)

  def testFalseValues(self):
    # Anything other than a case-insensitive "true" parses as False.

    def _assert_attr_is_false(value=None):
      key = 'foo'
      attrs = self.MakeAttrs({key: False}, key, value)
      self.assertFalse(attrs[key])

    _assert_attr_is_false()
    _assert_attr_is_false('false')
    _assert_attr_is_false('False')
    _assert_attr_is_false('FALSE')
    _assert_attr_is_false('no')
    _assert_attr_is_false('whatever')
    _assert_attr_is_false(' ')
    _assert_attr_is_false('')

  def testTrueValues(self):
    # Only case-insensitive "true" parses as True.

    def _assert_attr_is_true(value=None):
      key = 'foo'
      attrs = self.MakeAttrs({key: False}, key, value)
      self.assertTrue(attrs[key])

    _assert_attr_is_true('true')
    _assert_attr_is_true('True')
    _assert_attr_is_true('TRUE')
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/syntaxnet/dragnn/python/network_units_test.py | Python | bsd-2-clause | 5,321 |
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from oslo.config import cfg
from nova.cells import weights
# Config option for this weigher, registered under the [cells] group.
ram_weigher_opts = [
    cfg.FloatOpt('ram_weight_multiplier',
                 default=10.0,
                 help='Multiplier used for weighing ram. Negative '
                      'numbers mean to stack vs spread.'),
]

CONF = cfg.CONF
CONF.register_opts(ram_weigher_opts, group='cells')
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
    """Weigh cells by instance_type requested."""

    def weight_multiplier(self):
        """Scale factor applied to this weigher's results (configurable)."""
        return CONF.cells.ram_weight_multiplier

    def _weigh_object(self, cell, weight_properties):
        """Use the 'ram_free' for a particular instance_type advertised from a
        child cell's capacity to compute a weight. We want to direct the
        build to a cell with a higher capacity. Since higher weights win,
        we just return the number of units available for the instance_type.
        """
        instance_type = weight_properties['request_spec']['instance_type']
        mb_key = str(instance_type['memory_mb'])
        # Missing capacity info simply weighs the cell at zero.
        units_by_mb = cell.capacities.get('ram_free', {}).get('units_by_mb', {})
        return units_by_mb.get(mb_key, 0)
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/cells/weights/ram_by_instance_type.py | Python | gpl-2.0 | 1,971 |
"""Tests the pytest-redis backup list arguments."""
import utils
def create_test_file(testdir):
    """Create test file and return array of paths to tests.

    The file contains two always-passing tests; the returned node IDs are in
    file order.
    """
    test_filename = "test_file.py"
    test_filename_contents = """
def test_exists():
    assert True

def test_does_exist():
    assert True
"""
    utils.create_test_file(testdir, test_filename, test_filename_contents)
    return [test_filename + "::test_exists", test_filename +
            "::test_does_exist"]
def get_args_for_backup_list(redis_args, backup_list_key):
    """Return the pytest CLI args enabling the redis backup list."""
    extra = ["-s", "--redis-backup-list-key=" + backup_list_key]
    return utils.get_standard_args(redis_args) + extra
def test_run_back_up_test(testdir, redis_connection,
                          redis_args):
    """Ensure that the backup list is filled with tests."""
    file_paths_to_test = create_test_file(testdir)
    back_up_list = redis_args["redis-backup-list-key"]
    py_test_args = get_args_for_backup_list(redis_args, back_up_list)
    # Seed the backup list directly; the plugin should consume it and then
    # repopulate it with the same entries.
    for a_file in file_paths_to_test:
        redis_connection.lpush(back_up_list,
                               a_file)
    testdir.runpytest(*py_test_args)
    assert redis_connection.llen(back_up_list) == 2
    # lpush followed by rpop is FIFO, so pop order matches seeding order.
    for a_file in file_paths_to_test:
        assert redis_connection.rpop(back_up_list) == a_file
def test_run_tests_multiple_times_with_backup(testdir, redis_connection,
                                              redis_args):
    """Run a test multiple times to ensure backup list is used.

    Each rerun should pick the tests back up from the backup list and report
    them as PASSED, leaving the backup list intact afterwards.
    """
    file_paths_to_test = create_test_file(testdir)
    back_up_list = redis_args["redis-backup-list-key"]
    py_test_args = get_args_for_backup_list(redis_args, back_up_list)
    for a_file in file_paths_to_test:
        redis_connection.lpush(redis_args['redis-list-key'],
                               a_file)
    # The comprehension variable was previously also named `i`, shadowing the
    # outer loop variable (and leaking over it under Python 2); renamed.
    for _ in range(10):
        result = testdir.runpytest(*py_test_args)
        result.stdout.fnmatch_lines(
            [path + " PASSED" for path in file_paths_to_test])
    assert redis_connection.llen(back_up_list) == 2
    for a_file in file_paths_to_test:
        assert redis_connection.rpop(back_up_list) == a_file
| sabidib/pytest-redis | tests/test_backup_list.py | Python | mit | 2,296 |
from django.contrib import admin
from locations.models import LocationType, LocationBase

# Register the location models with the default ModelAdmin so they are
# editable in the Django admin site.
admin.site.register(LocationType)
admin.site.register(LocationBase)
| ascii1011/basecat | apps/locations/admin.py | Python | gpl-2.0 | 159 |
'''
Tests for the ``gtk.CellRendererText`` class.
'''
import gtk
from gtk import gdk
def test_default_attributes():
    """Verify the default property values of a fresh gtk.CellRendererText,
    including the gtk.CellRenderer properties it inherits or overrides."""
    cr = gtk.CellRendererText()
    assert not cr.get_property('editable')
    assert not cr.get_property('text')

    # gtk.CellRenderer properties
    bg = cr.get_property('cell-background-gdk')
    assert bg.red == 0
    assert bg.green == 0
    assert bg.blue == 0
    assert cr.get_property('width') == -1
    assert cr.get_property('height') == -1
    # gtk.CellRendererText overrides these defaults.
    assert cr.get_property('xalign') == 0.0
    assert cr.get_property('yalign') == 0.5
    assert cr.get_property('xpad') == 2
    assert cr.get_property('ypad') == 2
    assert not cr.get_property('cell-background-set')
    assert not cr.get_property('is-expanded')
    assert not cr.get_property('is-expander')
    assert cr.get_property('mode') == gtk.CELL_RENDERER_MODE_INERT
    # PEP 8: assert truthiness directly rather than comparing '== True'.
    assert cr.get_property('sensitive')
    assert cr.get_property('visible')
def test_editing_started_signal():
    '''
    Ensure that the ``editing-started`` signal is emitted when
    ``start_editing`` is called on an editable
    ``gtk.CellRendererText``.
    '''
    # One-element list so the nested callback can mutate the counter.
    n_emits = [0]
    def editing_started_cb(*args):
        n_emits[0] += 1
    cr = gtk.CellRendererText()
    cr.connect('editing-started', editing_started_cb)
    cr.set_property('editable', True)
    # Arguments: event, widget, path, background_area, cell_area, flags.
    cr.start_editing(gdk.Event(gdk.NOTHING),
                     gtk.Button(),
                     '',
                     (0, 0, 10, 10),
                     (0, 0, 10, 10),
                     0)
    assert n_emits[0] == 1

    # It won't be emitted again if the cell renderer isn't editable.
    cr.set_property('editable', False)
    cr.start_editing(gdk.Event(gdk.NOTHING),
                     gtk.Button(),
                     '',
                     (0, 0, 10, 10),
                     (0, 0, 10, 10),
                     0)
    assert n_emits[0] == 1
def test_set_get_text():
    '''
    The ``text`` property round-trips through set/get, and setting it to
    None clears it.
    '''
    cr = gtk.CellRendererText()
    cr.set_property('text', 'foo')
    assert cr.get_property('text') == 'foo'

    cr.set_property('text', None)
    assert not cr.get_property('text')
| GNOME/testinggtk | tests/test_cellrenderertext.py | Python | lgpl-2.1 | 2,177 |
from O365.message import Message
import logging
import json
import requests
log = logging.getLogger(__name__)
class Inbox( object ):
    '''
    Wrapper class for an inbox which mostly holds a list of messages.

    Methods:
        getMessages -- downloads messages to local memory.

    Variables:
        inbox_url -- url used for fetching emails.
    '''
    #url for fetching emails. Takes a flag for whether they are read or not.
    inbox_url = 'https://outlook.office365.com/api/v1.0/me/messages'

    def __init__(self, auth, getNow=True, verify=True):
        '''
        Creates a new inbox wrapper. Send email and password for authentication.

        set getNow to false if you don't want to immedeatly download new messages.
        '''
        log.debug('creating inbox for the email %s',auth[0])
        self.auth = auth
        self.messages = []
        self.filters = ''
        # BUG FIX: self.verify must be set *before* getMessages() runs below,
        # because getMessages() passes verify=self.verify to requests.get().
        # Previously a default construction (getNow=True) raised
        # AttributeError.
        self.verify = verify

        if getNow:
            self.filters = 'IsRead eq false'
            self.getMessages()

    def getMessages(self, number = 10):
        '''
        Downloads messages to local memory.

        You create an inbox to be the container class for messages, this method
        then pulls those messages down to the local disk. This is called in the
        init method, so it's kind of pointless for you. Unless you think new
        messages have come in.

        You can filter only certain emails by setting filters. See the set and
        get filters methods for more information.

        :param number: maximum number of messages to fetch ($top).
        '''
        log.debug('fetching messages.')
        response = requests.get(self.inbox_url,auth=self.auth,params={'$filter':self.filters, '$top':number},verify=self.verify)
        log.info('Response from O365: %s', str(response))

        for message in response.json()['value']:
            try:
                # Replace an already-downloaded message with the fresh copy,
                # otherwise append it as new.
                duplicate = False
                for i,m in enumerate(self.messages):
                    if message['Id'] == m.json['Id']:
                        self.messages[i] = Message(message,self.auth)
                        duplicate = True
                        break

                if not duplicate:
                    self.messages.append(Message(message,self.auth))

                log.debug('appended message: %s',message['Subject'])
            except Exception as e:
                # BUG FIX: the format placeholder was a bare '%', which breaks
                # the logging call's lazy %-formatting; it must be '%s'.
                log.info('failed to append message: %s',str(e))

        log.debug('all messages retrieved and put in to the list.')
        return True

    def getFilter(self):
        '''get the value set for a specific filter, if exists, else None'''
        return self.filters

    def setFilter(self,f_string):
        '''
        Set the value of a filter. More information on what filters are available
        can be found here:
        https://msdn.microsoft.com/office/office365/APi/complex-types-for-mail-contacts-calendar#RESTAPIResourcesMessage
        I may in the future have the ability to add these in yourself. but right now that is to complicated.

        Arguments:
        f_string -- The string that represents the filters you want to enact.
                should be something like: (HasAttachments eq true) and (IsRead eq false)
                or just: IsRead eq false
                test your filter stirng here: https://outlook.office365.com/api/v1.0/me/messages?$filter=
                if that accepts it then you know it works.
        '''
        self.filters = f_string
        return True

    #To the King!
| roycem90/python-o365 | O365/inbox.py | Python | apache-2.0 | 2,984 |
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
@attr('notdefault')
def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
print '--- running EC2Connection tests ---'
c = EC2Connection()
# get list of private AMI's
rs = c.get_all_images(owners=[user_id])
assert len(rs) > 0
# now pick the first one
image = rs[0]
# temporarily make this image runnable by everyone
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
print '\tinstance is %s' % instance.state
time.sleep(30)
instance.update()
# instance in now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
print '--- tests completed ---'
    def test_dry_run(self):
        """Exercise the EC2 ``dry_run`` flag (Python 2 / boto integration test).

        Every API call issued with ``dry_run=True`` must raise
        ``EC2ResponseError`` carrying the standard DryRun message instead of
        actually executing.  A real instance is launched midway because
        stop/terminate need an existing instance id to validate against.
        """
        c = EC2Connection()
        dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
        try:
            rs = c.get_all_images(dry_run=True)
            self.fail("Should have gotten an exception")
        except EC2ResponseError, e:
            self.assertTrue(dry_run_msg in str(e))
        try:
            rs = c.run_instances(
                image_id='ami-a0cd60c9',
                instance_type='m1.small',
                dry_run=True
            )
            self.fail("Should have gotten an exception")
        except EC2ResponseError, e:
            self.assertTrue(dry_run_msg in str(e))
        # Need an actual instance for the rest of this...
        rs = c.run_instances(
            image_id='ami-a0cd60c9',
            instance_type='m1.small'
        )
        # Give the new instance time to come up before stop/terminate calls.
        time.sleep(120)
        try:
            rs = c.stop_instances(
                instance_ids=[rs.instances[0].id],
                dry_run=True
            )
            self.fail("Should have gotten an exception")
        except EC2ResponseError, e:
            self.assertTrue(dry_run_msg in str(e))
        try:
            rs = c.terminate_instances(
                instance_ids=[rs.instances[0].id],
                dry_run=True
            )
            self.fail("Should have gotten an exception")
        except EC2ResponseError, e:
            self.assertTrue(dry_run_msg in str(e))
        # And kill it.
        rs.instances[0].terminate()
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/ec2/test_connection.py | Python | gpl-3.0 | 8,892 |
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static

# Project URL map: admin, django-star-ratings, the location app mounted at
# the site root (namespace 'posts'), and django-registration's simple backend.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^ratings/', include('star_ratings.urls', namespace='ratings', app_name='ratings')),
    url(r'^', include('location.urls',namespace='posts')),
    url(r'^accounts/', include('registration.backends.simple.urls')),
]

# Serve static/media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root= settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root= settings.MEDIA_ROOT)
| radamizell/WallApp | walltalkie/urls.py | Python | mit | 612 |
from xml.dom.minidom import Document
import os
import re
from PIL import Image
import shutil
import random
import time
import glob
class Palette:
    """Build a colour histogram of an image and emit one CSS file per colour.

    ``generatePaletteInfo`` tallies pixel values; ``_create_styles`` writes
    ``style_<n>.css`` files from ``_template.css``, substituting palette
    colours for the ``__*COLOR__`` markers.

    NOTE: Python 2 only — colour hex encoding relies on ``str.encode('hex')``.
    """

    def __init__(self):
        self.paletteColors = {}  # unused; kept for backward compatibility
        self.palette = {}        # pixel value -> occurrence count

    def generatePaletteInfo(self, path='icon.png'):
        """Scan *path* pixel by pixel and tally pixel values.

        The filename is now a parameter (default preserves old behaviour).
        Returns the palette dict.  NOTE: the first occurrence of a colour is
        recorded as 0, i.e. counts are "occurrences - 1", exactly matching
        the original implementation.
        """
        self.im = Image.open(path)
        width, height = self.im.size
        for i in range(width):
            for j in range(height):
                pco = self.im.getpixel((i, j))
                # dict.get replaces the original bare try/except, which
                # could silently mask unrelated errors.
                self.palette[pco] = self.palette.get(pco, -1) + 1
        return self.palette

    def _create_styles(self):
        """Write ``style_<n>.css`` for every palette colour.

        ``__BGCOLOR__`` is replaced by the colour itself; each of the other
        markers gets an independently chosen random palette colour, one
        fresh pick per line per marker (same order as the original code).
        """
        keys = list(self.palette)

        def hexcolor(k):
            # pixel tuple -> 'rrggbb'; assumes RGB(A) tuples -- TODO confirm
            # the image mode (a greyscale image yields ints, not tuples).
            return str("".join(map(chr, k)).encode('hex'))

        random_markers = ('__TEXTCOLOR__', '__LINKCOLOR__', '__VISITEDCOLOR__',
                          '__HOVERCOLOR__', '__ACTIVECOLOR__', '__BGINSERTCOLOR__')
        for count, key in enumerate(keys, 1):
            template = open('_template.css', 'r+')
            output = open('style_' + str(count) + '.css', 'w')
            try:
                for line in template.readlines():
                    newline = line.replace('__BGCOLOR__', hexcolor(key))
                    for marker in random_markers:
                        newline = newline.replace(
                            marker, hexcolor(keys[random.randint(0, len(keys) - 1)]))
                    output.write(newline)
            finally:
                # Close handles even if the template read/write fails
                # (the original leaked both files on error).
                template.close()
                output.close()
# Script entry point (runs on import, no __main__ guard): build the palette
# from icon.png and emit the per-colour stylesheets.
p = Palette()
p.generatePaletteInfo()
p._create_styles()
| initials/dowebsite | _python/core.py | Python | gpl-3.0 | 2,064 |
# I just use a tricky technique here
# Improved from 3.10% to 14.34 %
# Far from perfect, however, better
# {Leetcode : AC, Lintcode : AC}
# Based on the 001 version, and make use of the SYMMETRY of this problem
class Solution(object):
    """N-Queens solver that halves the first-row search via mirror symmetry.

    Fixes over the original: ``copy.deepcopy`` was used without importing
    ``copy`` (NameError for n >= 2) and ``range``/``/`` relied on Python 2
    semantics; both are replaced with portable, behaviour-identical code.
    """

    def solveNQueens(self, n):
        """
        :type n: int
        :rtype: List[List[str]]
        """
        if n < 1:
            return []
        # cnt[depth] = candidate positions still allowed at that depth.
        cnt = [list(range(n)) for _ in range(n)]
        if n == 1:
            return self.draw(self.solve(0, cnt))
        # Search only the left half of the first row; mirror the results.
        cnt[0] = list(range(n // 2))
        res = self.solve(0, cnt)
        m = len(res)
        if n % 2 == 1:
            # Odd n: the middle column has no mirror image, search it alone.
            cnt[0] = [n // 2]
            res.extend(self.solve(0, cnt))
        for i in range(m):
            res.append(res[i][::-1])
        return self.draw(res)

    def solve(self, start, cnt):
        """Place queens from depth *start* down; returns placement vectors
        where vec[i] is the depth at which candidate position i was chosen."""
        n = len(cnt)
        res = []
        if start == n - 1:
            for k in cnt[start]:
                tmp = [-1] * n
                tmp[k] = n - 1
                res.append(tmp)
            return res
        for i in cnt[start]:
            # Per-row list copy replaces copy.deepcopy: rows hold only ints.
            newcnt = [list(row) for row in cnt]
            for j in range(start + 1, n):
                # Knock out the same column and both diagonals of (start, i).
                for k in range(3):
                    idx = i + (j - start) * (k - 1)
                    if idx in newcnt[j]:
                        newcnt[j].remove(idx)
                if len(newcnt[j]) == 0:
                    break
            tmp = [] if len(newcnt[j]) == 0 else self.solve(start + 1, newcnt)
            for t in tmp:
                t[i] = start
                res.append(t)
        return res

    def draw(self, res):
        """Render each placement vector as a list of 'Q'/'.' row strings."""
        pics = []
        for case in res:
            n = len(case)
            pics.append(['.' * c + 'Q' + '.' * (n - c - 1) for c in case])
        return pics
| Chasego/cod | leetcode/051-N-Queens/NQueens_002.py | Python | mit | 1,941 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import summary_template
from . import summary
from . import summary_log
from . import file_system
| CLVsol/clvsol_odoo_addons | clv_summary/models/__init__.py | Python | agpl-3.0 | 256 |
# -*- coding: utf-8 -*-
#
# Author: Taylor Smith <[email protected]>
#
# Test the autoencoder
# Fix: `division` was listed twice in the __future__ import.
from __future__ import division, absolute_import
from tensorflow.examples.tutorials.mnist import input_data
from numpy.testing import assert_almost_equal
from smrt.autoencode import AutoEncoder
import numpy as np

# scikit-learn moved train_test_split between releases; support both.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
def test_autoencoder():
    """Smoke-test AutoEncoder end to end on MNIST: fit, transform, round-trip."""
    # Download (or reuse) the MNIST images and split them for training.
    dataset = input_data.read_data_sets("MNIST_data", one_hot=True)
    images = np.asarray(dataset.train.images)
    seed = 42
    X_train, X_test = train_test_split(images, train_size=0.7, random_state=seed)
    # Build the model under test.
    encoder = AutoEncoder(n_hidden=400, n_epochs=10, learning_rate=0.01, batch_size=256,
                          display_step=5, activation_function='sigmoid', verbose=2,
                          random_state=seed, layer_type='gaussian', early_stopping=True)
    # Fitting must succeed and expose a topography with a queryable shape.
    encoder.fit(X_train)
    _ = encoder.topography_.shape
    # train error
    # this changed when we fixed L2 error... 0.031209234
    # assert_almost_equal(ae.train_cost_, 0.79045808, 3)  # equal to 3 dec pts
    # Transform must run without raising; values are not asserted yet.
    encoder.transform(X_train)
    # Round-trip the held-out images and compute their mean squared error.
    decoded = encoder.feed_forward(X_test)
    mse = ((X_test - decoded) ** 2).sum(axis=1).sum() / X_test.shape[0]
    # assert_almost_equal(mse, 4.40549573864)
| tgsmith61591/smrt | smrt/autoencode/tests/test_autoencoder.py | Python | bsd-3-clause | 1,531 |
#!/usr/bin/env python
import numpy
numpy.seterr(all='raise')  # fail loudly on overflow/invalid instead of warning
import pylab
from constants import T, L, d, pl, E, mu_s, mu_d, mu_v0
us = mu_s; ud = mu_d; v0 = mu_v0;  # short aliases for the friction constants
#from defs import FS
# All generated data files go under this directory.
DIRNAME="out-bow-skip/"
import os
if not os.path.exists(DIRNAME):
    os.makedirs(DIRNAME)
# Sample rate (Hz) used for the discretised modal update.
FS = 44100
#FS = 88200
# Plot limits and the bow velocity-difference axis.
Y_MIN = -1.0
Y_MAX = 1.0
X_MIN = -0.5
X_MAX = 0.5
DV_range = numpy.linspace(X_MIN, X_MAX, 1e3)
#F_b = 1.0
#v_b = 0.4
# Bow and listening positions along the string, bow force and bow velocity.
x0 = 0.12 * L
#x0 = 0.1 * L
x1 = 0
F_b = 1.0
v_b = 0.5
def phi(x, n):
    """Normalised sine mode shape sqrt(2/L)*sin(n*pi*x/L) for mode(s) n at x."""
    return numpy.sqrt(2.0 / L) * numpy.sin(n * numpy.pi * x / L)
N = 40
def try_FS(FS, color, extratext):
    """Evaluate the discretised modal bow-string model at sample rate FS.

    Computes the N modal update coefficients, derives the low/high bow
    friction break-point candidates, writes the modal lines and friction
    curve to text files under DIRNAME, and plots them with pylab.
    Python 2 only (uses print statements).
    """
    print
    n = numpy.arange(1, N+1)
    dt = 1.0 / FS
    rn = 7.8 + 0.4*(n) # hard-code E string
    # Bending stiffness term and damped modal frequencies.
    I = numpy.pi * (d**4) / 64
    w0n = numpy.sqrt( (T/pl)*(n*numpy.pi/L)**2 + (E*I/pl)*(n*numpy.pi/L)**4 )
    wn = numpy.sqrt( w0n**2 - rn**2 )
    print "highest mode:", wn[-1] / (2*numpy.pi)
    # Per-mode discrete update coefficients for displacement (X*) and
    # velocity (Y*) over one sample interval dt.
    X1n = (numpy.cos(wn*dt) + (rn/wn)*numpy.sin(wn*dt))*numpy.exp(-rn*dt)
    X2n = (1.0/wn) * numpy.sin(wn*dt)*numpy.exp(-rn*dt)
    X3n = (1.0 - X1n) / (pl*(w0n**2))
    Y1n = -(wn+(rn**2)/wn)*numpy.sin(wn*dt)*numpy.exp(-rn*dt)
    Y2n = (numpy.cos(wn*dt) - (rn/wn)*numpy.sin(wn*dt))*numpy.exp(-rn*dt)
    Y3n = -Y1n / (pl*w0n**2)
    if any( Y3n < 0):
        print "FS not sufficient for number of modes!"
        exit(1)
    ### bow pos
    phix0 = phi(x0, n)
    #print phix0
    phix1 = phi(x1, n)
    # Mode-shape-weighted coupling sums at the bow (0) and bridge (1).
    A00 = sum(X3n * phix0 * phix0)
    A01 = sum(X3n * phix0 * phix1)
    A10 = sum(X3n * phix1 * phix0)
    A11 = sum(X3n * phix1 * phix1)
    A10 = A10
    B00 = sum(Y3n * phix0 * phix0)
    B01 = sum(Y3n * phix0 * phix1)
    #B10 = sum(Y3n * phix1 * phix0)
    #B11 = sum(Y3n * phix1 * phix1)
    K1 = 1e6
    #L1 = -K0 / (A00*A11*K0*K1 - A01*A10*K0*K1 + A11*K1 + A00*K0 + 1.0)
    #D1 = -K1 / (A11*K1 + 1.0)
    #D2 = A10*D1
    #D3 = (A11*K1+1.0)*L1
    #D4 = -(A01*K1)*L1
    L1 = 1.0 / (B00)
    D1 = 1.0 * L1
    D5 = 0
    D6 = 0
    print "B00: %.3g" % B00
    #D5 = 0.318897
    #print "D5:\t%g\tD6:\t%g" % (D5, D6)
    ### friction curves
    y1h = 0.0
    def low_v0h():
        # Lower break-point candidate; falls back to the stick limit when
        # no single low point exists (dv >= 0).
        #v0h = (
        #    (D6*y1h + D5*(v_b + v0) - F_b*ud
        #     - 2*numpy.sqrt(D5*v0*F_b*(us-ud)))
        #    / D5)
        #v0h = (
        #    (D6*y1h + D5*(v_b+v0)
        #     - numpy.sqrt(D5*v0*F_b*(us-ud)))
        #    / D5)
        ### stick limit
        #v0h = -F_b*us*B00 + v_b
        #return v0h
        #over_sqrt = 1.0 / numpy.sqrt(D5*v0*F_b*(us-ud))
        #v0h = (y1h*D6/D5
        #    - 2*F_b*v0*over_sqrt*(us-ud)
        #    - F_b*ud/D5
        #    + v_b + v0
        #    )
        dv = v0 - numpy.sqrt(D1*v0*F_b*(us-ud)) / (D1)
        v0h = ((D1*((v0 - dv) * v_b + v0*dv - dv**2) + F_b*(ud*dv-us*v0))
               / ( D1*(v0-dv)))
        #v0h_maybe = (v_b*D1 + F_b *us) / (D1)
        #print "zzzzzzzzzzzzzzzzzzzzzzzzz MAYBE: ", v0h, v0h_maybe
        if dv >= 0:
            print "Can't find single low point for %i modes with FS=%i" % (
                N, FS)
            v0h = -F_b*us*B00 + v_b
        return v0h
    def high_v0h():
        # Upper break-point candidate (stick limit).
        v0h = F_b*us*B00 + v_b
        return v0h
    v0h_cands = [ low_v0h(), high_v0h() ]
    #y0dh_cands = [ high_v0h() ]
    #y0dh_cands = [ low_v0h(), low_v0h() ]
    def modal(dv, v0h):
        return D1 * (dv + v_b - v0h)
    def friction(dv):
        return F_b * (ud + (us-ud) / (1.0-dv/v0))
        #return F_b * (ud + (us-ud)*v0 / (v0-dv))
    # Save and plot the modal line through each break-point candidate.
    for i, cand in enumerate(v0h_cands):
        m = modal(DV_range, cand)
        dv_tiny = numpy.array([ min(DV_range), max(DV_range)])
        m_tiny = numpy.array([ min(m), max(m)])
        data = numpy.vstack( (dv_tiny, m_tiny) ).transpose()
        numpy.savetxt( DIRNAME+"modal-%i-%i-%s.txt" % (FS, i,
            extratext), data)
        pylab.plot(dv_tiny, m_tiny,
            label="%i Hz, time %i"% (FS, i),
            color=color)
    print "%i nodes: low, high\t%s" % (N, v0h_cands)
    bad_accel = v0h_cands[1] - v0h_cands[0]
    print "bad diff v0h:\t%.3g" % (bad_accel)
    print "low, high modal:\t%.3g\t%.3g" % (
        modal(-1, v0h_cands[0]), modal(1, v0h_cands[0]))
    print "low, high modal:\t%.3g\t%.3g" % (
        modal(-1, v0h_cands[1]), modal(1, v0h_cands[1]))
    low = v0h_cands[0]
    high = v0h_cands[1]
    print "copy&paste next line:"
    text = "%.4g %.4g %.4g %.4g" % (
        B00, low, high, high-low)
    print text
    if True:
        # Signed (memoryless) friction curve, saved and plotted in red.
        def msw_friction(dv):
            if dv < 0:
                return F_b * (ud + (us-ud)*v0 / (v0-dv))
            else:
                return -F_b * (ud + (us-ud)*v0 / (v0+dv))
        msw_friction = numpy.vectorize(msw_friction)
        msw = msw_friction(DV_range)
        pylab.axhline(0, color="gray")
        pylab.plot(DV_range, msw,
            color="red")
        data = numpy.vstack( (DV_range, msw) ).transpose()
        numpy.savetxt( DIRNAME+"skips-friction-curve-%s.txt" % "first", data)
#try_N(10, "cyan")
#try_N(20, "green")
# First run at the default bow force/velocity.
try_FS(FS, "blue", "first")
pylab.xlim([X_MIN, X_MAX])
pylab.ylim([Y_MIN, Y_MAX])
pylab.figure()
# Second figure: much lighter bow force and slower bow, with extra
# resolution around dv = 0.
F_b = 0.01
v_b = 0.1
#Y_MIN = -0.01
#Y_MAX = 0.01
X_MIN_SMALL = -0.001
X_MAX_SMALL = 0.001
DV_range = numpy.append(
    numpy.linspace(X_MIN, X_MAX, 1e3),
    numpy.linspace(X_MIN_SMALL, X_MAX_SMALL, 1e3))
DV_range.sort()
if True:
    # Same signed friction curve as inside try_FS, for the new parameters.
    def msw_friction(dv):
        if dv < 0:
            return F_b * (ud + (us-ud)*v0 / (v0-dv))
        else:
            return -F_b * (ud + (us-ud)*v0 / (v0+dv))
    msw_friction = numpy.vectorize(msw_friction)
    msw = msw_friction(DV_range)
    pylab.axhline(0, color="gray")
    pylab.plot(DV_range, msw,
        color="red")
    data = numpy.vstack( (DV_range, msw) ).transpose()
    numpy.savetxt( DIRNAME+"skips-friction-curve-%s.txt" % "second", data)
try_FS(FS, "blue", "second")
#try_N(36, "orange")
#try_N(70, "purple")
pylab.xlim([X_MIN, X_MAX])
pylab.ylim([Y_MIN, Y_MAX])
pylab.legend()
pylab.show()
| gperciva/artifastring | research/numpy-string/test_changing_slipstate.py | Python | gpl-3.0 | 5,901 |
import commands
import os

# Demo: run `netstat -n` three ways.  NOTE: the `commands` module is
# Python 2 only (removed in Python 3); `subprocess` is the modern
# replacement for both getstatusoutput/getoutput.
#linux
commands.getstatusoutput('netstat -n')
commands.getoutput('netstat -n')
#windows
os.system('netstat -n')
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import time
import odoo
import odoo.tests
from odoo.modules.module import get_manifest
from odoo.tools import mute_logger
_logger = logging.getLogger(__name__)
class TestAssetsGenerateTimeCommon(odoo.tests.TransactionCase):
    """Shared helper for the bundle-generation timing tests below."""

    def generate_bundles(self):
        """Regenerate every asset bundle declared by installed modules.

        Collects the bundle names from the manifests of all installed
        modules, generates each bundle's css and js variants, and yields
        ('<bundle>.<css|js>', seconds) pairs.  Bundles that raise
        ValueError are logged and skipped (assetsbundle logging is muted
        while generating).
        """
        installed_module_names = self.env['ir.module.module'].search([('state', '=', 'installed')]).mapped('name')
        # Deduplicate bundle keys across all manifests.
        bundles = {
            key
            for module in installed_module_names
            for key in get_manifest(module)['assets']
        }
        for bundle in bundles:
            with mute_logger('odoo.addons.base.models.assetsbundle'):
                for assets_type in 'css', 'js':
                    try:
                        start_t = time.time()
                        css = assets_type == 'css'
                        js = assets_type == 'js'
                        self.env['ir.qweb']._generate_asset_nodes(bundle, css=css, js=js)
                        yield (f'{bundle}.{assets_type}', time.time() - start_t)
                    except ValueError:
                        _logger.info('Error detected while generating bundle %r %s', bundle, assets_type)
@odoo.tests.tagged('post_install', '-at_install')
class TestLogsAssetsGenerateTime(TestAssetsGenerateTimeCommon):
    def test_logs_assets_generate_time(self):
        """Log how long each asset bundle takes to generate.

        Generation failures are not under test here — they are muted and
        skipped by the common helper (e.g. 'web.assets_qweb' contains only
        static xml); the goal is purely timing visibility.
        """
        for name, elapsed in self.generate_bundles():
            _logger.info('Bundle %r generated in %.2fs', name, elapsed)
@odoo.tests.tagged('post_install', '-at_install', '-standard', 'bundle_generation')
class TestAssetsGenerateTime(TestAssetsGenerateTimeCommon):
    """Nightly guard: every bundle must be generated within a small, fixed
    time budget."""

    def test_assets_generate_time(self):
        threshold = 2  # seconds allowed per bundle variant
        for name, elapsed in self.generate_bundles():
            self.assertLess(elapsed, threshold, "Bundle %r took more than %s sec" % (name, threshold))
| jeremiahyan/odoo | addons/web/tests/test_assets.py | Python | gpl-3.0 | 2,324 |
from .app import app
class Highlighter(object):
    """Temporarily recolour rows, remembering each row's original attribute
    and focus maps so they can be restored later."""

    def __init__(self, default_attr='highlight', default_focus='highlight_focus'):
        # row -> [row, (saved attr/focus maps), (applied attr/focus styles)]
        self.highlights = {}
        self.default_attr = default_attr
        self.default_focus = default_focus

    def apply(self, row, attr_style=None, focus_style=None):
        """Highlight *row*, falling back to the configured default styles."""
        attr = attr_style if attr_style else self.default_attr
        focus = focus_style if focus_style else self.default_focus
        self.highlights[row] = [row, (row._attr_map, row._focus_map), (attr, focus)]
        row.set_attr_map({None: attr})
        row.set_focus_map({None: focus if focus else attr})
        app.draw_screen()

    def remove(self, row):
        """Restore *row*'s original maps; a no-op if it is not highlighted."""
        entry = self.highlights.pop(row, None)
        if entry is None:
            return
        target, (saved_attr, saved_focus), _applied = entry
        target.set_attr_map(saved_attr)
        target.set_focus_map(saved_focus)
        app.draw_screen()
# Module-level singleton instance shared by importers of this module.
highlighter = Highlighter()
| dustinlacewell/console | console/highlights.py | Python | mit | 972 |
# -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
from django.conf.urls.defaults import patterns, include, url
from django.http import HttpResponse
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Test-project URL map: mounts every swarm_war app under its own prefix.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'swarm_war_test_project.views.home', name='home'),
    # url(r'^swarm_war_test_project/', include('swarm_war_test_project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    (r'^', include('swarm_war.core.urls')),
    (r'^battles/', include('swarm_war.battles.urls')),
    (r'^collections/', include('swarm_war.collections.urls')),
    (r'^credits/', include('swarm_war.credits.urls')),
    (r'^leaderboard/', include('swarm_war.leaderboard.urls')),
    (r'^missions/', include('swarm_war.missions.urls')),
    (r'^teams/', include('swarm_war.teams.urls')),
    # Test stub: answer login requests with an empty 302 response.
    url(r'accounts/login/', lambda request: HttpResponse('', status=302)),
)
| SeanHayes/swarm-war | swarm_war_test_project/urls.py | Python | agpl-3.0 | 1,127 |
import RPi.GPIO as GPIO
#from sakshat import SAKSHAT
import time
import os,sys
import signal
#SAKS = SAKSHAT()
GPIO.setmode(GPIO.BCM) # BOARD编号方式,基于插座引脚编号
GPIO.setwarnings(False)
DS = 6
SHCP = 19
STCP = 13 #LED灯
LKEY = 16
RKEY = 20 #左右开关
BEEP = 12
LIR_RCV = 18 #地上红外线
HIR_RCV = 17 #较高位置红外线
def init():
    # Configure pin directions (original comments translated from Chinese).
    # output mode
    GPIO.setup(DS,GPIO.OUT)
    GPIO.setup(SHCP,GPIO.OUT)
    GPIO.setup(STCP,GPIO.OUT) #LED latch
    GPIO.setup(BEEP, GPIO.OUT, initial = GPIO.HIGH) #buzzer, idle high
    # input mode
    GPIO.setup(LIR_RCV,GPIO.IN)
    GPIO.setup(HIR_RCV,GPIO.IN) #infrared sensors
    GPIO.setup(LKEY,GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(RKEY,GPIO.IN) #switches, input mode
def writeBit(data):
    # Shift one bit into the LED shift register (DS = serial data,
    # SHCP = shift clock): set the data line, then pulse the clock.
    # Pin names match a 74HC595-style register -- TODO confirm wiring.
    GPIO.output(DS, data)
    GPIO.output(SHCP, GPIO.LOW)
    GPIO.output(SHCP, GPIO.HIGH)
def beep(seconds): #sound the buzzer for `seconds`
    # Buzzer is active-low: init() idles it HIGH, LOW turns it on.
    GPIO.output(BEEP, GPIO.LOW)
    time.sleep(seconds)
    GPIO.output(BEEP, GPIO.HIGH)
def led_buzzer(data):
    # Shift the 8-bit LED pattern out, then pulse the latch (STCP) around a
    # 0.2 s beep so the LEDs update together with the sound.
    for i in range(0, 8):
        writeBit((data >> i) & 0x01) # write the 8-bit LED state
    GPIO.output(STCP, GPIO.LOW)
    beep(0.2)
    GPIO.output(STCP, GPIO.HIGH)
def alarm():
    # One alarm cycle: step through the 8 LED bitmasks (bits 0,2,4,6 then
    # 1,3,5,7), beeping at each step via led_buzzer, 0.2 s apart.
    pattern = (0x01, 0x04, 0x10, 0x40, 0x02, 0x08, 0x20, 0x80)
    for mask in pattern:
        led_buzzer(mask)
        time.sleep(0.2)
def turn_off(channel):
    # Edge-detect callback for the left key: silence the buzzer, clear the
    # shift-register data line, and set `key` so main() stops alarming.
    global key
    GPIO.output(BEEP, GPIO.HIGH)
    GPIO.output(DS,8& 0x01)  # 8 & 0x01 == 0: drives DS low -- TODO confirm intent
    GPIO.output(SHCP, GPIO.HIGH)
    #GPIO.output(STCP, GPIO.LOW)
    #for i in range(0, 8):
    #    writeBit((0x00 >> i) & 0x80)
    time.sleep(1)
    key = True
def main():
    """Monitor loop for the IR-beam alarm.

    While the floor sensor (LIR_RCV) reads 1, each sample where the upper
    sensor (HIR_RCV) reads 0 increments `count`; when the loop ends, a
    count above 80 triggers up to four alarm() cycles unless the left-key
    interrupt (turn_off) has set `key`.
    """
    init()
    global alarm_beep_times, key, count
    # Left key press (falling edge) cancels an active alarm via turn_off().
    GPIO.add_event_detect(LKEY,GPIO.FALLING, callback=turn_off)
    while True:
        key = False
        count = 0
        alarm_beep_times = 0
        # FIX: compare sensor values with '==', not 'is' -- identity tests
        # against int literals only work by CPython small-int caching.
        while GPIO.input(LIR_RCV) == 1:
            if GPIO.input(HIR_RCV) == 0:
                count += 1
            print(count)
            print(GPIO.input(HIR_RCV))
        while alarm_beep_times < 4 and not key and count > 80:
            alarm()
            alarm_beep_times += 1
# Run the monitor loop on import/execution (no __main__ guard in original).
main()
| chbpku/rpi.sessdsa | 代码/11 多功能篮球计分器/nightlight/digital-clock/beep.py | Python | gpl-3.0 | 2,319 |
# Raspberry Pi Film Capture Library package
#
# Copyright (c) 2016, Joe Herman
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import filmCap.config as config
import filmCap.codes as codes
from filmCap.control import (
fcControl, MotorDriver
)
from filmCap.camera import (
fcCamera
)
from filmCap.nbstreamreader import (
NonBlockingStreamReader
)
from filmCap.streamer import(
ImageStreamer, PreviewThread
)
| jphfilm/rpi-film-capture | server/filmCap/__init__.py | Python | mit | 1,890 |
from ..external.qt.QtGui import QWidget, QListWidgetItem
from ..external.qt.QtCore import Signal
from .qtutil import load_ui
class ComponentSelector(QWidget):
    """ An interface to view the components and data of a DataCollection.

    Components can be dragged and dropped.

    The currently-selected componentID is stored in the
    ``component`` property. The currently-selected Data is stored in the
    ``data`` property.

    Usage:

       >>> widget = ComponentSelector()
       >>> widget.setup(data_collection)
    """
    # Emitted whenever the selected component row changes.
    component_changed = Signal()

    def __init__(self, parent=None):
        super(ComponentSelector, self).__init__(parent)
        self._data = None
        self._ui = load_ui('component_selector', self)
        self._init_widgets()
        self._connect()

    def _init_widgets(self):
        self._ui.component_selector.setDragEnabled(True)
        self._ui.setMinimumWidth(300)

    def _connect(self):
        #attach Qt signals
        ds = self._ui.data_selector
        # Repopulate the component list when the data selection changes.
        ds.currentIndexChanged.connect(self._set_components)
        self._ui.component_selector.currentItemChanged.connect(
            lambda *args: self.component_changed.emit())

    def set_current_row(self, row):
        """Select which component is selected

        :param row: Row number
        """
        self._ui.component_selector.setCurrentRow(row)

    def set_data_row(self, row):
        """Select which data object is selected

        :param row: Row number
        """
        self._ui.data_selector.setCurrentIndex(row)

    def setup(self, data_collection):
        """ Set up the widgets.

        :param data_collection: Object to browse
        :type data_collection:
           :class:`~glue.core.data_collection.DataCollection`
        """
        self._data = data_collection
        self._set_data()
        self._set_components()

    def _set_components(self):
        """ Set list of component widgets to match current data set """
        index = self._ui.data_selector.currentIndex()
        if index < 0:
            return
        data = self._data[index]
        cids = data.components
        c_list = self._ui.component_selector
        c_list.clear()
        for c in cids:
            item = QListWidgetItem(c.label)
            c_list.addItem(item)
            c_list.set_data(item, c)

    def _set_data(self):
        """ Populate the data list with data sets in the collection """
        # NOTE(review): the list is not cleared first, so calling setup()
        # twice would duplicate entries -- confirm single-setup usage.
        d_list = self._ui.data_selector
        for d in self._data:
            d_list.addItem(d.label)

    @property
    def component(self):
        """Returns the currently-selected ComponentID

        :rtype: :class:`~glue.core.data.ComponentID`
        """
        item = self._ui.component_selector.currentItem()
        return self._ui.component_selector.get_data(item)

    @property
    def data(self):
        # Currently-selected Data object, or None when nothing is selected.
        index = self._ui.data_selector.currentIndex()
        if index < 0:
            return
        return self._data[index]

    @data.setter
    def data(self, value):
        for i, d in enumerate(self._data):
            if d is value:
                self._ui.data_selector.setCurrentIndex(i)
                return
        else:
            raise ValueError("Data is not part of the DataCollection")
def main():  # pragma: no cover
    """Manual smoke test: build a tiny DataCollection and show the widget."""
    import glue
    import numpy as np
    from . import get_qapp
    from ..external.qt.QtGui import QApplication
    d = glue.core.Data(label="hi")
    d2 = glue.core.Data(label="there")
    c1 = glue.core.Component(np.array([1, 2, 3]))
    c2 = glue.core.Component(np.array([1, 2, 3]))
    c3 = glue.core.Component(np.array([1, 2, 3]))
    dc = glue.core.DataCollection()
    dc.append(d)
    dc.append(d2)
    d.add_component(c1, "a")
    d.add_component(c2, "b")
    d2.add_component(c3, "c")
    app = get_qapp()
    w = ComponentSelector()
    w.setup(dc)
    w.show()
    app.exec_()

if __name__ == "__main__":  # pragma: no cover
    main()
| bsipocz/glue | glue/qt/component_selector.py | Python | bsd-3-clause | 3,939 |
# Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from NativeTrace import NativeTrace
class SparcNativeTrace(NativeTrace):
    """SimObject declaration binding to the C++ Trace::SparcNativeTrace
    class declared in arch/sparc/nativetrace.hh."""
    type = 'SparcNativeTrace'
    cxx_class = 'Trace::SparcNativeTrace'
    cxx_header = 'arch/sparc/nativetrace.hh'
| xiaoyuanW/gem5 | src/arch/sparc/SparcNativeTrace.py | Python | bsd-3-clause | 1,822 |
#! /usr/bin/env python
"""
pyparsing based grammar for DCPU-16 0x10c assembler
"""
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
try:
basestring
except NameError:
basestring = str
import logging; log = logging.getLogger("dcpu16_asm")
log.setLevel(logging.DEBUG)
import argparse
import os
import struct
import sys
import pyparsing as P
from collections import defaultdict
# Replace the debug actions so that the results go to the debug log rather
# than stdout, so that the output can be usefully piped.
# Re-implementations of pyparsing's default debug actions that emit the
# same messages to the debug logger instead of stdout.
def _defaultStartDebugAction(instring, loc, expr):
    log.debug("Match " + P._ustr(expr) + " at loc " + P._ustr(loc) + "(%d,%d)"
              % ( P.lineno(loc,instring), P.col(loc,instring) ))

def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
    log.debug("Matched " + P._ustr(expr) + " -> " + str(toks.asList()))

def _defaultExceptionDebugAction(instring, loc, expr, exc):
    log.debug("Exception raised:" + P._ustr(exc))

# Install the replacements globally in pyparsing.
P._defaultStartDebugAction = _defaultStartDebugAction
P._defaultSuccessDebugAction = _defaultSuccessDebugAction
P._defaultExceptionDebugAction = _defaultExceptionDebugAction
# Run with "DEBUG=1 python ./asm_pyparsing.py"
DEBUG = "DEBUG" in os.environ

# Maximum value of a 16-bit DCPU-16 word.
WORD_MAX = 0xFFFF

# otherwise \n is also treated as ignorable whitespace
P.ParserElement.setDefaultWhitespaceChars(" \t")

# Lexical elements: identifiers, ":label" definitions, ";" comments.
identifier = P.Word(P.alphas+"_", P.alphanums+"_")
label = P.Combine(P.Literal(":").suppress() + identifier)
comment = P.Literal(";").suppress() + P.restOfLine

register = (P.Or(P.CaselessKeyword(x) for x in "ABCIJXYZO")
            | P.oneOf("PC SP", caseless=True))
stack_op = P.oneOf("PEEK POP PUSH", caseless=True)

hex_literal = P.Combine(P.Literal("0x") + P.Word(P.hexnums))
dec_literal = P.Word(P.nums)
numeric_literal = hex_literal | dec_literal
literal = numeric_literal | identifier

opcode = P.oneOf("SET ADD SUB MUL DIV MOD SHL SHR "
                 "AND BOR XOR IFE IFN IFG IFB JSR", caseless=True)

basic_operand = P.Group(register("register")
                        | stack_op("stack_op")
                        | literal("literal"))
indirect_expr = P.Group(literal("literal")
                        + P.Literal("+")
                        + register("register"))

# Normalise at parse time: numeric text -> int, names -> upper case.
hex_literal.setParseAction(lambda s, l, t: int(t[0], 16))
dec_literal.setParseAction(lambda s, l, t: int(t[0]))
register.addParseAction(P.upcaseTokens)
stack_op.addParseAction(P.upcaseTokens)
opcode.addParseAction(P.upcaseTokens)
def sandwich(brackets, expr):
    """Wrap *expr* in suppressed literal delimiters, e.g. "[]" or "()"."""
    open_tok, close_tok = brackets[0], brackets[1]
    return P.Literal(open_tok).suppress() + expr + P.Literal(close_tok).suppress()
# Indirect operands accept either [...] or (...) around a basic operand or
# a "literal+register" expression.
indirection_content = indirect_expr("expr") | basic_operand("basic")
indirection = P.Group(sandwich("[]", indirection_content) |
                      sandwich("()", indirection_content))
operand = basic_operand("basic") | indirection("indirect")
def make_words(data):
    """Pack a sequence of byte values into big-endian 16-bit words,
    zero-padding the final word when len(data) is odd."""
    words = []
    for i in range(0, len(data), 2):
        high = data[i]
        low = data[i + 1] if i + 1 < len(data) else 0
        words.append((high << 8) | low)
    return words
def wordize_string(s, l, tokens):
    """Parse action: expand a quoted string into one word per character.

    *s* and *l* (input string and match location) are unused but required
    by the pyparsing parse-action signature.  Fix: the local was previously
    named ``bytes``, shadowing the builtin.
    """
    char_codes = [ord(c) for c in tokens.string]
    # TODO(pwaller): possibly add syntax for packing string data?
    packed = False
    return make_words(char_codes) if packed else char_codes
# A quoted string becomes a list of character codes (see wordize_string).
quoted_string = P.quotedString("string").addParseAction(P.removeQuotes).addParseAction(wordize_string)
datum = quoted_string | numeric_literal
def parse_data(string, loc, tokens):
    """Parse action for DAT arguments: flatten every comma-separated token
    (quoted string or numeric literal) into a single list of word values,
    asserting each fits in a 16-bit word."""
    values = []
    for token in tokens:
        parsed = datum.parseString(token).asList()
        assert all(v < WORD_MAX for v in parsed), "Datum exceeds word size"
        values.extend(parsed)
    return values
# TODO(pwaller): Support for using macro argument values in data statement
datalist = P.commaSeparatedList.copy().setParseAction(parse_data)
data = P.CaselessKeyword("DAT")("opcode") + P.Group(datalist)("data")

# Forward-declared so macro bodies can contain full lines (defined below).
line = P.Forward()

# "#macro name(args) { lines }" definitions and "name(args)" calls.
macro_definition_args = P.Group(P.delimitedList(P.Optional(identifier("arg"))))("args")
macro_definition = P.Group(
    P.CaselessKeyword("#macro").suppress()
    + identifier("name")
    + sandwich("()", macro_definition_args)
    + sandwich("{}", P.Group(P.OneOrMore(line))("lines"))
)("macro_definition")

macro_argument = operand | datum
macro_call_args = P.Group(P.delimitedList(P.Group(macro_argument)("arg")))("args")
macro_call = P.Group(
    identifier("name") + sandwich("()", macro_call_args)
)("macro_call")

# An instruction is an opcode with one operand and an optional second.
instruction = (
    opcode("opcode")
    + P.Group(operand)("first")
    + P.Optional(P.Literal(",").suppress() + P.Group(operand)("second"))
)

statement = P.Group(
    instruction
    | data
    | macro_definition
    | macro_call
)

# A line: optional label, optional statement, optional comment, newline.
line << P.Group(
    P.Optional(label("label"))
    + P.Optional(statement("statement"), default=None)
    + P.Optional(comment("comment"))
    + P.lineEnd.suppress()
)("line")

# Whole program: lines until EOF or an explicit "#stop" marker.
full_grammar = (
    P.stringStart
    + P.ZeroOrMore(line)
    + (P.stringEnd | P.Literal("#stop").suppress())
)("program")
if DEBUG:
    # Turn setdebug on for all parse elements
    for name, var in locals().copy().items():
        if isinstance(var, P.ParserElement):
            var.setName(name).setDebug()

    def debug_line(string, location, tokens):
        """
        Show the current line number and content being parsed
        """
        lineno = string[:location].count("\n")
        remaining = string[location:]
        line_end = remaining.index("\n") if "\n" in remaining else None
        log.debug("====")
        log.debug("  Parse line {0}".format(lineno))
        log.debug("  '{0}'".format(remaining[:line_end]))
        log.debug("====")

    line.setDebugActions(debug_line, None, None)
# Operand codes for registers/stack ops/specials, and basic-opcode numbers,
# as consumed by process_operand/codegen below (DCPU-16 encoding).
IDENTIFIERS = {"A": 0x0, "B": 0x1, "C": 0x2, "X": 0x3, "Y": 0x4, "Z": 0x5,
               "I": 0x6, "J": 0x7,
               "POP": 0x18, "PEEK": 0x19, "PUSH": 0x1A,
               "SP": 0x1B, "PC": 0x1C,
               "O": 0x1D}
OPCODES = {"SET": 0x1, "ADD": 0x2, "SUB": 0x3, "MUL": 0x4, "DIV": 0x5,
           "MOD": 0x6, "SHL": 0x7, "SHR": 0x8, "AND": 0x9, "BOR": 0xA,
           "XOR": 0xB, "IFE": 0xC, "IFN": 0xD, "IFG": 0xE, "IFB": 0xF}
def process_operand(o, lvalue=False):
    """
    Returns (a, x) where a is a value which identifies the nature of the value
    and x is either None or a word to be inserted directly into the output stream
    (e.g. a literal value >= 0x20)
    """
    # TODO(pwaller): Reject invalid lvalues
    def invalid_op(reason):
        # TODO(pwaller): Need to indicate origin of error
        return RuntimeError("Invalid operand, {0}: {1}"
                            .format(reason, o.asXML()))

    def check_indirect_register(register):
        if register not in "ABCXYZIJ":
            raise invalid_op("only registers A-J can be used for indirection")

    if o.basic:
        # Literals, stack ops, registers
        b = o.basic
        if b.register:
            return IDENTIFIERS[b.register], None
        elif b.stack_op:
            return IDENTIFIERS[b.stack_op], None
        elif b.literal is not None:
            l = b.literal
            # Small int literals are encoded inline as 0x20 | value;
            # anything else (or a symbol name) becomes "next word" (0x1F).
            if not isinstance(l, basestring) and l < 0x20:
                return 0x20 | l, None
            if l == "": raise invalid_op("this is a bug")
            if isinstance(l, int) and not 0 <= l <= WORD_MAX:
                raise invalid_op("literal exceeds word size")
            return 0x1F, l

    elif o.indirect:
        i = o.indirect
        if i.basic:
            # [register], [literal]
            ib = i.basic
            if ib.register:
                check_indirect_register(ib.register)
                return 0x8 + IDENTIFIERS[ib.register], None
            elif ib.stack_op:
                raise invalid_op("don't use PUSH/POP/PEEK with indirection")
            elif ib.literal is not None:
                # [next word]
                return 0x1E, ib.literal
        elif i.expr:
            # [register+literal]
            ie = i.expr
            check_indirect_register(ie.register)
            return 0x10 | IDENTIFIERS[ie.register], ie.literal

    raise invalid_op("this is a bug")
def codegen(source, input_filename="<unknown>"):
    """Assemble DCPU-16 assembly source into big-endian binary machine code.

    Returns the assembled program as a byte string, or None when parsing
    fails (the error is reported through the module logger).
    """
    try:
        parsed = full_grammar.parseString(source)
    except P.ParseException as exc:
        log.fatal("Parse error:")
        log.fatal(" {0}:{1}:{2} HERE {3}"
                  .format(input_filename, exc.lineno, exc.col,
                          exc.markInputline()))
        return None

    log.debug("=====")
    log.debug(" Successful parse, XML syntax interpretation:")
    log.debug("=====")
    log.debug(parsed.asXML())

    labels = {}    # label (or context+label) -> word offset in `program`
    macros = {}    # macro name -> parsed macro definition
    program = []   # assembled words; label references stay symbolic until fixup
    # Number of times a given macro has been called so that we can generate
    # unique labels
    n_macro_calls = defaultdict(int)

    def process_macro_definition(statement):
        # Record the macro body for later expansion; emits no code.
        log.debug("Macro definition: {0}".format(statement.asXML()))
        macros[statement.name] = statement

    def process_macro_call(offset, statement, context=""):
        # Expand a macro call at word `offset`, substituting arguments and
        # prefixing labels with a unique per-call `context` string.
        log.debug("--------------")
        log.debug("Macro call: {0}".format(statement.asXML()))
        log.debug("--------------")
        macroname = statement.name
        macro = macros.get(macroname, None)
        n_macro_calls[macroname] += 1
        context = context + macroname + str(n_macro_calls[macroname])
        if not macro:
            raise RuntimeError("Call to undefined macro: {0}".format(macroname))
        assert len(macro.args) == len(statement.args), (
            "Wrong number of arguments to macro call {0!r}".format(macroname))
        # TODO(pwaller): Check for collisions between argument name and code
        # label
        args = {}
        log.debug("Populated args:")
        for name, arg in zip(macro.args, statement.args):
            args[name] = arg
            log.debug(" - {0}: {1}".format(name, arg))
        lines = []
        for l in macro.lines:
            new_line = l.copy()
            s = l.statement
            if s:
                new_statement = s.copy()
                new_line["statement"] = new_statement
            #if l.label: new_line["label"] = context + l.label
            # Replace literals whose names are macro arguments
            # also, substitute labels with (context, label).
            # Resolution of a label happens later by first searching for a label
            # called `context + label`, and if it doesn't exist `label` is used.
            if s and s.first and s.first.basic and s.first.basic.literal:
                if s.first.basic.literal in args:
                    new_statement["first"] = args[s.first.basic.literal]
                elif isinstance(s.first.basic.literal, basestring):
                    new_basic = s.first.basic.copy()
                    new_basic["literal"] = context, s.first.basic.literal
                    new_op = new_statement.first.copy()
                    new_op["basic"] = new_basic
                    new_statement["first"] = new_op
            if s and s.second and s.second.basic and s.second.basic.literal:
                if s.second.basic.literal in args:
                    new_statement["second"] = args[s.second.basic.literal]
                elif isinstance(s.second.basic.literal, basestring):
                    new_basic = s.second.basic.copy()
                    new_basic["literal"] = context, s.second.basic.literal
                    new_op = new_statement.second.copy()
                    new_op["basic"] = new_basic
                    new_statement["second"] = new_op
            # Replace macro call arguments
            if s and s.macro_call:
                new_macro_call = s.macro_call.copy()
                new_statement["macro_call"] = new_macro_call
                new_macro_call_args = s.macro_call.args.copy()
                new_statement.macro_call["args"] = new_macro_call_args
                for i, arg in enumerate(s.macro_call.args):
                    if arg.basic.literal not in args:
                        continue
                    new_macro_call_args[i] = args[arg.basic.literal]
            lines.append(new_line)
        log.debug("Populated macro: {0}"
                  .format("\n".join(l.dump() for l in lines)))
        # Do code generation
        code = []
        for l in lines:
            a = generate(offset + len(code), l, context)
            log.debug("Codegen for statement: {0}".format(l.asXML()))
            log.debug(" Code: {0}".format(a))
            code.extend(a)
        return code

    def generate(offset, line, context=""):
        # Emit the words for one source line; records labels at `offset`.
        # NOTE(review): `i` below is the enclosing for-loop variable captured
        # by closure, not a parameter — confirm it is always bound when
        # generate() is reached via a macro expansion.
        log.debug("Interpreting element {0}: {1}".format(i, line))
        if line.label:
            label = context + line.label
            if label in labels:
                # TODO(pwaller): Line indications
                msg = "Duplicate label definition! {0}".format(label)
                log.fatal(msg)
                raise RuntimeError(msg)
            labels[label] = offset
        s = line.statement
        if not s: return []
        if s.macro_definition:
            process_macro_definition(s.macro_definition)
            return []
        elif s.macro_call:
            return process_macro_call(offset, s.macro_call, context)
        log.debug("Generating for {0}".format(s.asXML(formatted=False)))
        if s.opcode == "DAT":
            # DAT emits its operands verbatim as data words.
            return s.data
        if s.opcode == "JSR":
            # Non-basic instruction: opcode field 0x0, 'a' field holds the
            # non-basic opcode (JSR = 0x01), operand goes in the 'b' field.
            o = 0x00
            a, x = 0x01, None
            b, y = process_operand(s.first)
        else:
            o = OPCODES[s.opcode]
            a, x = process_operand(s.first, lvalue=True)
            b, y = process_operand(s.second)
        # Instruction word layout: bbbbbbaaaaaaoooo.
        code = []
        code.append(((b << 10) + (a << 4) + o))
        if x is not None: code.append(x)
        if y is not None: code.append(y)
        return code

    for i, line in enumerate(parsed):
        program.extend(generate(len(program), line))

    log.debug("Labels: {0}".format(labels))
    log.debug("program: {0}".format(program))

    # Substitute labels
    for i, c in enumerate(program):
        if isinstance(c, basestring):
            # Plain label reference.
            if c not in labels:
                raise RuntimeError("Undefined label used: {0}".format(c))
            program[i] = labels[c]
        elif isinstance(c, tuple):
            # (context, label) from a macro expansion: prefer the
            # context-qualified label, fall back to the bare one.
            context, label = c
            if context + label in labels:
                label = context + label
            if label not in labels:
                raise RuntimeError("Undefined label used: {0}".format(c))
            program[i] = labels[label]

    # Turn words into bytes
    result = bytes()
    for word in program:
        result += struct.pack(">H", word)

    return result
def main():
    """Command-line entry point: assemble IN to OUT (or stdout).

    Returns a process exit code: 0 on success, 1 when assembly failed.
    """
    parser = argparse.ArgumentParser(
        description='A simple pyparsing-based DCPU assembly compiler')
    parser.add_argument(
        'source', metavar='IN', type=str,
        help='file path of the file containing the assembly code')
    parser.add_argument(
        'destination', metavar='OUT', type=str, nargs='?',
        help='file path where to store the binary code')
    args = parser.parse_args()

    # Configure logging once; respect the module-level DEBUG switch.
    if not log.handlers:
        from sys import stderr
        handler = logging.StreamHandler(stderr)
        log.addHandler(handler)
        if not DEBUG: handler.setLevel(logging.INFO)

    # "-" means read the program from stdin.
    if args.source == "-":
        program = codegen(sys.stdin.read(), "<stdin>")
    else:
        with open(args.source) as fd:
            program = codegen(fd.read(), args.source)

    if program is None:
        log.fatal("No program produced.")
        if not DEBUG:
            log.fatal("Run with DEBUG=1 ./asm_pyparsing.py "
                      "for more information.")
        return 1

    if not args.destination:
        # Refuse to dump raw binary onto an interactive terminal.
        if os.isatty(sys.stdout.fileno()):
            log.fatal("stdout is a tty, not writing binary. "
                      "Specify destination file or pipe output somewhere")
        else:
            sys.stdout.write(program)
    else:
        with open(args.destination, "wb") as fd:
            fd.write(program)
        log.info("Program written to {0} ({1} bytes, hash={2})"
                 .format(args.destination, len(program),
                         hex(abs(hash(program)))))

    return 0
if __name__ == "__main__":
    # Propagate main()'s integer return code as the process exit status.
    raise SystemExit(main())
| Olical/dcpu16py | asm_pyparsing.py | Python | mit | 16,612 |
from django import template
from django.core.urlresolvers import reverse
import yaml
register = template.Library()
@register.inclusion_tag('menus/subnav.html', takes_context=True)
def subnav(context, path=None):
    """Render sub-navigation for objects in the Master/Detail pattern.

    The YAML menu description is looked up from three possible sources,
    with the following precedence:
      1) the ``path`` argument, when not None;
      2) the context key ``subnav_template_name``;
      3) the default path: <app_name>/subnav.yaml
    """
    menu = None
    root_pk = context.get('root_pk', None)
    if not root_pk:
        obj = context.get('object', None)
        if obj:
            root_pk = obj.pk
    if root_pk:
        request = context['request']
        # Views for auxiliary-data models of several apps are grouped under
        # URLs prefixed with 'sistema'. Those views have no navigation
        # submenu and are incompatible with this subnav, so the simplest
        # practical isolation found was the test below.
        if 'sistema' in request.path:
            return
        rm = request.resolver_match
        app_template = rm.app_name.rsplit('.', 1)[-1]
        if path:
            yaml_path = path
        elif 'subnav_template_name' in context:
            yaml_path = context['subnav_template_name']
        else:
            yaml_path = '%s/%s' % (app_template, 'subnav.yaml')
        try:
            # Two template loaders are configured by default:
            # filesystem.Loader (TEMPLATE_DIRS of the current project) and
            # app_directories.Loader (every installed app); get_template
            # searches all configured loaders.
            yaml_template = template.loader.get_template(yaml_path)
            rendered = yaml_template.render()
            # safe_load: the menu file only needs plain YAML types, and
            # yaml.load() without an explicit Loader can instantiate
            # arbitrary Python objects from a crafted file.
            menu = yaml.safe_load(rendered)
            resolve_urls_inplace(menu, root_pk, rm, context)
        except Exception as e:
            # Best-effort: a broken/missing menu file renders no submenu.
            print(e)
    return {'menu': menu}
def resolve_urls_inplace(menu, pk, rm, context):
    """Recursively resolve menu 'url' names into concrete URLs, in place.

    Also sets each entry's 'active' flag by comparing the resolved URL with
    the current request path. Returns the propagated 'active' value so that
    parent entries can be highlighted when a child matches.
    """
    if isinstance(menu, list):
        list_active = ''
        for item in menu:
            menuactive = resolve_urls_inplace(item, pk, rm, context)
            list_active = menuactive if menuactive else list_active
            if not isinstance(item, list):
                item['active'] = menuactive
        return list_active
    else:
        if 'url' in menu:
            url_name = menu['url']
            try:
                menu['url'] = reverse('%s:%s' % (rm.app_name, menu['url']),
                                      kwargs={'pk': pk})
            except Exception:
                # No match taking the pk (NoReverseMatch): fall back to a
                # parameterless URL. Narrowed from a bare `except:` which
                # also swallowed KeyboardInterrupt/SystemExit.
                menu['url'] = reverse('%s:%s' % (rm.app_name, menu['url']))
            menu['active'] = 'active'\
                if context['request'].path == menu['url'] else ''
            if not menu['active']:
                # If not matched directly, look for the requested URL inside
                # the crud (when the view belongs to one). This keeps the
                # correct subnav entry active when triggering the direct
                # MasterDetailCrud features: detail view, add, edit, delete.
                #
                # About urls_extras: in second-level relations, such as
                # (0) Commissions -> (1) Composition -> (2) Participation,
                # (2) has no direct link to (1) through the view. For (2) to
                # be located — and the front-end nav-tabs/nav-pills to be
                # activated — entries may declare 'urls_extras' which are
                # also tested before setting the 'active' flag.
                view = context['view']
                if hasattr(view, '__class__') and\
                        hasattr(view.__class__, 'crud'):
                    urls = view.__class__.crud.get_urls()
                    for u in urls:
                        if (u.name == url_name or
                                'urls_extras' in menu and
                                u.name in menu['urls_extras']):
                            menu['active'] = 'active'
                            break
        if 'children' in menu:
            menu['active'] = resolve_urls_inplace(
                menu['children'], pk, rm, context)
        return menu['active']
| ramiroluz/saap | saap/core/templatetags/menus.py | Python | gpl-3.0 | 4,559 |
# -*- coding: utf-8 -*-
#
# traffic documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 5 23:08:31 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'traffic'
copyright = u'2014, Ariel Gerardo Ríos'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'trafficdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'traffic.tex', u'traffic Documentation',
u'Ariel Gerardo Ríos', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'traffic', u'traffic Documentation',
[u'Ariel Gerardo Ríos'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'traffic', u'traffic Documentation',
u'Ariel Gerardo Ríos', 'traffic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'traffic'
epub_author = u'Ariel Gerardo Ríos'
epub_publisher = u'Ariel Gerardo Ríos'
epub_copyright = u'2014, Ariel Gerardo Ríos'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'traffic'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| ariel17/traffic | docs/conf.py | Python | mit | 10,344 |
# -*- coding: utf-8 -*-
#
# File: __init__.py
# Module: l10n_ch_hr_payroll
#
# Created by [email protected]
#
# Copyright (c) 2014-TODAY Open-Net Ltd. <http://www.open-net.ch>
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import hr_contract
from . import hr_employee
| cgaspoz/l10n-switzerland | l10n_ch_hr_payroll/models/__init__.py | Python | agpl-3.0 | 1,194 |
import cv2
import numpy as np
import opensfm.transformations as tf
from opensfm import align
from opensfm import types
def points_errors(reference, candidate):
    """Coordinate differences (reference - candidate) for shared points."""
    shared_ids = set(reference.points) & set(candidate.points)
    deltas = [
        reference.points[pid].coordinates - candidate.points[pid].coordinates
        for pid in shared_ids
    ]
    return np.array(deltas)
def completeness_errors(reference, candidate):
    """Fraction of reference shots and points present in the candidate."""
    shot_ratio = float(len(candidate.shots)) / float(len(reference.shots))
    point_ratio = float(len(candidate.points)) / float(len(reference.points))
    return shot_ratio, point_ratio
def gps_errors(candidate):
    """Difference between each shot's GPS prior and its reconstructed origin."""
    return np.array(
        [
            shot.metadata.gps_position.value - shot.pose.get_origin()
            for shot in candidate.shots.values()
        ]
    )
def position_errors(reference, candidate):
    """Origin differences (reference - candidate) for shots present in both."""
    shared_ids = set(reference.shots) & set(candidate.shots)
    return np.array(
        [
            reference.shots[sid].pose.get_origin()
            - candidate.shots[sid].pose.get_origin()
            for sid in shared_ids
        ]
    )
def rotation_errors(reference, candidate):
    """Angular difference in radians between matching shots' rotations."""
    shared_ids = set(reference.shots) & set(candidate.shots)
    angles = []
    for sid in shared_ids:
        r_ref = reference.shots[sid].pose.get_rotation_matrix()
        r_cand = candidate.shots[sid].pose.get_rotation_matrix()
        # Relative rotation between the two poses.
        relative = np.transpose(r_ref).dot(r_cand)
        axis_angle = cv2.Rodrigues(relative)[0].ravel()
        angles.append(np.linalg.norm(axis_angle))
    return np.array(angles)
def find_alignment(points0, points1):
    """Compute similarity transform between point sets.

    Returns (s, A, b) such that ``points1 = s * A * points0 + b``
    """
    # Keep only correspondences where both points are present.
    v0, v1 = [], []
    for p0, p1 in zip(points0, points1):
        if p0 is not None and p1 is not None:
            v0.append(p0)
            v1.append(p1)
    # Column-major (3, N) arrays as expected by affine_matrix_from_points.
    v0 = np.array(v0).T
    v1 = np.array(v1).T
    # 4x4 homogeneous similarity (rotation + isotropic scale + translation).
    M = tf.affine_matrix_from_points(v0, v1, shear=False)
    # Scale is the cube root of the determinant of the linear part.
    # NOTE(review): assumes det > 0 (no reflection) — confirm upstream.
    s = np.linalg.det(M[:3, :3]) ** (1.0 / 3.0)
    A = M[:3, :3] / s
    b = M[:3, 3]
    return s, A, b
def aligned_to_reference(reference, reconstruction):
    """Align a reconstruction to a reference."""
    # Prefer aligning on common 3D points; fall back to common shot origins
    # when the two reconstructions share no points.
    coords1, coords2 = [], []
    for point1 in reconstruction.points.values():
        point2 = reference.points.get(point1.id)
        if point2 is not None:
            coords1.append(point1.coordinates)
            coords2.append(point2.coordinates)
    if len(coords1) == 0 or len(coords2) == 0:
        for shot1 in reconstruction.shots.values():
            shot2 = reference.shots.get(shot1.id)
            if shot2 is not None:
                coords1.append(shot1.pose.get_origin())
                coords2.append(shot2.pose.get_origin())
    s, A, b = find_alignment(coords1, coords2)
    # Work on a copy so the input reconstruction is left untouched.
    aligned = _copy_reconstruction(reconstruction)
    align.apply_similarity(aligned, s, A, b)
    return aligned
def _copy_reconstruction(reconstruction):
    """Shallow copy: a new Reconstruction sharing the same camera/shot/point objects."""
    duplicate = types.Reconstruction()
    for cam in reconstruction.cameras.values():
        duplicate.add_camera(cam)
    for a_shot in reconstruction.shots.values():
        duplicate.add_shot(a_shot)
    for a_point in reconstruction.points.values():
        duplicate.add_point(a_point)
    return duplicate
def rmse(errors):
    """Root-mean-square of an error array."""
    return np.sqrt(np.square(errors).mean())
| oscarlorentzon/OpenSfM | opensfm/synthetic_data/synthetic_metrics.py | Python | bsd-2-clause | 3,481 |
from django.apps import apps
import logging
import urlparse
import random
import requests
from framework.celery_tasks import app as celery_app
from website import settings, mails
from website.util.share import GraphNode, format_contributor
logger = logging.getLogger(__name__)
@celery_app.task(ignore_results=True)
def on_node_updated(node_id, user_id, first_save, saved_fields, request_headers=None):
    """Refresh search and SHARE metadata for a node after it was saved.

    Skips collections, nodes that are being archived, and quickfiles nodes.
    """
    # WARNING: Only perform Read-Only operations in an asynchronous task, until Repeatable Read/Serializable
    # transactions are implemented in View and Task application layers.
    AbstractNode = apps.get_model('osf.AbstractNode')
    node = AbstractNode.load(node_id)
    if node.is_collection or node.archiving or node.is_quickfiles:
        return
    # Only update when a search-relevant field changed.
    need_update = bool(node.SEARCH_UPDATE_FIELDS.intersection(saved_fields))
    # due to async nature of call this can issue a search update for a new record (acceptable trade-off)
    if bool({'spam_status', 'is_deleted'}.intersection(saved_fields)):
        need_update = True
    elif not node.is_public and 'is_public' not in saved_fields:
        # Private nodes are not indexed unless visibility itself changed.
        need_update = False
    if need_update:
        node.update_search()
        update_node_share(node)
def update_node_share(node):
    """Push node metadata to SHARE, guarding on configuration."""
    # Wrapper that ensures share_url and token exist
    if not settings.SHARE_URL:
        return
    if not settings.SHARE_API_TOKEN:
        return logger.warning('SHARE_API_TOKEN not set. Could not send "{}" to SHARE.'.format(node._id))
    _update_node_share(node)
def _update_node_share(node):
    """Serialize the node and send it to SHARE synchronously.

    On a 5xx response the payload is re-sent asynchronously (with fresh
    data); on any other HTTP error support is notified.
    """
    # Any modifications to this function may need to change _async_update_node_share
    payload = serialize_share_node_data(node)
    response = send_share_node_data(payload)
    try:
        response.raise_for_status()
    except Exception:
        if response.status_code >= 500:
            _async_update_node_share.delay(node._id)
        else:
            send_desk_share_error(node, response, 0)
@celery_app.task(bind=True, max_retries=4, acks_late=True)
def _async_update_node_share(self, node_id):
    """Async retry path for SHARE updates, with exponential backoff."""
    # Any modifications to this function may need to change _update_node_share
    # Takes node_id to ensure async retries push fresh data
    AbstractNode = apps.get_model('osf.AbstractNode')
    node = AbstractNode.load(node_id)
    data = serialize_share_node_data(node)
    resp = send_share_node_data(data)
    try:
        resp.raise_for_status()
    except Exception as e:
        if resp.status_code >= 500:
            # Escalate to support once the final retry is reached.
            if self.request.retries == self.max_retries:
                send_desk_share_error(node, resp, self.request.retries)
            # Jittered exponential backoff, capped at ten minutes.
            raise self.retry(
                exc=e,
                countdown=(random.random() + 1) * min(60 + settings.CELERY_RETRY_BACKOFF_BASE ** self.request.retries, 60 * 10)
            )
        else:
            # Client-side (4xx) errors will not succeed on retry.
            send_desk_share_error(node, resp, self.request.retries)
def send_share_node_data(data):
    """POST normalized node data to the SHARE API and return the response."""
    endpoint = '{}api/normalizeddata/'.format(settings.SHARE_URL)
    headers = {
        'Authorization': 'Bearer {}'.format(settings.SHARE_API_TOKEN),
        'Content-Type': 'application/vnd.api+json',
    }
    resp = requests.post(endpoint, json=data, headers=headers)
    logger.debug(resp.content)
    return resp
def serialize_share_node_data(node):
    """Build the SHARE NormalizedData envelope for ``node``."""
    if node.is_registration:
        graph = format_registration(node)
    else:
        graph = format_node(node)
    return {
        'data': {
            'type': 'NormalizedData',
            'attributes': {
                'tasks': [],
                'raw': None,
                'data': {'@graph': graph}
            }
        }
    }
def format_node(node):
    """Minimal SHARE graph for a non-registration node (mainly deletion state)."""
    work_ref = {'@id': '_:789', '@type': 'project'}
    identifier = {
        '@id': '_:123',
        '@type': 'workidentifier',
        'creative_work': work_ref,
        'uri': '{}{}/'.format(settings.DOMAIN, node._id),
    }
    project = {
        '@id': '_:789',
        '@type': 'project',
        'is_deleted': not node.is_public or node.is_deleted or node.is_spammy,
    }
    return [identifier, project]
def format_registration(node):
    """Build the serialized SHARE @graph node list for a registration."""
    registration_graph = GraphNode('registration', **{
        'title': node.title,
        'description': node.description or '',
        # NOTE(review): membership test compares the string 'qatest' against
        # the tags queryset — confirm Tag equality supports this.
        'is_deleted': not node.is_public or 'qatest' in (node.tags.all() or []) or node.is_deleted,
        'date_published': node.registered_date.isoformat() if node.registered_date else None,
        'registration_type': node.registered_schema.first().name if node.registered_schema else None,
        'withdrawn': node.is_retracted,
        'justification': node.retraction.justification if node.retraction else None,
    })
    to_visit = [
        registration_graph,
        GraphNode('workidentifier', creative_work=registration_graph, uri=urlparse.urljoin(settings.DOMAIN, node.url))
    ]
    # One throughtags relation per tag with a non-empty id.
    registration_graph.attrs['tags'] = [
        GraphNode('throughtags', creative_work=registration_graph, tag=GraphNode('tag', name=tag._id))
        for tag in node.tags.all() or [] if tag._id
    ]
    to_visit.extend(format_contributor(registration_graph, user, bool(user._id in node.visible_contributor_ids), i) for i, user in enumerate(node.contributors))
    to_visit.extend(GraphNode('AgentWorkRelation', creative_work=registration_graph, agent=GraphNode('institution', name=institution.name)) for institution in node.affiliated_institutions.all())
    # Breadth-first traversal of related GraphNodes, deduplicated via `visited`.
    visited = set()
    to_visit.extend(registration_graph.get_related())
    while True:
        if not to_visit:
            break
        n = to_visit.pop(0)
        if n in visited:
            continue
        visited.add(n)
        to_visit.extend(list(n.get_related()))
    return [node_.serialize() for node_ in visited]
def send_desk_share_error(node, resp, retries):
    """Email support about a failed SHARE update for ``node``.

    ``resp`` is the failed HTTP response; ``retries`` is how many attempts
    had been made when the failure was escalated.
    """
    mails.send_mail(
        to_addr=settings.SUPPORT_EMAIL,
        mail=mails.SHARE_ERROR_DESK,
        node=node,
        resp=resp,
        retries=retries,
    )
| aaxelb/osf.io | website/project/tasks.py | Python | apache-2.0 | 5,755 |
#!/usr/bin/env python
from solo.template import render_template
class HomeController(object):
    """Controller serving the site's static landing pages."""

    def index(self):
        # Home page.
        return render_template('index.html')
def _404_page(self, *args, **kw):
return render_template('404.html') | whiteclover/Lilac | lilac/controller/home.py | Python | lgpl-3.0 | 251 |
# Natural Language Toolkit: Senseval 2 Corpus Reader
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Steven Bird <[email protected]> (modifications)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Read from the Senseval 2 Corpus.
SENSEVAL [http://www.senseval.org/]
Evaluation exercises for Word Sense Disambiguation.
Organized by ACL-SIGLEX [http://www.siglex.org/]
Prepared by Ted Pedersen <[email protected]>, University of Minnesota,
http://www.d.umn.edu/~tpederse/data.html
Distributed with permission.
The NLTK version of the Senseval 2 files uses well-formed XML.
Each instance of the ambiguous words "hard", "interest", "line", and "serve"
is tagged with a sense identifier, and supplied with context.
"""
import bisect  # used by SensevalCorpusView.read_block
import os
import re
import xml.sax

from xmldocs import XMLCorpusReader
from nltk.tokenize import *
from nltk.etree import ElementTree

from util import *
from api import *
class SensevalInstance(object):
    """A single tagged occurrence of an ambiguous word.

    Holds the target word, its token position within the context, the
    context tokens themselves, and the annotated sense identifiers
    (normalized to a tuple).
    """

    def __init__(self, word, position, context, senses):
        self.word = word
        self.senses = tuple(senses)
        self.position = position
        self.context = context

    def __repr__(self):
        return ('SensevalInstance(word=%r, position=%r, '
                'context=%r, senses=%r)' %
                (self.word, self.position, self.context, self.senses))
class SensevalCorpusReader(CorpusReader):
    """Corpus reader for the Senseval 2 word-sense-disambiguation corpus."""

    def instances(self, fileids=None):
        # Lazily concatenated stream of SensevalInstance objects.
        return concat([SensevalCorpusView(fileid, enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def raw(self, fileids=None):
        """
        @return: the text contents of the given fileids, as a single string.
        """
        if fileids is None: fileids = self._fileids
        elif isinstance(fileids, basestring): fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])

    def _entry(self, tree):
        # Extract (senseid, [(word, pos), ...]) pairs from a parsed
        # <lexelt>/<instance> XML tree.
        elts = []
        for lexelt in tree.findall('lexelt'):
            for inst in lexelt.findall('instance'):
                sense = inst[0].attrib['senseid']
                context = [(w.text, w.attrib['pos'])
                           for w in inst[1]]
                elts.append( (sense, context) )
        return elts
class SensevalCorpusView(StreamBackedCorpusView):
    def __init__(self, fileid, encoding):
        """Stream-backed view over one Senseval file."""
        StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
        self._word_tokenizer = WhitespaceTokenizer()
        # Parallel lists mapping lexelt index -> stream position / name,
        # filled in lazily by read_block() as the file is scanned.
        self._lexelt_starts = [0]  # list of streampos
        self._lexelts = [None]  # list of lexelt names
def read_block(self, stream):
# Decide which lexical element we're in.
lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell())-1
lexelt = self._lexelts[lexelt_num]
instance_lines = []
in_instance = False
while True:
line = stream.readline()
if line == '':
assert instance_lines == []
return []
# Start of a lexical element?
if line.lstrip().startswith('<lexelt'):
lexelt_num += 1
m = re.search('item=("[^"]+"|\'[^\']+\')', line)
assert m is not None # <lexelt> has no 'item=...'
lexelt = m.group(1)[1:-1]
if lexelt_num < len(self._lexelts):
assert lexelt == self._lexelts[lexelt_num]
else:
self._lexelts.append(lexelt)
self._lexelt_starts.append(stream.tell())
# Start of an instance?
if line.lstrip().startswith('<instance'):
assert instance_lines == []
in_instance = True
# Body of an instance?
if in_instance:
instance_lines.append(line)
# End of an instance?
if line.lstrip().startswith('</instance'):
xml_block = '\n'.join(instance_lines)
xml_block = _fixXML(xml_block)
inst = ElementTree.fromstring(xml_block)
return [self._parse_instance(inst, lexelt)]
def _parse_instance(self, instance, lexelt):
senses = []
context = []
position = None
for child in instance:
if child.tag == 'answer':
senses.append(child.attrib['senseid'])
elif child.tag == 'context':
context += self._word_tokenizer.tokenize(child.text)
for cword in child:
if cword.tag == 'compound':
cword = cword[0] # is this ok to do?
if cword.tag == 'head':
# Some santiy checks:
assert position is None, 'head specified twice'
assert cword.text.strip() or len(cword)==1
assert not (cword.text.strip() and len(cword)==1)
# Record the position of the head:
position = len(context)
# Addd on the head word itself:
if cword.text.strip():
context.append(cword.text.strip())
elif cword[0].tag == 'wf':
context.append((cword[0].text,
cword[0].attrib['pos']))
if cword[0].tail:
context += self._word_tokenizer.tokenize(
cword[0].tail)
else:
assert False, 'expected CDATA or wf in <head>'
elif cword.tag == 'wf':
context.append((cword.text, cword.attrib['pos']))
elif cword.tag == 's':
pass # Sentence boundary marker.
else:
print 'ACK', cword.tag
assert False, 'expected CDATA or <wf> or <head>'
if cword.tail:
context += self._word_tokenizer.tokenize(cword.tail)
else:
assert False, 'unexpected tag %s' % child.tag
return SensevalInstance(lexelt, position, context, senses)
def _fixXML(text):
"""
Fix the various issues with Senseval pseudo-XML.
"""
# <~> or <^> => ~ or ^
text = re.sub(r'<([~\^])>', r'\1', text)
# fix lone &
text = re.sub(r'(\s+)\&(\s+)', r'\1&\2', text)
# fix """
text = re.sub(r'"""', '\'"\'', text)
# fix <s snum=dd> => <s snum="dd"/>
text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text)
# fix foreign word tag
text = re.sub(r'<\&frasl>\s*<p[^>]*>', 'FRASL', text)
# remove <&I .>
text = re.sub(r'<\&I[^>]*>', '', text)
# fix <{word}>
text = re.sub(r'<{([^}]+)}>', r'\1', text)
# remove <@>, <p>, </p>
text = re.sub(r'<(@|/?p)>', r'', text)
# remove <&M .> and <&T .> and <&Ms .>
text = re.sub(r'<&\w+ \.>', r'', text)
# remove <!DOCTYPE... > lines
text = re.sub(r'<!DOCTYPE[^>]*>', r'', text)
# remove <[hi]> and <[/p]> etc
text = re.sub(r'<\[\/?[^>]+\]*>', r'', text)
# take the thing out of the brackets: <…>
text = re.sub(r'<(\&\w+;)>', r'\1', text)
# and remove the & for those patterns that aren't regular XML
text = re.sub(r'&(?!amp|gt|lt|apos|quot)', r'', text)
# fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf>
text = re.sub(r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>',
r' <wf pos="\2">\1</wf>', text)
text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text)
return text
| tadgh/ArgoRevisit | third_party/nltk/corpus/reader/senseval.py | Python | apache-2.0 | 7,816 |
from tornado import gen
from jupyterhub.auth import Authenticator
from traitlets import Dict
class BasicAuthenticator(Authenticator):
    """ Simple authenticator based on a fixed set of users"""
    #: Dictionary of regular username: password keys allowed
    user_data = Dict().tag(config=True)
    #: Dictionary of admin username: password keys allowed
    admin_data = Dict().tag(config=True)
    @gen.coroutine
    def authenticate(self, handler, data):
        """Return the username when the submitted credentials match an
        entry in admin_data or user_data, otherwise None (implicitly).

        Admin entries are checked before regular user entries.
        """
        self.log.warning(
            'This is a basic authenticator with a fixed set '
            'of usernames and passwords.')
        username = data['username']
        # NOTE(review): plain '==' password comparison is not
        # constant-time; consider hmac.compare_digest for production.
        for table in (self.admin_data, self.user_data):
            if username in table and data['password'] == table[username]:
                return username
| simphony/simphony-remote | remoteappmanager/jupyterhub/auth/basic_authenticator.py | Python | bsd-3-clause | 902 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsmarkerlinesymbollayer.py
---------------------
Date : November 2018
Copyright : (C) 2018 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'November 2018'
__copyright__ = '(C) 2018, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QDir, Qt
from qgis.PyQt.QtGui import QImage, QColor, QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsGeometry,
QgsFillSymbol,
QgsRenderContext,
QgsFeature,
QgsMapSettings,
QgsRenderChecker,
QgsReadWriteContext,
QgsSymbolLayerUtils,
QgsSimpleMarkerSymbolLayer,
QgsLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsMarkerSymbol
)
from qgis.testing import unittest, start_app
# Initialise the QGIS application once for this test module.
start_app()
# Root directory holding the shared unit-test data files.
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMarkerLineSymbolLayer(unittest.TestCase):
    """Tests for QgsMarkerLineSymbolLayer (ring filtering, clone,
    save/load round-trip and rendering)."""
    def setUp(self):
        # Accumulates the HTML render-check report written in tearDown().
        self.report = "<h1>Python QgsMarkerLineSymbolLayer Tests</h1>\n"
    def tearDown(self):
        # Append this run's report to the shared QGIS test report file.
        report_file_path = "%s/qgistest.html" % QDir.tempPath()
        with open(report_file_path, 'a') as report_file:
            report_file.write(self.report)
    def testRingFilter(self):
        """Exercise ringFilter(): default value, setter, clone,
        XML save/load round-trip, and rendering of exterior-only and
        interior-only marker lines."""
        # test filtering rings during rendering
        s = QgsFillSymbol()
        s.deleteSymbolLayer(0)
        marker_line = QgsMarkerLineSymbolLayer(True)
        marker_line.setPlacement(QgsMarkerLineSymbolLayer.FirstVertex)
        marker = QgsSimpleMarkerSymbolLayer(QgsSimpleMarkerSymbolLayer.Triangle, 4)
        marker.setColor(QColor(255, 0, 0))
        marker.setStrokeStyle(Qt.NoPen)
        marker_symbol = QgsMarkerSymbol()
        marker_symbol.changeSymbolLayer(0, marker)
        marker_line.setSubSymbol(marker_symbol)
        s.appendSymbolLayer(marker_line.clone())
        # Default filter renders on all rings.
        self.assertEqual(s.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.AllRings)
        s.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.ExteriorRingOnly)
        self.assertEqual(s.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
        # The filter must survive cloning...
        s2 = s.clone()
        self.assertEqual(s2.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
        # ...and an XML save/load round-trip.
        doc = QDomDocument()
        context = QgsReadWriteContext()
        element = QgsSymbolLayerUtils.saveSymbol('test', s, doc, context)
        s2 = QgsSymbolLayerUtils.loadSymbol(element, context)
        self.assertEqual(s2.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
        # rendering test
        s3 = QgsFillSymbol()
        s3.deleteSymbolLayer(0)
        s3.appendSymbolLayer(
            QgsMarkerLineSymbolLayer())
        s3.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.ExteriorRingOnly)
        g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1),(8 8, 9 8, 9 9, 8 9, 8 8))')
        rendered_image = self.renderGeometry(s3, g)
        assert self.imageCheck('markerline_exterioronly', 'markerline_exterioronly', rendered_image)
        s3.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.InteriorRingsOnly)
        g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1),(8 8, 9 8, 9 9, 8 9, 8 8))')
        rendered_image = self.renderGeometry(s3, g)
        assert self.imageCheck('markerline_interioronly', 'markerline_interioronly', rendered_image)
    def renderGeometry(self, symbol, geom):
        """Render *geom* with *symbol* onto a 200x200 image and return it."""
        f = QgsFeature()
        f.setGeometry(geom)
        image = QImage(200, 200, QImage.Format_RGB32)
        painter = QPainter()
        ms = QgsMapSettings()
        extent = geom.get().boundingBox()
        # buffer extent by 10%
        if extent.width() > 0:
            extent = extent.buffered((extent.height() + extent.width()) / 20.0)
        else:
            # Degenerate (zero-width) extent: use a fixed buffer.
            extent = extent.buffered(10)
        ms.setExtent(extent)
        ms.setOutputSize(image.size())
        context = QgsRenderContext.fromMapSettings(ms)
        context.setPainter(painter)
        context.setScaleFactor(96 / 25.4)  # 96 DPI
        painter.begin(image)
        try:
            image.fill(QColor(0, 0, 0))
            symbol.startRender(context)
            symbol.renderFeature(f, context)
            symbol.stopRender(context)
        finally:
            # Always end the painter, even if rendering raised.
            painter.end()
        return image
    def imageCheck(self, name, reference_image, image):
        """Compare *image* against the stored control image; return True
        when the render matches within tolerance."""
        self.report += "<h2>Render {}</h2>\n".format(name)
        temp_dir = QDir.tempPath() + '/'
        file_name = temp_dir + 'symbol_' + name + ".png"
        image.save(file_name, "PNG")
        checker = QgsRenderChecker()
        checker.setControlPathPrefix("symbol_markerline")
        checker.setControlName("expected_" + reference_image)
        checker.setRenderedImage(file_name)
        checker.setColorTolerance(2)
        result = checker.compareImages(name, 20)
        self.report += checker.report()
        print((self.report))
        return result
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| dwadler/QGIS | tests/src/python/test_qgsmarkerlinesymbollayer.py | Python | gpl-2.0 | 6,063 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the ``DrugAdmin`` model: an auto-increment primary key
    plus a short (max 20 chars) ``name`` field."""
    dependencies = [
        ('pppcemr', '0085_auto_20160222_2041'),
    ]
    operations = [
        migrations.CreateModel(
            name='DrugAdmin',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=20)),
            ],
        ),
    ]
| sstebbins/pppcpro | pppcemr/migrations/0086_drugadmin.py | Python | agpl-3.0 | 532 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2018: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, [email protected]
# Hartmut Goebel, [email protected]
# Grégory Starck, [email protected]
# Romain Forlot, [email protected]
# Sebastien Coavoux, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This modules provides class for the Poller daemon
"""
from alignak.satellite import Satellite
from alignak.property import IntegerProp, StringProp
class Poller(Satellite):
    """Poller class. Referenced as "app" in most Interface

    A poller is a satellite daemon that executes checks, but never
    runs actions (event handlers / notifications) -- see the
    do_checks / do_actions class flags below.
    """
    do_checks = True # I do checks
    do_actions = False # but no actions
    my_type = 'poller'
    # Inherit the generic Satellite properties; override only the
    # daemon type name and its default listening port.
    properties = Satellite.properties.copy()
    properties.update({
        'type':
            StringProp(default='poller'),
        'port':
            IntegerProp(default=7771)
    })
    def __init__(self, **kwargs):
        """Poller daemon initialisation

        :param kwargs: command line arguments
        """
        # Fall back to 'Default-poller' when no daemon name was given.
        super(Poller, self).__init__(kwargs.get('daemon_name', 'Default-poller'), **kwargs)
| Alignak-monitoring/alignak | alignak/daemons/pollerdaemon.py | Python | agpl-3.0 | 2,565 |
#!/usr/bin/env python2
#
# Author: Masahiro Yamada <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0+
#
"""
Move config options from headers to defconfig files.
Since Kconfig was introduced to U-Boot, we have worked on moving
config options from headers to Kconfig (defconfig).
This tool intends to help this tremendous work.
Usage
-----
First, you must edit the Kconfig to add the menu entries for the configs
you are moving.
And then run this tool giving CONFIG names you want to move.
For example, if you want to move CONFIG_CMD_USB and CONFIG_SYS_TEXT_BASE,
simply type as follows:
$ tools/moveconfig.py CONFIG_CMD_USB CONFIG_SYS_TEXT_BASE
The tool walks through all the defconfig files and move the given CONFIGs.
The log is also displayed on the terminal.
The log is printed for each defconfig as follows:
<defconfig_name>
<action1>
<action2>
<action3>
...
<defconfig_name> is the name of the defconfig.
<action*> shows what the tool did for that defconfig.
It looks like one of the following:
- Move 'CONFIG_... '
This config option was moved to the defconfig
- CONFIG_... is not defined in Kconfig. Do nothing.
The entry for this CONFIG was not found in Kconfig. The option is not
defined in the config header, either. So, this case can be just skipped.
- CONFIG_... is not defined in Kconfig (suspicious). Do nothing.
This option is defined in the config header, but its entry was not found
in Kconfig.
There are two common cases:
- You forgot to create an entry for the CONFIG before running
this tool, or made a typo in a CONFIG passed to this tool.
- The entry was hidden due to unmet 'depends on'.
The tool does not know if the result is reasonable, so please check it
manually.
- 'CONFIG_...' is the same as the define in Kconfig. Do nothing.
The define in the config header matched the one in Kconfig.
We do not need to touch it.
- Compiler is missing. Do nothing.
The compiler specified for this architecture was not found
in your PATH environment.
(If -e option is passed, the tool exits immediately.)
- Failed to process.
An error occurred during processing this defconfig. Skipped.
(If -e option is passed, the tool exits immediately on error.)
Finally, you will be asked, Clean up headers? [y/n]:
If you say 'y' here, the unnecessary config defines are removed
from the config headers (include/configs/*.h).
It just uses the regex method, so you should not rely on it.
Just in case, please do 'git diff' to see what happened.
How does it work?
-----------------
This tool runs configuration and builds include/autoconf.mk for every
defconfig. The config options defined in Kconfig appear in the .config
file (unless they are hidden because of unmet dependency.)
On the other hand, the config options defined by board headers are seen
in include/autoconf.mk. The tool looks for the specified options in both
of them to decide the appropriate action for the options. If the given
config option is found in the .config, but its value does not match the
one from the board header, the config option in the .config is replaced
with the define in the board header. Then, the .config is synced by
"make savedefconfig" and the defconfig is updated with it.
For faster processing, this tool handles multi-threading. It creates
separate build directories where the out-of-tree build is run. The
temporary build directories are automatically created and deleted as
needed. The number of threads are chosen based on the number of the CPU
cores of your system although you can change it via -j (--jobs) option.
Toolchains
----------
Appropriate toolchain are necessary to generate include/autoconf.mk
for all the architectures supported by U-Boot. Most of them are available
at the kernel.org site, some are not provided by kernel.org.
The default per-arch CROSS_COMPILE used by this tool is specified by
the list below, CROSS_COMPILE. You may wish to update the list to
use your own. Instead of modifying the list directly, you can give
them via environment variables.
Available options
-----------------
-c, --color
Surround each portion of the log with escape sequences to display it
in color on the terminal.
-C, --commit
Create a git commit with the changes when the operation is complete. A
standard commit message is used which may need to be edited.
-d, --defconfigs
Specify a file containing a list of defconfigs to move. The defconfig
files can be given with shell-style wildcards.
-n, --dry-run
Perform a trial run that does not make any changes. It is useful to
see what is going to happen before one actually runs it.
-e, --exit-on-error
Exit immediately if Make exits with a non-zero status while processing
a defconfig file.
-s, --force-sync
Do "make savedefconfig" forcibly for all the defconfig files.
If not specified, "make savedefconfig" only occurs for cases
where at least one CONFIG was moved.
-S, --spl
Look for moved config options in spl/include/autoconf.mk instead of
include/autoconf.mk. This is useful for moving options for SPL build
because SPL related options (mostly prefixed with CONFIG_SPL_) are
sometimes blocked by CONFIG_SPL_BUILD ifdef conditionals.
-H, --headers-only
Only cleanup the headers; skip the defconfig processing
-j, --jobs
Specify the number of threads to run simultaneously. If not specified,
the number of threads is the same as the number of CPU cores.
-r, --git-ref
Specify the git ref to clone for building the autoconf.mk. If unspecified
use the CWD. This is useful for when changes to the Kconfig affect the
default values and you want to capture the state of the defconfig from
before that change was in effect. If in doubt, specify a ref pre-Kconfig
changes (use HEAD if Kconfig changes are not committed). Worst case it will
take a bit longer to run, but will always do the right thing.
-v, --verbose
Show any build errors as boards are built
-y, --yes
Instead of prompting, automatically go ahead with all operations. This
includes cleaning up headers and CONFIG_SYS_EXTRA_OPTIONS.
To see the complete list of supported options, run
$ tools/moveconfig.py -h
"""
import copy
import difflib
import filecmp
import fnmatch
import glob
import multiprocessing
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
# Helper script that prints the name of the GNU Make executable.
SHOW_GNU_MAKE = 'scripts/show-gnu-make'
# Polling interval, in seconds.
SLEEP_TIME=0.03
# Here is the list of cross-tools I use.
# Most of them are available at kernel.org
# (https://www.kernel.org/pub/tools/crosstool/files/bin/), except the following:
# arc: https://github.com/foss-for-synopsys-dwc-arc-processors/toolchain/releases
# blackfin: http://sourceforge.net/projects/adi-toolchain/files/
# nds32: http://osdk.andestech.com/packages/nds32le-linux-glibc-v1.tgz
# nios2: https://sourcery.mentor.com/GNUToolchain/subscription42545
# sh: http://sourcery.mentor.com/public/gnu_toolchain/sh-linux-gnu
#
# openrisc kernel.org toolchain is out of date, download latest one from
# http://opencores.org/or1k/OpenRISC_GNU_tool_chain#Prebuilt_versions
# Default CROSS_COMPILE prefix per architecture; may be overridden via
# CROSS_COMPILE_<ARCH> environment variables (see update_cross_compile).
CROSS_COMPILE = {
    'arc': 'arc-linux-',
    'aarch64': 'aarch64-linux-',
    'arm': 'arm-unknown-linux-gnueabi-',
    'avr32': 'avr32-linux-',
    'blackfin': 'bfin-elf-',
    'm68k': 'm68k-linux-',
    'microblaze': 'microblaze-linux-',
    'mips': 'mips-linux-',
    'nds32': 'nds32le-linux-',
    'nios2': 'nios2-linux-gnu-',
    'openrisc': 'or1k-elf-',
    'powerpc': 'powerpc-linux-',
    'sh': 'sh-linux-gnu-',
    'sparc': 'sparc-linux-',
    'x86': 'i386-linux-',
    'xtensa': 'xtensa-linux-'
}
# Processing-state constants.
STATE_IDLE = 0
STATE_DEFCONFIG = 1
STATE_AUTOCONF = 2
STATE_SAVEDEFCONFIG = 3
# Per-CONFIG result actions (returned by KconfigParser.parse_one_config).
ACTION_MOVE = 0
ACTION_NO_ENTRY = 1
ACTION_NO_ENTRY_WARN = 2
ACTION_NO_CHANGE = 3
# ANSI terminal color attribute strings (used with color_text()).
COLOR_BLACK = '0;30'
COLOR_RED = '0;31'
COLOR_GREEN = '0;32'
COLOR_BROWN = '0;33'
COLOR_BLUE = '0;34'
COLOR_PURPLE = '0;35'
COLOR_CYAN = '0;36'
COLOR_LIGHT_GRAY = '0;37'
COLOR_DARK_GRAY = '1;30'
COLOR_LIGHT_RED = '1;31'
COLOR_LIGHT_GREEN = '1;32'
COLOR_YELLOW = '1;33'
COLOR_LIGHT_BLUE = '1;34'
COLOR_LIGHT_PURPLE = '1;35'
COLOR_LIGHT_CYAN = '1;36'
COLOR_WHITE = '1;37'
### helper functions ###
def get_devnull():
    """Return a handle suitable for discarding a subprocess' output.

    Uses subprocess.DEVNULL where available (Python 3); otherwise
    falls back to opening os.devnull for writing (Python 2).
    """
    devnull = getattr(subprocess, 'DEVNULL', None)
    if devnull is None:
        devnull = open(os.devnull, 'wb')
    return devnull
def check_top_directory():
    """Exit unless run from the top of the source tree.

    The top directory is recognised by the presence of both the
    'README' file and the 'Licenses' directory.
    """
    if not all(os.path.exists(marker) for marker in ('README', 'Licenses')):
        sys.exit('Please run at the top of source directory.')
def check_clean_directory():
    """Exit if leftovers of a previous configuration are present."""
    if any(os.path.exists(leftover) for leftover in ('.config', 'include/config')):
        sys.exit("source tree is not clean, please run 'make mrproper'")
def get_make_cmd():
    """Get the command name of GNU Make.

    U-Boot needs GNU Make for building, but the command name is not
    necessarily "make" (for example, "gmake" on FreeBSD).  The helper
    script SHOW_GNU_MAKE prints the right name for this system.

    Returns the most appropriate command name on your system.
    """
    proc = subprocess.Popen([SHOW_GNU_MAKE], stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    if proc.returncode:
        sys.exit('GNU Make not found')
    return stdout.rstrip()
def get_matched_defconfigs(defconfigs_file):
    """Get all the defconfig files that match the patterns in a file.

    Arguments:
      defconfigs_file: path to a file listing defconfig patterns, one per
        line.  Shell-style wildcards are allowed, and each name may be
        given with or without the '_defconfig' suffix.

    Returns:
      A list of defconfig names relative to configs/, duplicates removed.
    """
    defconfigs = []
    for i, line in enumerate(open(defconfigs_file)):
        line = line.strip()
        if not line:
            continue # skip blank lines silently
        pattern = os.path.join('configs', line)
        # Try the pattern as written, then with '_defconfig' appended.
        matched = glob.glob(pattern) + glob.glob(pattern + '_defconfig')
        if not matched:
            print >> sys.stderr, "warning: %s:%d: no defconfig matched '%s'" % \
                (defconfigs_file, i + 1, line)
        defconfigs += matched
    # use set() to drop multiple matching
    return [ defconfig[len('configs') + 1:] for defconfig in set(defconfigs) ]
def get_all_defconfigs():
    """Return every defconfig file found under the configs/ directory.

    Names are returned relative to configs/ (subdirectory prefixes are
    preserved).
    """
    defconfigs = []
    for dirpath, _, filenames in os.walk('configs'):
        subdir = dirpath[len('configs') + 1:]
        defconfigs.extend(os.path.join(subdir, name)
                          for name in fnmatch.filter(filenames, '*_defconfig'))
    return defconfigs
def color_text(color_enabled, color, string):
    """Return *string* wrapped in ANSI escape sequences when enabled.

    Each line is colored separately so that no escape sequence spans a
    line feed; otherwise additional whitespace or line-feeds might be
    printed by the terminal.
    """
    if not color_enabled:
        return string
    colored = []
    for part in string.split('\n'):
        colored.append('\033[%sm%s\033[0m' % (color, part) if part else '')
    return '\n'.join(colored)
def show_diff(a, b, file_path, color_enabled):
    """Show a unified diff between two line lists on stdout.

    Arguments:
      a: A list of lines (before)
      b: A list of lines (after)
      file_path: Path to the file
      color_enabled: Display the diff in color
    """
    diff = difflib.unified_diff(a, b,
                                fromfile=os.path.join('a', file_path),
                                tofile=os.path.join('b', file_path))
    for line in diff:
        if line[0] == '-' and line[1] != '-':
            # Removed line (the second char test skips the '---' header).
            print color_text(color_enabled, COLOR_RED, line),
        elif line[0] == '+' and line[1] != '+':
            # Added line (skips the '+++' header).
            print color_text(color_enabled, COLOR_GREEN, line),
        else:
            print line,
def update_cross_compile(color_enabled):
    """Update per-arch CROSS_COMPILE via environment variables

    The default CROSS_COMPILE values are available
    in the CROSS_COMPILE list above.
    You can override them via environment variables
    CROSS_COMPILE_{ARCH}.
    For example, if you want to override toolchain prefixes
    for ARM and PowerPC, you can do as follows in your shell:
    export CROSS_COMPILE_ARM=...
    export CROSS_COMPILE_POWERPC=...
    Then, this function checks if specified compilers really exist in your
    PATH environment.
    """
    # Every directory under arch/ with a Makefile is a supported arch.
    archs = []
    for arch in os.listdir('arch'):
        if os.path.exists(os.path.join('arch', arch, 'Makefile')):
            archs.append(arch)
    # arm64 is a special case
    archs.append('aarch64')
    for arch in archs:
        env = 'CROSS_COMPILE_' + arch.upper()
        cross_compile = os.environ.get(env)
        if not cross_compile:
            cross_compile = CROSS_COMPILE.get(arch, '')
        for path in os.environ["PATH"].split(os.pathsep):
            gcc_path = os.path.join(path, cross_compile + 'gcc')
            if os.path.isfile(gcc_path) and os.access(gcc_path, os.X_OK):
                break
        else:
            # for/else: no PATH entry contained an executable
            # '<prefix>gcc', so mark this arch's toolchain as missing.
            print >> sys.stderr, color_text(color_enabled, COLOR_YELLOW,
                 'warning: %sgcc: not found in PATH. %s architecture boards will be skipped'
                                            % (cross_compile, arch))
            cross_compile = None
        CROSS_COMPILE[arch] = cross_compile
def extend_matched_lines(lines, matched, pre_patterns, post_patterns, extend_pre,
                         extend_post):
    """Extend matched lines if desired patterns are found before/after already
    matched lines.

    Arguments:
      lines: A list of lines handled.
      matched: A list of line numbers that have been already matched.
               (will be updated by this function)
      pre_patterns: A list of regular expression that should be matched as
                    preamble.
      post_patterns: A list of regular expression that should be matched as
                     postamble.
      extend_pre: Add the line number of matched preamble to the matched list.
      extend_post: Add the line number of matched postamble to the matched list.
    """
    extended_matched = []
    j = matched[0]
    for i in matched:
        if i == 0 or i < j:
            # i == 0 has no preceding line to inspect; i < j means this
            # line lies inside a run already handled in an earlier pass.
            continue
        # Advance j to the first unmatched line after the contiguous
        # matched run that starts at i; lines i-1 and j are then the
        # run's immediate neighbours.
        j = i
        while j in matched:
            j += 1
        if j >= len(lines):
            break
        for p in pre_patterns:
            if p.search(lines[i - 1]):
                break
        else:
            # not matched
            continue
        for p in post_patterns:
            if p.search(lines[j]):
                break
        else:
            # not matched
            continue
        # Both neighbours matched: absorb them as requested.
        if extend_pre:
            extended_matched.append(i - 1)
        if extend_post:
            extended_matched.append(j)
    matched += extended_matched
    matched.sort()
def cleanup_one_header(header_path, patterns, options):
    """Clean regex-matched lines away from a file.

    Arguments:
      header_path: path to the cleaned file.
      patterns: list of regex patterns.  Any lines matching to these
                patterns are deleted.
      options: option flags.
    """
    with open(header_path) as f:
        lines = f.readlines()
    matched = []
    for i, line in enumerate(lines):
        # A line whose predecessor matched and ended with a backslash is
        # a continuation of that matched line: delete it too.
        if i - 1 in matched and lines[i - 1][-2:] == '\\\n':
            matched.append(i)
            continue
        for pattern in patterns:
            if pattern.search(line):
                matched.append(i)
                break
    if not matched:
        return
    # remove empty #ifdef ... #endif, successive blank lines
    pattern_if = re.compile(r'#\s*if(def|ndef)?\W') # #if, #ifdef, #ifndef
    pattern_elif = re.compile(r'#\s*el(if|se)\W') # #elif, #else
    pattern_endif = re.compile(r'#\s*endif\W') # #endif
    pattern_blank = re.compile(r'^\s*$') # empty line
    # Iterate to a fixed point: deleting lines can make further
    # preprocessor blocks or blank-line runs become empty.
    while True:
        old_matched = copy.copy(matched)
        extend_matched_lines(lines, matched, [pattern_if],
                             [pattern_endif], True, True)
        extend_matched_lines(lines, matched, [pattern_elif],
                             [pattern_elif, pattern_endif], True, False)
        extend_matched_lines(lines, matched, [pattern_if, pattern_elif],
                             [pattern_blank], False, True)
        extend_matched_lines(lines, matched, [pattern_blank],
                             [pattern_elif, pattern_endif], True, False)
        extend_matched_lines(lines, matched, [pattern_blank],
                             [pattern_blank], True, False)
        if matched == old_matched:
            break
    tolines = copy.copy(lines)
    # Pop from the end so earlier indices stay valid.
    for i in reversed(matched):
        tolines.pop(i)
    show_diff(lines, tolines, header_path, options.color)
    if options.dry_run:
        return
    with open(header_path, 'w') as f:
        for line in tolines:
            f.write(line)
def cleanup_headers(configs, options):
    """Delete config defines from board headers.

    Arguments:
      configs: A list of CONFIGs to remove.
      options: option flags.
    """
    if not options.yes:
        # raw_input() is Python 2 (this script targets Python 2).
        while True:
            choice = raw_input('Clean up headers? [y/n]: ').lower()
            print choice
            if choice == 'y' or choice == 'n':
                break
        if choice == 'n':
            return
    patterns = []
    for config in configs:
        # Match both '#define CONFIG_FOO ...' and '#undef CONFIG_FOO'.
        patterns.append(re.compile(r'#\s*define\s+%s\W' % config))
        patterns.append(re.compile(r'#\s*undef\s+%s\W' % config))
    for dir in 'include', 'arch', 'board':
        for (dirpath, dirnames, filenames) in os.walk(dir):
            # Never touch generated headers.
            if dirpath == os.path.join('include', 'generated'):
                continue
            for filename in filenames:
                # Skip editor backup files ('*~').
                if not fnmatch.fnmatch(filename, '*~'):
                    cleanup_one_header(os.path.join(dirpath, filename),
                                       patterns, options)
def cleanup_one_extra_option(defconfig_path, configs, options):
    """Delete config defines in CONFIG_SYS_EXTRA_OPTIONS in one defconfig file.

    Arguments:
      defconfig_path: path to the cleaned defconfig file.
      configs: A list of CONFIGs to remove.
      options: option flags.
    """
    start = 'CONFIG_SYS_EXTRA_OPTIONS="'
    end = '"\n'
    with open(defconfig_path) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.startswith(start) and line.endswith(end):
            break
    else:
        # CONFIG_SYS_EXTRA_OPTIONS was not found in this defconfig
        return
    # Tokens are comma-separated, each 'NAME' or 'NAME=value'; keep a
    # token unless its NAME part is one of the configs to remove.
    old_tokens = line[len(start):-len(end)].split(',')
    new_tokens = []
    for token in old_tokens:
        pos = token.find('=')
        if not (token[:pos] if pos >= 0 else token) in configs:
            new_tokens.append(token)
    if new_tokens == old_tokens:
        return
    tolines = copy.copy(lines)
    if new_tokens:
        tolines[i] = start + ','.join(new_tokens) + end
    else:
        # Every option was removed: drop the whole line.
        tolines.pop(i)
    show_diff(lines, tolines, defconfig_path, options.color)
    if options.dry_run:
        return
    with open(defconfig_path, 'w') as f:
        for line in tolines:
            f.write(line)
def cleanup_extra_options(configs, options):
    """Delete config defines in CONFIG_SYS_EXTRA_OPTIONS in defconfig files.

    Arguments:
      configs: A list of CONFIGs to remove.
      options: option flags.
    """
    if not options.yes:
        # raw_input() is Python 2 (this script targets Python 2).
        while True:
            choice = (raw_input('Clean up CONFIG_SYS_EXTRA_OPTIONS? [y/n]: ').
                      lower())
            print choice
            if choice == 'y' or choice == 'n':
                break
        if choice == 'n':
            return
    # Tokens inside CONFIG_SYS_EXTRA_OPTIONS carry no CONFIG_ prefix.
    configs = [ config[len('CONFIG_'):] for config in configs ]
    defconfigs = get_all_defconfigs()
    for defconfig in defconfigs:
        cleanup_one_extra_option(os.path.join('configs', defconfig), configs,
                                 options)
### classes ###
class Progress:
    """Progress Indicator"""
    def __init__(self, total):
        """Create a new progress indicator.

        Arguments:
          total: A number of defconfig files to process.
        """
        self.current = 0
        self.total = total
    def inc(self):
        """Increment the number of processed defconfig files."""
        self.current += 1
    def show(self):
        """Display the progress."""
        # Python 2 print with a trailing comma suppresses the newline,
        # and '\r' returns the cursor so the line is overwritten in place.
        print ' %d defconfigs out of %d\r' % (self.current, self.total),
        sys.stdout.flush()
class KconfigParser:
"""A parser of .config and include/autoconf.mk."""
re_arch = re.compile(r'CONFIG_SYS_ARCH="(.*)"')
re_cpu = re.compile(r'CONFIG_SYS_CPU="(.*)"')
def __init__(self, configs, options, build_dir):
"""Create a new parser.
Arguments:
configs: A list of CONFIGs to move.
options: option flags.
build_dir: Build directory.
"""
self.configs = configs
self.options = options
self.dotconfig = os.path.join(build_dir, '.config')
self.autoconf = os.path.join(build_dir, 'include', 'autoconf.mk')
self.spl_autoconf = os.path.join(build_dir, 'spl', 'include',
'autoconf.mk')
self.config_autoconf = os.path.join(build_dir, 'include', 'config',
'auto.conf')
self.defconfig = os.path.join(build_dir, 'defconfig')
def get_cross_compile(self):
"""Parse .config file and return CROSS_COMPILE.
Returns:
A string storing the compiler prefix for the architecture.
Return a NULL string for architectures that do not require
compiler prefix (Sandbox and native build is the case).
Return None if the specified compiler is missing in your PATH.
Caller should distinguish '' and None.
"""
arch = ''
cpu = ''
for line in open(self.dotconfig):
m = self.re_arch.match(line)
if m:
arch = m.group(1)
continue
m = self.re_cpu.match(line)
if m:
cpu = m.group(1)
if not arch:
return None
# fix-up for aarch64
if arch == 'arm' and cpu == 'armv8':
arch = 'aarch64'
return CROSS_COMPILE.get(arch, None)
def parse_one_config(self, config, dotconfig_lines, autoconf_lines):
"""Parse .config, defconfig, include/autoconf.mk for one config.
This function looks for the config options in the lines from
defconfig, .config, and include/autoconf.mk in order to decide
which action should be taken for this defconfig.
Arguments:
config: CONFIG name to parse.
dotconfig_lines: lines from the .config file.
autoconf_lines: lines from the include/autoconf.mk file.
Returns:
A tupple of the action for this defconfig and the line
matched for the config.
"""
not_set = '# %s is not set' % config
for line in autoconf_lines:
line = line.rstrip()
if line.startswith(config + '='):
new_val = line
break
else:
new_val = not_set
for line in dotconfig_lines:
line = line.rstrip()
if line.startswith(config + '=') or line == not_set:
old_val = line
break
else:
if new_val == not_set:
return (ACTION_NO_ENTRY, config)
else:
return (ACTION_NO_ENTRY_WARN, config)
# If this CONFIG is neither bool nor trisate
if old_val[-2:] != '=y' and old_val[-2:] != '=m' and old_val != not_set:
# tools/scripts/define2mk.sed changes '1' to 'y'.
# This is a problem if the CONFIG is int type.
# Check the type in Kconfig and handle it correctly.
if new_val[-2:] == '=y':
new_val = new_val[:-1] + '1'
return (ACTION_NO_CHANGE if old_val == new_val else ACTION_MOVE,
new_val)
    def update_dotconfig(self):
        """Parse files for the config options and update the .config.

        This function parses the generated .config and include/autoconf.mk
        searching the target options.
        Move the config option(s) to the .config as needed.

        Returns:
          Return a tuple of (updated flag, suspicious flag, log string).
          The "updated flag" is True if the .config was updated, False
          otherwise.  The "suspicious flag" is True when at least one
          option looked misconverted.  The "log string" shows what
          happened to the .config.
        """
        results = []
        updated = False
        suspicious = False
        # Files consumed by this pass; removed before returning so the next
        # build step regenerates them.
        rm_files = [self.config_autoconf, self.autoconf]

        if self.options.spl:
            if os.path.exists(self.spl_autoconf):
                autoconf_path = self.spl_autoconf
                rm_files.append(self.spl_autoconf)
            else:
                # No SPL autoconf.mk was generated: SPL is off for this board.
                for f in rm_files:
                    os.remove(f)
                return (updated, suspicious,
                        color_text(self.options.color, COLOR_BROWN,
                                   "SPL is not enabled. Skipped.") + '\n')
        else:
            autoconf_path = self.autoconf

        with open(self.dotconfig) as f:
            dotconfig_lines = f.readlines()

        with open(autoconf_path) as f:
            autoconf_lines = f.readlines()

        for config in self.configs:
            result = self.parse_one_config(config, dotconfig_lines,
                                           autoconf_lines)
            results.append(result)

        log = ''

        # Build a colored, per-option log describing the chosen actions.
        for (action, value) in results:
            if action == ACTION_MOVE:
                actlog = "Move '%s'" % value
                log_color = COLOR_LIGHT_GREEN
            elif action == ACTION_NO_ENTRY:
                actlog = "%s is not defined in Kconfig. Do nothing." % value
                log_color = COLOR_LIGHT_BLUE
            elif action == ACTION_NO_ENTRY_WARN:
                actlog = "%s is not defined in Kconfig (suspicious). Do nothing." % value
                log_color = COLOR_YELLOW
                suspicious = True
            elif action == ACTION_NO_CHANGE:
                actlog = "'%s' is the same as the define in Kconfig. Do nothing." \
                         % value
                log_color = COLOR_LIGHT_PURPLE
            elif action == ACTION_SPL_NOT_EXIST:
                actlog = "SPL is not enabled for this defconfig. Skip."
                log_color = COLOR_PURPLE
            else:
                sys.exit("Internal Error. This should not happen.")
            log += color_text(self.options.color, log_color, actlog) + '\n'

        # Append the moved options to the end of the .config.
        with open(self.dotconfig, 'a') as f:
            for (action, value) in results:
                if action == ACTION_MOVE:
                    f.write(value + '\n')
                    updated = True

        # Saved for check_defconfig() to verify after 'make savedefconfig'.
        self.results = results
        for f in rm_files:
            os.remove(f)

        return (updated, suspicious, log)
    def check_defconfig(self):
        """Check the defconfig after savedefconfig

        Returns:
          Return additional log if moved CONFIGs were removed again by
          'make savedefconfig'.
        """
        log = ''

        with open(self.defconfig) as f:
            defconfig_lines = f.readlines()

        for (action, value) in self.results:
            if action != ACTION_MOVE:
                continue
            # A moved option that savedefconfig dropped again is suspicious.
            if not value + '\n' in defconfig_lines:
                log += color_text(self.options.color, COLOR_YELLOW,
                                  "'%s' was removed by savedefconfig.\n" %
                                  value)

        return log
class Slot:

    """A slot to store a subprocess.

    Each instance of this class handles one subprocess.
    This class is useful to control multiple threads
    for faster processing.
    """

    def __init__(self, configs, options, progress, devnull, make_cmd, reference_src_dir):
        """Create a new process slot.

        Arguments:
          configs: A list of CONFIGs to move.
          options: option flags.
          progress: A progress indicator.
          devnull: A file object of '/dev/null'.
          make_cmd: command name of GNU Make.
          reference_src_dir: Determine the true starting config state from this
                             source tree.
        """
        self.options = options
        self.progress = progress
        # Each slot builds in its own scratch directory (removed in __del__).
        self.build_dir = tempfile.mkdtemp()
        self.devnull = devnull
        self.make_cmd = (make_cmd, 'O=' + self.build_dir)
        self.reference_src_dir = reference_src_dir
        self.parser = KconfigParser(configs, options, self.build_dir)
        self.state = STATE_IDLE
        self.failed_boards = set()
        self.suspicious_boards = set()
def __del__(self):
"""Delete the working directory
This function makes sure the temporary directory is cleaned away
even if Python suddenly dies due to error. It should be done in here
because it is guaranteed the destructor is always invoked when the
instance of the class gets unreferenced.
If the subprocess is still running, wait until it finishes.
"""
if self.state != STATE_IDLE:
while self.ps.poll() == None:
pass
shutil.rmtree(self.build_dir)
    def add(self, defconfig):
        """Assign a new subprocess for defconfig and add it to the slot.

        If the slot is vacant, create a new subprocess for processing the
        given defconfig and add it to the slot.  Just returns False if
        the slot is occupied (i.e. the current subprocess is still running).

        Arguments:
          defconfig: defconfig name.

        Returns:
          Return True on success or False on failure
        """
        if self.state != STATE_IDLE:
            return False
        self.defconfig = defconfig
        self.log = ''
        # Start in the reference source tree (if any) so the original
        # config state is captured first; poll() switches back later.
        self.current_src_dir = self.reference_src_dir
        self.do_defconfig()
        return True
    def poll(self):
        """Check the status of the subprocess and handle it as needed.

        Returns True if the slot is vacant (i.e. in idle state).

        If the configuration is successfully finished, assign a new
        subprocess to build include/autoconf.mk.
        If include/autoconf.mk is generated, invoke the parser to
        parse the .config and the include/autoconf.mk, moving
        config options to the .config as needed.
        If the .config was updated, run "make savedefconfig" to sync
        it, update the original defconfig, and then set the slot back
        to the idle state.

        Returns:
          Return True if the subprocess is terminated, False otherwise
        """
        if self.state == STATE_IDLE:
            return True

        # Subprocess still running: nothing to advance yet.
        if self.ps.poll() == None:
            return False

        if self.ps.poll() != 0:
            self.handle_error()
        elif self.state == STATE_DEFCONFIG:
            if self.reference_src_dir and not self.current_src_dir:
                # Second defconfig pass (current tree) finished.
                self.do_savedefconfig()
            else:
                self.do_autoconf()
        elif self.state == STATE_AUTOCONF:
            if self.current_src_dir:
                # Done with the reference tree; redo against the current one.
                self.current_src_dir = None
                self.do_defconfig()
            else:
                self.do_savedefconfig()
        elif self.state == STATE_SAVEDEFCONFIG:
            self.update_defconfig()
        else:
            sys.exit("Internal Error. This should not happen.")

        return True if self.state == STATE_IDLE else False
    def handle_error(self):
        """Handle error cases."""
        self.log += color_text(self.options.color, COLOR_LIGHT_RED,
                               "Failed to process.\n")
        if self.options.verbose:
            # Surface the subprocess' captured stderr when --verbose is on.
            self.log += color_text(self.options.color, COLOR_LIGHT_CYAN,
                                   self.ps.stderr.read())
        self.finish(False)
    def do_defconfig(self):
        """Run 'make <board>_defconfig' to create the .config file."""
        cmd = list(self.make_cmd)
        cmd.append(self.defconfig)
        # stderr is piped so handle_error() can display it on failure.
        self.ps = subprocess.Popen(cmd, stdout=self.devnull,
                                   stderr=subprocess.PIPE,
                                   cwd=self.current_src_dir)
        self.state = STATE_DEFCONFIG
    def do_autoconf(self):
        """Run 'make include/config/auto.conf'."""
        self.cross_compile = self.parser.get_cross_compile()
        if self.cross_compile is None:
            # No suitable toolchain found in PATH; skip this board.
            self.log += color_text(self.options.color, COLOR_YELLOW,
                                   "Compiler is missing. Do nothing.\n")
            self.finish(False)
            return

        cmd = list(self.make_cmd)
        if self.cross_compile:
            cmd.append('CROSS_COMPILE=%s' % self.cross_compile)
        cmd.append('KCONFIG_IGNORE_DUPLICATES=1')
        cmd.append('include/config/auto.conf')
        self.ps = subprocess.Popen(cmd, stdout=self.devnull,
                                   stderr=subprocess.PIPE,
                                   cwd=self.current_src_dir)
        self.state = STATE_AUTOCONF
    def do_savedefconfig(self):
        """Update the .config and run 'make savedefconfig'."""
        (updated, suspicious, log) = self.parser.update_dotconfig()
        if suspicious:
            self.suspicious_boards.add(self.defconfig)
        self.log += log

        # Nothing moved and no --force-sync: this board is already done.
        if not self.options.force_sync and not updated:
            self.finish(True)
            return
        if updated:
            self.log += color_text(self.options.color, COLOR_LIGHT_GREEN,
                                   "Syncing by savedefconfig...\n")
        else:
            self.log += "Syncing by savedefconfig (forced by option)...\n"

        cmd = list(self.make_cmd)
        cmd.append('savedefconfig')
        self.ps = subprocess.Popen(cmd, stdout=self.devnull,
                                   stderr=subprocess.PIPE)
        self.state = STATE_SAVEDEFCONFIG
    def update_defconfig(self):
        """Update the input defconfig and go back to the idle state."""
        log = self.parser.check_defconfig()
        if log:
            self.suspicious_boards.add(self.defconfig)
            self.log += log
        orig_defconfig = os.path.join('configs', self.defconfig)
        new_defconfig = os.path.join(self.build_dir, 'defconfig')
        # Only rewrite the tree's defconfig if the content really changed.
        updated = not filecmp.cmp(orig_defconfig, new_defconfig)

        if updated:
            self.log += color_text(self.options.color, COLOR_LIGHT_BLUE,
                                   "defconfig was updated.\n")

        if not self.options.dry_run and updated:
            shutil.move(new_defconfig, orig_defconfig)
        self.finish(True)
    def finish(self, success):
        """Display log along with progress and go to the idle state.

        Arguments:
          success: Should be True when the defconfig was processed
                   successfully, or False when it fails.
        """
        # output at least 30 characters to hide the "* defconfigs out of *".
        log = self.defconfig.ljust(30) + '\n'

        log += '\n'.join([ ' ' + s for s in self.log.split('\n') ])
        # Some threads are running in parallel.
        # Print log atomically to not mix up logs from different threads.
        print >> (sys.stdout if success else sys.stderr), log

        if not success:
            if self.options.exit_on_error:
                sys.exit("Exit on error.")
            # If --exit-on-error flag is not set, skip this board and continue.
            # Record the failed board.
            self.failed_boards.add(self.defconfig)

        self.progress.inc()
        self.progress.show()
        self.state = STATE_IDLE
    def get_failed_boards(self):
        """Returns a set of failed boards (defconfigs) in this slot.
        """
        return self.failed_boards
    def get_suspicious_boards(self):
        """Returns a set of boards (defconfigs) with possible misconversion.

        Boards that failed outright are excluded; they are reported by
        get_failed_boards() instead.
        """
        return self.suspicious_boards - self.failed_boards
class Slots:

    """Controller of the array of subprocess slots."""

    def __init__(self, configs, options, progress, reference_src_dir):
        """Create a new slots controller.

        Arguments:
          configs: A list of CONFIGs to move.
          options: option flags.
          progress: A progress indicator.
          reference_src_dir: Determine the true starting config state from this
                             source tree.
        """
        self.options = options
        self.slots = []
        devnull = get_devnull()
        make_cmd = get_make_cmd()
        # One slot (worker subprocess) per requested parallel job.
        for i in range(options.jobs):
            self.slots.append(Slot(configs, options, progress, devnull,
                                   make_cmd, reference_src_dir))
    def add(self, defconfig):
        """Add a new subprocess if a vacant slot is found.

        Arguments:
          defconfig: defconfig name to be put into.

        Returns:
          Return True on success or False on failure
        """
        # First idle slot takes the job; all busy means failure for now.
        for slot in self.slots:
            if slot.add(defconfig):
                return True
        return False
    def available(self):
        """Check if there is a vacant slot.

        Returns:
          Return True if at least one vacant slot is found, False otherwise.
        """
        # Note: slot.poll() also advances each slot's state machine.
        for slot in self.slots:
            if slot.poll():
                return True
        return False
def empty(self):
"""Check if all slots are vacant.
Returns:
Return True if all the slots are vacant, False otherwise.
"""
ret = True
for slot in self.slots:
if not slot.poll():
ret = False
return ret
    def show_failed_boards(self):
        """Display all of the failed boards (defconfigs)."""
        boards = set()
        output_file = 'moveconfig.failed'

        # Aggregate failures from every slot.
        for slot in self.slots:
            boards |= slot.get_failed_boards()

        if boards:
            boards = '\n'.join(boards) + '\n'
            msg = "The following boards were not processed due to error:\n"
            msg += boards
            msg += "(the list has been saved in %s)\n" % output_file
            print >> sys.stderr, color_text(self.options.color, COLOR_LIGHT_RED,
                                            msg)

            with open(output_file, 'w') as f:
                f.write(boards)
    def show_suspicious_boards(self):
        """Display all boards (defconfigs) with possible misconversion."""
        boards = set()
        output_file = 'moveconfig.suspicious'

        # Aggregate suspicious boards from every slot.
        for slot in self.slots:
            boards |= slot.get_suspicious_boards()

        if boards:
            boards = '\n'.join(boards) + '\n'
            msg = "The following boards might have been converted incorrectly.\n"
            msg += "It is highly recommended to check them manually:\n"
            msg += boards
            msg += "(the list has been saved in %s)\n" % output_file
            print >> sys.stderr, color_text(self.options.color, COLOR_YELLOW,
                                            msg)

            with open(output_file, 'w') as f:
                f.write(boards)
class ReferenceSource:

    """Reference source against which original configs should be parsed."""

    def __init__(self, commit):
        """Create a reference source directory based on a specified commit.

        Arguments:
          commit: commit to git-clone
        """
        self.src_dir = tempfile.mkdtemp()
        print "Cloning git repo to a separate work directory..."
        # Clone the repository in the current working directory into the
        # scratch directory, then check out the requested commit there.
        subprocess.check_output(['git', 'clone', os.getcwd(), '.'],
                                cwd=self.src_dir)
        print "Checkout '%s' to build the original autoconf.mk." % \
            subprocess.check_output(['git', 'rev-parse', '--short', commit]).strip()
        subprocess.check_output(['git', 'checkout', commit],
                                stderr=subprocess.STDOUT, cwd=self.src_dir)

    def __del__(self):
        """Delete the reference source directory

        This function makes sure the temporary directory is cleaned away
        even if Python suddenly dies due to error.  It should be done in here
        because it is guaranteed the destructor is always invoked when the
        instance of the class gets unreferenced.
        """
        shutil.rmtree(self.src_dir)

    def get_dir(self):
        """Return the absolute path to the reference source directory."""
        return self.src_dir
def move_config(configs, options):
    """Move config options to defconfig files.

    Arguments:
      configs: A list of CONFIGs to move.
      options: option flags
    """
    if len(configs) == 0:
        if options.force_sync:
            print 'No CONFIG is specified. You are probably syncing defconfigs.',
        else:
            print 'Neither CONFIG nor --force-sync is specified. Nothing will happen.',
    else:
        print 'Move ' + ', '.join(configs),
    print '(jobs: %d)\n' % options.jobs

    if options.git_ref:
        # Keep the ReferenceSource object referenced so its temporary
        # clone is not deleted while the slots still use it.
        reference_src = ReferenceSource(options.git_ref)
        reference_src_dir = reference_src.get_dir()
    else:
        reference_src_dir = None

    if options.defconfigs:
        defconfigs = get_matched_defconfigs(options.defconfigs)
    else:
        defconfigs = get_all_defconfigs()

    progress = Progress(len(defconfigs))
    slots = Slots(configs, options, progress, reference_src_dir)

    # Main loop to process defconfig files:
    #  Add a new subprocess into a vacant slot.
    #  Sleep if there is no available slot.
    for defconfig in defconfigs:
        while not slots.add(defconfig):
            while not slots.available():
                # No available slot: sleep for a while
                time.sleep(SLEEP_TIME)

    # wait until all the subprocesses finish
    while not slots.empty():
        time.sleep(SLEEP_TIME)

    print ''
    slots.show_failed_boards()
    slots.show_suspicious_boards()
def main():
    """Parse the command line and run the CONFIG move / cleanup steps."""
    try:
        cpu_count = multiprocessing.cpu_count()
    except NotImplementedError:
        cpu_count = 1

    parser = optparse.OptionParser()
    # Add options here
    parser.add_option('-c', '--color', action='store_true', default=False,
                      help='display the log in color')
    parser.add_option('-C', '--commit', action='store_true', default=False,
                      help='Create a git commit for the operation')
    parser.add_option('-d', '--defconfigs', type='string',
                      help='a file containing a list of defconfigs to move')
    parser.add_option('-n', '--dry-run', action='store_true', default=False,
                      help='perform a trial run (show log with no changes)')
    parser.add_option('-e', '--exit-on-error', action='store_true',
                      default=False,
                      help='exit immediately on any error')
    parser.add_option('-s', '--force-sync', action='store_true', default=False,
                      help='force sync by savedefconfig')
    parser.add_option('-S', '--spl', action='store_true', default=False,
                      help='parse config options defined for SPL build')
    parser.add_option('-H', '--headers-only', dest='cleanup_headers_only',
                      action='store_true', default=False,
                      help='only cleanup the headers')
    parser.add_option('-j', '--jobs', type='int', default=cpu_count,
                      help='the number of jobs to run simultaneously')
    parser.add_option('-r', '--git-ref', type='string',
                      help='the git ref to clone for building the autoconf.mk')
    parser.add_option('-y', '--yes', action='store_true', default=False,
                      help="respond 'yes' to any prompts")
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='show any build errors as boards are built')
    parser.usage += ' CONFIG ...'

    (options, configs) = parser.parse_args()

    if len(configs) == 0 and not options.force_sync:
        parser.print_usage()
        sys.exit(1)

    # prefix the option name with CONFIG_ if missing
    configs = [ config if config.startswith('CONFIG_') else 'CONFIG_' + config
                for config in configs ]

    check_top_directory()

    if not options.cleanup_headers_only:
        check_clean_directory()
        update_cross_compile(options.color)
        move_config(configs, options)

    if configs:
        cleanup_headers(configs, options)
        cleanup_extra_options(configs, options)

    if options.commit:
        subprocess.call(['git', 'add', '-u'])
        if configs:
            msg = 'Convert %s %sto Kconfig' % (configs[0],
                    'et al ' if len(configs) > 1 else '')
            msg += ('\n\nThis converts the following to Kconfig:\n %s\n' %
                    '\n '.join(configs))
        else:
            msg = 'configs: Resync with savedefconfig'
            msg += '\n\nRsync all defconfig files using moveconfig.py'
        subprocess.call(['git', 'commit', '-s', '-m', msg])
# Script entry point.
if __name__ == '__main__':
    main()
| dasuimao/U-BOOT-Tiny4412 | tools/moveconfig.py | Python | gpl-3.0 | 45,311 |
# Pimp is a highly interactive music player.
# Copyright (C) 2011 [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""To launch a mpd server, use :class:`MpdServer` or
:class:`MpdServerDaemon` classes.
:class:`MpdRequestHandler` manages a client connection. It parses
client requests and executes corresponding commands. Supported MPD
commands are specified with method
:func:`MpdRequestHandler.RegisterCommand`. Skeleton commands are
provided by the module :mod:`command_skel`. They can easily be overridden.
A client connection can begin with a password command. In this case, a
:class:`Frontend` is created by client password command. This object
is provided to commands treated during this session.
"""
import SocketServer
SocketServer.TCPServer.allow_reuse_address = True
import time
import re
import threading
import sys
#from pimp.core.playlist import *
#from pimp.core.player import *
#import pimp.core.db
import logging
from command_base import *
from command_skel import *
# NOTE(review): 'logger' is bound to the logging *module* itself, not to a
# Logger instance; module-level logging functions are used throughout.
logger = logging
logger.basicConfig(level=logging.INFO)
#logger.basicConfig(level=logging.DEBUG)
##################################
### Mpd supported return types ###
##################################
class MpdErrorMsgFormat(Exception):pass
class MpdCommandError(Exception):
    """Base error for a failed MPD command.

    Carries the offending command name and a human-readable message, and
    renders itself in the MPD wire-protocol ACK format via toMpdMsg().
    """

    def __init__(self, msg="Unknown error", command="command is not specified"):
        self.command = command
        self.msg = msg

    def toMpdMsg(self):
        template = "ACK [error@command_listNum] {%s} %s\n"
        return template % (self.command, self.msg)
class CommandNotSupported(MpdCommandError):
    """Raised for a known MPD command that this server does not implement."""

    def __init__(self, commandName):
        self.commandName = commandName

    def toMpdMsg(self):
        return ("ACK [error@command_listNum] {%s} Command '%s' not supported\n"
                % (self.commandName, self.commandName))
class CommandNotMPDCommand(MpdCommandError):
    """Raised when the client sends a word that is not an MPD command at all."""

    def __init__(self, commandName):
        self.commandName = commandName

    def toMpdMsg(self):
        return ("ACK [error@command_listNum] {%s} Command '%s' is not a MPD command\n"
                % (self.commandName, self.commandName))
class CommandNotImplemented(MpdCommandError):
    """Raised when a registered command has no working implementation yet."""

    def __init__(self, commandName, message=""):
        self.commandName = commandName
        self.message = message

    def toMpdMsg(self):
        return ("ACK [error@command_listNum] {%s} Command '%s' is not implemented (%s)\n"
                % (self.commandName, self.commandName, self.message))
class UserNotAllowed(MpdCommandError):
    """Raised when the current user lacks permission for a command."""

    def __init__(self, commandName, userName):
        self.commandName = commandName
        self.userName = userName

    def toMpdMsg(self):
        return ("ACK [error@command_listNum] {%s} User '%s' is not allowed to execute command %s\n"
                % (self.commandName, self.userName, self.commandName))
class PasswordError(MpdCommandError):
    """Raised when a password command does not match the expected format."""

    def __init__(self, pwd, format):
        self.pwd = pwd
        self.format = format

    def toMpdMsg(self):
        return ("ACK [error@command_listNum] {password} Password '%s' is not allowed a valid password. You should use a password such as '%s'\n"
                % (self.pwd, self.format))
class Frontend(object):
    """Identity of a connected client.

    To specify a frontend and user, use the MPD password command with the
    format 'frontend:user'.  If the password command is never used, the
    frontend stays 'unknown' and the user stays 'default'.
    """

    _DefaultUsername = 'default'
    username = _DefaultUsername
    _DefaultFrontend = 'unknown'
    frontend = _DefaultFrontend

    def set(self, frontendPassword):
        """Parse a 'frontend:user' password string and store both parts.

        Raises PasswordError when either part is empty.
        """
        # partition() always yields three parts; empty frontend/user means
        # the separator was missing or a part was blank.
        self.frontend, _sep, self.username = frontendPassword.partition(':')
        if not self.frontend or not self.username:
            logger.warning("Wrong password request '%s'" % frontendPassword)
            raise PasswordError(frontendPassword, "frontend:user")
        return True

    def get(self):
        """Get frontend information. Return a dict."""
        return {'username': self.username, 'frontend': self.frontend}

    def getUsername(self):
        return self.username

    @classmethod
    def GetDefaultUsername(cls):
        return cls._DefaultUsername
class MpdRequestHandler(SocketServer.StreamRequestHandler):
    """ Manage the connection from a mpd client. Each client
    connection instances this object."""

    # Playlist implementation instantiated per client connection.
    Playlist = MpdPlaylist
    # Player object shared by all connections (set via SetPlayer()).
    __player = None
    # Command table: implementation class (None = not supported yet),
    # allowed users, permission group, the MPD protocol version the command
    # appeared in, and which known clients require it.
    __SupportedCommands = {
        'currentsong'      : {'class':CurrentSong,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["sonata"]},
        'outputs'          : {'class':Outputs,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["gmpc"]},
        'status'           : {'class':Status,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["sonata"]},
        'stats'            : {'class':Stats,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'notcommands'      : {'class':NotCommands,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["gmpc"]},
        'commands'         : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'lsinfo'           : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'tagtypes'         : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'playlistinfo'     : {'class':PlaylistInfo,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'playlistid'       : {'class':PlaylistId,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'listplaylistinfo' : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'plchanges'        : {'class':PlChanges,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["sonata"]},
        'plchangesposid'   : {'class':PlChangesPosId,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'moveid'           : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'move'             : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'delete'           : {'class':Delete,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'deleteid'         : {'class':DeleteId,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'add'              : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'playid'           : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'play'             : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'password'         : {'class':Password,'users':['default'],'group':'read','mpdVersion':"0.12",'neededBy':["all"]},
        'clear'            : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'stop'             : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'seek'             : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'pause'            : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'next'             : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'previous'         : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'random'           : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'listplaylists'    : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'load'             : {'class':None,'users':[],'group':'write','mpdVersion':"0.12",'neededBy':None},
        'save'             : {'class':None,'users':[],'group':'write','mpdVersion':"0.12",'neededBy':None},
        'search'           : {'class':None,'users':[],'group':'read','mpdVersion':"0.12",'neededBy':None},
        'rm'               : {'class':None,'users':[],'group':'write','mpdVersion':"0.12",'neededBy':None},
        'setvol'           : {'class':None,'users':[],'group':'control','mpdVersion':"0.12",'neededBy':None}
        }
    def __init__(self, request, client_address, server):
        # Set per-connection state first: the base class __init__ runs the
        # whole request loop (it calls handle()) before returning.
        self.playlist = self.Playlist()
        self.frontend = Frontend()
        logger.debug( "Client connected (%s)" % threading.currentThread().getName())
        SocketServer.StreamRequestHandler.__init__(self, request, client_address, server)

    # NOTE(review): the string below looks like it was meant to be the
    # docstring of handle() but sits at class level, where it has no effect.
    """ Handle connection with mpd client. It gets client command,
    execute it and send a respond."""
    def handle(self):
        """Serve one client connection.

        Sends the MPD greeting, then repeatedly reads either a single
        command or a command_list block, executes it and sends back the
        aggregated response, until the client disconnects.
        """
        welcome = u"OK MPD 0.13.0\n"
        self.request.send(welcome.encode("utf-8"))
        while True:
            msg = ""
            try:
                cmdlist = None
                cmds = []
                # Collect one command, or a whole command_list block.
                while True:
                    self.data = self.rfile.readline().strip()
                    if len(self.data) == 0 : raise IOError  # To detect last EOF
                    if self.data == "command_list_ok_begin":
                        cmdlist = "list_ok"
                    elif self.data == "command_list_begin":
                        cmdlist = "list"
                    elif self.data == "command_list_end":
                        break
                    else:
                        cmds.append(self.data)
                    # Outside a command_list, a single command ends the read.
                    if not cmdlist: break
                logger.debug("Commands received from %s" % self.client_address[0])
                try:
                    for c in cmds:
                        logger.debug("Command '" + c + "'...")
                        msg = msg + self.__cmdExec(c)
                        # In list_ok mode each command gets its own ack line.
                        if cmdlist == "list_ok" : msg = msg + "list_OK\n"
                except MpdCommandError as e:
                    logger.info("Command Error: %s" % e.toMpdMsg())
                    msg = e.toMpdMsg()
                except : raise
                else:
                    msg = msg + "OK\n"
                logger.debug("Message sent:\n\t\t" + msg.replace("\n", "\n\t\t"))
                # NOTE(review): umsg is computed but never used.
                umsg = unicode(msg, "utf-8", errors='replace')
                self.request.send(msg.encode("utf-8"))
            except IOError,e:
                logger.debug("Client disconnected (%s)" % threading.currentThread().getName())
                break
    def __cmdExec(self, c):
        """ Execute mpd client command. Take a string, parse it and
        execute the corresponding server.Command function."""
        try:
            # Tokenize into a bare command word plus double-quoted arguments.
            # WARNING An argument cannot contains a '"'
            pcmd = [m.group() for m in re.compile('(\w+)|("([^"])+")').finditer(c)]
            cmd = pcmd[0]
            # Strip the surrounding quotes from each argument.
            args = [a[1:len(a)-1] for a in pcmd[1:]]
            logger.debug("Command executed : %s %s for frontend '%s'" % (cmd, args, self.frontend.get()))
            commandCls = self.__getCommandClass(cmd, self.frontend)
            msg = commandCls(args, playlist=self.playlist, frontend=self.frontend, player=self.__class__.__player).run()
        except MpdCommandError : raise
        except CommandNotSupported : raise
        except :
            # Anything else is an internal failure; log loudly and re-raise.
            logger.critical("Unexpected error on command %s (%s): %s" % (c, self.frontend.get(), sys.exc_info()[0]))
            raise
        logger.debug("Respond:\n\t\t" + msg.replace("\n", "\n\t\t"))
        return msg
    # Manage user rights

    @classmethod
    def RegisterCommand(cls, cls_cmd, users=['default']):
        """ Register a command. Make this command supported by a mpd
        server which use this request handler class. cls_cmd is a
        class which inherits from :class:`command_base.Command`."""
        # The given users are appended to (not replacing) the command's
        # existing allowed-user list.
        cls.__SupportedCommands[cls_cmd.GetCommandName()]['class'] = cls_cmd
        for a in users : cls.__SupportedCommands[cls_cmd.GetCommandName()]['users'].append(a)
@classmethod
def UnregisterCommand(cls,commandName):
""" Unregister a command"""
cls.__SupportedCommands[commandName]=None
@classmethod
def UserPermissionsCommand(cls,user,commandName=None,group=None):
""" Add permissions for user 'user'. If commandName is not specified, group should be specified. """
if commandName != None:
cls.__SupportedCommands[commandNames]['users'].append(user)
elif group != None:
for c in cls.__SupportedCommands.itervalues():
if c['group']==group:
c['users'].append(user)
else:
raise TypeError
    @classmethod
    def SupportedCommand(cls):
        """Return a list of command and allowed users."""
        # Only commands with a registered implementation class are listed.
        return ["%s\t\t%s"%(k,v['users']) for (k,v) in cls.__SupportedCommands.iteritems() if v['class']!=None ]
    def __getCommandClass(self, commandName, frontend):
        """ To get a command class to execute on received command
        string. This method raise supported command errors."""
        if not self.__SupportedCommands.has_key(commandName):
            logger.warning("Command '%s' is not a MPD command!" % commandName)
            raise CommandNotMPDCommand(commandName)
        elif self.__SupportedCommands[commandName]['class'] == None:
            if self.__SupportedCommands[commandName]['neededBy'] != None:
                # A known client depends on this still-unimplemented command.
                logger.critical("Command '%s' is needed for client(s) %s" % (commandName," ".join(self.__SupportedCommands[commandName]['neededBy'])))
            logger.warning("Command '%s' is not supported!" % commandName)
            raise CommandNotSupported(commandName)
        elif not (Frontend.GetDefaultUsername() in self.__SupportedCommands[commandName]['users']
                  or frontend.getUsername() in self.__SupportedCommands[commandName]['users']):
            raise UserNotAllowed(commandName,frontend.getUsername())
        else :
            return self.__SupportedCommands[commandName]['class']
    @classmethod
    def SetPlayer(cls, player):
        """To set player object. It is passed to executed commands."""
        cls.__player = player
    @classmethod
    def GetPlayer(cls):
        """To get player object associated to pympdserver."""
        return cls.__player
class MpdServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """ Create a MPD server. By default, a request is treated via
    :class:`MpdRequestHandler` class but you can specify an alternative
    request class with RequestHandlerClass argument."""

    requestHandler = MpdRequestHandler
    """ The class which treats client requests. Use this attribute to
    specify supported commands (see :class:`MpdRequestHandler`)."""

    def __init__(self, port=6600, RequestHandlerClass=MpdRequestHandler):
        # An empty host string means: listen on all interfaces.
        self.host, self.port = "", port
        self.requestHandler = RequestHandlerClass
        SocketServer.TCPServer.__init__(self, (self.host, self.port), RequestHandlerClass)

    def run(self):
        """Run MPD server in a blocking way."""
        logger.info("Mpd Server is listening on port " + str(self.port))
        self.serve_forever()
class MpdServerDaemon(MpdServer):
    """ Create a daemonized MPD server. See :class:`MpdServer` for
    more information. When a MpdServerDaemon object is created, a
    thread is started to treat client requests."""

    def __init__(self, port=6600, mpdRequestHandler=MpdRequestHandler):
        MpdServer.__init__(self, port, mpdRequestHandler)
        # Daemon thread: it must not keep the interpreter alive on exit.
        self.thread = threading.Thread(target=self.run)
        self.thread.setDaemon(True)
        self.thread.start()

    def quit(self):
        """Stop MPD server daemon."""
        # Fix: log-message typo ("Quiiting" -> "Quitting").
        logger.info("Quitting Mpd Server")
        self.shutdown()

    def wait(self, timeout=None):
        """ Return True if mpd is alive, False otherwise. This method
        is useful to catch Keyboard interrupt for instance."""
        if timeout is None:
            # Without a timeout, block until the server thread terminates.
            self.thread.join()
        else:
            self.thread.join(timeout)
        return self.thread.isAlive()
| larcher/python-mpd-server | mpdserver/mpdserver.py | Python | gpl-3.0 | 16,989 |
import numpy as np
from scipy.linalg import kron
from scipy.sparse import identity
from copy import deepcopy
from mps import MPS,ket2mps,overlap,expect
from dmrg import DMRGEngine
from testdmrg import lhgen,rhgen
# Two random 4-site state vectors (2-dimensional local space) and their
# mixed-canonical MPS counterparts with the division at site 2.
ket=np.random.rand(2**4)
mps=ket2mps(ket,2,4,cano='mixed',div=2)
ket2=np.random.rand(2**4)
mps2=ket2mps(ket2,2,4,cano='mixed',div=2)
# Single-site operator list: an S_z-like matrix on site 0, identities on
# the remaining three sites.
opl=[np.array([[0.5,0.],[0.,-0.5]]),np.array([[1.,0.],[0.,1.]]),np.array([[1.,0.],[0.,1.]]),np.array([[1.,0.],[0.,1.]])]
# OP is the same operator expanded to the full 2**4-dimensional space.
OP=deepcopy(opl[0])
for i in range(3):
    OP=kron(OP,np.array([[1.,0.],[0.,1.]]))
def test_tomps():
    """Run a small finite-system DMRG sweep and convert the result to an MPS."""
    dmrg=DMRGEngine(lhgen,rhgen)
    dmrg.finite(mwarmup=10,mlist=[10])
    dmps=dmrg.tomps()
    return dmps
def test_shape(mps):
    """Print the shape of every site tensor of *mps* (visual check only)."""
    for M in mps.Ms:
        print M.shape
def test_shift(mps):
    """Move the orthogonality center to the middle of the chain, re-check
    tensor shapes and canonicity, then shift the center back leftwards."""
    mps.shift(site=mps.L/2-1)
    test_shape(mps)
    test_cano(mps)
    mps.shift(direct='l',site=mps.L-2)
def test_cano(mps):
    """Check canonical conditions: each printed matrix should be numerically
    zero (A tensors left-orthonormal, B tensors right-orthonormal)."""
    for A in mps.As:
        print np.tensordot(A.conjugate().transpose(),A,axes=([1,2],[1,0]))-identity(A.shape[-1])
    for B in mps.Bs:
        print np.tensordot(B,B.conjugate().transpose(),axes=([1,2],[1,0]))-identity(B.shape[0])
def test_toket():
    """Contract the MPS back to a state vector; the printed difference
    against the original ket should be numerically zero."""
    tket=mps.toket()
    print tket-ket
def test_overlap():
    """Compare the MPS overlap <mps|mps2> with the direct vector inner
    product; the printed difference should be numerically zero."""
    mps.contract_s()
    mps2.contract_s()
    ovlap=overlap(mps,mps2)
    ov=ket.conjugate().transpose().dot(ket2)
    print ovlap-ov
def test_expect():
    """Compare the MPS expectation value of the module-level operator list
    `opl` with the dense <ket|OP|ket>; uses module-level mps/ket/opl/OP."""
    mps.contract_s()
    print expect(mps,opl)
    print ket.conjugate().transpose().dot(OP).dot(ket)
if __name__=='__main__':
    #dmps=test_tomps()
    #test_shape(dmps)
    #test_cano(dmps)
    #test_toket()
    #test_overlap()
    #test_shift(mps)
    # test_expect takes no parameters (it closes over the module-level
    # mps/opl); calling it with arguments raised a TypeError.
    test_expect()
| Lynn-015/NJU_DMRG | dmrg/testmps.py | Python | mit | 1,886 |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
# The model for the analysis
from . model import canonical
from . import regionanalysis
from . import transferfunctions
from . import constraintbuilder
from . import dataflow
class HeapInformationProvider(object):
	"""Adapter between the CPA store graph / region analysis results and the
	shape analysis: maps AST load/store/index nodes to canonical slot names."""
	def __init__(self, storeGraph, regions):
		self.storeGraph = storeGraph
		self.regions = regions
	def loadSlotName(self, node):
		# Slot read by a load op = first element of its annotated read set.
		return node.annotation.reads[0][0]
		#return (node.fieldtype, node.name.object)
	def storeSlotName(self, node):
		# Slot written by a store op = first element of its modify set.
		return node.annotation.modifies[0][0]
		#return (node.fieldtype, node.name.object)
	def indexSlotName(self, lcl, i):
		# Build an 'Array' field name for constant index i and resolve it on
		# the first reference of lcl (only the first ref is consulted).
		iobj = self.storeGraph.extractor.getObject(i)
		fieldName = self.storeGraph.canonical.fieldName('Array', iobj)
		for ref in lcl.annotation.references[0]:
			return ref.field(fieldName, ref.region.group.regionHint)
class OrderConstraints(object):
	"""Numbers every dataflow constraint reachable from the entry code via a
	depth-first walk, then sorts each observer list by that priority so the
	solver visits constraints in a deterministic order."""
	def __init__(self, sys, entryCode):
		self.sys = sys
		self.entryCode = entryCode
	def processConstraint(self, c):
		# Depth-first, visit-once numbering: constraints downstream of c's
		# output point are numbered before c itself receives its priority.
		if c in self.processed: return
		self.processed.add(c)
		point = c.outputPoint
		for next in self.sys.environment.observers.get(point, ()):
			self.processConstraint(next)
		c.priority = self.uid
		self.uid += 1
	def process(self):
		self.uid = 1
		self.processed = set()
		# Seed the walk from the call point of every entry code object.
		for code in self.entryCode:
			callPoint = self.sys.constraintbuilder.codeCallPoint(code)
			for c in self.sys.environment.observers.get(callPoint, ()):
				self.processConstraint(c)
		self.sort()
	def sort(self):
		# Sort each observer list by ascending priority (reverse=False kept
		# explicit to document the intended direction).
		priority = lambda c: c.priority
		for observers in self.sys.environment.observers.itervalues():
			observers.sort(reverse=False, key=priority)
class RegionBasedShapeAnalysis(object):
	"""Driver for the region-based shape analysis: owns the dataflow
	worklist/environment, builds constraints for each code object, seeds the
	analysis with entry-point arguments and allocation sites, and reports
	summaries/statistics."""
	def __init__(self, extractor, cpacanonical, info):
		self.extractor = extractor
		self.canonical = canonical.CanonicalObjects()
		self.worklist = dataflow.Worklist()
		self.environment = dataflow.DataflowEnvironment()
		self.constraintbuilder = constraintbuilder.ShapeConstraintBuilder(self, self.processCode)
		self.cpacanonical = cpacanonical
		self.info = info
		# Code objects queued for constraint building / already seen.
		self.pending = set()
		self.visited = set()
		# Iteration budget handed to the dataflow solver per process() call.
		self.limit = 20000
		# Objects whose analysis hit the iteration limit (results unreliable).
		self.aborted = set()
	def process(self, trace=False, limit=0):
		# Pump the worklist; on hitting the iteration limit, discard the
		# remaining work so subsequent seeding passes can still run.
		success = self.worklist.process(self, trace, limit)
		if not success:
			print
			print "ITERATION LIMIT HIT"
			self.worklist.worklist[:] = []
		return success
	def processCode(self, code):
		# Queue each code object for constraint building exactly once.
		if code not in self.visited:
			self.pending.add(code)
			self.visited.add(code)
	def build(self):
		# Drain the pending queue; constraint building may queue more code.
		while self.pending:
			current = self.pending.pop()
			print "BUILD", current
			self.constraintbuilder.process(current)
	def buildStructures(self, entryCode):
		# Build constraints for all code reachable from the entry points,
		# then fix a deterministic firing order for the constraints.
		for code in entryCode:
			self.processCode(code)
		self.build()
		order = OrderConstraints(self, entryCode)
		order.process()
	def addEntryPoint(self, code, selfobj, args):
		# Seed the analysis with the external self object and arguments of
		# one entry point, solving to a fixed point after each binding.
		self.processCode(code)
		self.build()
		callPoint = self.constraintbuilder.codeCallPoint(code)
		# TODO generate all possible aliasing configurations?
		self.bindExisting(selfobj, 'self', callPoint)
		sucess = self.process(trace=True)
		if not sucess: self.aborted.add(selfobj)
		for i, arg in enumerate(args):
			self.bindExisting(arg, i, callPoint)
			sucess = self.process(trace=True, limit=self.limit)
			if not sucess: self.aborted.add(arg)
	def bindExisting(self, obj, p, callPoint):
		# Build the initial shape configuration for an externally supplied
		# object bound to parameter slot p, and merge it at the call point.
		slot = self.canonical.localSlot(p)
		expr = self.canonical.localExpr(slot)
		refs = self.canonical.refs(slot)
		type_ = self.cpacanonical.externalType(obj)
		region = None
		entry = refs
		current = refs
		# External object: referenced from outside, not allocated here.
		externalReferences = True
		allocated = False
		hits = (expr,)
		misses = ()
		index = self.canonical.configuration(type_, region, entry, current, externalReferences, allocated)
		paths = self.canonical.paths(hits, misses)
		secondary = self.canonical.secondary(paths, externalReferences)
		print
		print "BIND"
		print callPoint
		print index
		print secondary
		print
		self.environment.merge(self, callPoint, None, index, secondary)
	def handleAllocations(self):
		# Seed a configuration for every object allocated at each recorded
		# allocation point, solving to a fixed point after each seed.
		for (code, op), (point, target) in self.constraintbuilder.allocationPoint.iteritems():
			print code
			print op
			print '\t', point
			print '\t', target
			slot = self.canonical.localSlot(target)
			expr = self.canonical.localExpr(slot)
			refs = self.canonical.refs(slot)
			for obj in op.annotation.allocates[0]:
				print '\t\t', obj
				type_ = obj
				region = None
				entry = refs
				current = refs
				# Freshly allocated: no external references yet.
				externalReferences = False
				allocated = True
				hits = (expr,)
				misses = ()
				index = self.canonical.configuration(type_, region, entry, current, externalReferences, allocated)
				paths = self.canonical.paths(hits, misses)
				secondary = self.canonical.secondary(paths, externalReferences)
				self.environment.merge(self, point, None, index, secondary)
				sucess = self.process(trace=True, limit=self.limit)
				if not sucess: self.aborted.add(obj)
			print
	def summarize(self):
		# Aggregate, per object and per field, the maximum reference counts
		# seen in any configuration, plus whether a field is ever shared.
		maxObjRefs = {}
		maxFieldRefs = {}
		fieldShares = {}
		for point, context, index in self.environment._secondary.iterkeys():
			for field, count in index.currentSet.counts.iteritems():
				maxObjRefs[index.object] = max(maxObjRefs.get(index.object, 0), count)
				maxFieldRefs[field] = max(maxFieldRefs.get(field, 0), count)
				fieldShares[field] = fieldShares.get(field, False) or count > 1 or len(index.currentSet.counts) > 1
		print
		print "Obj Refs"
		for obj, count in maxObjRefs.iteritems():
			print obj, count
		print
		for obj, count in maxFieldRefs.iteritems():
			print obj, count, fieldShares[obj]
	def dumpStatistics(self):
		# Size/efficiency counters for the solved system.
		print "Entries:", len(self.environment._secondary)
		print "Unique Config:", len(self.canonical.configurationCache)
		print "Max Worklist:", self.worklist.maxLength
		print "Steps:", "%d/%d" % (self.worklist.usefulSteps, self.worklist.steps)
import collections
def evaluate(compiler):
	"""Entry point: run the region analysis, build and solve the shape
	analysis for every entry point, then dump per-object reference-count
	summaries and solver statistics."""
	with compiler.console.scope('shape analysis'):
		regions = regionanalysis.evaluate(compiler.extractor, compiler.interface.entryPoint, compiler.liveCode)
		rbsa = RegionBasedShapeAnalysis(compiler.extractor, compiler.storeGraph.canonical, HeapInformationProvider(compiler.storeGraph, regions))
		rbsa.buildStructures(compiler.interface.entryCode())
		for ep in compiler.interface.entryPoint:
			rbsa.addEntryPoint(ep.code, ep.selfarg.getObject(compiler.extractor), [arg.getObject(compiler.extractor) for arg in ep.args])
		rbsa.handleAllocations()
		rbsa.dumpStatistics()
		# Group surviving (code, reference-count) configurations by object,
		# skipping objects whose analysis was aborted.
		lut = collections.defaultdict(set)
		for point, context, index in sorted(rbsa.environment._secondary.iterkeys()):
			#if index.currentSet.containsParameter(): continue
			if index.object in rbsa.aborted: continue
			lut[index.object].add((point[0], index.currentSet))
		for obj, indexes in lut.iteritems():
			print obj
			prevCode = None
			for code, rc in sorted(indexes):
				if rc and not rc.containsParameter():
					if prevCode != code:
						print '\t', code
						prevCode = code
					print '\t\t', rc
			print
		print
		print "ABORTED"
		for obj in rbsa.aborted:
			print '\t', obj
		rbsa.summarize()
		print
		rbsa.dumpStatistics()
| ncbray/pystream | bin/analysis/shape/__init__.py | Python | apache-2.0 | 7,687 |
#!/usr/bin/env python
# -*- coding: <utf-8> -*-
"""
This file is part of Spartacus project
Copyright (C) 2016 CSE
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from CapuaEnvironment.Instruction.Instruction import Instruction
from CapuaEnvironment.IntructionFetchUnit.FormDescription import formDescription
from CapuaEnvironment.MemoryArray.MemoryArray import MemoryArray
from Configuration.Configuration import MEMORY_START_AT
__author__ = "CSE"
__copyright__ = "Copyright 2015, CSE"
__credits__ = ["CSE"]
__license__ = "GPL"
__version__ = "2.1"
__maintainer__ = "CSE"
__status__ = "Dev"
class InstructionFetchUnit:
    """
    The InstructionFetchUnit is used to extract actual instruction from memory.
    It will build instruction using data extracted from the MemoryArray. In case the
    access to memory is not allowed, memory cells will cause exception to be raised.
    Since it is available, it is also used in the debugger in order to provide code
    disassembling functionality.
    """
    # Backing MemoryArray; always set by __init__ (None is rejected there).
    _memoryArray = None
    def __init__(self, memoryArray: MemoryArray=None):
        """
        Simply initialize an instance of this class... Nothing much to say. Actual logic is elsewhere.
        :param memoryArray: This has to be a non None MemoryArray
        """
        if memoryArray is None:
            raise RuntimeError("Capua InstructionFetchUnit init error")
        self._memoryArray = memoryArray
    def fetchInstructionAtAddress(self, address=MEMORY_START_AT):
        """
        This is the high level fetching method for this class. It is the only one
        that should be called by the user. Note that the address is NOT validated here
        since the actual validation logic lays in the MemoryIOController, MemoryArray
        and MemoryCell code
        :param address: int, The address where the fetch needs to happen
        :return: Instruction, nextInstructionAddress
        """
        # Two-step fetch: decode the form first (it carries the encoded
        # length), then read the whole instruction of that length.
        instructionForm = self._fetchInstructionFormAtAddress(address)
        instruction = self._fetchInstructionAtAddressUsingForm(address, instructionForm)
        nextInstructionAddress = address + instructionForm["length"]
        return instruction, nextInstructionAddress
    def _fetchInstructionFormAtAddress(self, address=MEMORY_START_AT):
        """
        This will fetch the first byte of the instruction at given address. Once fetched,
        it uses that first byte to determine instruction format. Returns information on
        the instruction format so that the whole instruction can be extracted from memory
        :param address: int, address of the instruction whose form is needed
        :return: dict, one entry of formDescription
        :raises ValueError: if the first byte matches no known form
        """
        instructionForm = None
        mc = self._memoryArray.readMemory(address, 1)[0]
        value = mc & 0xff  # Making sure we have an 8 bits value
        # Extracting type and instruction codes: high nibble = type code,
        # low nibble = instruction code.
        typeCode = (value & 0b11110000) >> 4
        instructionCode = value & 0b00001111
        for form in formDescription:
            if typeCode == formDescription[form]["typeCode"]:
                if instructionCode in formDescription[form]["listing"]:
                    # We found the correct form!
                    instructionForm = formDescription[form]
                    break
        if instructionForm is None:
            # If we are here, no instruction were found that are corresponding
            # a user is trying to execute an invalid instruction!
            raise ValueError("Invalid instruction detected at address {}".format(hex(address)))
        return instructionForm
    def _fetchInstructionAtAddressUsingForm(self, address=MEMORY_START_AT, form=None):
        """
        This will fetch the complete instruction information and build an instruction instance
        using the extracted data. The instruction instance is returned to calling method
        :param address: Address of the instruction that needs to be fetched
        :param form: The form of the instruction that requires fetching
        :return: An instance of the Instruction class
        """
        instruction = None
        # First, we get the memory bits that we need!
        memorySlice = self._memoryArray.readMemory(address, form["length"])
        # Now, build a big number (as in real big) with the extracted memory
        # by concatenating the cells big-endian style, 8 bits at a time.
        binaryInstruction = 0
        for mc in memorySlice:
            binaryInstruction <<= 8
            binaryInstruction |= mc & 0xff  # Only 8 bits can be used at a time
        # binaryInstruction is now a big number representing the instruction
        # Time to create the instruction using this big number!
        # Parsing of the details of the instruction will happen in the Instruction class
        instruction = Instruction(binaryInstruction, form)
        return instruction
| CommunicationsSecurityEstablishment/spartacus | CapuaEnvironment/IntructionFetchUnit/InstructionFetchUnit.py | Python | gpl-2.0 | 5,388 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module that provides the module tree node :class:`APINode`.
This class will load the module identified by ``name`` and recursively build a
tree with all it's submodules and subpackages. In the process, each node
analyze and fetch the public API of that module.
``name`` can be any node, like the root package, or any subpackage or submodule
and a tree will be built from there. ``name`` must follow the standard
"dot notation" for importing a module.
This class will not assume any special naming, or perform any complex analysis
to determine what must be in the public interface. This is because it is not
only a difficult problem, but it involves analyzing deeply the namespace of the
module which can be quite expensive.
In general it is very difficult to determine in a module namespace what
elements are private or public declared locally, private or public but declared
in another module and brought into the module local namespace
(``from x import y``), third party library, Python standard library, etc. At
the end, any algorithm that tries to determine this will eventually fail to
meet the requirements or expectations of the developer, leaving false positives
or removing elements expected to be present in the public API.
For example, a common scenario is that some modules, specially package entry
points ``__init__.py``, can be setup to expose the public API of their sibling
modules, possible causing several objects to be identified as part of the
public API of both modules.
Because of this the approach taken by this module follows the rule in PEP20
"Explicit is better than implicit". In consequence, the node will consider
elements as public if they are explicitly listed in the ``__api__`` or
``__all__`` variables. It is up to the developer to list the elements that must
be published in the public API.
``__api__`` is a special variable introduced by this module, and it exists for
situations where, for whatever reason, the developer doesn't want to list in the
``__all__`` variable an element that needs to be published in the public API.
This class will extract all elements identified in ONE of those listings (not
the union), with ``__api__`` having the precedence. If none of those variables
exists in the module then it will be assumed that no public API exists for that
module and no further actions will be taken.
If any of those variables exists this class will iterate all elements listed in
them and will catalog them in four categories:
- Functions.
- Exceptions.
- Classes.
- Variables.
Being Variables the default if it cannot be determined that an element belongs
to any of other categories.
"""
from logging import getLogger
from pkgutil import iter_modules
from traceback import format_exc
from importlib import import_module
from collections import OrderedDict
from inspect import isclass, isfunction
log = getLogger(__name__)
class APINode(object):
    """
    Tree node class for module introspection.
    :param str name: Name of the module to build the tree from. It must follow
     the "dot notation" of the import mechanism.
    :param dict directory: Directory to store the index of all the modules.
     If None, the default, the root node will create one and pass it to the
     subnodes.
    **Attributes:**
    :var name: Name of the current module.
    :var subname: Last part of the name of this module. For example if name is
     ``my.module.another`` the subname will be ``another``.
    :var directory: Directory of the tree. This is a :py:class:`OrderedDict`
     that will register all modules name with it's associated node
     :class:`APINode`. All nodes of a tree share this index and thus
     the whole tree can be queried from any node.
    :var module: The loaded module.
    :var subnodes: A list of :class:`APINode` with all child submodules
     and subpackages.
    :var subnodes_failed: A list of submodules and subpackages names that
     failed to import.
    **Public API categories:**
    :var functions: A :py:class:`OrderedDict` of all functions found in the
     public API of the module.
    :var classes: A :py:class:`OrderedDict` of all classes found in the
     public API of the module.
    :var exceptions: A :py:class:`OrderedDict` of all exceptions found in the
     public API of the module.
    :var variables: A :py:class:`OrderedDict` of all other elements found in
     the public API of the module.
    In all categories the order on which the elements are listed is preserved.
    """
    def __init__(self, name, directory=None):
        self.module = import_module(name)
        self.name = name
        self.subname = name.split('.')[-1]
        self.functions = OrderedDict()
        self.classes = OrderedDict()
        self.exceptions = OrderedDict()
        self.variables = OrderedDict()
        self.api = OrderedDict((
            ('functions', self.functions),
            ('classes', self.classes),
            ('exceptions', self.exceptions),
            ('variables', self.variables),
        ))
        self.subnodes = []
        self.subnodes_failed = []
        self.directory = OrderedDict()
        if directory is not None:
            self.directory = directory
        self._relevant = None
        # Now that all node public attributes exists and module was imported
        # register itself in the directory
        self.directory[self.name] = self
        # Check if package and iterate over subnodes
        if hasattr(self.module, '__path__'):
            for _, subname, ispkg in iter_modules(
                    self.module.__path__, self.module.__name__ + '.'):
                log.info('Recursing into {}'.format(subname))
                try:
                    subnode = APINode(subname, self.directory)
                    self.subnodes.append(subnode)
                except Exception:
                    log.error('Failed to import {}'.format(subname))
                    log.error(format_exc())
                    self.subnodes_failed.append(subname)
        # Fetch all public objects: first matching listing wins, with
        # __api__ taking precedence over __all__ (see module docstring).
        public = OrderedDict()
        for public_key in ['__api__', '__all__']:
            if not hasattr(self.module, public_key):
                continue
            for obj_name in getattr(self.module, public_key):
                if not hasattr(self.module, obj_name):
                    log.warning(
                        'Module {} doesn\'t have a element {}'.format(
                            self.name, obj_name
                        )
                    )
                    continue
                public[obj_name] = getattr(self.module, obj_name)
            break
        # Categorize objects
        for obj_name, obj in public.items():
            if isclass(obj):
                if issubclass(obj, Exception):
                    self.exceptions[obj_name] = obj
                    continue
                self.classes[obj_name] = obj
                continue
            if isfunction(obj):
                self.functions[obj_name] = obj
                continue
            self.variables[obj_name] = obj
        # Flag to mark if this branch is relevant
        # For self._relevant, None means undertermined
        if self.is_root():
            self.is_relevant()
    def has_public_api(self):
        """
        Check if this node has a public API.
        :rtype: bool
        :return: True if any category has at least one element.
        """
        return any(self.api.values())
    def is_leaf(self):
        """
        Check if the current node is a leaf in the tree.
        A leaf node not necessarily is a module, it can be a package without
        modules (just the entry point ``__init__.py``).
        :rtype: bool
        :return: True if no other subnodes exists for this node.
        """
        return not self.subnodes
    def is_root(self):
        """
        Check if the current node is the root node.
        :rtype: bool
        :return: True if the current node is the root node.
        """
        # The root registers itself in the directory before any subnode,
        # so the first key is the root's name.
        for key in self.directory.keys():
            return key == self.name
        raise Exception('Empty directory!')
    def is_relevant(self):
        """
        Check if this branch of the tree is relevant.
        A branch is relevant if the current node has a public API or if any of
        its subnodes is relevant (in order to reach relevant nodes).
        Relevancy is determined at initialization by the root node.
        :rtype: bool
        :return: True if the current node is relevant.
        """
        if self._relevant is not None:
            return self._relevant
        relevant = False
        if self.has_public_api() or \
                any(s.is_relevant() for s in self.subnodes):
            relevant = True
        self._relevant = relevant
        return self._relevant
    def depth(self):
        """
        Get the depth of the current node in the tree.
        :rtype: int
        :return: The depth of the node. For example, for node ``my.add.foo``
         the depth is 3.
        """
        return len(self.name.split('.'))
    def get_module(self, name):
        """
        Get a module node by it's name.
        This is just a helper that does lookup on the directory index.
        :rtype: :class:`APINode` or None
        :return: The module node identified by ``name`` in the tree. ``None``
         if the name doesn't exists.
        """
        return self.directory.get(name, None)
    def walk(self):
        """
        Traverse the tree top-down.
        :return: This method will yield tuples ``(node, [leaves])`` for each
         node in the tree.
        """
        # PEP 479: raising StopIteration inside a generator is converted to
        # a RuntimeError on Python 3.7+; a plain return ends the generator.
        if self.is_leaf():
            return
        yield (self, [n for n in self.subnodes if n.is_leaf()])
        for subnode in [n for n in self.subnodes if not n.is_leaf()]:
            for step in subnode.walk():
                yield step
    def __iter__(self):
        # walk() must be *called*: __iter__ has to return an iterator object,
        # and returning the bound method made iter(node) raise TypeError.
        return self.walk()
    def tree(self, level=0, fullname=True):
        """
        Pretty print the subtree at the current node.
        For example, for the module ``confspec``:
        ::
           confspec
               confspec.manager [c]
               confspec.options [c]
               confspec.providers [c, v]
                   confspec.providers.dict [c]
                   confspec.providers.ini [c]
                   confspec.providers.json [c]
               confspec.utils [f]
               confspec.validation [f]
        The tags at the right of the name shows what kind of elements are
        present in the public interfaces of those modules.
        :param int level: Indentation level.
        :param bool fullname: Plot the full name of the module or just it's
         subname.
        """
        name = [('    ' * level)]
        if fullname:
            name.append(self.name)
        else:
            name.append(self.subname)
        tags = []
        for tag, category in zip(['f', 'c', 'e', 'v'], self.api.values()):
            if category:
                tags.append(tag)
        if tags:
            name.append(' [{}]'.format(', '.join(tags)))
        output = [''.join(name)]
        for subnode in self.subnodes:
            output.append(subnode.tree(level=level + 1, fullname=fullname))
        return '\n'.join(output)
    def __str__(self):
        return self.tree()
    def __repr__(self):
        return self.name
# Explicit public API of this module; ``__api__`` is intentionally empty
# (the ``__api__`` convention is described in the module docstring).
__all__ = ['APINode']
__api__ = []
| carlos-jenkins/autoapi | lib/autoapi/apinode.py | Python | apache-2.0 | 12,082 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import tests_common
@utx.skipIfMissingFeatures(["NPT", "LENNARD_JONES"])
class IntegratorNPT(ut.TestCase):
    """This compares pressure and compressibility of a LJ system against
    expected values."""
    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    def setUp(self):
        # Fresh box size, time step and Verlet skin for every test.
        self.system.box_l = [5] * 3
        self.system.time_step = 0.01
        self.system.cell_system.skin = 0.25
    def tearDown(self):
        # Clear particles and restore the default integrator/thermostat so
        # the class-level system does not leak state between tests.
        self.system.part.clear()
        self.system.thermostat.turn_off()
        self.system.integrator.set_vv()
    def test_compressibility(self):
        system = self.system
        system.box_l = [5.86326165] * 3
        # Pre-equilibrated LJ configuration; assumes columns 0-2 are
        # positions and 3-5 velocities -- matches the part.add call below.
        data = np.genfromtxt(tests_common.abspath("data/npt_lj_system.data"))
        p_ext = 2.0
        system.part.add(pos=data[:, :3], v=data[:, 3:])
        system.non_bonded_inter[0, 0].lennard_jones.set_params(
            epsilon=1, sigma=1, cutoff=1.12246, shift=0.25)
        system.thermostat.set_npt(kT=1.0, gamma0=2, gammav=0.004, seed=42)
        system.integrator.set_isotropic_npt(ext_pressure=p_ext, piston=0.0001)
        # Short equilibration run before sampling starts.
        system.integrator.run(800)
        avp = 0
        n = 30000
        skip_p = 8
        ls = np.zeros(n)
        for t in range(n):
            system.integrator.run(2)
            if t % skip_p == 0:
                avp += system.analysis.pressure()['total']
            ls[t] = system.box_l[0]
        avp /= (n / skip_p)
        Vs = np.array(ls)**3
        # Compressibility estimated from volume fluctuations: var(V)/<V>.
        compressibility = np.var(Vs) / np.average(Vs)
        self.assertAlmostEqual(avp, p_ext, delta=0.02)
        self.assertAlmostEqual(compressibility, 0.32, delta=0.02)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    ut.main()
| espressomd/espresso | testsuite/python/integrator_npt_stats.py | Python | gpl-3.0 | 2,473 |
"""
The Beer Distribution Problem for the PuLP Modeller
Authors: Antony Phillips, Dr Stuart Mitchell 2007
"""
# Import PuLP modeler functions
from pulp import *
# Creates a list of all the supply nodes
Warehouses = ["A", "B"]
# Creates a dictionary for the number of units of supply for each supply node
supply = {"A": 1000,
"B": 4000}
# Creates a list of all demand nodes
Bars = ["1", "2", "3", "4", "5"]
# Creates a dictionary for the number of units of demand for each demand node
demand = {"1":500,
"2":900,
"3":1800,
"4":200,
"5":700,}
# Creates a list of costs of each transportation path
costs = [ #Bars
#1 2 3 4 5
[2,4,5,2,1],#A Warehouses
[3,1,3,2,3] #B
]
# The cost data is made into a dictionary
costs = makeDict([Warehouses,Bars],costs,0)
# Creates the 'prob' variable to contain the problem data
prob = LpProblem("Beer Distribution Problem",LpMinimize)
# Creates a list of tuples containing all the possible routes for transport
Routes = [(w,b) for w in Warehouses for b in Bars]
# A dictionary called 'Vars' is created to contain the referenced variables(the routes)
vars = LpVariable.dicts("Route",(Warehouses,Bars),0,None,LpInteger)
# The objective function is added to 'prob' first
prob += lpSum([vars[w][b]*costs[w][b] for (w,b) in Routes]), "Sum_of_Transporting_Costs"
# The supply maximum constraints are added to prob for each supply node (warehouse)
for w in Warehouses:
prob += lpSum([vars[w][b] for b in Bars])<=supply[w], "Sum_of_Products_out_of_Warehouse_%s"%w
# The demand minimum constraints are added to prob for each demand node (bar)
for b in Bars:
prob += lpSum([vars[w][b] for w in Warehouses])>=demand[b], "Sum_of_Products_into_Bar%s"%b
# The problem data is written to an .lp file
prob.writeLP("BeerDistributionProblem.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
# The status of the solution is printed to the screen
print "Status:", LpStatus[prob.status]
# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
print v.name, "=", v.varValue
# The optimised objective function value is printed to the screen
print "Total Cost of Transportation = ", value(prob.objective)
| ruxkor/pulp-or | examples/BeerDistributionProblem.py | Python | mit | 2,318 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import find_packages
from setuptools import setup
import py_swf
def _read_text(path):
    """Return the contents of a text file, closing the handle promptly.

    A bare ``open(path).read()`` leaks the file object until garbage
    collection and emits ResourceWarning on modern Pythons.
    """
    with open(path) as f:
        return f.read()


readme = _read_text('README.rst')
doclink = """
Documentation
-------------
The full documentation is at http://py-swf.readthedocs.io/en/latest/"""
history = _read_text('HISTORY.rst').replace('.. :changelog:', '')

setup(
    # py2 + setuptools asserts isinstance(name, str) so this needs str()
    name=str('py-swf'),
    version=py_swf.__version__,
    description="A SWF client library that makes things easy for building workflow logic",
    long_description=readme + '\n\n' + doclink + '\n\n' + history,
    author=py_swf.__author__,
    author_email=py_swf.__email__,
    url='http://py-swf.readthedocs.io/en/latest/',
    packages=find_packages(exclude=['tests*', 'testing']),
    install_requires=[
        'boto3==1.2.1',
        'botocore==1.3.7',
    ],
    zip_safe=False,
    keywords=['py_swf', 'swf', 'amazon', 'workflow'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
| quantsini/pyswf | setup.py | Python | mit | 1,452 |
#/***************************************************************************
# * Copyright (c) Victor Titov (DeepSOIC) *
# * ([email protected]) 2016 *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ***************************************************************************/
__title__="BOPTools.JoinAPI module"
__author__ = "DeepSOIC"
__url__ = "http://www.freecadweb.org"
__doc__ = "JoinFeatures functions that operate on shapes."
import Part
from . import ShapeMerge
from . import generalFuseIsAvailable
from .GeneralFuseResult import GeneralFuseResult
from .Utils import compoundLeaves
def shapeOfMaxSize(list_of_shapes):
    """shapeOfMaxSize(list_of_shapes): finds the shape that has the largest mass in the list and returns it. The shapes in the list must be of same dimension."""
    # Verify the shapes are mutually comparable (raises if mixed dimension).
    ShapeMerge.dimensionOfShapes(list_of_shapes)
    tol = 1e-8  # relative tolerance used to detect size ties
    best_size = -1e100  # largest mass encountered so far
    best_shape = None   # shape owning best_size
    n_largest = 0       # number of shapes matching best_size within tol
    for candidate in list_of_shapes:
        mass = abs(Part.cast_to_shape(candidate).Mass)
        if mass > best_size * (1 + tol):
            best_size, best_shape, n_largest = mass, candidate, 1
        elif (1 - tol) * best_size <= mass <= (1 + tol) * best_size:
            n_largest = n_largest + 1
    if n_largest > 1:
        raise ValueError("There is more than one largest piece!")
    return best_shape
def connect(list_of_shapes, tolerance = 0.0):
    """connect(list_of_shapes, tolerance = 0.0): connects solids (walled objects), shells and
    wires by throwing off small parts that result when splitting them at intersections.
    Compounds in list_of_shapes are automatically exploded, so self-intersecting compounds
    are valid for connect."""
    # explode all compounds before GFA.
    new_list_of_shapes = []
    for sh in list_of_shapes:
        new_list_of_shapes.extend( compoundLeaves(sh) )
    list_of_shapes = new_list_of_shapes
    #test if shapes are compatible for connecting
    dim = ShapeMerge.dimensionOfShapes(list_of_shapes)
    if dim == 0:
        raise TypeError("Cannot connect vertices!")
    # Nothing to split against: return the input as a compound.
    if len(list_of_shapes) < 2:
        return Part.makeCompound(list_of_shapes)
    if not generalFuseIsAvailable(): #fallback to legacy
        # Pairwise legacy connect, accumulating left to right.
        result = list_of_shapes[0]
        for i in range(1, len(list_of_shapes)):
            result = connect_legacy(result, list_of_shapes[i], tolerance)
        return result
    pieces, map = list_of_shapes[0].generalFuse(list_of_shapes[1:], tolerance)
    ao = GeneralFuseResult(list_of_shapes, (pieces, map))
    ao.splitAggregates()
    #print len(ao.pieces)," pieces total"
    keepers = []
    all_danglers = [] # debug
    #add all biggest dangling pieces
    # A "dangler" is a piece that originates from exactly one source shape;
    # only the largest dangler of each source is kept.
    for src in ao.source_shapes:
        danglers = [piece for piece in ao.piecesFromSource(src) if len(ao.sourcesOfPiece(piece)) == 1]
        all_danglers.extend(danglers)
        largest = shapeOfMaxSize(danglers)
        if largest is not None:
            keepers.append(largest)
    touch_test_list = Part.Compound(keepers)
    #add all intersection pieces that touch danglers, triple intersection pieces that touch duals, and so on
    # Each round only tests against the pieces added in the previous round;
    # the loop stops at the first overlap level that contributes nothing.
    for ii in range(2, ao.largestOverlapCount()+1):
        list_ii_pieces = [piece for piece in ao.pieces if len(ao.sourcesOfPiece(piece)) == ii]
        keepers_2_add = []
        for piece in list_ii_pieces:
            if ShapeMerge.isConnected(piece, touch_test_list):
                keepers_2_add.append(piece)
        if len(keepers_2_add) == 0:
            break
        keepers.extend(keepers_2_add)
        touch_test_list = Part.Compound(keepers_2_add)
    #merge, and we are done!
    #print len(keepers)," pieces to keep"
    return ShapeMerge.mergeShapes(keepers)
def connect_legacy(shape1, shape2, tolerance = 0.0):
    """connect_legacy(shape1, shape2, tolerance = 0.0): alternative implementation of
    connect, without use of generalFuse. Slow. Provided for backwards compatibility, and
    for older OCC."""
    if tolerance > 0.0:
        import FreeCAD as App
        App.Console.PrintWarning("connect_legacy does not support tolerance (yet).\n")
    # keep only the largest piece of each one-sided cut, then glue the two
    # pieces back together with the common volume
    piece1 = shapeOfMaxSize(shape1.cut(shape2).childShapes())
    piece2 = shapeOfMaxSize(shape2.cut(shape1).childShapes())
    return piece1.multiFuse([piece2, shape2.common(shape1)])
#def embed(shape_base, shape_tool, tolerance = 0.0):
# (TODO)
def embed_legacy(shape_base, shape_tool, tolerance = 0.0):
    """embed_legacy(shape_base, shape_tool, tolerance = 0.0): alternative implementation of
    embed, without use of generalFuse. Slow. Provided for backwards compatibility, and
    for older OCC."""
    if tolerance > 0.0:
        import FreeCAD as App
        App.Console.PrintWarning("embed_legacy does not support tolerance (yet).\n")
    # legacy algorithm, extended to also cope with shells and wires
    leftovers = compoundLeaves(shape_base.cut(shape_tool))
    base_piece = shapeOfMaxSize(leftovers)
    fused = base_piece.fuse(shape_tool)
    dim = ShapeMerge.dimensionOfShapes(leftovers)
    if dim == 2:
        # fusing shells returns shells that are still split; reassemble them
        fused = ShapeMerge.mergeShapes(fused.Faces)
    elif dim == 1:
        # same story for wires: reassemble from the fused edges
        fused = ShapeMerge.mergeShapes(fused.Edges)
    return fused
def cutout_legacy(shape_base, shape_tool, tolerance = 0.0):
    """cutout_legacy(shape_base, shape_tool, tolerance = 0.0): alternative implementation of
    cutout, without use of generalFuse. Slow. Provided for backwards compatibility, and
    for older OCC."""
    if tolerance > 0.0:
        import FreeCAD as App
        App.Console.PrintWarning("cutout_legacy does not support tolerance (yet).\n")
    #if base is multi-piece, work on per-piece basis
    shapes_base = compoundLeaves(shape_base)
    if len(shapes_base) > 1:
        result = []
        for sh in shapes_base:
            # BUGFIX: recurse into cutout_legacy itself (there is no `cutout`
            # defined in this module, so the old call raised NameError), and
            # forward the tolerance argument.
            result.append(cutout_legacy(sh, shape_tool, tolerance))
        return Part.Compound(result)
    shape_base = shapes_base[0]
    # cut and keep only the largest resulting piece
    pieces = compoundLeaves(shape_base.cut(shape_tool))
    return shapeOfMaxSize(pieces)
| bblacey/FreeCAD-MacOS-CI | src/Mod/Part/BOPTools/JoinAPI.py | Python | lgpl-2.1 | 7,655 |
"""Basic checks for HomeKit motion sensors and contact sensors."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from tests.components.homekit_controller.common import setup_test_component
V1_ON = ("fan", "on")
V1_ROTATION_DIRECTION = ("fan", "rotation.direction")
V1_ROTATION_SPEED = ("fan", "rotation.speed")
V2_ACTIVE = ("fanv2", "active")
V2_ROTATION_DIRECTION = ("fanv2", "rotation.direction")
V2_ROTATION_SPEED = ("fanv2", "rotation.speed")
V2_SWING_MODE = ("fanv2", "swing-mode")
def create_fan_service(accessory):
    """
    Define fan v1 characteristics as per HAP spec.
    This service is no longer documented in R2 of the public HAP spec but existing
    devices out there use it (like the SIMPLEconnect fan)
    """
    service = accessory.add_service(ServicesTypes.FAN)
    # every characteristic starts out zeroed (off / forward / stopped)
    for char_type in (
        CharacteristicsTypes.ON,
        CharacteristicsTypes.ROTATION_DIRECTION,
        CharacteristicsTypes.ROTATION_SPEED,
    ):
        char = service.add_char(char_type)
        char.value = 0
def create_fanv2_service(accessory):
    """Define fan v2 characteristics as per HAP spec."""
    service = accessory.add_service(ServicesTypes.FAN_V2)
    # every characteristic starts out zeroed (inactive / forward / stopped)
    for char_type in (
        CharacteristicsTypes.ACTIVE,
        CharacteristicsTypes.ROTATION_DIRECTION,
        CharacteristicsTypes.ROTATION_SPEED,
        CharacteristicsTypes.SWING_MODE,
    ):
        char = service.add_char(char_type)
        char.value = 0
def create_fanv2_service_with_min_step(accessory):
    """Define fan v2 characteristics as per HAP spec."""
    service = accessory.add_service(ServicesTypes.FAN_V2)
    active = service.add_char(CharacteristicsTypes.ACTIVE)
    active.value = 0
    rotation_direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)
    rotation_direction.value = 0
    rotation_speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)
    rotation_speed.value = 0
    # the device only accepts speed changes in 25% increments
    rotation_speed.minStep = 25
    swing = service.add_char(CharacteristicsTypes.SWING_MODE)
    swing.value = 0
def create_fanv2_service_without_rotation_speed(accessory):
    """Define fan v2 characteristics as per HAP spec."""
    service = accessory.add_service(ServicesTypes.FAN_V2)
    cur_state = service.add_char(CharacteristicsTypes.ACTIVE)
    cur_state.value = 0
    direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)
    direction.value = 0
    # ROTATION_SPEED is deliberately omitted: this fixture models a fan that
    # can only be switched on/off and oscillated, not speed-controlled.
    swing_mode = service.add_char(CharacteristicsTypes.SWING_MODE)
    swing_mode.value = 0
async def test_fan_read_state(hass, utcnow):
    """Test that we can read the state of a HomeKit fan accessory."""
    helper = await setup_test_component(hass, create_fan_service)
    # flipping the ON characteristic must be reflected after the next poll
    helper.characteristics[V1_ON].value = False
    state = await helper.poll_and_get_state()
    assert state.state == "off"
    helper.characteristics[V1_ON].value = True
    state = await helper.poll_and_get_state()
    assert state.state == "on"
async def test_turn_on(hass, utcnow):
"""Test that we can turn a fan on."""
helper = await setup_test_component(hass, create_fan_service)
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 66.0
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 33.0
async def test_turn_on_off_without_rotation_speed(hass, utcnow):
"""Test that we can turn a fan on."""
helper = await setup_test_component(
hass, create_fanv2_service_without_rotation_speed
)
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_turn_off(hass, utcnow):
"""Test that we can turn a fan off."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 0
async def test_set_speed(hass, utcnow):
"""Test that we set fan speed."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 66.0
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 33.0
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "off"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 0
async def test_set_percentage(hass, utcnow):
"""Test that we set fan speed by percentage."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 66},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 66
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 0},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 0
async def test_speed_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
helper.characteristics[V1_ROTATION_SPEED].value = 100
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "high"
assert state.attributes["percentage"] == 100
assert state.attributes["percentage_step"] == 1.0
helper.characteristics[V1_ROTATION_SPEED].value = 50
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "medium"
assert state.attributes["percentage"] == 50
helper.characteristics[V1_ROTATION_SPEED].value = 25
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "low"
assert state.attributes["percentage"] == 25
helper.characteristics[V1_ON].value = 0
helper.characteristics[V1_ROTATION_SPEED].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "off"
assert state.attributes["percentage"] == 0
async def test_set_direction(hass, utcnow):
"""Test that we can set fan spin direction."""
helper = await setup_test_component(hass, create_fan_service)
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "reverse"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_DIRECTION].value == 1
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "forward"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_DIRECTION].value == 0
async def test_direction_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ROTATION_DIRECTION].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "forward"
helper.characteristics[V1_ROTATION_DIRECTION].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "reverse"
async def test_fanv2_read_state(hass, utcnow):
"""Test that we can read the state of a HomeKit fan accessory."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = False
state = await helper.poll_and_get_state()
assert state.state == "off"
helper.characteristics[V2_ACTIVE].value = True
state = await helper.poll_and_get_state()
assert state.state == "on"
async def test_v2_turn_on(hass, utcnow):
"""Test that we can turn a fan on."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 66.0
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 33.0
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
assert helper.characteristics[V2_ROTATION_SPEED].value == 33.0
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 33.0
async def test_v2_turn_off(hass, utcnow):
"""Test that we can turn a fan off."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_set_speed(hass, utcnow):
"""Test that we set fan speed."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 66
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 33
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "off"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_set_percentage(hass, utcnow):
"""Test that we set fan speed by percentage."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 66},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 66
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 0},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_set_percentage_with_min_step(hass, utcnow):
"""Test that we set fan speed by percentage."""
helper = await setup_test_component(hass, create_fanv2_service_with_min_step)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 66},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 75
await hass.services.async_call(
"fan",
"set_percentage",
{"entity_id": "fan.testdevice", "percentage": 0},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_speed_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
helper.characteristics[V2_ROTATION_SPEED].value = 100
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "high"
assert state.attributes["percentage"] == 100
helper.characteristics[V2_ROTATION_SPEED].value = 50
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "medium"
assert state.attributes["percentage"] == 50
helper.characteristics[V2_ROTATION_SPEED].value = 25
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "low"
assert state.attributes["percentage"] == 25
helper.characteristics[V2_ACTIVE].value = 0
helper.characteristics[V2_ROTATION_SPEED].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "off"
assert state.attributes["percentage"] == 0
async def test_v2_set_direction(hass, utcnow):
"""Test that we can set fan spin direction."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "reverse"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_DIRECTION].value == 1
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "forward"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_DIRECTION].value == 0
async def test_v2_direction_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ROTATION_DIRECTION].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "forward"
helper.characteristics[V2_ROTATION_DIRECTION].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "reverse"
async def test_v2_oscillate(hass, utcnow):
"""Test that we can control a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"oscillate",
{"entity_id": "fan.testdevice", "oscillating": True},
blocking=True,
)
assert helper.characteristics[V2_SWING_MODE].value == 1
await hass.services.async_call(
"fan",
"oscillate",
{"entity_id": "fan.testdevice", "oscillating": False},
blocking=True,
)
assert helper.characteristics[V2_SWING_MODE].value == 0
async def test_v2_oscillate_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_SWING_MODE].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["oscillating"] is False
helper.characteristics[V2_SWING_MODE].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["oscillating"] is True
| jawilson/home-assistant | tests/components/homekit_controller/test_fan.py | Python | apache-2.0 | 17,165 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Package initializer for deepy.trainers: re-exports the public training API
# so callers can import trainers, optimizers and annealers from one place.
# Theano linker used when compiling training functions ("cvm" = C virtual machine).
THEANO_LINKER = "cvm"
from controllers import TrainingController, TrainingValidator
from base import NeuralTrainer
# NOTE(review): wildcard imports re-export every public name of these modules.
from trainers import *
from optimize import *
from annealers import *
from customize_trainer import CustomizeTrainer
from util import wrap_core, multiple_l2_norm
from delayed_trainers import DelayedBatchSGDTrainer
from scipy_trainer import ScipyTrainer
from train_logger import TrainLogger
#!/usr/bin/env python
########################################################################################################################
# Load configuration from specified file #
########################################################################################################################
# (C) Manuel Bernal Llinares <[email protected]>#
# Distributed under the Apache License 2.0 #
########################################################################################################################
import os
import json
import sys
import optparse
import uuid
import time
_config = None
def prepareConfig():
    """Parse command-line options, load the JSON config file and fill in
    default logger settings on the module-level ``_config`` dictionary.

    Side effects: replaces the global ``_config``; reads the config file
    given by -c/--config (default ./config.backup_daemon.json).
    """
    global _config
    configFile = "./config.backup_daemon.json"
    # Create the command line parser with its options
    cmdl_version = '2015.03.05'
    cmdl_parser = optparse.OptionParser(version=cmdl_version, conflict_handler='resolve')
    cmdl_parser.add_option('-h', '--help', action='help', help='print this help text and exit')
    cmdl_parser.add_option('-v', '--version', action='version', help='print program version and exit')
    cmdl_parser.add_option('-c', '--config', dest='configFileName', metavar='PATH_TO_CONFIG_FILE',
                           help='specify a config file to use for the session')
    (cmdl_options, cmdl_args) = cmdl_parser.parse_args()
    if cmdl_options.configFileName:
        configFile = cmdl_options.configFileName
    # Load config file
    with open(configFile) as cf:
        _config = json.load(cf)
    # Remember where the configuration came from
    _config['config'] = {}
    _config['config']['file'] = configFile
    # Fill in logging defaults for anything the file did not provide
    currentTime = time.localtime()
    _config.setdefault('logger', {})
    _config['logger'].setdefault('folder', './')
    _config['logger'].setdefault('namespace', 'mainSession')
    if "filePath" not in _config['logger']:
        # e.g. <folder>/backup-20150305_13.05.59.log -- strftime replaces the
        # previous hand-rolled zero-padding of each time field
        _config['logger']['filePath'] = (
            _config['logger']['folder'] + "/"
            + time.strftime("backup-%Y%m%d_%H.%M.%S.log", currentTime))
    if "ouputAndErrorFilePath" not in _config['logger']:
        # NOTE(review): key name keeps the historical 'ouput' typo on purpose;
        # other modules look it up under this exact spelling.
        _config['logger']['ouputAndErrorFilePath'] = _config['logger']['filePath'] + ".out"
def getConfig():
    """Return the configuration dictionary, loading it on first access."""
    global _config
    if not _config:
        # lazy initialization: parse argv and read the config file once
        prepareConfig()
    return _config
def main():
    # Guard for accidental direct execution; this module is import-only.
    print("This module is not designed to be run alone")
if __name__ == "__main__":
main()
| sahmri/simpleBackup | settings.py | Python | apache-2.0 | 3,188 |
import pytest
import salt.modules.win_servermanager as win_servermanager
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    # Minimal loader setup: give win_servermanager a Windows-style osversion
    # grain so version-dependent code paths in the module can run.
    return {win_servermanager: {"__grains__": {"osversion": "6.2"}}}
def test_install():
    """
    Test win_servermanager.install
    """
    # Raw PowerShell output as _pshell_json would return it.
    mock_out = {
        "Success": True,
        "RestartNeeded": 1,
        "FeatureResult": [
            {
                "Id": 338,
                "Name": "XPS-Viewer",
                "DisplayName": "XPS Viewer",
                "Success": True,
                "RestartNeeded": False,
                "Message": "",
                "SkipReason": 0,
            }
        ],
        "ExitCode": 0,
    }
    # Normalized structure the module is expected to build from mock_out.
    expected = {
        "ExitCode": 0,
        "RestartNeeded": False,
        "Restarted": False,
        "Features": {
            "XPS-Viewer": {
                "DisplayName": "XPS Viewer",
                "Message": "",
                "RestartNeeded": False,
                "SkipReason": 0,
                "Success": True,
            }
        },
        "Success": True,
    }
    mock_reboot = MagicMock(return_value=True)
    # Patch out PowerShell and the reboot helper; install() must not reboot
    # when called without restart=True.
    with patch.object(
        win_servermanager, "_pshell_json", return_value=mock_out
    ), patch.dict(win_servermanager.__salt__, {"system.reboot": mock_reboot}):
        result = win_servermanager.install("XPS-Viewer")
        assert result == expected
def test_install_restart():
    """
    Test win_servermanager.install when restart=True
    """
    # Raw PowerShell output with a feature that requires a restart.
    mock_out = {
        "Success": True,
        "RestartNeeded": 1,
        "FeatureResult": [
            {
                "Id": 338,
                "Name": "XPS-Viewer",
                "DisplayName": "XPS Viewer",
                "Success": True,
                "RestartNeeded": True,
                "Message": "",
                "SkipReason": 0,
            }
        ],
        "ExitCode": 0,
    }
    expected = {
        "ExitCode": 0,
        "RestartNeeded": True,
        "Restarted": True,
        "Features": {
            "XPS-Viewer": {
                "DisplayName": "XPS Viewer",
                "Message": "",
                "RestartNeeded": True,
                "SkipReason": 0,
                "Success": True,
            }
        },
        "Success": True,
    }
    mock_reboot = MagicMock(return_value=True)
    with patch.object(
        win_servermanager, "_pshell_json", return_value=mock_out
    ), patch.dict(win_servermanager.__salt__, {"system.reboot": mock_reboot}):
        result = win_servermanager.install("XPS-Viewer", restart=True)
        # restart=True plus a RestartNeeded feature must trigger exactly one reboot
        mock_reboot.assert_called_once()
        assert result == expected
| saltstack/salt | tests/pytests/unit/modules/test_win_servermanager.py | Python | apache-2.0 | 2,675 |
# Copyright 2015 Cloudbase Solutions Srl
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test case for tests that do not rely on Tempest."""
import contextlib
import logging as std_logging
import os
import os.path
import traceback
import eventlet.timeout
import fixtures
import mock
from oslo_config import cfg
from oslo_utils import strutils
import testtools
CONF = cfg.CONF
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
def bool_from_env(key, strict=False, default=False):
    """Interpret the environment variable *key* as a boolean value."""
    return strutils.bool_from_string(
        os.environ.get(key), strict=strict, default=default)
class BaseTestCase(testtools.TestCase):
    """Common base class: wires up logging, timeouts, stream capture and
    configuration cleanup driven by OS_* environment variables."""
    def setUp(self):
        """Install per-test fixtures (log capture, timeout, stdio capture)."""
        super(BaseTestCase, self).setUp()
        self.addCleanup(CONF.reset)
        self.addCleanup(mock.patch.stopall)
        if bool_from_env('OS_DEBUG'):
            _level = std_logging.DEBUG
        else:
            _level = std_logging.INFO
        capture_logs = bool_from_env('OS_LOG_CAPTURE')
        if not capture_logs:
            std_logging.basicConfig(format=LOG_FORMAT, level=_level)
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(
                format=LOG_FORMAT,
                level=_level,
                nuke_handlers=capture_logs,
            ))
        # OS_TEST_TIMEOUT: seconds per test; -1 (or 0) disables the timeout
        test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
        if test_timeout == -1:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        if bool_from_env('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if bool_from_env('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.addOnException(self.check_for_systemexit)
    def check_for_systemexit(self, exc_info):
        """Fail the test explicitly if it raised SystemExit."""
        if isinstance(exc_info[1], SystemExit):
            self.fail("A SystemExit was raised during the test. %s"
                      % traceback.format_exception(*exc_info))
    @contextlib.contextmanager
    def assert_max_execution_time(self, max_execution_time=5):
        # Timeout(..., False) exits the with-block silently when it fires,
        # so falling through to self.fail() means the body timed out.
        with eventlet.timeout.Timeout(max_execution_time, False):
            yield
            return
        self.fail('Execution of this test timed out')
    def assertOrderedEqual(self, expected, actual):
        """Compare two structures, ignoring the order of nested lists."""
        expect_val = self.sort_dict_lists(expected)
        actual_val = self.sort_dict_lists(actual)
        self.assertEqual(expect_val, actual_val)
    def sort_dict_lists(self, dic):
        """Recursively sort every list value inside *dic* (mutates *dic*)."""
        for key, value in dic.items():
            if isinstance(value, list):
                dic[key] = sorted(value)
            elif isinstance(value, dict):
                dic[key] = self.sort_dict_lists(value)
        return dic
    def assertDictSupersetOf(self, expected_subset, actual_superset):
        """Checks that actual dict contains the expected dict.
        After checking that the arguments are of the right type, this checks
        that each item in expected_subset is in, and matches, what is in
        actual_superset. Separate tests are done, so that detailed info can
        be reported upon failure.
        """
        if not isinstance(expected_subset, dict):
            self.fail("expected_subset (%s) is not an instance of dict" %
                      type(expected_subset))
        if not isinstance(actual_superset, dict):
            self.fail("actual_superset (%s) is not an instance of dict" %
                      type(actual_superset))
        for k, v in expected_subset.items():
            self.assertIn(k, actual_superset)
            self.assertEqual(v, actual_superset[k],
                             "Key %(key)s expected: %(exp)r, actual %(act)r" %
                             {'key': k, 'exp': v, 'act': actual_superset[k]})
    def config(self, **kw):
        """Override some configuration values.
        The keyword arguments are the names of configuration options to
        override and their values.
        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.
        All overrides are automatically cleared at the end of the current
        test by the fixtures cleanup process.
        """
        group = kw.pop('group', None)
        for k, v in kw.items():
            CONF.set_override(k, v, group)
| gabriel-samfira/networking-hyperv | hyperv/tests/base.py | Python | apache-2.0 | 5,081 |
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import bisect
import random
import string
def process_file(filename, skip_header):
    """Makes a histogram that contains the words from a file.

    filename: string
    skip_header: boolean, whether to skip the Gutenberg header

    Returns: map from each word to the number of times it appears.
    """
    hist = {}
    # open() instead of the Python-2-only file() builtin; the with-block also
    # guarantees the file handle is closed (the original leaked it).
    with open(filename) as fp:
        if skip_header:
            skip_gutenberg_header(fp)
        for line in fp:
            process_line(line, hist)
    return hist
def skip_gutenberg_header(fp):
    """Reads from fp until it finds the line that ends the header.

    fp: open file object
    """
    for header_line in fp:
        if header_line.startswith('*END*THE SMALL PRINT!'):
            return
def process_line(line, hist):
    """Adds the words in the line to the histogram.

    Modifies hist.

    line: string
    hist: histogram (map from word to frequency)
    """
    strippable = string.punctuation + string.whitespace
    # treat hyphenated compounds as separate words
    for token in line.replace('-', ' ').split():
        word = token.strip(strippable).lower()
        hist[word] = hist.get(word, 0) + 1
def most_common(hist):
    """Makes a list of the key-value pairs from a histogram and
    sorts them in descending order by frequency."""
    pairs = [(freq, word) for word, freq in hist.items()]
    return sorted(pairs, reverse=True)
def print_most_common(hist, num=10):
    """Prints the most commons words in a histgram and their frequencies.
    hist: histogram (map from word to frequency
    num: number of words to print
    """
    # sorted (frequency, word) pairs, highest frequency first
    t = most_common(hist)
    print 'The most common words are:'
    for freq, word in t[:num]:
        print word, '\t', freq
def subtract(d1, d2):
    """Returns a dictionary with all keys that appear in d1 but not d2.

    d1, d2: dictionaries
    """
    # values are irrelevant; the result is used as a set of keys
    return dict((key, None) for key in d1 if key not in d2)
def total_words(hist):
    """Returns the total of the frequencies in a histogram."""
    return sum(hist.values())
def different_words(hist):
    """Returns the number of different words in a histogram."""
    return len(hist)
def random_word(hist):
    """Chooses a random word from a histogram.

    The probability of each word is proportional to its frequency.
    """
    # Build cumulative frequency totals: word i owns the interval
    # (cumulative[i-1], cumulative[i]].  This needs O(len(hist)) memory,
    # unlike the old approach of materializing one list slot per word
    # occurrence, which was O(total word count).
    words = []
    cumulative = []
    running = 0
    for word, freq in hist.items():
        words.append(word)
        running += freq
        cumulative.append(running)
    # pick a position in [1, total] and map it back to its word
    target = random.randint(1, running)
    return words[bisect.bisect_left(cumulative, target)]
if __name__ == '__main__':
    # Demo run over the Project Gutenberg text of "Emma" plus a word list.
    hist = process_file('emma.txt', skip_header=True)
    print 'Total number of words:', total_words(hist)
    print 'Number of different words:', different_words(hist)
    t = most_common(hist)
    print 'The most common words are:'
    for freq, word in t[0:20]:
        print word, '\t', freq
    words = process_file('words.txt', skip_header=False)
    # words of the book that are absent from the reference word list
    diff = subtract(hist, words)
    print "The words in the book that aren't in the word list are:"
    for word in diff.keys():
        print word,
    print "\n\nHere are some random words from the book"
    for i in range(100):
        print random_word(hist),
| hacpai/reading-lists | Think Python/analyze_book.py | Python | bsd-2-clause | 3,358 |
#!/usr/bin/env python
"""
git-ftp: painless, quick and easy working copy syncing over FTP
Copyright (c) 2008-2012
Edward Z. Yang <[email protected]>, Mauro Lizaur <[email protected]> and
Niklas Fiekas <[email protected]>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import ftplib
import cStringIO
import re
import sys
import os.path
import posixpath # use this for ftp manipulation
import getpass
import ConfigParser
import optparse
import logging
import textwrap
import fnmatch
# Note about Tree.path/Blob.path: *real* Git trees and blobs don't
# actually provide path information, but the git-python bindings, as a
# convenience keep track of this if you access the blob from an index.
# This ends up considerably simplifying our code, but do be careful!
from distutils.version import LooseVersion
from git import __version__ as git_version
# Require git-python >= 0.3.0: the Blob/Repo/Submodule API used below
# changed incompatibly at 0.3.
if LooseVersion(git_version) < '0.3.0':
    print 'git-ftp requires git-python 0.3.0 or newer; %s provided.' % git_version
    exit(1)
from git import Blob, Repo, Git, Submodule
class BranchNotFound(Exception):
    """Raised when the requested branch does not exist in the repository."""
    pass
class FtpDataOldVersion(Exception):
    """Raised when .git/ftpdata still uses the legacy [ftp] section name."""
    pass
class FtpSslNotSupported(Exception):
    """Raised when SSL is requested but ftplib lacks FTP_TLS (Python < 2.7)."""
    pass
class SectionNotFound(Exception):
    """Raised when .git/ftpdata has no section for the selected branch."""
    pass
def split_pattern(path):  # TODO: Improve skeevy code
    """Translate a glob pattern and split it into per-component regexes.

    fnmatch.translate anchors only the whole pattern; every component
    except the last therefore gets the same end-of-string suffix appended,
    so each piece can be matched against a single path segment.
    """
    pieces = fnmatch.translate(path).split('\\/')
    anchored = [p + '\\Z(?ms)' if p else p for p in pieces[:-1]]
    anchored.append(pieces[-1])
    return anchored
# ezyang: This code is pretty skeevy; there is probably a better,
# more obviously correct way of spelling it. Refactor me...
def is_ignored(path, regex):
    """Match *path* against one gitftpignore pattern.

    *regex* is a raw pattern string, translated via split_pattern into one
    regex per path component.  A pattern with a leading '/' must match from
    the repository root; otherwise its first component may match at any
    depth.  Returns True when the pattern applies to *path*.
    """
    regex = split_pattern(os.path.normcase(regex))
    path = os.path.normcase(path).split('/')
    regex_pos = path_pos = 0
    if regex[0] == '': # leading slash - root dir must match
        if path[0] != '' or not re.match(regex[1], path[1]):
            return False
        regex_pos = path_pos = 2
    if not regex_pos: # find beginning of regex
        # Slide the first pattern component along the path components until
        # it matches somewhere; no match anywhere means the pattern misses.
        for i, p in enumerate(path):
            if re.match(regex[0], p):
                regex_pos = 1
                path_pos = i + 1
                break
        else:
            return False
    # Fewer remaining path components than pattern components cannot match.
    if len(path[path_pos:]) < len(regex[regex_pos:]):
        return False
    n = len(regex)
    for r in regex[regex_pos:]: # match the rest
        if regex_pos + 1 == n: # last item; if empty match anything
            # A trailing empty component (pattern ended with '/') matches any
            # remainder, i.e. the pattern named a directory prefix.
            if re.match(r, ''):
                return True
        if not re.match(r, path[path_pos]):
            return False
        path_pos += 1
        regex_pos += 1
    return True
def main():
    """Upload the differences between the remote-tracked revision and HEAD.

    Resolves options and FTP credentials, connects (optionally over TLS),
    reads the revision marker git-rev.txt from the server, diffs it against
    the selected commit's tree, uploads the changes via upload_diff and
    finally stores the new revision marker.  Raises BranchNotFound or
    FtpSslNotSupported on the corresponding failures.
    """
    Git.git_binary = 'git' # Windows doesn't like env
    repo, options, args = parse_args()
    if repo.is_dirty() and not options.commit:
        logging.warning("Working copy is dirty; uncommitted changes will NOT be uploaded")
    base = options.ftp.remotepath
    logging.info("Base directory is %s", base)
    try:
        # NOTE: generator .next() is Python 2 only (py3 would use next(...)).
        branch = (h for h in repo.heads if h.name == options.branch).next()
    except StopIteration:
        raise BranchNotFound
    commit = branch.commit
    if options.commit:
        commit = repo.commit(options.commit)
    tree = commit.tree
    if options.ftp.ssl:
        if hasattr(ftplib, 'FTP_TLS'): # SSL new in 2.7+
            ftp = ftplib.FTP_TLS(options.ftp.hostname, options.ftp.username, options.ftp.password)
            ftp.prot_p()
            logging.info("Using SSL")
        else:
            raise FtpSslNotSupported("Python is too old for FTP SSL. Try using Python 2.7 or later.")
    else:
        ftp = ftplib.FTP(options.ftp.hostname, options.ftp.username, options.ftp.password)
    ftp.cwd(base)
    # Check revision
    hash = options.revision
    if not options.force and not hash:
        # The server keeps the last uploaded revision in git-rev.txt; a
        # missing file (error_perm) simply means "never uploaded before".
        hashFile = cStringIO.StringIO()
        try:
            ftp.retrbinary('RETR git-rev.txt', hashFile.write)
            hash = hashFile.getvalue().strip()
        except ftplib.error_perm:
            pass
    # Load ftpignore rules, if any
    patterns = []
    gitftpignore = os.path.join(repo.working_dir, options.ftp.gitftpignore)
    if os.path.isfile(gitftpignore):
        with open(gitftpignore, 'r') as ftpignore:
            patterns = parse_ftpignore(ftpignore)
        # The ignore file itself must never be uploaded.
        patterns.append('/' + options.ftp.gitftpignore)
    if not hash:
        # Diffing against an empty tree will cause a full upload.
        oldtree = get_empty_tree(repo)
    else:
        oldtree = repo.commit(hash).tree
    if oldtree.hexsha == tree.hexsha:
        logging.info('Nothing to do!')
    else:
        upload_diff(repo, oldtree, tree, ftp, [base], patterns)
    # Record the uploaded revision so the next run can diff against it.
    ftp.storbinary('STOR git-rev.txt', cStringIO.StringIO(commit.hexsha))
    ftp.quit()
def parse_ftpignore(rawPatterns):
    """Filter raw .gitftpignore lines down to usable patterns.

    Trailing whitespace is stripped; blank lines and lines starting with
    '#' (comments) are dropped.
    """
    stripped = (line.rstrip() for line in rawPatterns)
    return [line for line in stripped if line and not line.startswith('#')]
def parse_args():
    """Parse command-line options, open the repository and resolve FTP creds.

    Returns (repo, options, args).  options.ftp is populated as a side
    effect of get_ftp_creds; the branch defaults to the active branch and
    the ftpdata section defaults to the branch name.
    """
    usage = 'usage: %prog [OPTIONS] [DIRECTORY]'
    desc = """\
    This script uploads files in a Git repository to a
    website via FTP, but is smart and only uploads file
    that have changed.
    """
    parser = optparse.OptionParser(usage, description=textwrap.dedent(desc))
    parser.add_option('-f', '--force', dest="force", action="store_true", default=False,
                      help="force the reupload of all files")
    parser.add_option('-q', '--quiet', dest="quiet", action="store_true", default=False,
                      help="quiet output")
    parser.add_option('-r', '--revision', dest="revision", default=None,
                      help="use this revision instead of the server stored one")
    parser.add_option('-b', '--branch', dest="branch", default=None,
                      help="use this branch instead of the active one")
    parser.add_option('-c', '--commit', dest="commit", default=None,
                      help="use this commit instead of HEAD")
    parser.add_option('-s', '--section', dest="section", default=None,
                      help="use this section from ftpdata instead of branch name")
    options, args = parser.parse_args()
    configure_logging(options)
    if len(args) > 1:
        parser.error("too many arguments")
    # Optional positional argument: the working-copy directory.
    if args:
        cwd = args[0]
    else:
        cwd = "."
    repo = Repo(cwd)
    if not options.branch:
        options.branch = repo.active_branch.name
    if not options.section:
        options.section = options.branch
    get_ftp_creds(repo, options)
    return repo, options, args
def configure_logging(options):
    """Attach a stderr handler to the root logger; INFO level unless quiet."""
    root = logging.getLogger()
    if not options.quiet:
        root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    root.addHandler(handler)
def format_mode(mode):
    """Render the permission bits of *mode* as an octal string (e.g. '644')."""
    return format(mode & 0o777, 'o')
class FtpData():
    """Plain value holder for FTP connection settings.

    Class-level attributes act as defaults; get_ftp_creds assigns the real
    values onto each instance.
    """
    password = None
    username = None
    hostname = None
    remotepath = None
    ssl = None
    gitftpignore = None
def get_ftp_creds(repo, options):
"""
Retrieves the data to connect to the FTP from .git/ftpdata
or interactively.
ftpdata format example:
[branch]
username=me
password=s00perP4zzw0rd
hostname=ftp.hostname.com
remotepath=/htdocs
ssl=yes
gitftpignore=.gitftpignore
Please note that it isn't necessary to have this file,
you'll be asked for the data every time you upload something.
"""
ftpdata = os.path.join(repo.git_dir, "ftpdata")
options.ftp = FtpData()
cfg = ConfigParser.ConfigParser()
if os.path.isfile(ftpdata):
logging.info("Using .git/ftpdata")
cfg.read(ftpdata)
if (not cfg.has_section(options.section)):
if cfg.has_section('ftp'):
raise FtpDataOldVersion("Please rename the [ftp] section to [branch]. " +
"Take a look at the README for more information")
else:
raise SectionNotFound("Your .git/ftpdata file does not contain a section " +
"named '%s'" % options.section)
# just in case you do not want to store your ftp password.
try:
options.ftp.password = cfg.get(options.section, 'password')
except ConfigParser.NoOptionError:
options.ftp.password = getpass.getpass('FTP Password: ')
options.ftp.username = cfg.get(options.section, 'username')
options.ftp.hostname = cfg.get(options.section, 'hostname')
options.ftp.remotepath = cfg.get(options.section, 'remotepath')
try:
options.ftp.ssl = boolish(cfg.get(options.section, 'ssl'))
except ConfigParser.NoOptionError:
options.ftp.ssl = False
try:
options.ftp.gitftpignore = cfg.get(options.section, 'gitftpignore')
except ConfigParser.NoOptionError:
options.ftp.gitftpignore = '.gitftpignore'
else:
print "Please configure settings for branch '%s'" % options.section
options.ftp.username = raw_input('FTP Username: ')
options.ftp.password = getpass.getpass('FTP Password: ')
options.ftp.hostname = raw_input('FTP Hostname: ')
options.ftp.remotepath = raw_input('Remote Path: ')
if hasattr(ftplib, 'FTP_TLS'):
options.ftp.ssl = ask_ok('Use SSL? ')
else:
logging.warning("SSL not supported, defaulting to no")
# set default branch
if ask_ok("Should I write ftp details to .git/ftpdata? "):
cfg.add_section(options.section)
cfg.set(options.section, 'username', options.ftp.username)
cfg.set(options.section, 'password', options.ftp.password)
cfg.set(options.section, 'hostname', options.ftp.hostname)
cfg.set(options.section, 'remotepath', options.ftp.remotepath)
cfg.set(options.section, 'ssl', options.ftp.ssl)
f = open(ftpdata, 'w')
cfg.write(f)
def get_empty_tree(repo):
    """Return the repository's empty tree object (diff base for full uploads)."""
    empty_sha = repo.git.hash_object('-w', '-t', 'tree', os.devnull)
    return repo.tree(empty_sha)
def upload_diff(repo, oldtree, tree, ftp, base, ignored):
    """
    Upload and/or delete items according to a Git diff between two trees.

    upload_diff requires, that the ftp working directory is set to the base
    of the current repository before it is called.

    Keyword arguments:
    repo -- The git.Repo to upload objects from
    oldtree -- The old tree to diff against. An empty tree will cause a full
            upload of the new tree.
    tree -- The new tree. An empty tree will cause a full removal of all
            objects of the old tree.
    ftp -- The active ftplib.FTP object to upload contents to
    base -- The list of base directory and submodule paths to upload contents
            to in ftp.
            For example, base = ['www', 'www']. base must exist and must not
            have a trailing slash.
    ignored -- The list of patterns explicitly ignored by gitftpignore.

    Recurses into submodules, restoring the ftp working directory afterwards.
    """
    # -z is used so we don't have to deal with quotes in path matching
    diff = repo.git.diff("--name-status", "--no-renames", "-z", oldtree.hexsha, tree.hexsha)
    # NUL-separated output alternates: status entry, then filename entry.
    diff = iter(diff.split("\0"))
    for line in diff:
        if not line:
            continue
        status, file = line, next(diff)
        assert status in ['A', 'D', 'M']
        filepath = posixpath.join(*(['/'] + base[1:] + [file]))
        if is_ignored_path(filepath, ignored):
            logging.info('Skipped ' + filepath)
            continue
        if status == "D":
            try:
                ftp.delete(file)
                logging.info('Deleted ' + file)
            except ftplib.error_perm:
                logging.warning('Failed to delete ' + file)
            # Now let's see if we need to remove some subdirectories
            def generate_parent_dirs(x):
                # invariant: x is a filename
                while '/' in x:
                    x = posixpath.dirname(x)
                    yield x
            for dir in generate_parent_dirs(file):
                try:
                    # unfortunately, dir in tree doesn't work for subdirs
                    tree[dir]
                except KeyError:
                    # Directory no longer exists in the new tree: try to
                    # remove it remotely (fails harmlessly if non-empty).
                    try:
                        ftp.rmd(dir)
                        logging.debug('Cleaned away ' + dir)
                    except ftplib.error_perm:
                        logging.info('Did not clean away ' + dir)
                        break
        else:
            node = tree[file]
            if status == "A":
                # try building up the parent directory
                subtree = tree
                if isinstance(node, Blob):
                    directories = file.split("/")[:-1]
                else:
                    # for submodules also add the directory itself
                    assert isinstance(node, Submodule)
                    directories = file.split("/")
                for c in directories:
                    subtree = subtree / c
                    try:
                        ftp.mkd(subtree.path)
                    except ftplib.error_perm:
                        # Directory already exists (or cannot be created).
                        pass
            if isinstance(node, Blob):
                upload_blob(node, ftp)
            else:
                # Submodule: diff and upload its own trees recursively.
                module = node.module()
                module_tree = module.commit(node.hexsha).tree
                if status == "A":
                    module_oldtree = get_empty_tree(module)
                else:
                    oldnode = oldtree[file]
                    assert isinstance(oldnode, Submodule) # TODO: What if not?
                    module_oldtree = module.commit(oldnode.hexsha).tree
                module_base = base + [node.path]
                logging.info('Entering submodule %s', node.path)
                ftp.cwd(posixpath.join(*module_base))
                upload_diff(module, module_oldtree, module_tree, ftp, module_base, ignored)
                logging.info('Leaving submodule %s', node.path)
                ftp.cwd(posixpath.join(*base))
def is_ignored_path(path, patterns, quiet=False):
    """Returns true if a filepath is ignored by gitftpignore."""
    if is_special_file(path):
        return True
    return any(is_ignored(path, pattern) for pattern in patterns)
def is_special_file(name):
    """Returns true if a file is some special Git metadata and not content."""
    special = ('.gitignore', '.gitattributes', '.gitmodules')
    return posixpath.basename(name) in special
def upload_blob(blob, ftp, quiet=False):
    """
    Uploads a blob. Pre-condition on ftp is that our current working
    directory is the root directory of the repository being uploaded
    (that means DON'T use ftp.cwd; we'll use full paths appropriately).
    """
    if not quiet:
        logging.info('Uploading ' + blob.path)
    # Delete any existing remote copy first; a missing file (error_perm)
    # is the normal case and is ignored.
    try:
        ftp.delete(blob.path)
    except ftplib.error_perm:
        pass
    ftp.storbinary('STOR ' + blob.path, blob.data_stream)
    # Mirror the git mode bits onto the server via SITE CHMOD where supported.
    try:
        ftp.voidcmd('SITE CHMOD ' + format_mode(blob.mode) + ' ' + blob.path)
    except ftplib.error_perm:
        # Ignore Windows chmod errors
        logging.warning('Failed to chmod ' + blob.path)
        pass
def boolish(s):
    """Loosely parse the string *s* as a boolean.

    Returns True for yes-like values, False for no-like values and None when
    the value is not recognised.  Matching now ignores case and surrounding
    whitespace (backward compatible: every previously accepted value still
    maps to the same result), so config entries like 'Yes' or ' ON ' work.
    """
    s = s.strip().lower()
    if s in ('1', 'true', 'y', 'ye', 'yes', 'on'):
        return True
    if s in ('0', 'false', 'n', 'no', 'off'):
        return False
    return None
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
    """Interactively ask a yes/no question (Python 2 raw_input/print).

    Re-prompts up to *retries* additional times; raises IOError when the
    user never gives an answer that boolish recognises.
    """
    while True:
        ok = raw_input(prompt).lower()
        r = boolish(ok)
        if r is not None:
            return r
        retries = retries - 1
        if retries < 0:
            raise IOError('Wrong user input.')
        print complaint
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| householdsteve/bearded-octo-ironman | git-ftp.py | Python | gpl-2.0 | 16,737 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 08:43
import datetime
from django.db import migrations, models
import django.db.models.deletion
import seahub.base.fields
class Migration(migrations.Migration):
    """Initial schema for the share app (generated by Django makemigrations).

    Creates the link/share bookkeeping tables: anonymous shares, extra
    user/group share permissions, download links (FileShare), org-scoped
    links, private file/dir shares and upload links.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AnonymousShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('repo_owner', seahub.base.fields.LowerCaseCharField(max_length=255)),
                ('repo_id', models.CharField(max_length=36)),
                ('anonymous_email', seahub.base.fields.LowerCaseCharField(max_length=255)),
                ('token', models.CharField(max_length=25, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='ExtraGroupsSharePermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('repo_id', models.CharField(db_index=True, max_length=36)),
                ('group_id', models.IntegerField(db_index=True)),
                ('permission', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='ExtraSharePermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('repo_id', models.CharField(db_index=True, max_length=36)),
                ('share_to', models.CharField(db_index=True, max_length=255)),
                ('permission', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='FileShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
                ('repo_id', models.CharField(db_index=True, max_length=36)),
                ('path', models.TextField()),
                ('token', models.CharField(max_length=100, unique=True)),
                ('ctime', models.DateTimeField(default=datetime.datetime.now)),
                ('view_cnt', models.IntegerField(default=0)),
                ('s_type', models.CharField(db_index=True, default=b'f', max_length=2)),
                ('password', models.CharField(max_length=128, null=True)),
                ('expire_date', models.DateTimeField(null=True)),
                ('permission', models.CharField(choices=[(b'view_download', b'View and download'), (b'view_only', b'Disable download')], db_index=True, default=b'view_download', max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='OrgFileShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('org_id', models.IntegerField(db_index=True)),
                ('file_share', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='share.FileShare')),
            ],
        ),
        migrations.CreateModel(
            name='PrivateFileDirShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('from_user', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
                ('to_user', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
                ('repo_id', models.CharField(db_index=True, max_length=36)),
                ('path', models.TextField()),
                ('token', models.CharField(max_length=10, unique=True)),
                ('permission', models.CharField(max_length=5)),
                ('s_type', models.CharField(default=b'f', max_length=5)),
            ],
        ),
        migrations.CreateModel(
            name='UploadLinkShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
                ('repo_id', models.CharField(db_index=True, max_length=36)),
                ('path', models.TextField()),
                ('token', models.CharField(max_length=100, unique=True)),
                ('ctime', models.DateTimeField(default=datetime.datetime.now)),
                ('view_cnt', models.IntegerField(default=0)),
                ('password', models.CharField(max_length=128, null=True)),
                ('expire_date', models.DateTimeField(null=True)),
            ],
        ),
    ]
| miurahr/seahub | seahub/share/migrations/0001_initial.py | Python | apache-2.0 | 4,833 |
import sys
from PyQt4 import QtGui,QtCore
#add frontend here
class Exampleone(QtGui.QWidget):
    """Placeholder popup widget with a single checkbox (PyQt4, Python 2).

    Toggling the checkbox shows/hides the window title.
    """
    #remove this
    def __init__(self):
        super(Exampleone, self).__init__()
        self.initUI()
    def initUI(self):
        # Build the window: one checkbox wired to changeTitle.
        cb = QtGui.QCheckBox('time pass]', self)
        cb.move(20, 20)
        cb.toggle()
        cb.stateChanged.connect(self.changeTitle)
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('QtGui.QCheckBox')
        self.show()
    def changeTitle(self, state):
        # Checked -> show the title, unchecked -> clear it.
        if state == QtCore.Qt.Checked:
            self.setWindowTitle('QtGui.QCheckBox')
        else:
            self.setWindowTitle('')
class Example(QtGui.QWidget):
    """Popup widget with a 'Show title' checkbox controlling the title bar."""
    def __init__(self):
        super(Example, self).__init__()
        self.initUI()
    def initUI(self):
        # Build the window: one checkbox wired to changeTitle.
        cb = QtGui.QCheckBox('Show title', self)
        cb.move(20, 20)
        cb.toggle()
        cb.stateChanged.connect(self.changeTitle)
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('QtGui.QCheckBox')
        self.show()
    def changeTitle(self, state):
        # Checked -> show the title, unchecked -> clear it.
        if state == QtCore.Qt.Checked:
            self.setWindowTitle('QtGui.QCheckBox')
        else:
            self.setWindowTitle('')
class App(QtGui.QMainWindow):
    """Main window of the Statistical Toolkit: a menu bar of analysis entries.

    Every menu action currently opens one of two placeholder popups
    (testme / Scat); the real tools are not implemented yet.
    """
    def __init__(self):
        super(App, self).__init__()
        self.initUI()
    def testme(self):
        # Placeholder handler: opens the Example popup.
        print "Opening a new popup window..."
        self.w = Example()
        self.w.show()
    def Scat(self):
        # Placeholder handler: opens the Exampleone popup.
        print "Opening a new popup sca window..."
        self.w = Exampleone()
        self.w.show()
        pass
    def initUI(self):
        # Exit action is created but not attached to any menu below.
        exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(QtGui.qApp.quit)
        self.statusBar()
        # Build the menu tree; every entry points at a placeholder handler.
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&EDA')
        fileMenu.addAction('Scatter Plot',self.Scat)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('Regression and Anova')
        fileMenu.addAction('Regression',self.Scat)
        fileMenu.addAction('Mixed effect',self.Scat)
        fileMenu.addAction('Anova',self.Scat)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('Machine learning')
        fileMenu1 = fileMenu.addMenu('Dumb')
        fileMenu1.addAction('classification',self.testme)
        fileMenu1.addAction('cluster Analysis',self.Scat)
        fileMenu1.addAction('feature selection',self.Scat)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('Visualisation')
        fileMenu.addAction('Scatter Plots',self.testme)
        fileMenu.addAction('Dendograms',self.testme)
        fileMenu.addAction('Biplots',self.testme)
        fileMenu.addAction('Andrew Plots',self.testme)
        fileMenu.addAction('Glyph Plots',self.testme)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('Goodness Fit')
        fileMenu.addAction('Chi-square',self.testme)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('DOE')
        fileMenu.addAction('Fractional factorial',self.testme)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('Distribution')
        fileMenu.addAction('All-in-one',self.testme)
        self.setGeometry(1000, 300, 300, 200)
        self.setWindowTitle('Statistical Toolkit v1.0')
        self.show()
def main():
    """Create the Qt application, show the main window and enter the loop."""
    app = QtGui.QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
# Script entry point.
if __name__ == '__main__':
    main()
| mlskit/astromlskit | FRONTEND/FrontEnd.py | Python | gpl-3.0 | 3,807 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `size` property of layout.scene.yaxis.tickfont.

    Appears auto-generated by the plotly codegen; font sizes are numbers
    with a minimum of 1 and re-render via a 'plot' edit.
    """
    def __init__(
        self, plotly_name="size", parent_name="layout.scene.yaxis.tickfont", **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            min=kwargs.pop("min", 1),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/layout/scene/yaxis/tickfont/_size.py | Python | mit | 505 |
import sys
# This is a test module for Python. It looks in the standard
# places for various *.py files. If these are moved, you must
# change this module too.
# Sanity-check that the standard library is importable (Python 2 script).
try:
    import os
except:
    print """Could not import the standard "os" module.
Please check your PYTHONPATH environment variable."""
    sys.exit(1)
# The "symbol" module lives in the dos_8x3 directory on old PC layouts.
try:
    import symbol
except:
    print """Could not import the standard "symbol" module.  If this is
a PC, you should add the dos_8x3 directory to your PYTHONPATH."""
    sys.exit(1)
import os
# Locate the Lib directory (the one holding os.py) and add its "test"
# subdirectory to sys.path so regrtest can be imported.
for dir in sys.path:
    file = os.path.join(dir, "os.py")
    if os.path.isfile(file):
        test = os.path.join(dir, "test")
        if os.path.isdir(test):
            # Add the "test" directory to PYTHONPATH.
            sys.path = sys.path + [test]
import regrtest # Standard Python tester.
regrtest.main()
| teeple/pns_server | work/install/Python-2.7.4/PC/testpy.py | Python | gpl-2.0 | 834 |
from unittest import TestCase
from z80 import Z80
from z80 import add_8bit, add_16bit, sub_8bit, sub_16bit
from z80 import rotate_right, rotate_right_through_carry
from z80 import rotate_left, rotate_left_through_carry
from z80 import ALUResult
from z80 import signed_8bit
from z80 import shift_left, shift_right_arithmetic, shift_right_logical
from z80 import swap
from z80 import bit, set_bit, reset_bit
class MockMem(dict):
    """Dict-backed fake memory for the Z80 tests.

    Words are little endian.  Reads default missing addresses to 0 and
    record the default back into the dict, so tests can later inspect
    exactly which cells were touched.
    """
    def read_byte(self, addr):
        # setdefault = "get, materialising a 0 for untouched cells".
        return self.setdefault(addr, 0)
    def write_byte(self, val, addr):
        self[addr] = val
    def read_word(self, addr):
        # Low byte first, then high byte (little endian).
        low = self.setdefault(addr, 0)
        high = self.setdefault(addr + 1, 0)
        return (high << 8) + low
    def write_word(self, val, addr):
        # Split into low/high bytes, masking the value to 16 bits.
        self[addr] = val & 0xFF
        self[addr + 1] = (val >> 8) & 0xFF
class Z80Tests(TestCase):
    """End-to-end tests of the Z80 core: dispatch, flags, stack and CB ops."""
    def test_dispatch(self):
        # Opcode 0x04 (inc b) should bump B, advance PC and cost 4 cycles.
        m = MockMem()
        m[0] = 0x4 # inc b
        z = Z80(m)
        cycles = z.dispatch()
        self.assertEqual(z.b, 1)
        self.assertEqual(z.pc, 1)
        self.assertEqual(cycles, 4)
    def test_set_flags(self):
        # Only the flags named in the mask string may be copied from the result.
        res1 = ALUResult(0, True, True, True, True)
        res2 = ALUResult(0, False, False, False, False)
        z = Z80(None)
        z.set_flags("znhc", res1)
        self.assertTrue(z.z_flag)
        self.assertTrue(z.n_flag)
        self.assertTrue(z.h_flag)
        self.assertTrue(z.c_flag)
        z.set_flags("znhc", res2)
        self.assertFalse(z.z_flag)
        self.assertFalse(z.n_flag)
        self.assertFalse(z.h_flag)
        self.assertFalse(z.c_flag)
        z.set_flags("zn", res1)
        self.assertTrue(z.z_flag)
        self.assertTrue(z.n_flag)
        self.assertFalse(z.h_flag)
        self.assertFalse(z.c_flag)
    def test_branching(self):
        # jr nz: taken branch costs 12 cycles, not-taken costs 8.
        m = MockMem()
        m[0] = 0x20 # jr nz 4
        m[1] = 0x4
        m[6] = 0x20
        m[7] = 0x0
        z = Z80(m)
        cycles = z.dispatch()
        self.assertEqual(cycles, 12)
        self.assertEqual(z.pc, 6)
        z.z_flag = True
        cycles = z.dispatch()
        self.assertEqual(cycles, 8)
        self.assertEqual(z.pc, 8)
    def test_daa(self):
        # Decimal-adjust A after BCD arithmetic: 0x3C adjusts to 0x42.
        m = MockMem()
        m[0] = 0x27
        z = Z80(m)
        z.a = 0x3C
        cycles = z.dispatch()
        self.assertEqual(z.a, 0x42)
    def test_return(self):
        # ret nz pops the return address little endian off the stack.
        m = MockMem()
        m[0] = 0xC0 # ret_nz
        m[8] = 0x55
        m[9] = 0xAA
        z = Z80(m)
        z.sp = 8
        cycles = z.dispatch()
        self.assertEqual(cycles, 20)
        self.assertEqual(z.sp, 10)
        self.assertEqual(z.pc, 0xAA55)
    def test_push(self):
        # _push pre-decrements SP by 2 and stores the word there.
        m = MockMem()
        z = Z80(m)
        z.sp = 8
        z._push(0xAA55)
        self.assertEqual(z.sp, 6)
        self.assertEqual(m.read_word(6), 0xAA55)
    def test_pop(self):
        # _pop reads the word at SP and post-increments SP by 2.
        m = MockMem()
        m[8] = 0x55
        m[9] = 0xAA
        z = Z80(m)
        z.sp = 8
        val = z._pop()
        self.assertEqual(val, 0xAA55)
        self.assertEqual(z.sp, 0xA)
    def test_extra_ops(self):
        # 0xCB prefix dispatches the extended opcode table (here rlc b).
        m = MockMem()
        m[0] = 0xCB # extra ops
        m[1] = 0x00 # rlc b
        z = Z80(m)
        z.b = 0x8
        cycles = z.dispatch()
        self.assertEqual(z.b, 0x10)
        self.assertEqual(z.pc, 2)
class Add8BitTests(TestCase):
    """Result and flag behaviour of 8-bit addition (add_8bit), incl. carry-in."""
    def test_8bit_add(self):
        res = add_8bit(4, 4)
        self.assertEqual(res.result, 8)
        self.assertFalse(res.n_flag)
    def test_8bit_add_carry_set(self):
        res = add_8bit(0xF0, 0x11)
        self.assertEqual(res.result, 1)
        self.assertTrue(res.c_flag)
    def test_8bit_add_carry_clear(self):
        res = add_8bit(0xE0, 0x10)
        self.assertEqual(res.result, 0xF0)
        self.assertFalse(res.c_flag)
    def test_8bit_add_half_set(self):
        res = add_8bit(0xF, 1)
        self.assertEqual(res.result, 0x10)
        self.assertTrue(res.h_flag)
    def test_8bit_add_half_clear(self):
        res = add_8bit(0xE, 1)
        self.assertEqual(res.result, 0xF)
        self.assertFalse(res.h_flag)
    def test_8bit_add_zero(self):
        res = add_8bit(0, 0)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
    def test_8bit_add_with_carry_flags_set(self):
        res = add_8bit(0xF8, 0x7, c=1)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.c_flag)
        self.assertTrue(res.h_flag)
        self.assertTrue(res.z_flag)
    def test_8bit_add_with_carry_flags_clear(self):
        res = add_8bit(0xF0, 0x1, c=1)
        self.assertEqual(res.result, 0xF2)
        self.assertFalse(res.c_flag)
        self.assertFalse(res.h_flag)
        self.assertFalse(res.z_flag)
class Sub8BitTests(TestCase):
    """Result and flag behaviour of 8-bit subtraction (sub_8bit), incl. borrow."""
    def test_8bit_sub(self):
        res = sub_8bit(8, 4)
        self.assertEqual(res.result, 4)
        self.assertTrue(res.n_flag)
    def test_8bit_sub_carry_clear(self):
        res = sub_8bit(0xF0, 0x10)
        self.assertEqual(res.result, 0xE0)
        self.assertFalse(res.c_flag)
    def test_8bit_sub_carry_set(self):
        # Underflow wraps modulo 256 and sets the carry (borrow) flag.
        res = sub_8bit(0xE0, 0xF0)
        self.assertEqual(res.result, 0xF0)
        self.assertTrue(res.c_flag)
    def test_8bit_sub_half_set(self):
        res = sub_8bit(0xF, 0x1)
        self.assertEqual(res.result, 0xE)
        self.assertTrue(res.h_flag)
    def test_8bit_sub_half_clear(self):
        res = sub_8bit(0xE, 0xF)
        self.assertEqual(res.result, 0xFF)
        self.assertFalse(res.h_flag)
    def test_8bit_sub_zero(self):
        res = sub_8bit(0xF, 0xF)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
    def test_8bit_sub_with_carry_flags_set(self):
        res = sub_8bit(0xF, 0xE, c=1)
        self.assertEqual(res.result, 0)
        self.assertFalse(res.c_flag)
        self.assertTrue(res.h_flag)
        self.assertTrue(res.z_flag)
    def test_8bit_sub_with_carry_flags_clear(self):
        res = sub_8bit(0xF0, 0xF0, c=1)
        self.assertEqual(res.result, 0xFF)
        self.assertTrue(res.c_flag)
        self.assertFalse(res.h_flag)
        self.assertFalse(res.z_flag)
class Add16BitTests(TestCase):
    """Result and flag behaviour of 16-bit addition (add_16bit)."""
    def test_16bit_add(self):
        res = add_16bit(4, 4)
        self.assertEqual(res.result, 8)
        self.assertFalse(res.n_flag)
    def test_16bit_add_carry(self):
        # Overflow past 0xFFFF wraps and sets only the carry flag.
        res = add_16bit(0xF000, 0x1100)
        self.assertEqual(res.result, 0x100)
        self.assertTrue(res.c_flag)
        self.assertFalse(res.h_flag)
        self.assertFalse(res.z_flag)
    def test_16bit_add_half(self):
        # Half-carry for 16-bit ops is out of bit 11.
        res = add_16bit(0xF00, 0x100)
        self.assertEqual(res.result, 0x1000)
        self.assertTrue(res.h_flag)
        self.assertFalse(res.c_flag)
        self.assertFalse(res.z_flag)
    def test_16bit_add_zero(self):
        res = add_16bit(0, 0)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
        self.assertFalse(res.c_flag)
        self.assertFalse(res.h_flag)
class Sub16BitTests(TestCase):
    """Result and flag behaviour of 16-bit subtraction (sub_16bit)."""
    def test_16bit_sub(self):
        res = sub_16bit(8, 4)
        self.assertEqual(res.result, 4)
        self.assertTrue(res.n_flag)
    def test_16bit_sub_carry_clear(self):
        res = sub_16bit(0xF000, 0x1000)
        self.assertEqual(res.result, 0xE000)
        self.assertFalse(res.c_flag)
    def test_16bit_sub_carry_set(self):
        # Underflow wraps modulo 0x10000 and sets the carry (borrow) flag.
        res = sub_16bit(0xE000, 0xF000)
        self.assertEqual(res.result, 0xF000)
        self.assertTrue(res.c_flag)
    def test_16bit_sub_half_set(self):
        res = sub_16bit(0xF00, 0x100)
        self.assertEqual(res.result, 0xE00)
        self.assertTrue(res.h_flag)
    def test_16bit_sub_half_clear(self):
        res = sub_16bit(0xE00, 0xF00)
        self.assertEqual(res.result, 0xFF00)
        self.assertFalse(res.h_flag)
    def test_16bit_sub_zero(self):
        res = sub_16bit(0xF00, 0xF00)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
class RotateRightTests(TestCase):
    """rotate_right (wraps bit 0 to bit 7) and the through-carry variant."""
    def test_rotate_right(self):
        res = rotate_right(0x10)
        self.assertEqual(res.result, 0x8)
        self.assertFalse(res.n_flag)
        self.assertFalse(res.h_flag)
    def test_rotate_right_wrap(self):
        # Bit 0 wraps around to bit 7 and is copied into the carry flag.
        res = rotate_right(1)
        self.assertEqual(res.result, 0x80)
        self.assertTrue(res.c_flag)
    def test_rotate_right_zero(self):
        res = rotate_right(0)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
    def test_rotate_right_through_carry(self):
        res = rotate_right_through_carry(0x10)
        self.assertEqual(res.result, 0x8)
        self.assertFalse(res.n_flag)
        self.assertFalse(res.h_flag)
    def test_rotate_right_through_carry_with_carry(self):
        # The incoming carry is shifted into bit 7.
        res = rotate_right_through_carry(0x10, c=1)
        self.assertEqual(res.result, 0x88)
        self.assertFalse(res.c_flag)
    def test_rotate_right_through_carry_set_carry(self):
        # Bit 0 leaves through the carry; with no carry in, result is 0.
        res = rotate_right_through_carry(1)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.c_flag)
        self.assertTrue(res.z_flag)
class RotateLeftTests(TestCase):
    """rotate_left (wraps bit 7 to bit 0) and the through-carry variant."""
    def test_rotate_left(self):
        res = rotate_left(0x8)
        self.assertEqual(res.result, 0x10)
        self.assertFalse(res.n_flag)
        self.assertFalse(res.h_flag)
    def test_rotate_left_wraps(self):
        # Bit 7 wraps around to bit 0 and is copied into the carry flag.
        res = rotate_left(0x80)
        self.assertEqual(res.result, 0x1)
        self.assertTrue(res.c_flag)
    def test_rotate_left_zero(self):
        res = rotate_left(0)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
    def test_rotate_left_through_carry(self):
        res = rotate_left_through_carry(0x8)
        self.assertEqual(res.result, 0x10)
        self.assertFalse(res.n_flag)
        self.assertFalse(res.h_flag)
    def test_rotate_left_through_carry_with_carry(self):
        # The incoming carry is shifted into bit 0.
        res = rotate_left_through_carry(0x8, c=1)
        self.assertEqual(res.result, 0x11)
        self.assertFalse(res.c_flag)
    def test_rotate_left_through_carry_set_carry(self):
        # Bit 7 leaves through the carry; with no carry in, result is 0.
        res = rotate_left_through_carry(0x80)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.c_flag)
        self.assertTrue(res.z_flag)
class Signed8BitTests(TestCase):
    """signed_8bit: reinterpret an unsigned byte as two's-complement."""
    def test_positive(self):
        res = signed_8bit(1)
        self.assertEqual(res, 1)
    def test_negative(self):
        res = signed_8bit(0xFF)
        self.assertEqual(res, -1)
class ShiftLeftTests(TestCase):
    """shift_left: bit 7 exits through the carry, zero fills bit 0."""
    def test_shift_left(self):
        res = shift_left(0x8)
        self.assertEqual(res.result, 0x10)
        self.assertFalse(res.n_flag)
        self.assertFalse(res.h_flag)
    def test_shift_left_zero(self):
        res = shift_left(0)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
    def test_shift_left_carry(self):
        res = shift_left(0x80)
        self.assertEqual(res.result, 0)
        self.assertTrue(res.z_flag)
        self.assertTrue(res.c_flag)
class ShiftRightTests(TestCase):
    """Arithmetic shift keeps the sign bit; logical shift fills with zero."""
    def test_shift_right_arithmetic(self):
        res = shift_right_arithmetic(0x81)
        self.assertEqual(res.result, 0xC0)
        self.assertTrue(res.c_flag)
    def test_shift_right_logical(self):
        res = shift_right_logical(0x81)
        self.assertEqual(res.result, 0x40)
        self.assertTrue(res.c_flag)
class SwapTests(TestCase):
    """swap: exchange the high and low nibbles of a byte."""
    def test_swap(self):
        res = swap(0xA5)
        self.assertEqual(res.result, 0x5A)
class BitTests(TestCase):
    """bit/set_bit/reset_bit: test, set and clear individual bits."""
    def test_bit(self):
        # z_flag reflects whether the tested bit is clear.
        res = bit(0x5, 2)
        self.assertFalse(res.z_flag)
        res = bit(0x5, 3)
        self.assertTrue(res.z_flag)
    def test_reset_bit(self):
        res = reset_bit(0x5, 2)
        self.assertEqual(res.result, 0x1)
        res = reset_bit(0x5, 3)
        self.assertEqual(res.result, 0x5)
    def test_set_bit(self):
        res = set_bit(0x5, 2)
        self.assertEqual(res.result, 0x5)
        res = set_bit(0x5, 3)
        self.assertEqual(res.result, 0xD)
| zbyrne/gameboy | pyboy/test_z80.py | Python | gpl-2.0 | 11,995 |
# -*- coding: utf-8 -*-
import os
from utils import make_dir
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
INSTANCE_FOLDER_PATH = os.path.join(PROJECT_ROOT, 'instance')
class BaseConfig(object):
    """Settings shared by every Flask configuration of the rootio project."""
    PROJECT = "rootio"
    DEBUG = False
    TESTING = False
    ADMINS = ['[email protected]']
    # http://flask.pocoo.org/docs/quickstart/#sessions
    # NOTE(review): hard-coded secret key; should be overridden per deployment.
    SECRET_KEY = 'SeekritKey'
    LOG_FOLDER = os.path.join(INSTANCE_FOLDER_PATH, 'logs')
    # Side effect at import time: ensure the folder exists.
    make_dir(LOG_FOLDER)
    # File upload, should override in production.
    # Limited the maximum allowed payload to 16 megabytes.
    # http://flask.pocoo.org/docs/patterns/fileuploads/#improving-uploads
    MAX_CONTENT_LENGTH = 16 * 1024 * 1024
    UPLOAD_FOLDER = os.path.join(INSTANCE_FOLDER_PATH, 'uploads')
    make_dir(UPLOAD_FOLDER)
class DefaultConfig(BaseConfig):
    """Development defaults: debug on, SQL echo, simple cache, gmail SMTP."""
    DEBUG = True
    # Flask-Sqlalchemy: http://packages.python.org/Flask-SQLAlchemy/config.html
    SQLALCHEMY_ECHO = True
    # SQLITE for prototyping.
    # NOTE(review): database credentials are hard-coded here; move them to
    # the instance folder before deploying.
    SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:NLPog1986@localhost'
    # Postgres for production.
    #SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:password@localhost'
    # Flask-babel: http://pythonhosted.org/Flask-Babel/
    ACCEPT_LANGUAGES = ['en','lg','luo','sw',]
    BABEL_DEFAULT_LOCALE = 'lg_UG' #see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
    #abd http://unicode.org/cldr/utility/languageid.jsp
    # Flask-cache: http://pythonhosted.org/Flask-Cache/
    CACHE_TYPE = 'simple'
    CACHE_DEFAULT_TIMEOUT = 60
    # Flask-mail: http://pythonhosted.org/flask-mail/
    # https://bitbucket.org/danjac/flask-mail/issue/3/problem-with-gmails-smtp-server
    MAIL_DEBUG = DEBUG
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_USE_TLS = True
    MAIL_USE_SSL = False
    # Should put MAIL_USERNAME and MAIL_PASSWORD in production under instance folder.
    MAIL_USERNAME = 'gmail_username'
    MAIL_PASSWORD = 'gmail_password'
    DEFAULT_MAIL_SENDER = '%[email protected]' % MAIL_USERNAME
    # Flask-openid: http://pythonhosted.org/Flask-OpenID/
    OPENID_FS_STORE_PATH = os.path.join(INSTANCE_FOLDER_PATH, 'openid')
    make_dir(OPENID_FS_STORE_PATH)
class TestConfig(BaseConfig):
    """Test-suite configuration: in-memory SQLite, CSRF checks disabled."""
    TESTING = True
    CSRF_ENABLED = False

    SQLALCHEMY_ECHO = False
    # 'sqlite://' with no path is an in-memory database.
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
| fcl-93/rootio_web | rootio/config-server-backup.py | Python | agpl-3.0 | 2,381 |
import sqlite3
import os
def init(db_path="settings.sqlite"):
    """
    Create and initialize the settings database.

    Does nothing if the file already exists; remove the local copy to
    recreate the database.

    :param db_path: location of the SQLite file.  Defaults to the previous
        hard-coded name, so existing callers are unaffected.
    """
    if not os.path.isfile(db_path):
        connection = sqlite3.connect(db_path)
        try:
            # ``with connection`` commits on success and rolls back on
            # error; the connection itself still must be closed explicitly.
            with connection:
                connection.execute(
                    "CREATE TABLE oauth (site, rate_remaining, rate_reset)")
                connection.execute(
                    "INSERT INTO oauth VALUES ('reddit', 30, 60)")
        finally:
            connection.close()


if __name__ == "__main__":
    init()
| Pasotaku/Anime-Feud-Survey-Backend | Old Python Code/settings_db_init.py | Python | mit | 617 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-02-27 01:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Judge.ballot_code a nullable,
    # blankable, unique CharField(max_length=256).

    dependencies = [
        ('tab', '0005_merge'),
    ]

    operations = [
        migrations.AlterField(
            model_name='judge',
            name='ballot_code',
            field=models.CharField(blank=True, max_length=256, null=True, unique=True),
        ),
    ]
| jolynch/mit-tab | mittab/apps/tab/migrations/0006_auto_20180227_0142.py | Python | mit | 433 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib import unquote_plus
import urllib2
from django.db import models
from django.conf import settings
from django.http import QueryDict
from django.utils.http import urlencode
from paypal.standard.models import PayPalStandardBase
from paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
from paypal.standard.pdt.signals import pdt_successful, pdt_failed
# ### Todo: Move this logic to conf.py:
# if paypal.standard.pdt is in installed apps
# ... then check for this setting in conf.py
class PayPalSettingsError(Exception):
    """Signals that the PayPal-related Django settings are misconfigured."""
try:
    IDENTITY_TOKEN = settings.PAYPAL_IDENTITY_TOKEN
except Exception:
    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt.
    # ``except Exception`` keeps the original intent (any settings failure,
    # including Django's ImproperlyConfigured) without that hazard.
    raise PayPalSettingsError("You must set PAYPAL_IDENTITY_TOKEN in settings.py. Get this token by enabling PDT in your PayPal account.")
class PayPalPDT(PayPalStandardBase):
    """Stores a PayPal Payment Data Transfer (PDT) transaction record."""
    format = u"<PDT: %s %s>"

    # Fields PayPal sends back on the PDT return URL:
    # amt = amount, cm = custom memo, sig = signature,
    # tx = transaction id, st = payment status.
    amt = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
    cm = models.CharField(max_length=255, blank=True)
    sig = models.CharField(max_length=255, blank=True)
    tx = models.CharField(max_length=255, blank=True)
    st = models.CharField(max_length=32, blank=True)

    class Meta:
        db_table = "paypal_pdt"
        verbose_name = "PayPal PDT"

    def _postback(self):
        """
        Perform PayPal PDT Postback validation.
        Sends the transaction ID and business token to PayPal, which responds
        with SUCCESS or FAILED.
        """
        postback_dict = dict(cmd="_notify-synch", at=IDENTITY_TOKEN, tx=self.tx)
        postback_params = urlencode(postback_dict)
        return urllib2.urlopen(self.get_endpoint(), postback_params).read()

    def get_endpoint(self):
        """Use the sandbox when in DEBUG mode as we don't have a test_ipn variable in pdt."""
        if settings.DEBUG:
            return SANDBOX_POSTBACK_ENDPOINT
        else:
            return POSTBACK_ENDPOINT

    def _verify_postback(self):
        # ### Now we don't really care what result was, just whether a flag was set or not.
        from paypal.standard.pdt.forms import PayPalPDTForm
        # PayPal's response: first line is the overall status (SUCCESS /
        # FAIL); each following line is a url-encoded key=value pair.
        # NOTE(review): ``result`` is computed but never read afterwards.
        result = False
        response_list = self.response.split('\n')
        response_dict = {}
        for i, line in enumerate(response_list):
            unquoted_line = unquote_plus(line).strip()
            if i == 0:
                self.st = unquoted_line
                if self.st == "SUCCESS":
                    result = True
            else:
                if self.st != "SUCCESS":
                    # Non-success: record the raw line as the flag and stop.
                    self.set_flag(line)
                    break
                try:
                    if not unquoted_line.startswith(' -'):
                        k, v = unquoted_line.split('=')
                        response_dict[k.strip()] = v.strip()
                except ValueError, e:
                    # Malformed key=value line; skip it.
                    pass

        qd = QueryDict('', mutable=True)
        qd.update(response_dict)
        qd.update(dict(ipaddress=self.ipaddress, st=self.st, flag_info=self.flag_info))
        pdt_form = PayPalPDTForm(qd, instance=self)
        pdt_form.save(commit=False)

    def send_signals(self):
        # Send the PDT signals...
        if self.flag:
            pdt_failed.send(sender=self)
        else:
            pdt_successful.send(sender=self)
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Process Android resources to generate R.java, and prepare for packaging.
This will crunch images and generate v14 compatible resources
(see generate_v14_compatible_resources.py).
"""
import optparse
import os
import re
import shutil
import sys
import zipfile
import generate_v14_compatible_resources
from util import build_utils
def ParseArgs(args):
  """Parses command line options.

  Returns:
    An options object as from optparse.OptionsParser.parse_args()
  """
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)

  parser.add_option('--android-sdk', help='path to the Android SDK folder')
  parser.add_option('--android-sdk-tools',
                    help='path to the Android SDK build tools folder')
  parser.add_option('--non-constant-id', action='store_true')

  parser.add_option('--android-manifest', help='AndroidManifest.xml path')
  parser.add_option('--custom-package', help='Java package for R.java')

  parser.add_option('--resource-dirs',
                    help='Directories containing resources of this target.')
  parser.add_option('--dependencies-res-zips',
                    help='Resources from dependents.')

  parser.add_option('--resource-zip-out',
                    help='Path for output zipped resources.')

  parser.add_option('--R-dir',
                    help='directory to hold generated R.java.')
  parser.add_option('--srcjar-out',
                    help='Path to srcjar to contain generated R.java.')

  parser.add_option('--proguard-file',
                    help='Path to proguard.txt generated file')

  parser.add_option(
      '--v14-verify-only',
      action='store_true',
      help='Do not generate v14 resources. Instead, just verify that the '
      'resources are already compatible with v14, i.e. they don\'t use '
      'attributes that cause crashes on certain devices.')

  parser.add_option(
      '--extra-res-packages',
      help='Additional package names to generate R.java files for')
  parser.add_option(
      '--extra-r-text-files',
      help='For each additional package, the R.txt file should contain a '
      'list of resources to be included in the R.java file in the format '
      'generated by aapt')

  parser.add_option(
      '--all-resources-zip-out',
      help='Path for output of all resources. This includes resources in '
      'dependencies.')

  parser.add_option('--stamp', help='File to touch on success')

  (options, args) = parser.parse_args(args)

  if args:
    parser.error('No positional arguments should be given.')

  # Check that required options have been provided.
  required_options = (
      'android_sdk',
      'android_sdk_tools',
      'android_manifest',
      'dependencies_res_zips',
      'resource_dirs',
      'resource_zip_out',
      )
  build_utils.CheckOptions(options, parser, required=required_options)

  # R.java is emitted either as loose files (--R-dir) or inside a srcjar
  # (--srcjar-out): exactly one of the two must be chosen.
  if (options.R_dir is None) == (options.srcjar_out is None):
    raise Exception('Exactly one of --R-dir or --srcjar-out must be specified.')

  return options
def CreateExtraRJavaFiles(
    r_dir, extra_packages, extra_r_text_files):
  """Copies the generated R.java into each extra package, repackaged.

  Args:
    r_dir: directory containing the single generated R.java.
    extra_packages: additional Java package names to generate R.java for.
    extra_r_text_files: one R.txt path per extra package (currently only
        validated for length; see TODO below).
  """
  if len(extra_packages) != len(extra_r_text_files):
    # BUG FIX: the two string literals used to concatenate without a space
    # ("...filesshould have...").
    raise Exception('--extra-res-packages and --extra-r-text-files '
                    'should have the same length')

  java_files = build_utils.FindInDirectory(r_dir, "R.java")
  if len(java_files) != 1:
    return
  r_java_file = java_files[0]
  # Use context managers so the file handles are always closed.
  with open(r_java_file) as f:
    r_java_contents = f.read()

  for package in extra_packages:
    package_r_java_dir = os.path.join(r_dir, *package.split('.'))
    build_utils.MakeDirectory(package_r_java_dir)
    package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
    with open(package_r_java_path, 'w') as f:
      f.write(re.sub(
          r'package [.\w]*;', 'package %s;' % package, r_java_contents))
    # TODO(cjhopman): These extra package's R.java files should be filtered to
    # only contain the resources listed in their R.txt files. At this point, we
    # have already compiled those other libraries, so doing this would only
    # affect how the code in this .apk target could refer to the resources.
def DidCrunchFail(returncode, stderr):
  """Determines whether aapt crunch failed from its return code and output.

  aapt's exit status alone cannot be trusted: any stderr output is treated
  as failure (http://crbug.com/314885), except lines containing
  "libpng warning", a known benign condition (http://crbug.com/364355).
  """
  if returncode != 0:
    return True
  return any(line and 'libpng warning' not in line
             for line in stderr.splitlines())
def ZipResources(resource_dirs, zip_path):
  """Zips all files under resource_dirs into zip_path.

  Python zipfile does not provide a way to replace a file (it just writes
  another file with the same name). So, first collect all the files to put
  in the zip (with proper overriding), and then zip them.  When the same
  relative path occurs in several directories, the later directory wins.
  """
  files_to_zip = dict()
  for d in resource_dirs:
    for root, _, files in os.walk(d):
      for f in files:
        archive_path = os.path.join(os.path.relpath(root, d), f)
        path = os.path.join(root, f)
        files_to_zip[archive_path] = path
  with zipfile.ZipFile(zip_path, 'w') as outzip:
    # BUG FIX: dict.iteritems() only exists on Python 2; items() behaves
    # identically and also works on Python 3.
    for archive_path, path in files_to_zip.items():
      outzip.write(path, archive_path)
def main():
  """Drives resource processing: v14 generation, R.java, crunch, zipping."""
  args = build_utils.ExpandFileArgs(sys.argv[1:])

  options = ParseArgs(args)
  android_jar = os.path.join(options.android_sdk, 'android.jar')
  aapt = os.path.join(options.android_sdk_tools, 'aapt')

  # Inputs tracked for the optional depfile.
  input_files = []

  with build_utils.TempDir() as temp_dir:
    deps_dir = os.path.join(temp_dir, 'deps')
    build_utils.MakeDirectory(deps_dir)
    v14_dir = os.path.join(temp_dir, 'v14')
    build_utils.MakeDirectory(v14_dir)
    gen_dir = os.path.join(temp_dir, 'gen')
    build_utils.MakeDirectory(gen_dir)

    input_resource_dirs = build_utils.ParseGypList(options.resource_dirs)

    for resource_dir in input_resource_dirs:
      generate_v14_compatible_resources.GenerateV14Resources(
          resource_dir,
          v14_dir,
          options.v14_verify_only)

    # Extract each dependency's resource zip into its own subdirectory.
    dep_zips = build_utils.ParseGypList(options.dependencies_res_zips)
    input_files += dep_zips
    dep_subdirs = []
    for z in dep_zips:
      subdir = os.path.join(deps_dir, os.path.basename(z))
      if os.path.exists(subdir):
        raise Exception('Resource zip name conflict: ' + os.path.basename(z))
      build_utils.ExtractAll(z, path=subdir)
      dep_subdirs.append(subdir)

    # Generate R.java. This R.java contains non-final constants and is used only
    # while compiling the library jar (e.g. chromium_content.jar). When building
    # an apk, a new R.java file with the correct resource -> ID mappings will be
    # generated by merging the resources from all libraries and the main apk
    # project.
    package_command = [aapt,
                       'package',
                       '-m',
                       '-M', options.android_manifest,
                       '--auto-add-overlay',
                       '-I', android_jar,
                       '--output-text-symbols', gen_dir,
                       '-J', gen_dir]

    for d in input_resource_dirs:
      package_command += ['-S', d]

    for d in dep_subdirs:
      package_command += ['-S', d]

    if options.non_constant_id:
      package_command.append('--non-constant-id')
    if options.custom_package:
      package_command += ['--custom-package', options.custom_package]
    if options.proguard_file:
      package_command += ['-G', options.proguard_file]
    build_utils.CheckOutput(package_command, print_stderr=False)

    if options.extra_res_packages:
      CreateExtraRJavaFiles(
          gen_dir,
          build_utils.ParseGypList(options.extra_res_packages),
          build_utils.ParseGypList(options.extra_r_text_files))

    # This is the list of directories with resources to put in the final .zip
    # file. The order of these is important so that crunched/v14 resources
    # override the normal ones.
    zip_resource_dirs = input_resource_dirs + [v14_dir]

    base_crunch_dir = os.path.join(temp_dir, 'crunch')

    # Crunch image resources. This shrinks png files and is necessary for
    # 9-patch images to display correctly. 'aapt crunch' accepts only a single
    # directory at a time and deletes everything in the output directory.
    for idx, d in enumerate(input_resource_dirs):
      crunch_dir = os.path.join(base_crunch_dir, str(idx))
      build_utils.MakeDirectory(crunch_dir)
      zip_resource_dirs.append(crunch_dir)
      aapt_cmd = [aapt,
                  'crunch',
                  '-C', crunch_dir,
                  '-S', d]
      build_utils.CheckOutput(aapt_cmd, fail_func=DidCrunchFail)

    ZipResources(zip_resource_dirs, options.resource_zip_out)

    if options.all_resources_zip_out:
      ZipResources(
          zip_resource_dirs + dep_subdirs, options.all_resources_zip_out)

    # Emit R.java either as loose files or as a srcjar (mutually exclusive,
    # enforced in ParseArgs).
    if options.R_dir:
      build_utils.DeleteDirectory(options.R_dir)
      shutil.copytree(gen_dir, options.R_dir)
    else:
      build_utils.ZipDir(options.srcjar_out, gen_dir)

  if options.depfile:
    input_files += build_utils.GetPythonDependencies()
    build_utils.WriteDepfile(options.depfile, input_files)

  if options.stamp:
    build_utils.Touch(options.stamp)


if __name__ == '__main__':
  main()
| ondra-novak/chromium.src | build/android/gyp/process_resources.py | Python | bsd-3-clause | 9,505 |
class Program:
    """Container for an executable BASIC program."""

    def __init__(self):
        # No state yet; placeholder constructor.
        pass
| chaosotter/python-basic | lib/runtime/program.py | Python | mit | 116 |
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = '[email protected]'
# All imports
from os import walk, path, stat
from extras import Logger
# The Kucera Francis frequency class
class KFFrequency:
    """Computes Kucera-Francis word frequencies for a directory of texts.

    Words present in the psycholinguistic dictionary are assigned their KF
    frequency; absent words get 0.  Pure-digit tokens are skipped.
    """

    # Constructor for the Kucera Francis frequency class
    def __init__(self, in_dir, out_file, dict_file):
        """
        :param in_dir: directory tree containing the input text files
        :param out_file: path for the semicolon-separated results file
        :param dict_file: semicolon-separated Kucera-Francis dictionary file
        """
        self.in_dir = in_dir
        self.out_file = out_file
        self.dict_file = dict_file
        self.kf_val = {}  # word -> KF frequency from the dictionary
        self.kf_res = {}  # word -> KF frequency for words seen in the input

    # Run the Kucera Francis frequency calculator
    def run(self):
        # Build up the Kucera Francis dictionary.
        # FIX: files are now closed deterministically via context managers.
        Logger.log_message("Reading " + self.dict_file)
        with open(self.dict_file, 'r') as dictionary:
            for line in dictionary:
                cols = line.split(';')
                self.kf_val[cols[0]] = int(cols[1])

        # Check for the input directory.
        # FIX: narrowed the bare except to OSError (what stat() raises).
        try:
            stat(self.in_dir)
        except OSError:
            Logger.log_error('Input text not found')
            return

        Logger.log_message('Running Kucera Francis frequency counter')
        for (dir_path, _, file_names) in walk(self.in_dir):
            for file_name in file_names:
                in_file = path.join(dir_path, file_name)
                self.count_kf_frequency(in_file)
        Logger.log_success('Finished Kucera Francis frequency counting')

        Logger.log_message('Writing results to ' + self.out_file)
        self.dump_results()
        Logger.log_success('Finished writing results to ' + self.out_file)

    # Count Kucera Francis frequency for a file
    def count_kf_frequency(self, in_file):
        Logger.log_message('Counting Kucera Francis frequency for ' + in_file)
        # FIX: the input file was previously never closed.
        with open(in_file, 'r') as input_file:
            for line in input_file:
                for word in line.split():
                    if word.isdigit():
                        continue
                    # Words missing from the dictionary are recorded with 0.
                    self.kf_res[word] = self.kf_val.get(word, 0)

    # Dump the results to output file
    def dump_results(self):
        with open(self.out_file, 'w') as output_file:
            for word in self.kf_res:
                output_file.write(word + ";" + str(self.kf_res[word]) + "\n")
import numpy as np
from scipy import *
from scipy.sparse import *
from itertools import izip
import operator
def sort_dic_by_value (dic,reverse=False):
    """Return the dict's (key, value) pairs as a list sorted by value.

    FIX: uses items() instead of Python-2-only iteritems() so the helper
    works on both Python 2 and Python 3.

    :param dic: dictionary to sort
    :param reverse: sort descending when True
    """
    return sorted(dic.items(), key=operator.itemgetter(1), reverse=reverse)
## Maximum value of a dictionary
def dict_max(dic):
    """Return (max_value, key) for the entry with the largest value.

    Returns 0 for an empty dictionary (behavior kept for backward
    compatibility with existing callers).

    FIX: the old emptiness test ``aux.keys() == []`` is never True on
    Python 3 (keys() is a view), so an empty dict used to raise ValueError
    from max(); ``if not dic`` works on both Python versions.

    NOTE(review): when several keys share the maximum value, which key is
    returned is unspecified -- same as the original implementation.
    """
    if not dic:
        return 0
    # Invert to value -> key, then look up the key of the maximum value.
    aux = dict((value, key) for key, value in dic.items())
    max_value = max(aux.keys())
    return max_value, aux[max_value]
############
## Dot products that works for sparse matrix as well
## Taken from:
## http://old.nabble.com/Sparse-matrices-and-dot-product-td30315992.html
############
def spdot(A, B):
    "The same as np.dot(A, B), except it works even if A or B or both might be sparse."
    # Dispatch on sparsity: sparse*sparse multiplies directly; mixed cases
    # multiply and then re-view the result as the dense operand's class.
    a_sparse = issparse(A)
    b_sparse = issparse(B)
    if a_sparse and b_sparse:
        return A * B
    if a_sparse:
        return (A * B).view(type=B.__class__)
    if b_sparse:
        return (B.T * A.T).T.view(type=A.__class__)
    return np.dot(A, B)
##############
### Gets a perpendicualar line in 2D
##############
def perp_2d(a):
    # Maps each row (x, y) to (-1/x, 1/y).  The dot product with the input
    # row is x*(-1/x) + y*(1/y) = -1 + 1 = 0, so the result is indeed
    # perpendicular (though not unit-length).
    # NOTE(review): divides by the components, so rows with a zero entry
    # produce inf/nan -- confirm callers never pass axis-aligned vectors.
    res = 1./a
    res = res[:,] * [-1,1]
    return res
def l2norm(a):
    """Frobenius norm of a 2-D array: sqrt of the sum of squared entries,
    accumulated column by column.

    FIX: uses range() instead of Python-2-only xrange() so the helper runs
    on Python 3 as well.

    :param a: 2-D numpy array
    """
    value = 0
    for i in range(a.shape[1]):
        value += np.dot(a[:, i], a[:, i])
    return np.sqrt(value)
def l2norm_squared(a):
    """Squared Frobenius norm of a 2-D array (sum of squared entries).

    FIX: uses range() instead of Python-2-only xrange() so the helper runs
    on Python 3 as well.

    :param a: 2-D numpy array
    """
    value = 0
    for i in range(a.shape[1]):
        value += np.dot(a[:, i], a[:, i])
    return value
#######
## Normalizes an array to sum to one, either column wize, or row wize or the full array.
## Column wize - 0 default
## Rown wize - 1 default
## All - 2 default
########
def normalize_array(a,direction="column"):
    """Normalize an array so it sums to one.

    :param a: numpy array (not modified; a copy is returned)
    :param direction: "column" (each column sums to 1), "row" (each row
        sums to 1), or "all" (the whole array sums to 1).  Any other value
        prints an error and returns an unnormalized copy.
    :returns: normalized copy with NaN/inf from zero sums replaced by 0
        via np.nan_to_num.
    """
    b = a.copy()
    if(direction == "column"):
        sums = np.sum(b,0)
        return np.nan_to_num(b/sums)
    elif(direction == "row"):
        sums =np.sum(b,1)
        return np.nan_to_num((b.transpose() / sums).transpose())
    elif(direction == "all"):
        sums = np.sum(b)
        return np.nan_to_num(b / sums)
    else:
        # FIX: the Python 2 print statement is a SyntaxError on Python 3;
        # the call form below behaves identically on both versions.
        print("Error non existing normalization")
        return b
| iarroyof/lxmls-toolkit | lxmls/util/my_math_utils.py | Python | mit | 1,945 |
from datetime import date
from django.conf import settings
from django.core import mail
from django.urls import reverse
from extforms.forms import SelfOrganisedSubmissionExternalForm
from extrequests.models import SelfOrganisedSubmission
from workshops.models import Curriculum, Language
from workshops.tests.base import TestBase
class TestSelfOrganisedSubmissionExternalForm(TestBase):
    """Test external (accessible to non-logged in users) form."""

    def test_fields_presence(self):
        """Test if the form shows correct fields."""
        form = SelfOrganisedSubmissionExternalForm()
        fields_left = set(form.fields.keys())
        # Expected public form fields; the set comparison catches both
        # missing and unexpected fields.
        fields_right = set(
            [
                "personal",
                "family",
                "email",
                "secondary_email",
                "institution",
                "institution_other_name",
                "institution_other_URL",
                "institution_department",
                "workshop_format",
                "workshop_format_other",
                "start",
                "end",
                "workshop_url",
                "workshop_types",
                "workshop_types_other_explain",
                "country",
                "language",
                "public_event",
                "public_event_other",
                "additional_contact",
                "data_privacy_agreement",
                "code_of_conduct_agreement",
                "host_responsibilities",
                "workshop_listed",
                "online_inperson",
                "captcha",
            ]
        )
        self.assertEqual(fields_left, fields_right)

    def test_request_added(self):
        """Ensure the request is successfully added to the pool, and
        notification email is sent."""
        # Minimal valid payload for the external submission form.
        data = {
            "personal": "Harry",
            "family": "Potter",
            "email": "[email protected]",
            "institution_other_name": "Ministry of Magic",
            "institution_other_URL": "magic.gov.uk",
            "workshop_format": "periodic",
            "workshop_format_other": "",
            "start": date(2020, 11, 7),
            "end": date(2020, 11, 8),
            "workshop_url": "",
            "workshop_types": [
                Curriculum.objects.filter(active=True)
                .exclude(mix_match=True)
                .first()
                .pk,
            ],
            "workshop_types_other_explain": "",
            "country": "GB",
            "language": Language.objects.get(name="English").pk,
            "public_event": "closed",
            "public_event_other": "",
            "additional_contact": "",
            "data_privacy_agreement": True,
            "code_of_conduct_agreement": True,
            "host_responsibilities": True,
            "online_inperson": "inperson",
        }
        self.passCaptcha(data)

        rv = self.client.post(reverse("selforganised_submission"), data, follow=True)
        self.assertEqual(rv.status_code, 200)
        content = rv.content.decode("utf-8")
        if "form" in rv.context:
            self.assertEqual(
                rv.context["form"].is_valid(), True, dict(rv.context["form"].errors)
            )
        self.assertNotIn("Please fix errors in the form below", content)
        self.assertIn("Thank you for submitting self-organised workshop", content)
        # New submission is stored and starts in the "pending" state.
        self.assertEqual(SelfOrganisedSubmission.objects.all().count(), 1)
        self.assertEqual(SelfOrganisedSubmission.objects.all()[0].state, "p")

        # 1 email for autoresponder, 1 email for admins
        self.assertEqual(len(mail.outbox), 2)

        # save the email messages for test debugging
        # with open('email0.eml', 'wb') as f:
        #     f.write(mail.outbox[0].message().as_bytes())
        # with open('email1.eml', 'wb') as f:
        #     f.write(mail.outbox[1].message().as_bytes())

        # before tests, check if the template invalid string exists
        self.assertTrue(settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"])

        # test autoresponder email
        msg = mail.outbox[0]
        self.assertEqual(msg.subject, "Self-organised submission confirmation")
        self.assertEqual(msg.recipients(), ["[email protected]"])
        self.assertNotIn(
            settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"], msg.body
        )

        # test email for admins
        msg = mail.outbox[1]
        self.assertEqual(
            msg.subject,
            "New self-organised submission: Ministry of Magic",
        )
        self.assertEqual(msg.recipients(), ["[email protected]"])
        self.assertNotIn(
            settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"], msg.body
        )
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South migration: creates the precise_bbcode_bbcodetag
    # table holding user-defined BBCode tag definitions.

    def forwards(self, orm):
        # Adding model 'BBCodeTag'
        db.create_table('precise_bbcode_bbcodetag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tag_name', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=20)),
            ('tag_definition', self.gf('django.db.models.fields.TextField')()),
            ('html_replacement', self.gf('django.db.models.fields.TextField')()),
            ('helpline', self.gf('django.db.models.fields.CharField')(max_length=120, null=True, blank=True)),
            ('display_on_editor', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('newline_closes', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('same_tag_closes', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('standalone', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('render_embedded', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('escape_html', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('replace_links', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('strip', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('swallow_trailing_newline', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('precise_bbcode', ['BBCodeTag'])

    def backwards(self, orm):
        # Deleting model 'BBCodeTag'
        db.delete_table('precise_bbcode_bbcodetag')

    # Frozen ORM snapshot used by South to reconstruct the model state.
    models = {
        'precise_bbcode.bbcodetag': {
            'Meta': {'object_name': 'BBCodeTag'},
            'display_on_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'escape_html': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'helpline': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'html_replacement': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'newline_closes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'render_embedded': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'replace_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'same_tag_closes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'standalone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'strip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'swallow_trailing_newline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'tag_definition': ('django.db.models.fields.TextField', [], {}),
            'tag_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'})
        }
    }

    complete_apps = ['precise_bbcode']
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from magnum.common import cert_manager
from magnum.common import exception
from magnum.common import short_id
from magnum.common.x509 import operations as x509
import magnum.conf
import os
import shutil
import tempfile
CONDUCTOR_CLIENT_NAME = six.u('Magnum-Conductor')
LOG = logging.getLogger(__name__)
CONF = magnum.conf.CONF
def _generate_ca_cert(issuer_name, context=None):
    """Generate and store ca_cert

    :param issuer_name: CA subject name
    :param context: request context passed through to the cert backend
    :returns: CA cert uuid and CA cert, CA private key password
    """
    # The random short id doubles as the passphrase protecting the CA key.
    ca_password = short_id.generate_id()
    ca_cert = x509.generate_ca_certificate(issuer_name,
                                           encryption_password=ca_password)
    ca_cert_ref = cert_manager.get_backend().CertManager.store_cert(
        certificate=ca_cert['certificate'],
        private_key=ca_cert['private_key'],
        private_key_passphrase=ca_password,
        name=issuer_name,
        context=context,
    )
    LOG.debug('CA cert is created: %s', ca_cert_ref)
    return ca_cert_ref, ca_cert, ca_password
def _generate_client_cert(issuer_name, ca_cert, ca_password, context=None):
    """Generate and store magnum_client_cert

    :param issuer_name: CA subject name
    :param ca_cert: CA certificate
    :param ca_password: CA private key password
    :param context: request context passed through to the cert backend
    :returns: Magnum client cert uuid
    """
    client_password = short_id.generate_id()
    # TODO(strigazi): set subject name and organization per driver
    # For RBAC kubernetes cluster we need the client to have:
    # subject_name: admin
    # organization_name system:masters
    # Non kubernetes drivers are not using the certificates fields
    # for authorization
    subject_name = 'admin'
    organization_name = 'system:masters'
    client_cert = x509.generate_client_certificate(
        issuer_name,
        subject_name,
        organization_name,
        ca_cert['private_key'],
        encryption_password=client_password,
        ca_key_password=ca_password,
    )
    magnum_cert_ref = cert_manager.get_backend().CertManager.store_cert(
        certificate=client_cert['certificate'],
        private_key=client_cert['private_key'],
        private_key_passphrase=client_password,
        name=CONDUCTOR_CLIENT_NAME,
        context=context
    )
    LOG.debug('Magnum client cert is created: %s', magnum_cert_ref)
    return magnum_cert_ref
def _get_issuer_name(cluster):
issuer_name = cluster.name
# When user create a Cluster without name, the cluster.name is None.
# We should use cluster.uuid as issuer name.
if issuer_name is None:
issuer_name = cluster.uuid
return issuer_name
def generate_certificates_to_cluster(cluster, context=None):
    """Generate ca_cert and magnum client cert and set to cluster

    Creates three CAs (cluster, etcd, front-proxy) plus the conductor's
    client certificate, and records their backend references on the
    cluster object.

    :param cluster: The cluster to set CA cert and magnum client cert
    :raises CertificatesToClusterFailed: if any generation/storage step fails
    :returns: CA cert uuid and magnum client cert uuid
    """
    try:
        issuer_name = _get_issuer_name(cluster)

        LOG.debug('Start to generate certificates: %s', issuer_name)

        ca_cert_ref, ca_cert, ca_password = _generate_ca_cert(issuer_name,
                                                              context=context)
        etcd_ca_cert_ref, _, _ = _generate_ca_cert(issuer_name,
                                                   context=context)
        fp_ca_cert_ref, _, _ = _generate_ca_cert(issuer_name,
                                                 context=context)
        magnum_cert_ref = _generate_client_cert(issuer_name,
                                                ca_cert,
                                                ca_password,
                                                context=context)

        cluster.ca_cert_ref = ca_cert_ref
        cluster.magnum_cert_ref = magnum_cert_ref
        cluster.etcd_ca_cert_ref = etcd_ca_cert_ref
        cluster.front_proxy_ca_cert_ref = fp_ca_cert_ref
    except Exception:
        # Wrap any failure in a single domain-specific exception.
        LOG.exception('Failed to generate certificates for Cluster: %s',
                      cluster.uuid)
        raise exception.CertificatesToClusterFailed(cluster_uuid=cluster.uuid)
def get_cluster_ca_certificate(cluster, context=None, ca_cert_type=None):
    """Fetch one of the cluster's CA certificates from the cert backend.

    :param ca_cert_type: None for the main cluster CA; "etcd" or
        "front_proxy"/"front-proxy" for the auxiliary CAs.
    :returns: the cert object returned by the configured backend
    """
    ref = cluster.ca_cert_ref
    if ca_cert_type == "etcd":
        ref = cluster.etcd_ca_cert_ref
    elif ca_cert_type in ["front_proxy", "front-proxy"]:
        ref = cluster.front_proxy_ca_cert_ref

    ca_cert = cert_manager.get_backend().CertManager.get_cert(
        ref,
        resource_ref=cluster.uuid,
        context=context
    )

    return ca_cert
def get_cluster_magnum_cert(cluster, context=None):
    """Fetch the conductor's client certificate for this cluster.

    :returns: the cert object returned by the configured backend
    """
    magnum_cert = cert_manager.get_backend().CertManager.get_cert(
        cluster.magnum_cert_ref,
        resource_ref=cluster.uuid,
        context=context
    )

    return magnum_cert
def create_client_files(cluster, context=None):
    """Return (ca, key, cert) PEM file objects for talking to the cluster.

    When CONF.cluster.temp_cache_dir is not a directory, the material is
    written to NamedTemporaryFiles (deleted when closed); otherwise it is
    cached on disk under <temp_cache_dir>/<cluster.uuid> and reused by
    later calls.

    NOTE(review): freshly written files are returned in 'w+' mode with the
    position at EOF -- callers presumably use .name or seek(0) before
    reading; confirm before changing.
    """
    if not os.path.isdir(CONF.cluster.temp_cache_dir):
        LOG.debug("Certificates will not be cached in the filesystem: they "
                  "will be created as tempfiles.")
        ca_cert = get_cluster_ca_certificate(cluster, context)
        magnum_cert = get_cluster_magnum_cert(cluster, context)

        ca_file = tempfile.NamedTemporaryFile(mode="w+")
        ca_file.write(encodeutils.safe_decode(ca_cert.get_certificate()))
        ca_file.flush()

        key_file = tempfile.NamedTemporaryFile(mode="w+")
        key_file.write(encodeutils.safe_decode(
            magnum_cert.get_decrypted_private_key()))
        key_file.flush()

        cert_file = tempfile.NamedTemporaryFile(mode="w+")
        cert_file.write(encodeutils.safe_decode(magnum_cert.get_certificate()))
        cert_file.flush()

    else:
        cached_cert_dir = os.path.join(CONF.cluster.temp_cache_dir,
                                       cluster.uuid)
        cached_ca_file = os.path.join(cached_cert_dir, 'ca.crt')
        cached_key_file = os.path.join(cached_cert_dir, 'client.key')
        cached_cert_file = os.path.join(cached_cert_dir, 'client.crt')

        if not os.path.isdir(cached_cert_dir):
            # First use for this cluster: populate the cache.
            os.mkdir(cached_cert_dir)

            ca_cert = get_cluster_ca_certificate(cluster, context)
            magnum_cert = get_cluster_magnum_cert(cluster, context)

            ca_file = open(cached_ca_file, "w+")
            ca_file.write(encodeutils.safe_decode(ca_cert.get_certificate()))
            ca_file.flush()

            key_file = open(cached_key_file, "w+")
            key_file.write(encodeutils.safe_decode(
                magnum_cert.get_decrypted_private_key()))
            key_file.flush()

            cert_file = open(cached_cert_file, "w+")
            cert_file.write(
                encodeutils.safe_decode(magnum_cert.get_certificate()))
            cert_file.flush()

            # Restrict the cached key material to the owner.
            os.chmod(cached_ca_file, 0o600)
            os.chmod(cached_key_file, 0o600)
            os.chmod(cached_cert_file, 0o600)
        else:
            # Cache hit: reuse the previously written files read-only.
            ca_file = open(cached_ca_file, "r")
            key_file = open(cached_key_file, "r")
            cert_file = open(cached_cert_file, "r")

    return ca_file, key_file, cert_file
def sign_node_certificate(cluster, csr, ca_cert_type=None, context=None):
    """Sign a node's CSR with one of the cluster's CA certificates.

    :param csr: certificate signing request from the node
    :param ca_cert_type: None for the cluster CA; "etcd" or
        "front_proxy"/"front-proxy" to sign with an auxiliary CA
    :returns: the signed node certificate
    """
    ref = cluster.ca_cert_ref
    if ca_cert_type == "etcd":
        ref = cluster.etcd_ca_cert_ref
    elif ca_cert_type in ["front_proxy", "front-proxy"]:
        ref = cluster.front_proxy_ca_cert_ref

    ca_cert = cert_manager.get_backend().CertManager.get_cert(
        ref,
        resource_ref=cluster.uuid,
        context=context
    )

    node_cert = x509.sign(csr,
                          _get_issuer_name(cluster),
                          ca_cert.get_private_key(),
                          ca_cert.get_private_key_passphrase())
    return node_cert
def delete_certificates_from_cluster(cluster, context=None):
    """Delete ca cert and magnum client cert from cluster

    Best-effort: failures are logged and never propagated.

    :param cluster: The cluster which has certs
    """
    for attr_name in ('ca_cert_ref', 'magnum_cert_ref'):
        try:
            ref = getattr(cluster, attr_name, None)
            if ref:
                cert_manager.get_backend().CertManager.delete_cert(
                    ref, resource_ref=cluster.uuid, context=context)
        except Exception:
            LOG.warning("Deleting certs is failed for Cluster %s",
                        cluster.uuid)
def delete_client_files(cluster, context=None):
    """Remove the cluster's cached certificate directory, ignoring errors.

    :param context: unused; kept for signature symmetry with the other
        helpers in this module
    """
    cached_cert_dir = os.path.join(CONF.cluster.temp_cache_dir,
                                   cluster.uuid)
    try:
        if os.path.isdir(cached_cert_dir):
            shutil.rmtree(cached_cert_dir)
    except Exception:
        # Best-effort cleanup: log and continue.
        LOG.warning("Deleting client files failed for Cluster %s",
                    cluster.uuid)
"""
PyPixel Cache wrapper by @WireSegal and TheDestruc7i0n
You may use this code, as long as you give credit
http://pypixel.thedestruc7i0n.ca/
Allows you to make calls to the Hypixel API through python.
Updated by TheDestruc7i0n to be able to Cache files
"""
#Old imports
import json
import urllib2
import time
#Thanks http://stackoverflow.com/users/100297/martijn-pieters !
import requests_cache, requests
import logging
from datetime import (
datetime,
timedelta
)
from requests.exceptions import (
ConnectionError,
Timeout,
)
from requests_cache.core import (
dispatch_hook,
CachedSession,
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class FallbackCachedSession(CachedSession):
    """
    Cached session that'll reuse expired cache data on timeouts

    This allows survival in case the backend is down, living of stale
    data until it comes back.
    """

    def send(self, request, **kwargs):
        # this *bypasses* CachedSession.send; we want to call the method
        # CachedSession.send() would have delegated to!
        session_send = super(CachedSession, self).send

        # Uncacheable request: go straight to the network.
        if (self._is_cache_disabled or
                request.method not in self._cache_allowable_methods):
            response = session_send(request, **kwargs)
            response.from_cache = False
            return response

        cache_key = self.cache.create_key(request)

        def send_request_and_cache_response(stale=None):
            # Issue the real request; on network failure or a 500, fall
            # back to the stale cached response when one is available.
            try:
                response = session_send(request, **kwargs)
            except (Timeout, ConnectionError):
                if stale is None:
                    raise
                log.warning('No response received, reusing stale response for '
                            '%s', request.url)
                return stale

            if stale is not None and response.status_code == 500:
                log.warning('Response gave 500 error, reusing stale response '
                            'for %s', request.url)
                return stale

            if response.status_code in self._cache_allowable_codes:
                self.cache.save_response(cache_key, response)
            response.from_cache = False
            return response

        response, timestamp = self.cache.get_response_and_time(cache_key)
        if response is None:
            # Cache miss: fetch and store.
            return send_request_and_cache_response()

        if self._cache_expire_after is not None:
            is_expired = datetime.utcnow() - timestamp > self._cache_expire_after
            if is_expired:
                self.cache.delete(cache_key)
                # try and get a fresh response, but if that fails reuse the
                # stale one
                return send_request_and_cache_response(stale=response)

        # dispatch hook here, because we've removed it before pickling
        response.from_cache = True
        response = dispatch_hook('response', request.hooks, response, **kwargs)
        return response
def basecache_delete(self, key):
    """Mark a cache entry as stale instead of removing it.

    The stored response is kept, but its timestamp is reset to
    ``datetime.min`` so expiry checks always treat it as expired; the
    fallback session can then reuse it if the backend goes down.
    Unknown keys are silently ignored.
    """
    try:
        real_key = key if key in self.responses else self.keys_map[key]
        entry = self.responses[real_key]
        self.responses[real_key] = (entry[0], datetime.min)
    except KeyError:
        return
# Monkey-patch the backend so delete() only marks entries stale
# (timestamp -> datetime.min) instead of removing them; stale entries can
# then be served when the remote API is unreachable.
from requests_cache.backends.base import BaseCache
BaseCache.delete = basecache_delete
# Install a global sqlite-backed cache (10 minute expiry) that every
# requests call in this module goes through, using the fallback session.
requests_cache.install_cache(
    'HypixelAPI', backend='sqlite', expire_after=600,
    session_factory=FallbackCachedSession)
#Begin old code, with changes
def expandUrlData(data):
    """Serialize *data* into a "?key=value&..." query string.

    No URL escaping is performed; keys and values are joined verbatim,
    which is the behaviour the API wrappers below rely on.
    """
    pairs = [key + "=" + data[key] for key in data]
    return "?" + "&".join(pairs)
def urlopen(url, t, ua, params=None):
    """GET *url* through the cached ``requests`` session and return JSON.

    :param url: base URL to request
    :param t: timeout in seconds
    :param ua: User-Agent header value
    :param params: query parameters, appended via ``expandUrlData``
    :returns: decoded JSON dict with an extra "Cached" key telling whether
        the answer came from the local cache

    Fixed: the signature previously used a shared mutable default
    (``params={}``); ``None`` avoids any accidental cross-call state.
    """
    if params is None:
        params = {}
    url += expandUrlData(params)
    res = requests.get(url, headers={'User-Agent': ua}, timeout=t)
    html = res.json()
    try:
        # from_cache only exists when the cached session served the reply.
        html["Cached"] = res.from_cache
    except AttributeError:
        html["Cached"] = True
    return html
def noncache_urlopen(url, t, ua, params=None):
    """GET *url* directly with urllib2, bypassing the requests cache.

    Used for endpoints whose answers must always be fresh (key info,
    sessions).  Returns the decoded JSON body.

    Fixed: replaced the shared mutable default (``params={}``) with None.
    """
    if params is None:
        params = {}
    url += expandUrlData(params)
    req = urllib2.Request(url, headers={'User-Agent': ua})
    html = urllib2.urlopen(req, timeout=t).read()
    return json.loads(html)
class HypixelAPI:
    """Wrapper around the public Hypixel API for a single API key.

    Most calls go through the cached ``urlopen``; key and session lookups
    use ``noncache_urlopen`` so they are always fresh.

    Fixed: the original methods did ``params = self.baseParams`` and then
    mutated that dict, so per-call arguments (e.g. ``player``) leaked into
    ``self.baseParams`` and were silently sent with every later, unrelated
    request.  Parameters are now copied per call via ``_params``.
    """
    base = "https://api.hypixel.net/"
    def __init__(self, key, timeout = 3, ua = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'):
        self.key = key
        self.timeout = timeout
        self.ua = ua
        self.baseParams = {"key": self.key}
    def _params(self, **extra):
        # Fresh dict per request: never mutate the shared baseParams.
        params = dict(self.baseParams)
        params.update(extra)
        return params
    def keyRequest(self):
        """Information about the API key in use (never cached)."""
        return noncache_urlopen(self.base + "key", self.timeout, self.ua,
                                self._params())
    def boosters(self):
        """List of currently active coin boosters."""
        return urlopen(self.base + "boosters", self.timeout, self.ua,
                       self._params())
    def friends(self, username):
        """Friend list of *username*."""
        return urlopen(self.base + "friends", self.timeout, self.ua,
                       self._params(player=username))
    def guildByMember(self, username):
        """Find a guild by one of its members."""
        return urlopen(self.base + "findGuild", self.timeout, self.ua,
                       self._params(byPlayer=username))
    def guildByName(self, name):
        """Find a guild by its name."""
        return urlopen(self.base + "findGuild", self.timeout, self.ua,
                       self._params(byName=name))
    def guildByID(self, guildID):
        """Full guild data for a guild id."""
        return urlopen(self.base + "guild", self.timeout, self.ua,
                       self._params(id=guildID))
    def session(self, username):
        """Current session of *username* (never cached)."""
        return noncache_urlopen(self.base + "session", self.timeout, self.ua,
                                self._params(player=username))
    def userByUUID(self, uuid):
        """Player data looked up by UUID."""
        return urlopen(self.base + "player", self.timeout, self.ua,
                       self._params(uuid=uuid))
    def userByName(self, name):
        """Player data looked up by display name."""
        return urlopen(self.base + "player", self.timeout, self.ua,
                       self._params(name=name))
class MultiKeyAPI:
    """Rotates between several HypixelAPI instances to dodge throttling.

    Whenever a response contains a "throttle" key, the wrapper sleeps for
    *delay* seconds, switches to the next key and retries the same call.
    """
    def __init__(self, keys, delay = 5, debug = False, timeout = 3, ua = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'):
        self.apis = [HypixelAPI(i, timeout = timeout, ua = ua) for i in keys]
        self.apii = 0
        self.api = self.apis[self.apii]
        self.delay = delay
        self.debug = debug
    def _changeInstance(self):
        # Advance to the next key, wrapping around at the end.
        self.apii = (self.apii + 1) % len(self.apis)
        self.api = self.apis[self.apii]
    def _throttleproofAPICall(self, callType, *args):
        # Retry on successive keys until a non-throttled answer arrives.
        while True:
            result = getattr(self.api, callType)(*args)
            if "throttle" not in result:
                return result
            if self.debug:
                print("Throttled, changing instance")
            time.sleep(self.delay)
            self._changeInstance()
    def keyRequest(self):
        return self._throttleproofAPICall("keyRequest")
    def boosters(self):
        return self._throttleproofAPICall("boosters")
    def friends(self, username):
        return self._throttleproofAPICall("friends", username)
    def guildByMember(self, username):
        return self._throttleproofAPICall("guildByMember", username)
    def guildByName(self, name):
        return self._throttleproofAPICall("guildByName", name)
    def guildByID(self, guildID):
        return self._throttleproofAPICall("guildByID", guildID)
    def session(self, username):
        return self._throttleproofAPICall("session", username)
    def userByUUID(self, uuid):
        return self._throttleproofAPICall("userByUUID", uuid)
    def userByName(self, name):
        return self._throttleproofAPICall("userByName", name)
| destruc7i0n/PyPixel_Cache | pypixel_cache.py | Python | mit | 7,497 |
# -*- mode: python; coding: utf-8 -*-
# Copyright (C) 2017 Laboratoire de Recherche et
# Développement de l'Epita
#
# This file is part of Spot, a model checking library.
#
# Spot is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Spot is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import spot
# Build an automaton for the formula and analyse its SCC structure.
aut = spot.translate('(Ga -> Gb) W c')
si = spot.scc_info(aut)
# Extracting SCC #2 must yield exactly this deterministic automaton.
assert (spot.decompose_scc(si, 2).to_str('hoa', '1.1') == """HOA: v1.1
States: 3
Start: 0
AP: 3 "b" "a" "c"
acc-name: Buchi
Acceptance: 1 Inf(0)
properties: trans-labels explicit-labels state-acc !complete
properties: deterministic terminal
--BODY--
State: 0
[!1&!2] 0
[1&!2] 1
State: 1
[!1&!2] 0
[1&!2] 1
[1&2] 2
State: 2
[1] 2
--END--""")
# An out-of-range SCC number must raise RuntimeError.
try:
    spot.decompose_scc(si, 4)
except RuntimeError:
    pass
else:
    raise AssertionError
# Third argument True requests the complete (sink-augmented) automaton.
assert (spot.decompose_scc(si, 0, True).to_str('hoa', '1.1') == """HOA: v1.1
States: 4
Start: 0
AP: 3 "b" "a" "c"
acc-name: Buchi
Acceptance: 1 Inf(0)
properties: trans-labels explicit-labels state-acc complete
properties: deterministic terminal
--BODY--
State: 0
[!1&!2] 0
[2] 1
[1&!2] 2
State: 1 {0}
[t] 1
State: 2
[!1&!2] 0
[!1&2] 1
[1&!2] 2
[1&2] 3
State: 3
[!1] 1
[1] 3
--END--""")
assert (spot.decompose_scc(si, 2, True).to_str('hoa', '1.1') == """HOA: v1.1
States: 2
Start: 0
AP: 3 "b" "a" "c"
acc-name: Buchi
Acceptance: 1 Inf(0)
properties: trans-labels explicit-labels trans-acc !complete
properties: deterministic !weak
--BODY--
State: 0
[!1&!2] 0 {0}
[1&!2] 1
State: 1
[!1&!2] 0 {0}
[1&!2] 1
--END--""")
# Out-of-range SCC number must also be rejected in the "complete" mode.
try:
    spot.decompose_scc(si, 3, True)
except RuntimeError:
    pass
else:
    raise AssertionError
| mcc-petrinets/formulas | spot/tests/python/decompose_scc.py | Python | mit | 2,129 |
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import unittest
from hashlib import sha1
from contextlib import contextmanager
from time import time
from webob import Request, Response
from swift.common.middleware import tempauth, tempurl
class FakeMemcache(object):
    """Minimal in-memory stand-in for swift's memcache client."""
    def __init__(self):
        self.store = {}
    def get(self, key):
        """Return the stored value, or None when absent."""
        return self.store.get(key)
    def set(self, key, value, timeout=0):
        """Store *value*; the timeout argument is accepted but ignored."""
        self.store[key] = value
        return True
    def incr(self, key, timeout=0):
        """Increment the counter (creating it at 0) and return it."""
        new_value = self.store.setdefault(key, 0) + 1
        self.store[key] = new_value
        return new_value
    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        """Pretend to take a lock; always succeeds immediately."""
        yield True
    def delete(self, key):
        """Remove *key* if present; always report success."""
        try:
            self.store.pop(key)
        except Exception:
            pass
        return True
class FakeApp(object):
    """WSGI stub that replays canned (status, headers, body) responses."""
    def __init__(self, status_headers_body_iter=None):
        self.calls = 0
        self.request = None
        self.status_headers_body_iter = status_headers_body_iter
        if not self.status_headers_body_iter:
            # Default: a single 404 carrying headers the tests filter on.
            self.status_headers_body_iter = iter([('404 Not Found', {
                'x-test-header-one-a': 'value1',
                'x-test-header-two-a': 'value2',
                'x-test-header-two-b': 'value3'}, '')])
    def __call__(self, env, start_response):
        # Record the request, honour swift.authorize if installed, then
        # replay the next canned response.
        self.calls += 1
        self.request = Request.blank('', environ=env)
        if 'swift.authorize' in env:
            denial = env['swift.authorize'](self.request)
            if denial:
                return denial(env, start_response)
        status, headers, body = next(self.status_headers_body_iter)
        response = Response(status=status, headers=headers, body=body)
        return response(env, start_response)
class TestTempURL(unittest.TestCase):
    """Unit tests for the tempurl middleware.

    Each test builds an HMAC-SHA1 signature over "METHOD\\nEXPIRES\\nPATH"
    with a per-account key stored under 'temp-url-key/<account>' and checks
    that correctly signed GET/PUT (and the HEADs they imply) pass through,
    while everything else is rejected with 401 "Temp URL invalid".
    """
    def setUp(self):
        # Pipeline under test: tempurl -> tempauth -> FakeApp.
        self.app = FakeApp()
        self.auth = tempauth.filter_factory({})(self.app)
        self.tempurl = tempurl.filter_factory({})(self.auth)
    def _make_request(self, path, **kwargs):
        # Blank request backed by a FakeMemcache for the key lookup.
        req = Request.blank(path, **kwargs)
        req.environ['swift.cache'] = FakeMemcache()
        return req
    def test_passthrough(self):
        resp = self._make_request('/v1/a/c/o').get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' not in resp.body)
    def test_get_valid(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(resp.headers['content-disposition'],
            'attachment; filename=o')
        self.assertEquals(resp.environ['swift.authorize_override'], True)
        self.assertEquals(resp.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_put_not_allowed_by_get(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'PUT',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_put_valid(self):
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'PUT',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(resp.environ['swift.authorize_override'], True)
        self.assertEquals(resp.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_get_not_allowed_by_put(self):
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    # Requests missing either query parameter must be rejected.
    def test_missing_sig(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING': 'temp_url_expires=%s' % expires})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_missing_expires(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING': 'temp_url_sig=%s' % sig})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_bad_path(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_no_key(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    # HEAD is implicitly allowed by a valid GET or PUT signature.
    def test_head_allowed_by_get(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'HEAD',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(resp.environ['swift.authorize_override'], True)
        self.assertEquals(resp.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_head_allowed_by_put(self):
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'HEAD',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(resp.environ['swift.authorize_override'], True)
        self.assertEquals(resp.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_head_otherwise_not_allowed(self):
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        # Deliberately fudge expires to show HEADs aren't just automatically
        # allowed.
        expires += 1
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'HEAD',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
    # Verbs other than GET/PUT/HEAD are never honoured.
    def test_post_not_allowed(self):
        method = 'POST'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'POST',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_delete_not_allowed(self):
        method = 'DELETE'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'DELETE',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_unknown_not_allowed(self):
        method = 'UNKNOWN'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'REQUEST_METHOD': 'UNKNOWN',
            'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    # Tampering with any signed component must invalidate the URL.
    def test_changed_path_invalid(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path + '2',
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_changed_sig_invalid(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        if sig[-1] != '0':
            sig = sig[:-1] + '0'
        else:
            sig = sig[:-1] + '1'
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_changed_expires_invalid(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' %
            (sig, expires + 1)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    def test_different_key_invalid(self):
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key + '2')
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
    # Header-filtering configuration for incoming and outgoing headers.
    def test_removed_incoming_header(self):
        self.tempurl = tempurl.filter_factory({
            'incoming_remove_headers': 'x-remove-this'})(self.auth)
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, headers={'x-remove-this': 'value'},
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertTrue('x-remove-this' not in self.app.request.headers)
    def test_removed_incoming_headers_match(self):
        self.tempurl = tempurl.filter_factory({
            'incoming_remove_headers': 'x-remove-this-*',
            'incoming_allow_headers': 'x-remove-this-except-this'})(self.auth)
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            headers={'x-remove-this-one': 'value1',
                     'x-remove-this-except-this': 'value2'},
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertTrue('x-remove-this-one' not in self.app.request.headers)
        self.assertEquals(
            self.app.request.headers['x-remove-this-except-this'], 'value2')
    def test_removed_outgoing_header(self):
        self.tempurl = tempurl.filter_factory({
            'outgoing_remove_headers': 'x-test-header-one-a'})(self.auth)
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertTrue('x-test-header-one-a' not in resp.headers)
        self.assertEquals(resp.headers['x-test-header-two-a'], 'value2')
    def test_removed_outgoing_headers_match(self):
        self.tempurl = tempurl.filter_factory({
            'outgoing_remove_headers': 'x-test-header-two-*',
            'outgoing_allow_headers': 'x-test-header-two-b'})(self.auth)
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path,
            environ={'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)})
        req.environ['swift.cache'].set('temp-url-key/a', key)
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(resp.headers['x-test-header-one-a'], 'value1')
        self.assertTrue('x-test-header-two-a' not in resp.headers)
        self.assertEquals(resp.headers['x-test-header-two-b'], 'value3')
    # Direct unit checks of the middleware's helper methods.
    def test_get_account(self):
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}), 'a')
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}), 'a')
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}), 'a')
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}), 'a')
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}), None)
        self.assertEquals(self.tempurl._get_account({
            'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}), None)
    def test_get_temp_url_info(self):
        s = 'f5d5051bddf5df7e27c628818738334f'
        e = int(time() + 86400)
        self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (s, e)}), (s, e))
        self.assertEquals(self.tempurl._get_temp_url_info({}), (None, None))
        self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
            'temp_url_expires=%s' % e}), (None, e))
        self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
            'temp_url_sig=%s' % s}), (s, None))
        self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=bad' % s}), (s, 0))
        e = int(time() - 1)
        self.assertEquals(self.tempurl._get_temp_url_info({'QUERY_STRING':
            'temp_url_sig=%s&temp_url_expires=%s' % (s, e)}), (s, 0))
    def test_get_key_memcache(self):
        self.app.status_headers_body_iter = iter([('404 Not Found', {}, '')])
        self.assertEquals(
            self.tempurl._get_key({}, 'a'), None)
        self.app.status_headers_body_iter = iter([('404 Not Found', {}, '')])
        self.assertEquals(
            self.tempurl._get_key({'swift.cache': None}, 'a'), None)
        mc = FakeMemcache()
        self.app.status_headers_body_iter = iter([('404 Not Found', {}, '')])
        self.assertEquals(
            self.tempurl._get_key({'swift.cache': mc}, 'a'), None)
        mc.set('temp-url-key/a', 'abc')
        self.assertEquals(
            self.tempurl._get_key({'swift.cache': mc}, 'a'), 'abc')
    def test_get_key_from_source(self):
        self.app.status_headers_body_iter = \
            iter([('200 Ok', {'x-account-meta-temp-url-key': 'abc'}, '')])
        mc = FakeMemcache()
        self.assertEquals(
            self.tempurl._get_key({'swift.cache': mc}, 'a'), 'abc')
        self.assertEquals(mc.get('temp-url-key/a'), 'abc')
    def test_get_hmac(self):
        self.assertEquals(self.tempurl._get_hmac(
            {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'},
            1, 'abc'),
            '026d7f7cc25256450423c7ad03fc9f5ffc1dab6d')
        self.assertEquals(self.tempurl._get_hmac(
            {'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'},
            1, 'abc', request_method='GET'),
            '026d7f7cc25256450423c7ad03fc9f5ffc1dab6d')
    def test_invalid(self):
        def _start_response(status, headers, exc_info=None):
            self.assertTrue(status, '401 Unauthorized')
        self.assertTrue('Temp URL invalid' in
            ''.join(self.tempurl._invalid({'REQUEST_METHOD': 'GET'},
                                          _start_response)))
        self.assertEquals('',
            ''.join(self.tempurl._invalid({'REQUEST_METHOD': 'HEAD'},
                                          _start_response)))
    def test_clean_incoming_headers(self):
        irh = ''
        iah = ''
        env = {'HTTP_TEST_HEADER': 'value'}
        tempurl.TempURL(None, {'incoming_remove_headers': irh,
            'incoming_allow_headers': iah})._clean_incoming_headers(env)
        self.assertTrue('HTTP_TEST_HEADER' in env)
        irh = 'test-header'
        iah = ''
        env = {'HTTP_TEST_HEADER': 'value'}
        tempurl.TempURL(None, {'incoming_remove_headers': irh,
            'incoming_allow_headers': iah})._clean_incoming_headers(env)
        self.assertTrue('HTTP_TEST_HEADER' not in env)
        irh = 'test-header-*'
        iah = ''
        env = {'HTTP_TEST_HEADER_ONE': 'value',
               'HTTP_TEST_HEADER_TWO': 'value'}
        tempurl.TempURL(None, {'incoming_remove_headers': irh,
            'incoming_allow_headers': iah})._clean_incoming_headers(env)
        self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
        self.assertTrue('HTTP_TEST_HEADER_TWO' not in env)
        irh = 'test-header-*'
        iah = 'test-header-two'
        env = {'HTTP_TEST_HEADER_ONE': 'value',
               'HTTP_TEST_HEADER_TWO': 'value'}
        tempurl.TempURL(None, {'incoming_remove_headers': irh,
            'incoming_allow_headers': iah})._clean_incoming_headers(env)
        self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
        self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
        irh = 'test-header-* test-other-header'
        iah = 'test-header-two test-header-yes-*'
        env = {'HTTP_TEST_HEADER_ONE': 'value',
               'HTTP_TEST_HEADER_TWO': 'value',
               'HTTP_TEST_OTHER_HEADER': 'value',
               'HTTP_TEST_HEADER_YES': 'value',
               'HTTP_TEST_HEADER_YES_THIS': 'value'}
        tempurl.TempURL(None, {'incoming_remove_headers': irh,
            'incoming_allow_headers': iah})._clean_incoming_headers(env)
        self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
        self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
        self.assertTrue('HTTP_TEST_OTHER_HEADER' not in env)
        self.assertTrue('HTTP_TEST_HEADER_YES' not in env)
        self.assertTrue('HTTP_TEST_HEADER_YES_THIS' in env)
    def test_clean_outgoing_headers(self):
        orh = ''
        oah = ''
        hdrs = {'test-header': 'value'}
        hdrs = dict(tempurl.TempURL(None,
            {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
            )._clean_outgoing_headers(hdrs.iteritems()))
        self.assertTrue('test-header' in hdrs)
        orh = 'test-header'
        oah = ''
        hdrs = {'test-header': 'value'}
        hdrs = dict(tempurl.TempURL(None,
            {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
            )._clean_outgoing_headers(hdrs.iteritems()))
        self.assertTrue('test-header' not in hdrs)
        orh = 'test-header-*'
        oah = ''
        hdrs = {'test-header-one': 'value',
                'test-header-two': 'value'}
        hdrs = dict(tempurl.TempURL(None,
            {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
            )._clean_outgoing_headers(hdrs.iteritems()))
        self.assertTrue('test-header-one' not in hdrs)
        self.assertTrue('test-header-two' not in hdrs)
        orh = 'test-header-*'
        oah = 'test-header-two'
        hdrs = {'test-header-one': 'value',
                'test-header-two': 'value'}
        hdrs = dict(tempurl.TempURL(None,
            {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
            )._clean_outgoing_headers(hdrs.iteritems()))
        self.assertTrue('test-header-one' not in hdrs)
        self.assertTrue('test-header-two' in hdrs)
        orh = 'test-header-* test-other-header'
        oah = 'test-header-two test-header-yes-*'
        hdrs = {'test-header-one': 'value',
                'test-header-two': 'value',
                'test-other-header': 'value',
                'test-header-yes': 'value',
                'test-header-yes-this': 'value'}
        hdrs = dict(tempurl.TempURL(None,
            {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
            )._clean_outgoing_headers(hdrs.iteritems()))
        self.assertTrue('test-header-one' not in hdrs)
        self.assertTrue('test-header-two' in hdrs)
        self.assertTrue('test-other-header' not in hdrs)
        self.assertTrue('test-header-yes' not in hdrs)
        self.assertTrue('test-header-yes-this' in hdrs)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| pvo/swift | test/unit/common/middleware/test_tempurl.py | Python | apache-2.0 | 28,059 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import contextlib
import copy
import uuid
import mock
import mox
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo.config import cfg
import six
from nova.compute import flavors
from nova import context
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.pci import pci_manager
from nova.pci import pci_whitelist
from nova import policy
from nova import test
from nova.tests import fake_instance
from nova import utils
# Global oslo.config handle; tests read the 'neutron' option group through it.
CONF = cfg.CONF
# NOTE: Neutron client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
class MyComparator(mox.Comparator):
    """mox comparator matching nested dict/list/tuple structures.

    Dicts match when both sides have the same size and every expected
    key is present with a recursively-matching value.  Lists and tuples
    match when the lengths agree and every expected element is contained
    in the actual sequence (order-insensitive, shallow containment).
    Anything else falls back to plain equality; ``None`` only matches
    ``None``.
    """
    def __init__(self, lhs):
        self.lhs = lhs
    def _compare_mappings(self, expected, actual):
        # Sizes must agree, then every expected entry must be present
        # with a recursively-matching value; all() short-circuits on the
        # first mismatch, like the original early returns.
        if len(expected) != len(actual):
            return False
        return all(key in actual and self._compare(value, actual[key])
                   for key, value in expected.iteritems())
    def _compare_sequences(self, expected, actual):
        # Length check plus order-insensitive containment of each element.
        if len(expected) != len(actual):
            return False
        return all(item in actual for item in expected)
    def _compare(self, expected, actual):
        if expected is None:
            return actual is None
        if isinstance(expected, dict):
            return (isinstance(actual, dict) and
                    self._compare_mappings(expected, actual))
        if isinstance(expected, list):
            return (isinstance(actual, list) and
                    self._compare_sequences(expected, actual))
        if isinstance(expected, tuple):
            return (isinstance(actual, tuple) and
                    self._compare_sequences(expected, actual))
        return expected == actual
    def equals(self, rhs):
        """mox hook: report whether *rhs* matches the expected structure."""
        return self._compare(self.lhs, rhs)
    def __repr__(self):
        return str(self.lhs)
class TestNeutronClient(test.TestCase):
    """Tests for neutronv2.get_client() auth handling and admin-token reuse."""
    def test_withtoken(self):
        """The context's user token is passed straight to the neutron client."""
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(url_timeout=30, group='neutron')
        my_context = context.RequestContext('userid',
                                            'my_tenantid',
                                            auth_token='token')
        self.mox.StubOutWithMock(client.Client, "__init__")
        client.Client.__init__(
            auth_strategy=CONF.neutron.auth_strategy,
            endpoint_url=CONF.neutron.url,
            token=my_context.auth_token,
            timeout=CONF.neutron.url_timeout,
            insecure=False,
            ca_cert=None).AndReturn(None)
        self.mox.ReplayAll()
        neutronv2.get_client(my_context)
    def test_withouttoken(self):
        """A context with no auth token raises Unauthorized."""
        my_context = context.RequestContext('userid', 'my_tenantid')
        self.assertRaises(exceptions.Unauthorized,
                          neutronv2.get_client,
                          my_context)
    def test_withtoken_context_is_admin(self):
        """An admin-flagged context still authenticates with its own token."""
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(url_timeout=30, group='neutron')
        my_context = context.RequestContext('userid',
                                            'my_tenantid',
                                            auth_token='token',
                                            is_admin=True)
        self.mox.StubOutWithMock(client.Client, "__init__")
        client.Client.__init__(
            auth_strategy=CONF.neutron.auth_strategy,
            endpoint_url=CONF.neutron.url,
            token=my_context.auth_token,
            timeout=CONF.neutron.url_timeout,
            insecure=False,
            ca_cert=None).AndReturn(None)
        self.mox.ReplayAll()
        # Note that although we have admin set in the context we
        # are not asking for an admin client, and so we auth with
        # our own token
        neutronv2.get_client(my_context)
    def test_withouttoken_keystone_connection_error(self):
        """Tokenless keystone auth surfaces the raw client exception."""
        self.flags(auth_strategy='keystone', group='neutron')
        self.flags(url='http://anyhost/', group='neutron')
        my_context = context.RequestContext('userid', 'my_tenantid')
        self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
                          neutronv2.get_client,
                          my_context)
    def test_reuse_admin_token(self):
        """The admin token cached in AdminTokenStore is refreshed and reused."""
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(url_timeout=30, group='neutron')
        token_store = neutronv2.AdminTokenStore.get()
        token_store.admin_auth_token = 'new_token'
        my_context = context.RequestContext('userid', 'my_tenantid',
                                            auth_token='token')
        with contextlib.nested(
            mock.patch.object(client.Client, "list_networks",
                              side_effect=mock.Mock),
            mock.patch.object(client.Client, 'get_auth_info',
                              return_value={'auth_token': 'new_token1'}),
        ):
            client1 = neutronv2.get_client(my_context, True)
            client1.list_networks(retrieve_all=False)
            self.assertEqual('new_token1', token_store.admin_auth_token)
            client1 = neutronv2.get_client(my_context, True)
            client1.list_networks(retrieve_all=False)
            self.assertEqual('new_token1', token_store.admin_auth_token)
    def test_admin_token_updated(self):
        """Every admin request updates the stored token from get_auth_info."""
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(url_timeout=30, group='neutron')
        token_store = neutronv2.AdminTokenStore.get()
        token_store.admin_auth_token = 'new_token'
        # tokens.pop serves these right-to-left: first call returns
        # 'new_token', second returns 'new_token1'.
        tokens = [{'auth_token': 'new_token1'}, {'auth_token': 'new_token'}]
        my_context = context.RequestContext('userid', 'my_tenantid',
                                            auth_token='token')
        with contextlib.nested(
            mock.patch.object(client.Client, "list_networks",
                              side_effect=mock.Mock),
            mock.patch.object(client.Client, 'get_auth_info',
                              side_effect=tokens.pop),
        ):
            client1 = neutronv2.get_client(my_context, True)
            client1.list_networks(retrieve_all=False)
            self.assertEqual('new_token', token_store.admin_auth_token)
            client1 = neutronv2.get_client(my_context, True)
            client1.list_networks(retrieve_all=False)
            self.assertEqual('new_token1', token_store.admin_auth_token)
class TestNeutronv2Base(test.TestCase):
    """Shared fixture data and mox choreography for the neutronv2 API tests.

    setUp() builds a family of fake networks (self.nets1..nets9), ports,
    subnets and floating IPs; the helper methods record the exact neutron
    client call sequences subclasses expect during allocation and
    nw-info retrieval.
    """
    def setUp(self):
        """Create fake instances/networks/ports and stub neutronv2.get_client."""
        super(TestNeutronv2Base, self).setUp()
        self.context = context.RequestContext('userid', 'my_tenantid')
        setattr(self.context,
                'auth_token',
                'bff4a5a6b9eb4ea2a6efec6eefb77936')
        self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
                         'uuid': str(uuid.uuid4()),
                         'display_name': 'test_instance',
                         'availability_zone': 'nova',
                         'host': 'some_host',
                         'security_groups': []}
        self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
                          'uuid': str(uuid.uuid4()),
                          'display_name': 'test_instance2',
                          'availability_zone': 'nova',
                          'security_groups': []}
        self.nets1 = [{'id': 'my_netid1',
                       'name': 'my_netname1',
                       'subnets': ['mysubnid1'],
                       'tenant_id': 'my_tenantid'}]
        self.nets2 = []
        self.nets2.append(self.nets1[0])
        self.nets2.append({'id': 'my_netid2',
                           'name': 'my_netname2',
                           'subnets': ['mysubnid2'],
                           'tenant_id': 'my_tenantid'})
        self.nets3 = self.nets2 + [{'id': 'my_netid3',
                                    'name': 'my_netname3',
                                    'tenant_id': 'my_tenantid'}]
        self.nets4 = [{'id': 'his_netid4',
                       'name': 'his_netname4',
                       'tenant_id': 'his_tenantid'}]
        # A network request with external networks
        self.nets5 = self.nets1 + [{'id': 'the-external-one',
                                    'name': 'out-of-this-world',
                                    'router:external': True,
                                    'tenant_id': 'should-be-an-admin'}]
        # A network request with a duplicate
        self.nets6 = []
        self.nets6.append(self.nets1[0])
        self.nets6.append(self.nets1[0])
        # A network request with a combo
        self.nets7 = []
        self.nets7.append(self.nets2[1])
        self.nets7.append(self.nets1[0])
        self.nets7.append(self.nets2[1])
        self.nets7.append(self.nets1[0])
        # A network request with only external network
        self.nets8 = [self.nets5[1]]
        # A network that is both shared and external
        self.nets9 = [{'id': 'net_id', 'name': 'net_name',
                       'router:external': True, 'shared': True}]
        # Indexed 1-based via the net_idx argument of the helpers below.
        self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
                     self.nets5, self.nets6, self.nets7, self.nets8,
                     self.nets9]
        self.port_address = '10.0.1.2'
        self.port_data1 = [{'network_id': 'my_netid1',
                            'device_id': self.instance2['uuid'],
                            'device_owner': 'compute:nova',
                            'id': 'my_portid1',
                            'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                            'status': 'DOWN',
                            'admin_state_up': True,
                            'fixed_ips': [{'ip_address': self.port_address,
                                           'subnet_id': 'my_subid1'}],
                            'mac_address': 'my_mac1', }]
        self.float_data1 = [{'port_id': 'my_portid1',
                             'fixed_ip_address': self.port_address,
                             'floating_ip_address': '172.0.1.2'}]
        self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
                                                'subnet_id': 'my_subid1'}],
                                 'status': 'ACTIVE',
                                 'admin_state_up': True}]
        self.port_address2 = '10.0.2.2'
        self.port_data2 = []
        self.port_data2.append(self.port_data1[0])
        self.port_data2.append({'network_id': 'my_netid2',
                                'device_id': self.instance['uuid'],
                                'admin_state_up': True,
                                'status': 'ACTIVE',
                                'device_owner': 'compute:nova',
                                'id': 'my_portid2',
                                'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                                'fixed_ips':
                                    [{'ip_address': self.port_address2,
                                      'subnet_id': 'my_subid2'}],
                                'mac_address': 'my_mac2', })
        self.float_data2 = []
        self.float_data2.append(self.float_data1[0])
        self.float_data2.append({'port_id': 'my_portid2',
                                 'fixed_ip_address': '10.0.2.2',
                                 'floating_ip_address': '172.0.2.2'})
        self.port_data3 = [{'network_id': 'my_netid1',
                            'device_id': 'device_id3',
                            'status': 'DOWN',
                            'admin_state_up': True,
                            'device_owner': 'compute:nova',
                            'id': 'my_portid3',
                            'binding:vnic_type': model.VNIC_TYPE_NORMAL,
                            'fixed_ips': [],  # no fixed ip
                            'mac_address': 'my_mac3', }]
        self.subnet_data1 = [{'id': 'my_subid1',
                              'cidr': '10.0.1.0/24',
                              'network_id': 'my_netid1',
                              'gateway_ip': '10.0.1.1',
                              'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
        self.subnet_data2 = []
        self.subnet_data_n = [{'id': 'my_subid1',
                               'cidr': '10.0.1.0/24',
                               'network_id': 'my_netid1',
                               'gateway_ip': '10.0.1.1',
                               'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
                              {'id': 'my_subid2',
                               'cidr': '20.0.1.0/24',
                               'network_id': 'my_netid2',
                               'gateway_ip': '20.0.1.1',
                               'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
        self.subnet_data2.append({'id': 'my_subid2',
                                  'cidr': '10.0.2.0/24',
                                  'network_id': 'my_netid2',
                                  'gateway_ip': '10.0.2.1',
                                  'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
        self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
                         'name': 'ext_net',
                         'router:external': True,
                         'tenant_id': 'admin_tenantid'}
        self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
                              'name': 'nova',
                              'router:external': True,
                              'tenant_id': 'admin_tenantid'}
        self.fip_unassociated = {'tenant_id': 'my_tenantid',
                                 'id': 'fip_id1',
                                 'floating_ip_address': '172.24.4.227',
                                 'floating_network_id': self.fip_pool['id'],
                                 'port_id': None,
                                 'fixed_ip_address': None,
                                 'router_id': None}
        fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
        self.fip_associated = {'tenant_id': 'my_tenantid',
                               'id': 'fip_id2',
                               'floating_ip_address': '172.24.4.228',
                               'floating_network_id': self.fip_pool['id'],
                               'port_id': self.port_data2[1]['id'],
                               'fixed_ip_address': fixed_ip_address,
                               'router_id': 'router_id1'}
        self._returned_nw_info = []
        # All neutron traffic is routed through a single mocked client.
        self.mox.StubOutWithMock(neutronv2, 'get_client')
        self.moxed_client = self.mox.CreateMock(client.Client)
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
    def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
        """Record the neutron call sequence expected by allocate_for_instance.

        Supported kwargs include 'requested_networks', 'macs',
        'dhcp_options', 'portbinding', '_device', and '_break' (an early
        cut point: 'pre_list_networks' or 'mac<network_id>').  Returns a
        neutronapi.API with mox already in replay mode.
        """
        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
        # converted to handling instance objects.
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        self.instance2 = fake_instance.fake_instance_obj(self.context,
                                                         **self.instance2)
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, 'get_instance_nw_info')
        has_portbinding = False
        has_extra_dhcp_opts = False
        dhcp_options = kwargs.get('dhcp_options')
        if dhcp_options is not None:
            has_extra_dhcp_opts = True
        if kwargs.get('portbinding'):
            has_portbinding = True
            api.extensions[constants.PORTBINDING_EXT] = 1
            self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
            neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
                self.moxed_client)
            neutronv2.get_client(
                mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn(
                self.moxed_client)
            api._refresh_neutron_extensions_cache(mox.IgnoreArg())
        else:
            self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        # Net idx is 1-based for compatibility with existing unit tests
        nets = self.nets[net_idx - 1]
        ports = {}
        fixed_ips = {}
        macs = kwargs.get('macs')
        if macs:
            macs = set(macs)
        req_net_ids = []
        ordered_networks = []
        port = {}
        if 'requested_networks' in kwargs:
            for request in kwargs['requested_networks']:
                if request.port_id:
                    # Pre-created ports resolve to my_netid1; my_portid3
                    # additionally has no fixed IPs (see self.port_data3).
                    if request.port_id == 'my_portid3':
                        self.moxed_client.show_port(request.port_id
                            ).AndReturn(
                                {'port': {'id': 'my_portid3',
                                          'network_id': 'my_netid1',
                                          'mac_address': 'my_mac1',
                                          'device_id': kwargs.get('_device') and
                                                       self.instance2.uuid or
                                                       ''}})
                        ports['my_netid1'] = [self.port_data1[0],
                                              self.port_data3[0]]
                        ports[request.port_id] = self.port_data3[0]
                        request.network_id = 'my_netid1'
                        if macs is not None:
                            macs.discard('my_mac1')
                    else:
                        self.moxed_client.show_port(request.port_id).AndReturn(
                            {'port': {'id': 'my_portid1',
                                      'network_id': 'my_netid1',
                                      'mac_address': 'my_mac1',
                                      'device_id': kwargs.get('_device') and
                                                   self.instance2.uuid or
                                                   ''}})
                        ports[request.port_id] = self.port_data1[0]
                        request.network_id = 'my_netid1'
                        if macs is not None:
                            macs.discard('my_mac1')
                else:
                    fixed_ips[request.network_id] = request.address
                req_net_ids.append(request.network_id)
                ordered_networks.append(request)
        else:
            for n in nets:
                ordered_networks.append(
                    objects.NetworkRequest(network_id=n['id']))
        if kwargs.get('_break') == 'pre_list_networks':
            self.mox.ReplayAll()
            return api
        # search all req_net_ids as in api.py
        search_ids = req_net_ids
        if search_ids:
            mox_list_params = {'id': mox.SameElementsAs(search_ids)}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
        else:
            mox_list_params = {'tenant_id': self.instance.project_id,
                               'shared': False}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
            mox_list_params = {'shared': True}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': []})
        # With no explicit request and multiple candidate networks the
        # API is expected to bail out (NetworkAmbiguous) before creating
        # any ports, so stop recording expectations here.
        if (('requested_networks' not in kwargs or
             kwargs['requested_networks'].as_tuples() == [(None, None, None)])
            and len(nets) > 1):
            self.mox.ReplayAll()
            return api
        ports_in_requested_net_order = []
        nets_in_requested_net_order = []
        for request in ordered_networks:
            port_req_body = {
                'port': {
                    'device_id': self.instance.uuid,
                    'device_owner': 'compute:nova',
                },
            }
            # Network lookup for available network_id
            network = None
            for net in nets:
                if net['id'] == request.network_id:
                    network = net
                    break
            # if net_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            else:
                continue
            if has_portbinding:
                port_req_body['port']['binding:host_id'] = (
                    self.instance.get('host'))
            if not has_portbinding:
                api._populate_neutron_extension_values(mox.IgnoreArg(),
                    self.instance, mox.IgnoreArg(),
                    mox.IgnoreArg()).AndReturn(None)
            else:
                # since _populate_neutron_extension_values() will call
                # _has_port_binding_extension()
                api._has_port_binding_extension(mox.IgnoreArg()).\
                    AndReturn(has_portbinding)
            api._has_port_binding_extension(mox.IgnoreArg()).\
                AndReturn(has_portbinding)
            if request.port_id:
                port = ports[request.port_id]
                self.moxed_client.update_port(request.port_id,
                                              MyComparator(port_req_body)
                                              ).AndReturn(
                                                  {'port': port})
                ports_in_requested_net_order.append(request.port_id)
            else:
                request.address = fixed_ips.get(request.network_id)
                if request.address:
                    port_req_body['port']['fixed_ips'] = [
                        {'ip_address': str(request.address)}]
                port_req_body['port']['network_id'] = request.network_id
                port_req_body['port']['admin_state_up'] = True
                port_req_body['port']['tenant_id'] = \
                    self.instance.project_id
                if macs:
                    port_req_body['port']['mac_address'] = macs.pop()
                if has_portbinding:
                    port_req_body['port']['binding:host_id'] = (
                        self.instance.get('host'))
                res_port = {'port': {'id': 'fake'}}
                if has_extra_dhcp_opts:
                    port_req_body['port']['extra_dhcp_opts'] = dhcp_options
                if kwargs.get('_break') == 'mac' + request.network_id:
                    self.mox.ReplayAll()
                    return api
                self.moxed_client.create_port(
                    MyComparator(port_req_body)).AndReturn(res_port)
                ports_in_requested_net_order.append(res_port['port']['id'])
            nets_in_requested_net_order.append(network)
        api.get_instance_nw_info(mox.IgnoreArg(),
                                 self.instance,
                                 networks=nets_in_requested_net_order,
                                 port_ids=ports_in_requested_net_order
                                 ).AndReturn(self._returned_nw_info)
        self.mox.ReplayAll()
        return api
    def _verify_nw_info(self, nw_inf, index=0):
        """Assert entry *index* of nw_inf matches the canned fixture data."""
        id_suffix = index + 1
        self.assertEqual('10.0.%s.2' % id_suffix,
                         nw_inf.fixed_ips()[index]['address'])
        self.assertEqual('172.0.%s.2' % id_suffix,
                         nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
        self.assertEqual('my_netname%s' % id_suffix,
                         nw_inf[index]['network']['label'])
        self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
        self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
        self.assertEqual('10.0.%s.0/24' % id_suffix,
                         nw_inf[index]['network']['subnets'][0]['cidr'])
        ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
                           version=4, type='dns')
        self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
    def _get_instance_nw_info(self, number):
        """Record expectations and run get_instance_nw_info for *number* ports.

        number is 1 or 2; it selects port_data1/nets1 vs port_data2/nets2
        and drives one floating-ip/subnet/dhcp lookup round per port.
        """
        api = neutronapi.API()
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(mox.IgnoreArg(),
                                          self.instance['uuid'],
                                          mox.IgnoreArg())
        port_data = number == 1 and self.port_data1 or self.port_data2
        nets = number == 1 and self.nets1 or self.nets2
        net_info_cache = []
        for port in port_data:
            net_info_cache.append({"network": {"id": port['network_id']},
                                   "id": port['id']})
        instance = copy.copy(self.instance)
        # This line here does not wrap net_info_cache in jsonutils.dumps()
        # intentionally to test the other code path when it's not unicode.
        instance['info_cache'] = {'network_info': net_info_cache}
        self.moxed_client.list_ports(
            tenant_id=self.instance['project_id'],
            device_id=self.instance['uuid']).AndReturn(
                {'ports': port_data})
        net_ids = [port['network_id'] for port in port_data]
        nets = number == 1 and self.nets1 or self.nets2
        self.moxed_client.list_networks(
            id=net_ids).AndReturn({'networks': nets})
        for i in xrange(1, number + 1):
            float_data = number == 1 and self.float_data1 or self.float_data2
            for ip in port_data[i - 1]['fixed_ips']:
                float_data = [x for x in float_data
                              if x['fixed_ip_address'] == ip['ip_address']]
                self.moxed_client.list_floatingips(
                    fixed_ip_address=ip['ip_address'],
                    port_id=port_data[i - 1]['id']).AndReturn(
                        {'floatingips': float_data})
            subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
            self.moxed_client.list_subnets(
                id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
                    {'subnets': subnet_data})
            self.moxed_client.list_ports(
                network_id=subnet_data[0]['network_id'],
                device_owner='network:dhcp').AndReturn(
                    {'ports': []})
        self.mox.ReplayAll()
        nw_inf = api.get_instance_nw_info(self.context, instance)
        for i in xrange(0, number):
            self._verify_nw_info(nw_inf, i)
    def _allocate_for_instance(self, net_idx=1, **kwargs):
        """Stub the call sequence, then run allocate_for_instance for real."""
        api = self._stub_allocate_for_instance(net_idx, **kwargs)
        return api.allocate_for_instance(self.context, self.instance, **kwargs)
class TestNeutronv2(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets_add_interface(self):
# This tests that adding an interface to an instance does not
# remove the first instance from the instance.
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': self.port_data2[0]['id'],
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
self.nets2,
[self.port_data2[1]['id']])
def test_get_instance_nw_info_remove_ports_from_neutron(self):
# This tests that when a port is removed in neutron it
# is also removed from the nova.
network_model = model.Network(id=self.port_data2[0]['network_id'],
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
None,
None)
def test_get_instance_nw_info_ignores_neturon_ports(self):
# Tests that only ports in the network_cache are updated
# and ports returned from neutron that match the same
# instance_id/device_id are ignored.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
"""Helper function to test get_instance_nw_info.
:param network_cache - data already in the nova network cache.
:param current_neutron_ports - updated list of ports from neutron.
:param networks - networks of ports being added to instance.
:param port_ids - new ports being added to instance.
"""
# keep a copy of the original ports/networks to pass to
# get_instance_nw_info() as the code below changes them.
original_port_ids = copy.copy(port_ids)
original_networks = copy.copy(networks)
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': current_neutron_ports})
ifaces = network_cache['info_cache']['network_info']
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
nets = [{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
name=iface['network']['label'],
tenant_id=iface['network']['meta']['tenant_id'])
for iface in ifaces]
port_ids = [iface['id'] for iface in ifaces] + port_ids
index = 0
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
for ip in current_neutron_port['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=current_neutron_port['id']).AndReturn(
{'floatingips': [self.float_data2[index]]})
self.moxed_client.list_subnets(
id=mox.SameElementsAs([ip['subnet_id']])
).AndReturn(
{'subnets': [self.subnet_data_n[index]]})
self.moxed_client.list_ports(
network_id=current_neutron_port['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
index += 1
self.mox.ReplayAll()
self.instance['info_cache'] = network_cache
instance = copy.copy(self.instance)
instance['info_cache'] = network_cache['info_cache']
nw_infs = api.get_instance_nw_info(self.context,
instance,
networks=original_networks,
port_ids=original_port_ids)
self.assertEqual(index, len(nw_infs))
# ensure that nic ordering is preserved
for iface_index in range(index):
self.assertEqual(nw_infs[iface_index]['id'],
port_ids[iface_index])
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
id=[self.port_data1[0]['network_id']]).AndReturn(
{'networks': self.nets1})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
instance)
id_suffix = 3
self.assertEqual(0, len(nw_inf.fixed_ips()))
self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
self.assertEqual(
{constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 1
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, flavor))
instance = {'system_metadata': sys_meta}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
None, port_req_body)
self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
api = self._stub_allocate_for_instance(net_idx=2)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
# The macs kwarg should be accepted, as a set, the
# _allocate_for_instance helper checks that the mac is used to create a
# port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
result = self._allocate_for_instance(
requested_networks=objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')]))
self.assertEqual(self.port_data1, result)
def test_allocate_for_instance_not_enough_macs_via_ports(self):
# using a hypervisor MAC via a pre-created port will stop it being
# used to dynamically create a port on a network. We put the network
# first in requested_networks so that if the code were to not pre-check
# requested ports, it would incorrectly assign the mac and not fail.
requested_networks = objects.NetworkRequestList(
objects = [
objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac1']))
def test_allocate_for_instance_not_enough_macs(self):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
with mock.patch.object(api, '_delete_ports'):
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance,
requested_networks=requested_networks,
macs=set(['my_mac2']))
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_mac_conflicting_requested_port(self):
# specify only first and last network
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
def test_allocate_for_instance_without_requested_networks(self):
api = self._stub_allocate_for_instance(net_idx=3)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_with_requested_non_available_network(self):
"""verify that a non available network is ignored.
self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
Do not create a port on a non available network self.nets3[2].
"""
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
self._allocate_for_instance(net_idx=2,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
# specify only first and last network
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
address='10.0.1.0')])
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': model.NetworkInfo([])})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance)
self.assertEqual(len(nwinfo), 0)
    def test_allocate_for_instance_ex1(self):
        """Verify we will delete created ports if we fail to allocate all
        net resources.

        Mox is set up to raise an exception when creating the second port;
        in this case the code must delete the first created port.
        """
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
            AndReturn(False)
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=net['id'])
                     for net in (self.nets2[0], self.nets2[1])])
        self.moxed_client.list_networks(
            id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
        index = 0
        for network in self.nets2:
            binding_port_req_body = {
                'port': {
                    'device_id': self.instance.uuid,
                    'device_owner': 'compute:nova',
                },
            }
            port_req_body = {
                'port': {
                    'network_id': network['id'],
                    'admin_state_up': True,
                    'tenant_id': self.instance.project_id,
                },
            }
            port_req_body['port'].update(binding_port_req_body['port'])
            port = {'id': 'portid_' + network['id']}
            api._populate_neutron_extension_values(self.context,
                self.instance, None, binding_port_req_body).AndReturn(None)
            if index == 0:
                # First port creation succeeds.
                self.moxed_client.create_port(
                    MyComparator(port_req_body)).AndReturn({'port': port})
            else:
                # Second port creation hits the neutron port quota.
                NeutronOverQuota = exceptions.OverQuotaClient()
                self.moxed_client.create_port(
                    MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
            index += 1
        # Cleanup expectation: the successfully created first port is deleted.
        self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
        self.mox.ReplayAll()
        self.assertRaises(exception.PortLimitExceeded,
                          api.allocate_for_instance,
                          self.context, self.instance,
                          requested_networks=requested_networks)
    def test_allocate_for_instance_ex2(self):
        """Verify we have no port to delete if we fail to allocate the
        first net resource.

        Mox is set up to raise when creating the very first port; in this
        case the code must not delete any ports (note: no delete_port
        expectation is recorded below).
        """
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
            AndReturn(False)
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=net['id'])
                     for net in (self.nets2[0], self.nets2[1])])
        self.moxed_client.list_networks(
            id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
        binding_port_req_body = {
            'port': {
                'device_id': self.instance.uuid,
                'device_owner': 'compute:nova',
            },
        }
        port_req_body = {
            'port': {
                'network_id': self.nets2[0]['id'],
                'admin_state_up': True,
                'device_id': self.instance.uuid,
                'tenant_id': self.instance.project_id,
            },
        }
        api._populate_neutron_extension_values(self.context,
            self.instance, None, binding_port_req_body).AndReturn(None)
        # The first (and only expected) create_port call fails outright.
        self.moxed_client.create_port(
            MyComparator(port_req_body)).AndRaise(
                Exception("fail to create port"))
        self.mox.ReplayAll()
        self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
                          self.context, self.instance,
                          requested_networks=requested_networks)
    def test_allocate_for_instance_no_port_or_network(self):
        """An empty NetworkRequest resolves to an empty id list passed to
        _get_available_networks; bail out there to end the test early.
        """
        class BailOutEarly(Exception):
            # Sentinel used only to abort allocate_for_instance once the
            # call of interest has been observed.
            pass
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_get_available_networks')
        # Make sure we get an empty list and then bail out of the rest
        # of the function
        api._get_available_networks(self.context, self.instance.project_id,
                                    []).AndRaise(BailOutEarly)
        self.mox.ReplayAll()
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest()])
        self.assertRaises(BailOutEarly,
                          api.allocate_for_instance,
                          self.context, self.instance,
                          requested_networks=requested_networks)
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': 'fake'}
self._returned_nw_info = self.port_data1 + [new_port]
nw_info = self._allocate_for_instance()
self.assertEqual(nw_info, [new_port])
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
    def test_allocate_for_instance_with_externalnet_forbidden(self):
        """Only one network is available, it's external, and the client
        is unauthorized to use it.
        """
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        # no networks in the tenant
        self.moxed_client.list_networks(
            tenant_id=self.instance.project_id,
            shared=False).AndReturn(
                {'networks': model.NetworkInfo([])})
        # external network is shared (nets8 is the external fixture)
        self.moxed_client.list_networks(shared=True).AndReturn(
            {'networks': self.nets8})
        self.mox.ReplayAll()
        api = neutronapi.API()
        # Non-admin context from setUp cannot attach to the external net.
        self.assertRaises(exception.ExternalNetworkAttachForbidden,
                          api.allocate_for_instance,
                          self.context, self.instance)
    def test_allocate_for_instance_with_externalnet_multiple(self):
        """Multiple networks are available, one the client is authorized
        to use, and an external one the client is unauthorized to use.

        With no explicit request, the choice is ambiguous.
        """
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        # network found in the tenant
        self.moxed_client.list_networks(
            tenant_id=self.instance.project_id,
            shared=False).AndReturn(
                {'networks': self.nets1})
        # external network is shared
        self.moxed_client.list_networks(shared=True).AndReturn(
            {'networks': self.nets8})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(
            exception.NetworkAmbiguous,
            api.allocate_for_instance,
            self.context, self.instance)
def test_allocate_for_instance_with_externalnet_admin_ctx(self):
"""Only one network is available, it's external, and the client
is authorized.
"""
admin_ctx = context.RequestContext('userid', 'my_tenantid',
is_admin=True)
api = self._stub_allocate_for_instance(net_idx=8)
api.allocate_for_instance(admin_ctx, self.instance)
def test_allocate_for_instance_with_external_shared_net(self):
"""Only one network is available, it's external and shared."""
ctx = context.RequestContext('userid', 'my_tenantid')
api = self._stub_allocate_for_instance(net_idx=9)
api.allocate_for_instance(ctx, self.instance)
    def _deallocate_for_instance(self, number, requested_networks=None):
        """Exercise deallocate_for_instance against *number* owned ports.

        When requested_networks is given, extra ports matching those
        requests are included in the list_ports reply; such pre-existing
        ports are expected to be updated (detached) rather than deleted.
        """
        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
        # converted to handling instance objects.
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        api = neutronapi.API()
        port_data = number == 1 and self.port_data1 or self.port_data2
        ret_data = copy.deepcopy(port_data)
        if requested_networks:
            if isinstance(requested_networks, objects.NetworkRequestList):
                # NOTE(danms): Temporary and transitional
                with mock.patch('nova.utils.is_neutron', return_value=True):
                    requested_networks = requested_networks.as_tuples()
            for net, fip, port, request_id in requested_networks:
                ret_data.append({'network_id': net,
                                 'device_id': self.instance.uuid,
                                 'device_owner': 'compute:nova',
                                 'id': port,
                                 'status': 'DOWN',
                                 'admin_state_up': True,
                                 'fixed_ips': [],
                                 'mac_address': 'fake_mac', })
        self.moxed_client.list_ports(
            device_id=self.instance.uuid).AndReturn(
                {'ports': ret_data})
        if requested_networks:
            # Requested (pre-existing) ports are unbound, not deleted.
            for net, fip, port, request_id in requested_networks:
                self.moxed_client.update_port(port)
        # Nova-created ports are deleted, in reverse order.
        for port in reversed(port_data):
            self.moxed_client.delete_port(port['id'])
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        # The info cache must end up empty after deallocation.
        api.db.instance_info_cache_update(self.context,
                                          self.instance.uuid,
                                          {'network_info': '[]'})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.deallocate_for_instance(self.context, self.instance,
                                    requested_networks=requested_networks)
def test_deallocate_for_instance_1_with_requested(self):
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id='fake-port')])
# Test to deallocate in one port env.
self._deallocate_for_instance(1, requested_networks=requested)
def test_deallocate_for_instance_2_with_requested(self):
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id='fake-port')])
# Test to deallocate in one port env.
self._deallocate_for_instance(2, requested_networks=requested)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
    def test_deallocate_for_instance_port_not_found(self):
        """A 404 from delete_port (port already gone) must be tolerated."""
        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
        # converted to handling instance objects.
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        port_data = self.port_data1
        self.moxed_client.list_ports(
            device_id=self.instance.uuid).AndReturn(
                {'ports': port_data})
        NeutronNotFound = exceptions.NeutronClientException(status_code=404)
        # Every delete raises 404; deallocation must still complete.
        for port in reversed(port_data):
            self.moxed_client.delete_port(port['id']).AndRaise(
                NeutronNotFound)
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.deallocate_for_instance(self.context, self.instance)
    def _test_deallocate_port_for_instance(self, number):
        """Deallocate the first of *number* ports and check the remaining
        network info returned by deallocate_port_for_instance.
        """
        port_data = number == 1 and self.port_data1 or self.port_data2
        nets = number == 1 and self.nets1 or self.nets2
        # Only the first port gets deleted.
        self.moxed_client.delete_port(port_data[0]['id'])
        net_info_cache = []
        for port in port_data:
            net_info_cache.append({"network": {"id": port['network_id']},
                                   "id": port['id']})
        instance = copy.copy(self.instance)
        instance['info_cache'] = {'network_info':
                                  six.text_type(
                                      jsonutils.dumps(net_info_cache))}
        api = neutronapi.API()
        neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
            self.moxed_client)
        # After deletion, list_ports returns only the surviving ports.
        self.moxed_client.list_ports(
            tenant_id=self.instance['project_id'],
            device_id=self.instance['uuid']).AndReturn(
                {'ports': port_data[1:]})
        neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
            self.moxed_client)
        net_ids = [port['network_id'] for port in port_data]
        self.moxed_client.list_networks(id=net_ids).AndReturn(
            {'networks': nets})
        float_data = number == 1 and self.float_data1 or self.float_data2
        # Expectations for rebuilding nw_info of the remaining ports.
        for data in port_data[1:]:
            for ip in data['fixed_ips']:
                self.moxed_client.list_floatingips(
                    fixed_ip_address=ip['ip_address'],
                    port_id=data['id']).AndReturn(
                        {'floatingips': float_data[1:]})
        for port in port_data[1:]:
            self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
        self.mox.ReplayAll()
        nwinfo = api.deallocate_port_for_instance(self.context, instance,
                                                  port_data[0]['id'])
        self.assertEqual(len(nwinfo), len(port_data[1:]))
        if len(port_data) > 1:
            self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
self.moxed_client.show_port('foo')
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
    def test_validate_networks(self):
        """Two existing networks with ample port quota validate cleanly."""
        requested_networks = [('my_netid1', None, None, None),
                              ('my_netid2', None, None, None)]
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': []})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 50}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)
    def test_validate_networks_without_port_quota_on_network_side(self):
        """A quota reply with no 'port' entry must not break validation."""
        requested_networks = [('my_netid1', None, None, None),
                              ('my_netid2', None, None, None)]
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': []})
        # Note: empty quota dict — no port quota is enforced by neutron.
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', None, None, None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1'])).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2", six.text_type(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', None, None, None),
('my_netid2', None, None, None),
('my_netid3', None, None, None)]
ids = ['my_netid1', 'my_netid2', 'my_netid3']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2, my_netid3", six.text_type(ex))
    def test_validate_networks_duplicate_disable(self):
        """Verify that the correct exception is thrown when duplicate
        network ids are passed to validate_networks, when nova config flag
        allow_duplicate_networks is set to its default value: False
        """
        requested_networks = [('my_netid1', None, None, None),
                              ('my_netid1', None, None, None)]
        # No list_networks expectation is recorded: the duplicate check
        # must fail before neutron is ever queried.
        self.mox.ReplayAll()
        # Expected call from setUp.
        neutronv2.get_client(None)
        api = neutronapi.API()
        self.assertRaises(exception.NetworkDuplicated,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_duplicate_enable(self):
        """Verify that no duplicateNetworks exception is thrown when
        duplicate network ids are passed to validate_networks, when nova
        config flag allow_duplicate_networks is set to its non-default
        value: True.
        """
        self.flags(allow_duplicate_networks=True, group='neutron')
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid1')])
        ids = ['my_netid1', 'my_netid1']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets1})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': []})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 50}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)
def test_allocate_for_instance_with_requested_networks_duplicates(self):
# specify a duplicate network to allocate to instance
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets6[0], self.nets6[1])])
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_port(self):
# specify first port and last port that are in same network
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port['id'])
for port in (self.port_data1[0], self.port_data3[0])])
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_combo(self):
# specify a combo net_idx=7 : net2, port in net1, net2, port in net1
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid2'),
objects.NetworkRequest(port_id=self.port_data1[0]['id']),
objects.NetworkRequest(network_id='my_netid2'),
objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
self._allocate_for_instance(net_idx=7,
requested_networks=requested_networks)
    def test_validate_networks_not_specified(self):
        """With no networks requested and multiple candidates available,
        validation raises NetworkAmbiguous.
        """
        requested_networks = objects.NetworkRequestList(objects=[])
        self.moxed_client.list_networks(
            tenant_id=self.context.project_id,
            shared=False).AndReturn(
                {'networks': self.nets1})
        self.moxed_client.list_networks(
            shared=True).AndReturn(
                {'networks': self.nets2})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.NetworkAmbiguous,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_port_not_found(self):
        """A 404 from show_port is translated into nova's PortNotFound."""
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                network_id='my_netid1',
                port_id='3123-ad34-bc43-32332ca33e')])
        NeutronNotFound = exceptions.NeutronClientException(status_code=404)
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            NeutronNotFound)
        self.mox.ReplayAll()
        # Expected call from setUp.
        neutronv2.get_client(None)
        api = neutronapi.API()
        self.assertRaises(exception.PortNotFound,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_port_show_rasies_non404(self):
        """A non-404 client error from show_port propagates unchanged
        (it must NOT be swallowed or mapped to PortNotFound).
        """
        # NOTE(review): 'rasies' in the method name is a typo for 'raises';
        # left as-is since test names are discovery identifiers.
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                network_id='my_netid1',
                port_id='3123-ad34-bc43-32332ca33e')])
        # status_code=0 is deliberately not 404.
        NeutronNotFound = exceptions.NeutronClientException(status_code=0)
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            NeutronNotFound)
        self.mox.ReplayAll()
        # Expected call from setUp.
        neutronv2.get_client(None)
        api = neutronapi.API()
        self.assertRaises(exceptions.NeutronClientException,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_port_in_use(self):
        """A requested port that show_port reports as in use (port_data3
        fixture) raises PortInUse.
        """
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
        self.moxed_client.show_port(self.port_data3[0]['id']).\
            AndReturn({'port': self.port_data3[0]})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.PortInUse,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_port_no_subnet_id(self):
        """An unattached port without a fixed IP raises
        PortRequiresFixedIP.
        """
        port_a = self.port_data3[0]
        # Clear device fields so the port is not considered in use.
        port_a['device_id'] = None
        port_a['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.PortRequiresFixedIP,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_no_subnet_id(self):
        """A network without subnets (nets4 fixture) raises
        NetworkRequiresSubnet.
        """
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='his_netid4')])
        ids = ['his_netid4']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets4})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.NetworkRequiresSubnet,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_ports_in_same_network_disable(self):
        """Verify that duplicateNetworks exception is thrown when ports on
        the same duplicate network are passed to validate_networks, when
        nova config flag allow_duplicate_networks is set to its default
        False.
        """
        self.flags(allow_duplicate_networks=False, group='neutron')
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data1[0]
        # Sanity check the fixtures: both ports share a network.
        self.assertEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            # Mark both ports as unattached so only the duplicate-network
            # check can trigger.
            port['device_id'] = None
            port['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn(
            {'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn(
            {'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.NetworkDuplicated,
                          api.validate_networks,
                          self.context, requested_networks, 1)
    def test_validate_networks_ports_in_same_network_enable(self):
        """Verify that duplicateNetworks exception is not thrown when ports
        on the same duplicate network are passed to validate_networks, when
        nova config flag allow_duplicate_networks is set to True.
        """
        self.flags(allow_duplicate_networks=True, group='neutron')
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data1[0]
        # Sanity check the fixtures: both ports share a network.
        self.assertEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn(
            {'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn(
            {'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)
    def test_validate_networks_ports_not_in_same_network(self):
        """Two ports on different networks validate without any
        duplicate-network error regardless of config.
        """
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data2[1]
        # Sanity check the fixtures: the ports are on distinct networks.
        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        api.validate_networks(self.context, requested_networks, 1)
    def test_validate_networks_no_quota(self):
        """Quota exhausted: one instance needing two ports, quota 2 with 2
        ports already in use => 0 instances can be created.
        """
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': self.port_data2})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 2}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(max_count, 0)
    def test_validate_networks_with_ports_and_networks(self):
        """One port allocated via nova plus one pre-existing port passed
        in: only the nova-allocated port counts against the quota.
        """
        port_b = self.port_data2[1]
        # Mark the supplied port as unattached so it is accepted.
        port_b['device_id'] = None
        port_b['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        ids = ['my_netid1']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets1})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': self.port_data2})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 5}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(max_count, 1)
    def test_validate_networks_one_port_and_no_networks(self):
        """Only pre-existing ports, no networks: show_quota must not be
        called (no quota expectation is recorded below).
        """
        port_b = self.port_data2[1]
        port_b['device_id'] = None
        port_b['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(max_count, 1)
    def test_validate_networks_some_quota(self):
        """Partial quota: two instances wanting two ports each, quota 5
        with 2 ports in use => only 1 instance can be created.
        """
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': self.port_data2})
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': 5}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 2)
        self.assertEqual(max_count, 1)
    def test_validate_networks_unlimited_quota(self):
        """Unlimited quota (-1): two instances wanting two ports each can
        both be created.
        """
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='my_netid1'),
                     objects.NetworkRequest(network_id='my_netid2')])
        ids = ['my_netid1', 'my_netid2']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets2})
        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
            {'ports': self.port_data2})
        # -1 is neutron's "unlimited" sentinel.
        self.moxed_client.show_quota(
            tenant_id='my_tenantid').AndReturn(
                {'quota': {'port': -1}})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 2)
        self.assertEqual(max_count, 2)
    def test_validate_networks_no_quota_but_ports_supplied(self):
        """All requests are pre-existing ports, so quota is never checked
        (no list_ports/show_quota expectations are recorded).
        """
        port_a = self.port_data3[0]
        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
                               'subnet_id': 'subnet_id'}
        port_b = self.port_data2[1]
        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
        for port in [port_a, port_b]:
            port['device_id'] = None
            port['device_owner'] = None
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id']),
                     objects.NetworkRequest(port_id=port_b['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
        self.mox.ReplayAll()
        api = neutronapi.API()
        max_count = api.validate_networks(self.context,
                                          requested_networks, 1)
        self.assertEqual(max_count, 1)
    def _mock_list_ports(self, port_data=None):
        """Record a list_ports expectation filtered by self.port_address
        and return that address.

        port_data defaults to self.port_data2 (two ports).
        """
        if port_data is None:
            port_data = self.port_data2
        address = self.port_address
        self.moxed_client.list_ports(
            fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
                {'ports': port_data})
        self.mox.ReplayAll()
        return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = neutronapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
    def _get_available_networks(self, prv_nets, pub_nets,
                                req_ids=None, context=None):
        """Drive _get_available_networks and verify the combined result.

        With req_ids, a single id-filtered list_networks call is expected;
        otherwise tenant-private and shared networks are listed in two
        separate calls.
        """
        api = neutronapi.API()
        nets = prv_nets + pub_nets
        if req_ids:
            mox_list_params = {'id': req_ids}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': nets})
        else:
            mox_list_params = {'tenant_id': self.instance['project_id'],
                               'shared': False}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': prv_nets})
            mox_list_params = {'shared': True}
            self.moxed_client.list_networks(
                **mox_list_params).AndReturn({'networks': pub_nets})
        self.mox.ReplayAll()
        rets = api._get_available_networks(
            context if context else self.context,
            self.instance['project_id'],
            req_ids)
        self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_available_networks_with_custom_policy(self):
rules = {'network:attach_external_network':
common_policy.parse_rule('')}
policy.set_rules(rules)
req_ids = [net['id'] for net in self.nets5]
self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip_id': fip_data['port_id'],
'fixed_ip':
{'address': fip_data['fixed_ip_address']},
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
if expected['instance'] is not None:
expected['fixed_ip']['instance_uuid'] = \
expected['instance']['uuid']
return expected
    def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
        """Look up a floating IP by id or by address and compare against
        the expected model.

        idx selects which self.port_data2 entry the fip is attached to.
        """
        api = neutronapi.API()
        fip_id = fip_data['id']
        net_id = fip_data['floating_network_id']
        address = fip_data['floating_ip_address']
        if by_address:
            self.moxed_client.list_floatingips(floating_ip_address=address).\
                AndReturn({'floatingips': [fip_data]})
        else:
            self.moxed_client.show_floatingip(fip_id).\
                AndReturn({'floatingip': fip_data})
        self.moxed_client.show_network(net_id).\
            AndReturn({'network': self.fip_pool})
        # Only an associated fip triggers a port lookup.
        if fip_data['port_id']:
            self.moxed_client.show_port(fip_data['port_id']).\
                AndReturn({'port': self.port_data2[idx]})
        self.mox.ReplayAll()
        expected = self._get_expected_fip_model(fip_data, idx)
        if by_address:
            fip = api.get_floating_ip_by_address(self.context, address)
        else:
            fip = api.get_floating_ip(self.context, fip_id)
        self.assertEqual(expected, fip)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
    """Lookup by address of an unassociated floating IP."""
    self._test_get_floating_ip(self.fip_unassociated, idx=0,
                               by_address=True)
def test_get_floating_ip_by_address_associated(self):
    """Lookup by address of a floating IP attached to a port."""
    self._test_get_floating_ip(self.fip_associated, idx=1,
                               by_address=True)
def test_get_floating_ip_by_address_not_found(self):
    """Empty list_floatingips result raises FloatingIpNotFoundForAddress."""
    api = neutronapi.API()
    address = self.fip_unassociated['floating_ip_address']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': []})
    self.mox.ReplayAll()
    self.assertRaises(exception.FloatingIpNotFoundForAddress,
                      api.get_floating_ip_by_address,
                      self.context, address)
def test_get_floating_ip_by_id_not_found(self):
    """A neutron 404 on show_floatingip maps to nova FloatingIpNotFound."""
    api = neutronapi.API()
    NeutronNotFound = exceptions.NeutronClientException(status_code=404)
    floating_ip_id = self.fip_unassociated['id']
    self.moxed_client.show_floatingip(floating_ip_id).\
        AndRaise(NeutronNotFound)
    self.mox.ReplayAll()
    self.assertRaises(exception.FloatingIpNotFound,
                      api.get_floating_ip,
                      self.context, floating_ip_id)
def test_get_floating_ip_raises_non404(self):
    """A non-404 neutron client error is re-raised, not translated."""
    api = neutronapi.API()
    # status_code=0 stands in for "anything that is not 404".
    NeutronNotFound = exceptions.NeutronClientException(status_code=0)
    floating_ip_id = self.fip_unassociated['id']
    self.moxed_client.show_floatingip(floating_ip_id).\
        AndRaise(NeutronNotFound)
    self.mox.ReplayAll()
    self.assertRaises(exceptions.NeutronClientException,
                      api.get_floating_ip,
                      self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
    """Two floating IPs for one address raise MultipleFoundForAddress."""
    api = neutronapi.API()
    address = self.fip_unassociated['floating_ip_address']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_unassociated] * 2})
    self.mox.ReplayAll()
    self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
                      api.get_floating_ip_by_address,
                      self.context, address)
def test_get_floating_ips_by_project(self):
    """Project floating IPs are resolved via pools and project ports."""
    api = neutronapi.API()
    project_id = self.context.project_id
    self.moxed_client.list_floatingips(tenant_id=project_id).\
        AndReturn({'floatingips': [self.fip_unassociated,
                                   self.fip_associated]})
    search_opts = {'router:external': True}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
    self.moxed_client.list_ports(tenant_id=project_id).\
        AndReturn({'ports': self.port_data2})
    self.mox.ReplayAll()
    expected = [self._get_expected_fip_model(self.fip_unassociated),
                self._get_expected_fip_model(self.fip_associated, idx=1)]
    fips = api.get_floating_ips_by_project(self.context)
    self.assertEqual(expected, fips)
def _test_get_instance_id_by_floating_address(self, fip_data,
                                              associated=False):
    """Driver: resolve the instance id that owns a floating address.

    When associated, the port behind the floating IP is fetched and its
    device_id is the expected instance id; otherwise None is expected.
    """
    api = neutronapi.API()
    address = fip_data['floating_ip_address']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [fip_data]})
    if associated:
        self.moxed_client.show_port(fip_data['port_id']).\
            AndReturn({'port': self.port_data2[1]})
    self.mox.ReplayAll()
    if associated:
        expected = self.port_data2[1]['device_id']
    else:
        expected = None
    fip = api.get_instance_id_by_floating_address(self.context, address)
    self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
    """Unassociated floating address resolves to no instance."""
    self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
    """Associated floating address resolves to the port's device_id."""
    self._test_get_instance_id_by_floating_address(self.fip_associated,
                                                   associated=True)
def test_allocate_floating_ip(self):
    """Allocating from a named pool creates a floating IP on that net."""
    api = neutronapi.API()
    pool_name = self.fip_pool['name']
    pool_id = self.fip_pool['id']
    search_opts = {'router:external': True,
                   'fields': 'id',
                   'name': pool_name}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool]})
    self.moxed_client.create_floatingip(
        {'floatingip': {'floating_network_id': pool_id}}).\
        AndReturn({'floatingip': self.fip_unassociated})
    self.mox.ReplayAll()
    fip = api.allocate_floating_ip(self.context, 'ext_net')
    # expected value first, consistent with assertEqual use elsewhere
    # in this file.
    self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_allocate_floating_ip_addr_gen_fail(self):
    """IpAddressGenerationFailureClient maps to NoMoreFloatingIps."""
    api = neutronapi.API()
    pool_name = self.fip_pool['name']
    pool_id = self.fip_pool['id']
    search_opts = {'router:external': True,
                   'fields': 'id',
                   'name': pool_name}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool]})
    self.moxed_client.create_floatingip(
        {'floatingip': {'floating_network_id': pool_id}}).\
        AndRaise(exceptions.IpAddressGenerationFailureClient)
    self.mox.ReplayAll()
    self.assertRaises(exception.NoMoreFloatingIps,
                      api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_exhausted_fail(self):
    """ExternalIpAddressExhaustedClient maps to NoMoreFloatingIps."""
    api = neutronapi.API()
    pool_name = self.fip_pool['name']
    pool_id = self.fip_pool['id']
    search_opts = {'router:external': True,
                   'fields': 'id',
                   'name': pool_name}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool]})
    self.moxed_client.create_floatingip(
        {'floatingip': {'floating_network_id': pool_id}}).\
        AndRaise(exceptions.ExternalIpAddressExhaustedClient)
    self.mox.ReplayAll()
    self.assertRaises(exception.NoMoreFloatingIps,
                      api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_with_pool_id(self):
    """Allocation also works when the pool is given by id, not name."""
    api = neutronapi.API()
    pool_id = self.fip_pool['id']
    search_opts = {'router:external': True,
                   'fields': 'id',
                   'id': pool_id}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool]})
    self.moxed_client.create_floatingip(
        {'floatingip': {'floating_network_id': pool_id}}).\
        AndReturn({'floatingip': self.fip_unassociated})
    self.mox.ReplayAll()
    fip = api.allocate_floating_ip(self.context, pool_id)
    # expected value first, consistent with assertEqual use elsewhere
    # in this file.
    self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_allocate_floating_ip_with_default_pool(self):
    """With no pool argument the configured default pool is used."""
    api = neutronapi.API()
    pool_name = self.fip_pool_nova['name']
    pool_id = self.fip_pool_nova['id']
    search_opts = {'router:external': True,
                   'fields': 'id',
                   'name': pool_name}
    self.moxed_client.list_networks(**search_opts).\
        AndReturn({'networks': [self.fip_pool_nova]})
    self.moxed_client.create_floatingip(
        {'floatingip': {'floating_network_id': pool_id}}).\
        AndReturn({'floatingip': self.fip_unassociated})
    self.mox.ReplayAll()
    fip = api.allocate_floating_ip(self.context)
    # expected value first, consistent with assertEqual use elsewhere
    # in this file.
    self.assertEqual(self.fip_unassociated['floating_ip_address'], fip)
def test_release_floating_ip(self):
    """Releasing an unassociated floating IP deletes it in neutron."""
    api = neutronapi.API()
    address = self.fip_unassociated['floating_ip_address']
    fip_id = self.fip_unassociated['id']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_unassociated]})
    self.moxed_client.delete_floatingip(fip_id)
    self.mox.ReplayAll()
    api.release_floating_ip(self.context, address)
def test_disassociate_and_release_floating_ip(self):
    """disassociate_and_release deletes the floating IP by address."""
    api = neutronapi.API()
    address = self.fip_unassociated['floating_ip_address']
    fip_id = self.fip_unassociated['id']
    floating_ip = {'address': address}
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_unassociated]})
    self.moxed_client.delete_floatingip(fip_id)
    self.mox.ReplayAll()
    # Instance argument is unused for the neutron backend here.
    api.disassociate_and_release_floating_ip(self.context, None,
                                             floating_ip)
def test_release_floating_ip_associated(self):
    """Releasing an associated floating IP raises FloatingIpAssociated."""
    api = neutronapi.API()
    address = self.fip_associated['floating_ip_address']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_associated]})
    self.mox.ReplayAll()
    self.assertRaises(exception.FloatingIpAssociated,
                      api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
    """Stub the info-cache refresh the API does after network changes.

    For each instance, expects one _get_instance_nw_info call and one
    instance_info_cache_update DB write (the nw_info is serialized via
    json() before the cache write).
    """
    nw_info = self.mox.CreateMock(model.NetworkInfo)
    self.mox.StubOutWithMock(api, '_get_instance_nw_info')
    self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
    for instance in instances:
        nw_info.json()
        api._get_instance_nw_info(mox.IgnoreArg(), instance).\
            AndReturn(nw_info)
        api.db.instance_info_cache_update(mox.IgnoreArg(),
                                          instance['uuid'],
                                          mox.IgnoreArg())
def test_associate_floating_ip(self):
    """Association updates the floating IP's port/fixed-ip and refreshes
    the instance's network info cache.
    """
    api = neutronapi.API()
    address = self.fip_unassociated['floating_ip_address']
    fixed_address = self.port_address2
    fip_id = self.fip_unassociated['id']
    search_opts = {'device_owner': 'compute:nova',
                   'device_id': self.instance['uuid']}
    self.moxed_client.list_ports(**search_opts).\
        AndReturn({'ports': [self.port_data2[1]]})
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_unassociated]})
    self.moxed_client.update_floatingip(
        fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
                                'fixed_ip_address': fixed_address}})
    self._setup_mock_for_refresh_cache(api, [self.instance])
    self.mox.ReplayAll()
    api.associate_floating_ip(self.context, self.instance,
                              address, fixed_address)
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_reassociate_floating_ip(self, mock_get):
    """Reassociating an already-associated floating IP to instance2
    updates the floating IP and refreshes caches for BOTH the old and
    the new instance.
    """
    api = neutronapi.API()
    address = self.fip_associated['floating_ip_address']
    new_fixed_address = self.port_address
    fip_id = self.fip_associated['id']
    search_opts = {'device_owner': 'compute:nova',
                   'device_id': self.instance2['uuid']}
    self.moxed_client.list_ports(**search_opts).\
        AndReturn({'ports': [self.port_data2[0]]})
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_associated]})
    self.moxed_client.update_floatingip(
        fip_id, {'floatingip': {'port_id': 'my_portid1',
                                'fixed_ip_address': new_fixed_address}})
    # The previously-associated port is looked up to find the old owner.
    self.moxed_client.show_port(self.fip_associated['port_id']).\
        AndReturn({'port': self.port_data2[1]})
    mock_get.return_value = fake_instance.fake_instance_obj(
        self.context, **self.instance)
    self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
                                             self.instance2])
    self.mox.ReplayAll()
    api.associate_floating_ip(self.context, self.instance2,
                              address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
    """Fixed address not on any instance port raises FixedIpNotFound.

    port_data2[0] does not carry the requested fixed_ip_address, so the
    association must fail before touching the floating IP.
    """
    api = neutronapi.API()
    address = self.fip_associated['floating_ip_address']
    fixed_address = self.fip_associated['fixed_ip_address']
    search_opts = {'device_owner': 'compute:nova',
                   'device_id': self.instance['uuid']}
    self.moxed_client.list_ports(**search_opts).\
        AndReturn({'ports': [self.port_data2[0]]})
    self.mox.ReplayAll()
    self.assertRaises(exception.FixedIpNotFoundForAddress,
                      api.associate_floating_ip, self.context,
                      self.instance, address, fixed_address)
def test_disassociate_floating_ip(self):
    """Disassociation nulls the floating IP's port_id and refreshes
    the instance's network info cache.
    """
    api = neutronapi.API()
    address = self.fip_associated['floating_ip_address']
    fip_id = self.fip_associated['id']
    self.moxed_client.list_floatingips(floating_ip_address=address).\
        AndReturn({'floatingips': [self.fip_associated]})
    self.moxed_client.update_floatingip(
        fip_id, {'floatingip': {'port_id': None}})
    self._setup_mock_for_refresh_cache(api, [self.instance])
    self.mox.ReplayAll()
    api.disassociate_floating_ip(self.context, self.instance, address)
def test_add_fixed_ip_to_instance(self):
    """Adding a fixed IP appends a second fixed_ip entry (same subnet)
    to the instance's existing port via update_port.
    """
    api = neutronapi.API()
    self._setup_mock_for_refresh_cache(api, [self.instance])
    network_id = 'my_netid1'
    search_opts = {'network_id': network_id}
    self.moxed_client.list_subnets(
        **search_opts).AndReturn({'subnets': self.subnet_data_n})
    search_opts = {'device_id': self.instance['uuid'],
                   'device_owner': 'compute:nova',
                   'network_id': network_id}
    self.moxed_client.list_ports(
        **search_opts).AndReturn({'ports': self.port_data1})
    # Expected request: the port's current fixed_ip plus one new entry
    # on the same subnet.
    port_req_body = {
        'port': {
            'fixed_ips': [{'subnet_id': 'my_subid1'},
                          {'subnet_id': 'my_subid1'}],
        },
    }
    port = self.port_data1[0]
    port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
    self.moxed_client.update_port('my_portid1',
                                  MyComparator(port_req_body)).AndReturn({'port': port})
    self.mox.ReplayAll()
    api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
def test_remove_fixed_ip_from_instance(self):
    """Removing a fixed IP updates the port with an empty fixed_ips list."""
    api = neutronapi.API()
    self._setup_mock_for_refresh_cache(api, [self.instance])
    address = '10.0.0.3'
    zone = 'compute:%s' % self.instance['availability_zone']
    search_opts = {'device_id': self.instance['uuid'],
                   'device_owner': zone,
                   'fixed_ips': 'ip_address=%s' % address}
    self.moxed_client.list_ports(
        **search_opts).AndReturn({'ports': self.port_data1})
    port_req_body = {
        'port': {
            'fixed_ips': [],
        },
    }
    port = self.port_data1[0]
    port['fixed_ips'] = []
    self.moxed_client.update_port('my_portid1',
                                  MyComparator(port_req_body)).AndReturn({'port': port})
    self.mox.ReplayAll()
    api.remove_fixed_ip_from_instance(self.context, self.instance, address)
def test_list_floating_ips_without_l3_support(self):
    """A 404 from list_floatingips (no L3 extension) yields []."""
    api = neutronapi.API()
    NeutronNotFound = exceptions.NeutronClientException(
        status_code=404)
    self.moxed_client.list_floatingips(
        fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    floatingips = api._get_floating_ips_by_fixed_and_port(
        self.moxed_client, '1.1.1.1', 1)
    self.assertEqual(floatingips, [])
def test_nw_info_get_ips(self):
    """_nw_info_get_ips builds one fixed IP carrying its floating IPs."""
    fake_port = {
        'fixed_ips': [
            {'ip_address': '1.1.1.1'}],
        'id': 'port-id',
    }
    api = neutronapi.API()
    self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
    api._get_floating_ips_by_fixed_and_port(
        self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
            [{'floating_ip_address': '10.0.0.1'}])
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    result = api._nw_info_get_ips(self.moxed_client, fake_port)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0]['address'], '1.1.1.1')
    self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
def test_nw_info_get_subnets(self):
    """_nw_info_get_subnets keeps only the fixed IPs that fall inside
    each subnet's CIDR (2.2.2.2 is outside 1.0.0.0/8).
    """
    fake_port = {
        'fixed_ips': [
            {'ip_address': '1.1.1.1'},
            {'ip_address': '2.2.2.2'}],
        'id': 'port-id',
    }
    fake_subnet = model.Subnet(cidr='1.0.0.0/8')
    fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
    api = neutronapi.API()
    self.mox.StubOutWithMock(api, '_get_subnets_from_port')
    api._get_subnets_from_port(self.context, fake_port).AndReturn(
        [fake_subnet])
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
    self.assertEqual(len(subnets), 1)
    self.assertEqual(len(subnets[0]['ips']), 1)
    self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
def _test_nw_info_build_network(self, vif_type):
    """Driver: build a network model for a port of the given vif_type.

    Verifies the fields common to all vif types and returns (net, iid)
    so per-type tests can check bridge/interface-id specifics.
    """
    fake_port = {
        'fixed_ips': [{'ip_address': '1.1.1.1'}],
        'id': 'port-id',
        'network_id': 'net-id',
        'binding:vif_type': vif_type,
    }
    fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
    fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
    api = neutronapi.API()
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                          fake_subnets)
    self.assertEqual(net['subnets'], fake_subnets)
    self.assertEqual(net['id'], 'net-id')
    self.assertEqual(net['label'], 'foo')
    self.assertEqual(net.get_meta('tenant_id'), 'tenant')
    self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
    return net, iid
def test_nw_info_build_network_ovs(self):
    """OVS vif: configured OVS bridge, no bridge creation, port id kept."""
    net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
    self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
    self.assertNotIn('should_create_bridge', net)
    self.assertEqual(iid, 'port-id')
def test_nw_info_build_network_dvs(self):
    """DVS vif: bridge is '<label>-<net-id>', no OVS interface id."""
    net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
    self.assertEqual('foo-net-id', net['bridge'])
    self.assertNotIn('should_create_bridge', net)
    self.assertNotIn('ovs_interfaceid', net)
    self.assertIsNone(iid)
def test_nw_info_build_network_bridge(self):
    """Linux-bridge vif: 'brq' + net-id bridge that must be created."""
    net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
    self.assertEqual(net['bridge'], 'brqnet-id')
    self.assertTrue(net['should_create_bridge'])
    self.assertIsNone(iid)
def test_nw_info_build_network_other(self):
    """Unknown vif type: no bridge and no interface id."""
    net, iid = self._test_nw_info_build_network(None)
    self.assertIsNone(net['bridge'])
    self.assertNotIn('should_create_bridge', net)
    self.assertIsNone(iid)
def test_nw_info_build_no_match(self):
    """Port whose network_id matches none of the known networks: the
    model falls back to the port's own network id and tenant.
    """
    fake_port = {
        'fixed_ips': [{'ip_address': '1.1.1.1'}],
        'id': 'port-id',
        'network_id': 'net-id1',
        'tenant_id': 'tenant',
        'binding:vif_type': model.VIF_TYPE_OVS,
    }
    fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
    fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
    api = neutronapi.API()
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                          fake_subnets)
    self.assertEqual(fake_subnets, net['subnets'])
    self.assertEqual('net-id1', net['id'])
    # Was a duplicated 'id' assertion: with no matching network the
    # label falls back to the network id, so assert the label here.
    self.assertEqual('net-id1', net['label'])
    self.assertEqual('tenant', net['meta']['tenant_id'])
def test_build_network_info_model(self):
    """_build_network_info_model over a mixed set of ports.

    Checks that results follow the requested port-id ordering (not the
    list_ports order), that ports on unknown networks are skipped
    (port6), that vif.active mirrors admin_state_up/status, and that
    vnic_type defaults to NORMAL when the binding attribute is absent.
    """
    api = neutronapi.API()
    fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
                 'info_cache': {'network_info': []}}
    fake_ports = [
        # admin_state_up=True and status='ACTIVE' thus vif.active=True
        {'id': 'port1',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'ACTIVE',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:01',
         'binding:vif_type': model.VIF_TYPE_BRIDGE,
         'binding:vnic_type': model.VNIC_TYPE_NORMAL,
         'binding:vif_details': {},
         },
        # admin_state_up=False and status='DOWN' thus vif.active=True
        {'id': 'port2',
         'network_id': 'net-id',
         'admin_state_up': False,
         'status': 'DOWN',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:02',
         'binding:vif_type': model.VIF_TYPE_BRIDGE,
         'binding:vnic_type': model.VNIC_TYPE_NORMAL,
         'binding:vif_details': {},
         },
        # admin_state_up=True and status='DOWN' thus vif.active=False
        {'id': 'port0',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'DOWN',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:03',
         'binding:vif_type': model.VIF_TYPE_BRIDGE,
         'binding:vnic_type': model.VNIC_TYPE_NORMAL,
         'binding:vif_details': {},
         },
        # admin_state_up=True and status='ACTIVE' thus vif.active=True
        {'id': 'port3',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'ACTIVE',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:04',
         'binding:vif_type': model.VIF_TYPE_HW_VEB,
         'binding:vnic_type': model.VNIC_TYPE_DIRECT,
         'binding:profile': {'pci_vendor_info': '1137:0047',
                             'pci_slot': '0000:0a:00.1',
                             'physical_network': 'phynet1'},
         'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
         },
        # admin_state_up=True and status='ACTIVE' thus vif.active=True
        {'id': 'port4',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'ACTIVE',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:05',
         'binding:vif_type': model.VIF_TYPE_802_QBH,
         'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
         'binding:profile': {'pci_vendor_info': '1137:0047',
                             'pci_slot': '0000:0a:00.2',
                             'physical_network': 'phynet1'},
         'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
         },
        # admin_state_up=True and status='ACTIVE' thus vif.active=True
        # This port has no binding:vnic_type to verify default is assumed
        {'id': 'port5',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'ACTIVE',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:06',
         'binding:vif_type': model.VIF_TYPE_BRIDGE,
         # No binding:vnic_type
         'binding:vif_details': {},
         },
        # This does not match the networks we provide below,
        # so it should be ignored (and is here to verify that)
        {'id': 'port6',
         'network_id': 'other-net-id',
         'admin_state_up': True,
         'status': 'DOWN',
         'binding:vnic_type': model.VNIC_TYPE_NORMAL,
         },
    ]
    fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
    fake_nets = [
        {'id': 'net-id',
         'name': 'foo',
         'tenant_id': 'fake',
         }
    ]
    neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
        ).AndReturn(self.moxed_client)
    self.moxed_client.list_ports(
        tenant_id='fake', device_id='uuid').AndReturn(
            {'ports': fake_ports})
    self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
    self.mox.StubOutWithMock(api, '_get_subnets_from_port')
    # Expected result order (port0..port5); port6 is excluded.
    requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
                       fake_ports[3], fake_ports[4], fake_ports[5]]
    for requested_port in requested_ports:
        api._get_floating_ips_by_fixed_and_port(
            self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
                [{'floating_ip_address': '10.0.0.1'}])
    for requested_port in requested_ports:
        api._get_subnets_from_port(self.context, requested_port
                                   ).AndReturn(fake_subnets)
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    nw_infos = api._build_network_info_model(self.context, fake_inst,
                                             fake_nets,
                                             [fake_ports[2]['id'],
                                              fake_ports[0]['id'],
                                              fake_ports[1]['id'],
                                              fake_ports[3]['id'],
                                              fake_ports[4]['id'],
                                              fake_ports[5]['id']])
    self.assertEqual(len(nw_infos), 6)
    index = 0
    for nw_info in nw_infos:
        self.assertEqual(nw_info['address'],
                         requested_ports[index]['mac_address'])
        self.assertEqual(nw_info['devname'], 'tapport' + str(index))
        self.assertIsNone(nw_info['ovs_interfaceid'])
        self.assertEqual(nw_info['type'],
                         requested_ports[index]['binding:vif_type'])
        if nw_info['type'] == model.VIF_TYPE_BRIDGE:
            self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
        self.assertEqual(nw_info['vnic_type'],
                         requested_ports[index].get('binding:vnic_type',
                                                    model.VNIC_TYPE_NORMAL))
        self.assertEqual(nw_info.get('details'),
                         requested_ports[index].get('binding:vif_details'))
        self.assertEqual(nw_info.get('profile'),
                         requested_ports[index].get('binding:profile'))
        index += 1
    # Only port0 (admin up but status DOWN) is inactive.
    self.assertEqual(nw_infos[0]['active'], False)
    self.assertEqual(nw_infos[1]['active'], True)
    self.assertEqual(nw_infos[2]['active'], True)
    self.assertEqual(nw_infos[3]['active'], True)
    self.assertEqual(nw_infos[4]['active'], True)
    self.assertEqual(nw_infos[5]['active'], True)
    self.assertEqual(nw_infos[0]['id'], 'port0')
    self.assertEqual(nw_infos[1]['id'], 'port1')
    self.assertEqual(nw_infos[2]['id'], 'port2')
    self.assertEqual(nw_infos[3]['id'], 'port3')
    self.assertEqual(nw_infos[4]['id'], 'port4')
    self.assertEqual(nw_infos[5]['id'], 'port5')
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets')
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips')
@mock.patch('nova.network.neutronv2.api.API._nw_info_build_network')
@mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks')
def test_build_network_info_model_empty(
        self, mock_gather_port_ids_and_networks,
        mock_nw_info_build_network,
        mock_nw_info_get_ips,
        mock_nw_info_get_subnets):
    """With an empty (but present) info cache, a model is still built
    for the instance's single port.
    """
    api = neutronapi.API()
    fake_inst = objects.Instance()
    fake_inst.project_id = 'fake'
    fake_inst.uuid = 'uuid'
    fake_inst.info_cache = objects.InstanceInfoCache()
    fake_inst.info_cache.network_info = model.NetworkInfo()
    fake_ports = [
        # admin_state_up=True and status='ACTIVE' thus vif.active=True
        {'id': 'port1',
         'network_id': 'net-id',
         'admin_state_up': True,
         'status': 'ACTIVE',
         'fixed_ips': [{'ip_address': '1.1.1.1'}],
         'mac_address': 'de:ad:be:ef:00:01',
         'binding:vif_type': model.VIF_TYPE_BRIDGE,
         'binding:vnic_type': model.VNIC_TYPE_NORMAL,
         'binding:vif_details': {},
         },
    ]
    fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
    neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
        ).AndReturn(self.moxed_client)
    self.moxed_client.list_ports(
        tenant_id='fake', device_id='uuid').AndReturn(
            {'ports': fake_ports})
    mock_gather_port_ids_and_networks.return_value = (None, None)
    mock_nw_info_build_network.return_value = (None, None)
    mock_nw_info_get_ips.return_value = []
    mock_nw_info_get_subnets.return_value = fake_subnets
    self.mox.ReplayAll()
    neutronv2.get_client('fake')
    nw_infos = api._build_network_info_model(
        self.context, fake_inst)
    self.assertEqual(len(nw_infos), 1)
def test_get_subnets_from_port(self):
    """Subnet host_routes are translated into route cidr/gateway pairs."""
    api = neutronapi.API()
    port_data = copy.copy(self.port_data1[0])
    subnet_data1 = copy.copy(self.subnet_data1)
    subnet_data1[0]['host_routes'] = [
        {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
    ]
    self.moxed_client.list_subnets(
        id=[port_data['fixed_ips'][0]['subnet_id']]
        ).AndReturn({'subnets': subnet_data1})
    # DHCP ports on the network are consulted (none here).
    self.moxed_client.list_ports(
        network_id=subnet_data1[0]['network_id'],
        device_owner='network:dhcp').AndReturn({'ports': []})
    self.mox.ReplayAll()
    subnets = api._get_subnets_from_port(self.context, port_data)
    self.assertEqual(len(subnets), 1)
    self.assertEqual(len(subnets[0]['routes']), 1)
    self.assertEqual(subnets[0]['routes'][0]['cidr'],
                     subnet_data1[0]['host_routes'][0]['destination'])
    self.assertEqual(subnets[0]['routes'][0]['gateway']['address'],
                     subnet_data1[0]['host_routes'][0]['nexthop'])
def test_get_all_empty_list_networks(self):
    """get_all returns [] when neutron reports no networks."""
    api = neutronapi.API()
    self.moxed_client.list_networks().AndReturn({'networks': []})
    self.mox.ReplayAll()
    networks = api.get_all(self.context)
    self.assertEqual(networks, [])
def test_get_floating_ips_by_fixed_address(self):
    """The neutron backend does not implement this lookup."""
    # NOTE(lbragstad): We need to reset the mocks in order to assert
    # a NotImplementedError is raised when calling the method under test.
    self.mox.ResetAll()
    fake_fixed = '192.168.1.4'
    api = neutronapi.API()
    self.assertRaises(NotImplementedError,
                      api.get_floating_ips_by_fixed_address,
                      self.context, fake_fixed)
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_1(self, mock_get_client):
    """_get_port_vnic_info returns the port's vnic type and the
    physical network of the port's network.
    """
    api = neutronapi.API()
    self.mox.ResetAll()
    test_port = {
        'port': {'id': 'my_port_id1',
                 'network_id': 'net-id',
                 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
                 },
    }
    test_net = {'network': {'provider:physical_network': 'phynet1'}}

    mock_client = mock_get_client()
    mock_client.show_port.return_value = test_port
    mock_client.show_network.return_value = test_net
    vnic_type, phynet_name = api._get_port_vnic_info(
        self.context, mock_client, test_port['port']['id'])

    # Only the two needed fields should be requested from neutron.
    mock_client.show_port.assert_called_once_with(test_port['port']['id'],
        fields=['binding:vnic_type', 'network_id'])
    mock_client.show_network.assert_called_once_with(
        test_port['port']['network_id'],
        fields='provider:physical_network')
    self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
    self.assertEqual(phynet_name, 'phynet1')
def _test_get_port_vnic_info(self, mock_get_client,
                             binding_vnic_type=None):
    """Driver: for NORMAL (explicit or missing) vnic types the result
    is (VNIC_TYPE_NORMAL, <falsy phynet>) and the network is not fetched.
    """
    api = neutronapi.API()
    self.mox.ResetAll()
    test_port = {
        'port': {'id': 'my_port_id2',
                 'network_id': 'net-id',
                 },
    }
    if binding_vnic_type:
        test_port['port']['binding:vnic_type'] = binding_vnic_type

    mock_client = mock_get_client()
    mock_client.show_port.return_value = test_port
    vnic_type, phynet_name = api._get_port_vnic_info(
        self.context, mock_client, test_port['port']['id'])

    mock_client.show_port.assert_called_once_with(test_port['port']['id'],
        fields=['binding:vnic_type', 'network_id'])
    self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
    self.assertFalse(phynet_name)
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_2(self, mock_get_client):
    """Port explicitly bound as VNIC_TYPE_NORMAL."""
    self._test_get_port_vnic_info(mock_get_client,
                                  binding_vnic_type=model.VNIC_TYPE_NORMAL)
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_3(self, mock_get_client):
    """Port with no binding:vnic_type at all (default assumed)."""
    self._test_get_port_vnic_info(mock_get_client)
@mock.patch.object(neutronapi.API, "_get_port_vnic_info")
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
                                             mock_get_port_vnic_info):
    """Only DIRECT/MACVTAP ports produce PCI requests; NORMAL ports and
    plain network requests do not get a pci_request_id.
    """
    api = neutronapi.API()
    self.mox.ResetAll()
    requested_networks = objects.NetworkRequestList(
        objects=[
            objects.NetworkRequest(port_id='my_portid1'),
            objects.NetworkRequest(network_id='net1'),
            objects.NetworkRequest(port_id='my_portid2'),
            objects.NetworkRequest(port_id='my_portid3'),
            objects.NetworkRequest(port_id='my_portid4')])
    pci_requests = objects.InstancePCIRequests(requests=[])
    # One vnic-info result per port_id request, in order.
    mock_get_port_vnic_info.side_effect = [
        (model.VNIC_TYPE_DIRECT, 'phynet1'),
        (model.VNIC_TYPE_NORMAL, ''),
        (model.VNIC_TYPE_MACVTAP, 'phynet1'),
        (model.VNIC_TYPE_MACVTAP, 'phynet2')
    ]
    api.create_pci_requests_for_sriov_ports(
        None, pci_requests, requested_networks)
    self.assertEqual(3, len(pci_requests.requests))
    has_pci_request_id = [net.pci_request_id is not None for net in
                          requested_networks.objects]
    expected_results = [True, False, False, True, True]
    self.assertEqual(expected_results, has_pci_request_id)
class TestNeutronv2WithMock(test.TestCase):
    """Tests for the Neutron V2 API driven by mock (not mox)."""

    def setUp(self):
        super(TestNeutronv2WithMock, self).setUp()
        self.api = neutronapi.API()
        # Arbitrary fake token; only its presence in the context matters.
        self.context = context.RequestContext(
            'fake-user', 'fake-project',
            auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
@mock.patch('nova.openstack.common.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
    """get_instance_nw_info takes a per-instance refresh_cache lock.

    The lock mock raises so the test stops right after the lock is
    acquired and the lock name can be asserted.
    """
    instance = objects.Instance(uuid=uuid.uuid4())
    api = neutronapi.API()
    mock_lock.side_effect = test.TestingException
    self.assertRaises(test.TestingException,
                      api.get_instance_nw_info, 'context', instance)
    mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
                                            ids, list_port_values):
    """Driver: validate_networks succeeds when requested fixed IPs are
    not already in use.

    list_port_values maps expected list_ports kwargs to canned results;
    an unexpected list_ports call fails the test.
    """

    def _fake_list_ports(**search_opts):
        for args, return_value in list_port_values:
            if args == search_opts:
                return return_value
        self.fail('Unexpected call to list_ports %s' % search_opts)

    with contextlib.nested(
        mock.patch.object(client.Client, 'list_ports',
                          side_effect=_fake_list_ports),
        mock.patch.object(client.Client, 'list_networks',
                          return_value={'networks': nets}),
        mock.patch.object(client.Client, 'show_quota',
                          return_value={'quota': {'port': 50}})) as (
            list_ports_mock, list_networks_mock, show_quota_mock):
        self.api.validate_networks(self.context, requested_networks, 1)
        self.assertEqual(len(list_port_values),
                         len(list_ports_mock.call_args_list))
        list_networks_mock.assert_called_once_with(id=ids)
        show_quota_mock.assert_called_once_with(tenant_id='fake-project')
def test_validate_networks_fixed_ip_no_dup1(self):
    """Requested fixed IP is free because no fixed IPs are in use."""
    # Test validation for a request for a network with a
    # fixed ip that is not already in use because no fixed ips in use
    nets1 = [{'id': 'my_netid1',
              'name': 'my_netname1',
              'subnets': ['mysubnid1'],
              'tenant_id': 'fake-project'}]

    requested_networks = [('my_netid1', '10.0.1.2', None, None)]
    ids = ['my_netid1']
    list_port_values = [({'network_id': 'my_netid1',
                          'fixed_ips': 'ip_address=10.0.1.2',
                          'fields': 'device_id'},
                         {'ports': []}),
                        ({'tenant_id': 'fake-project'},
                         {'ports': []})]
    self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
                                                 ids, list_port_values)
def test_validate_networks_fixed_ip_no_dup2(self):
    """Requested fixed IPs are free on their respective networks."""
    # Test validation for a request for a network with a
    # fixed ip that is not already in use because not used on this net id
    nets2 = [{'id': 'my_netid1',
              'name': 'my_netname1',
              'subnets': ['mysubnid1'],
              'tenant_id': 'fake-project'},
             {'id': 'my_netid2',
              'name': 'my_netname2',
              'subnets': ['mysubnid2'],
              'tenant_id': 'fake-project'}]

    requested_networks = [('my_netid1', '10.0.1.2', None, None),
                          ('my_netid2', '10.0.1.3', None, None)]
    ids = ['my_netid1', 'my_netid2']
    list_port_values = [({'network_id': 'my_netid1',
                          'fixed_ips': 'ip_address=10.0.1.2',
                          'fields': 'device_id'},
                         {'ports': []}),
                        ({'network_id': 'my_netid2',
                          'fixed_ips': 'ip_address=10.0.1.3',
                          'fields': 'device_id'},
                         {'ports': []}),
                        ({'tenant_id': 'fake-project'},
                         {'ports': []})]
    self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
                                                 ids, list_port_values)
def test_validate_networks_fixed_ip_dup(self):
    """A fixed IP already bound to a device raises FixedIpAlreadyInUse."""
    # Test validation for a request for a network with a
    # fixed ip that is already in use
    requested_networks = [('my_netid1', '10.0.1.2', None, None)]
    list_port_mock_params = {'network_id': 'my_netid1',
                             'fixed_ips': 'ip_address=10.0.1.2',
                             'fields': 'device_id'}
    list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}

    with mock.patch.object(client.Client, 'list_ports',
                           return_value=list_port_mock_return) as (
        list_ports_mock):
        self.assertRaises(exception.FixedIpAlreadyInUse,
                          self.api.validate_networks,
                          self.context, requested_networks, 1)
        list_ports_mock.assert_called_once_with(**list_port_mock_params)
def test_allocate_floating_ip_exceed_limit(self):
    """OverQuotaClient from neutron maps to FloatingIpLimitExceeded."""
    # Verify that the correct exception is thrown when quota exceed
    pool_name = 'dummy'
    api = neutronapi.API()
    with contextlib.nested(
        mock.patch.object(client.Client, 'create_floatingip'),
        mock.patch.object(api,
            '_get_floating_ip_pool_id_by_name_or_id')) as (
        create_mock, get_mock):
        create_mock.side_effect = exceptions.OverQuotaClient()

        self.assertRaises(exception.FloatingIpLimitExceeded,
                          api.allocate_floating_ip,
                          self.context, pool_name)
def test_create_port_for_instance_no_more_ip(self):
    """IpAddressGenerationFailureClient on create_port maps to
    NoMoreFixedIps.
    """
    instance = fake_instance.fake_instance_obj(self.context)
    net = {'id': 'my_netid1',
           'name': 'my_netname1',
           'subnets': ['mysubnid1'],
           'tenant_id': instance['project_id']}

    with mock.patch.object(client.Client, 'create_port',
        side_effect=exceptions.IpAddressGenerationFailureClient()) as (
        create_port_mock):
        zone = 'compute:%s' % instance['availability_zone']
        port_req_body = {'port': {'device_id': instance['uuid'],
                                  'device_owner': zone}}
        self.assertRaises(exception.NoMoreFixedIps,
                          self.api._create_port,
                          neutronv2.get_client(self.context),
                          instance, net['id'], port_req_body)
        create_port_mock.assert_called_once_with(port_req_body)
    @mock.patch.object(client.Client, 'create_port',
                       side_effect=exceptions.MacAddressInUseClient())
    def test_create_port_for_instance_mac_address_in_use(self,
                                                         create_port_mock):
        # A MacAddressInUseClient error from Neutron must be reported as
        # Nova's PortInUse when an explicit MAC address was requested.
        # Create fake data.
        instance = fake_instance.fake_instance_obj(self.context)
        net = {'id': 'my_netid1',
               'name': 'my_netname1',
               'subnets': ['mysubnid1'],
               'tenant_id': instance['project_id']}
        zone = 'compute:%s' % instance['availability_zone']
        port_req_body = {'port': {'device_id': instance['uuid'],
                                  'device_owner': zone,
                                  'mac_address': 'XX:XX:XX:XX:XX:XX'}}
        available_macs = set(['XX:XX:XX:XX:XX:XX'])
        # Run the code.
        self.assertRaises(exception.PortInUse,
                          self.api._create_port,
                          neutronv2.get_client(self.context),
                          instance, net['id'], port_req_body,
                          available_macs=available_macs)
        # Assert the calls.
        create_port_mock.assert_called_once_with(port_req_body)
    def test_get_network_detail_not_found(self):
        # Neutron's NetworkNotFoundClient must be translated into Nova's
        # NetworkNotFound by API.get().
        api = neutronapi.API()
        expected_exc = exceptions.NetworkNotFoundClient()
        network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
        with mock.patch.object(client.Client, 'show_network',
                               side_effect=expected_exc) as (
            fake_show_network):
            self.assertRaises(exception.NetworkNotFound,
                              api.get,
                              self.context,
                              network_uuid)
            fake_show_network.assert_called_once_with(network_uuid)
    def test_deallocate_for_instance_uses_delete_helper(self):
        # deallocate_for_instance() should list the instance's ports and
        # delegate their removal to the _delete_ports helper with
        # raise_if_fail=True.
        # setup fake data
        instance = fake_instance.fake_instance_obj(self.context)
        port_data = {'ports': [{'id': str(uuid.uuid4())}]}
        ports = set([port['id'] for port in port_data.get('ports')])
        api = neutronapi.API()
        # setup mocks
        mock_client = mock.Mock()
        mock_client.list_ports.return_value = port_data
        with contextlib.nested(
            mock.patch.object(neutronv2, 'get_client',
                              return_value=mock_client),
            mock.patch.object(api, '_delete_ports')
        ) as (
            mock_get_client, mock_delete
        ):
            # run the code
            api.deallocate_for_instance(self.context, instance)
            # assert the calls
            mock_client.list_ports.assert_called_once_with(
                device_id=instance.uuid)
            mock_delete.assert_called_once_with(
                mock_client, instance, ports, raise_if_fail=True)
    def _test_delete_ports(self, expect_raise):
        # Helper: first delete_port call raises, second succeeds; whether the
        # failure propagates is controlled by raise_if_fail.
        results = [exceptions.NeutronClientException, None]
        mock_client = mock.Mock()
        with mock.patch.object(mock_client, 'delete_port',
                               side_effect=results):
            api = neutronapi.API()
            api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
                              raise_if_fail=expect_raise)
    def test_delete_ports_raise(self):
        # With raise_if_fail=True a client failure must propagate.
        self.assertRaises(exceptions.NeutronClientException,
                          self._test_delete_ports, True)
    def test_delete_ports_no_raise(self):
        # With raise_if_fail=False a client failure is swallowed.
        self._test_delete_ports(False)
    def test_delete_ports_never_raise_404(self):
        # A missing port (PortNotFoundClient) is never an error, even with
        # raise_if_fail=True: deleting an already-gone port is a no-op.
        mock_client = mock.Mock()
        mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
        api = neutronapi.API()
        api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
                          raise_if_fail=True)
        mock_client.delete_port.assert_called_once_with('port1')
    def test_deallocate_port_for_instance_fails(self):
        # If deleting the port fails, the failure must propagate and the
        # network info cache must NOT be refreshed.
        mock_client = mock.Mock()
        api = neutronapi.API()
        with contextlib.nested(
            mock.patch.object(neutronv2, 'get_client',
                              return_value=mock_client),
            mock.patch.object(api, '_delete_ports',
                              side_effect=exceptions.Unauthorized),
            mock.patch.object(api, 'get_instance_nw_info')
        ) as (
            get_client, delete_ports, get_nw_info
        ):
            self.assertRaises(exceptions.Unauthorized,
                              api.deallocate_port_for_instance,
                              self.context, instance={'uuid': 'fake'},
                              port_id='fake')
        # make sure that we didn't try to reload nw info
        self.assertFalse(get_nw_info.called)
class TestNeutronv2ModuleMethods(test.TestCase):
    """Tests for module-level helper functions of the neutronv2 API."""

    def test_gather_port_ids_and_networks_wrong_params(self):
        # _gather_port_ids_and_networks requires networks and port_ids to be
        # both present or both absent; a mismatch is a NovaException.
        api = neutronapi.API()

        # Test with networks not None and port_ids is None
        self.assertRaises(exception.NovaException,
                          api._gather_port_ids_and_networks,
                          'fake_context', 'fake_instance',
                          [{'network': {'name': 'foo'}}], None)

        # Test with networks is None and port_ids not None
        self.assertRaises(exception.NovaException,
                          api._gather_port_ids_and_networks,
                          'fake_context', 'fake_instance',
                          None, ['list', 'of', 'port_ids'])

    def test_ensure_requested_network_ordering_no_preference_ids(self):
        # With no preference the list must be left as-is.
        networks = [1, 2, 3]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x,
            networks,
            None)
        self.assertEqual(networks, [1, 2, 3])

    def test_ensure_requested_network_ordering_no_preference_hashes(self):
        # With no preference, dict entries keep their original order.
        networks = [{'id': 3}, {'id': 1}, {'id': 2}]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            networks,
            None)
        self.assertEqual(networks, [{'id': 3}, {'id': 1}, {'id': 2}])

    def test_ensure_requested_network_ordering_with_preference(self):
        # The list is reordered in place to match the preferred id order.
        networks = [{'id': 3}, {'id': 1}, {'id': 2}]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            networks,
            [1, 2, 3])
        self.assertEqual(networks, [{'id': 1}, {'id': 2}, {'id': 3}])
class TestNeutronv2Portbinding(TestNeutronv2Base):
    # Tests for the Neutron port-binding extension handling (binding:host_id
    # and binding:profile population, and host rebinding on migration).
    def test_allocate_for_instance_portbinding(self):
        self._allocate_for_instance(1, portbinding=True)
    def test_populate_neutron_extension_values_binding(self):
        # When the portbinding extension is reported by Neutron, the port
        # request body gets binding:host_id set but no binding:profile.
        api = neutronapi.API()
        neutronv2.get_client(mox.IgnoreArg()).AndReturn(
            self.moxed_client)
        self.moxed_client.list_extensions().AndReturn(
            {'extensions': [{'name': constants.PORTBINDING_EXT}]})
        self.mox.ReplayAll()
        host_id = 'my_host_id'
        instance = {'host': host_id}
        port_req_body = {'port': {}}
        api._populate_neutron_extension_values(self.context, instance,
                                               None, port_req_body)
        self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
        self.assertFalse(port_req_body['port'].get('binding:profile'))
    @mock.patch.object(pci_whitelist, 'get_pci_device_devspec')
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    def test_populate_neutron_extension_values_binding_sriov(self,
                                         mock_get_instance_pci_devs,
                                         mock_get_pci_device_devspec):
        # For an SR-IOV PCI device, binding:profile must carry the vendor
        # info, PCI slot and the physical network from the whitelist spec.
        api = neutronapi.API()
        host_id = 'my_host_id'
        instance = {'host': host_id}
        port_req_body = {'port': {}}
        pci_req_id = 'my_req_id'
        pci_dev = {'vendor_id': '1377',
                   'product_id': '0047',
                   'address': '0000:0a:00.1',
                  }
        PciDevice = collections.namedtuple('PciDevice',
                               ['vendor_id', 'product_id', 'address'])
        mydev = PciDevice(**pci_dev)
        profile = {'pci_vendor_info': '1377:0047',
                   'pci_slot': '0000:0a:00.1',
                   'physical_network': 'phynet1',
                  }
        mock_get_instance_pci_devs.return_value = [mydev]
        devspec = mock.Mock()
        devspec.get_tags.return_value = {'physical_network': 'phynet1'}
        mock_get_pci_device_devspec.return_value = devspec
        api._populate_neutron_binding_profile(instance,
                                              pci_req_id, port_req_body)
        self.assertEqual(port_req_body['port']['binding:profile'], profile)
    def test_migrate_instance_finish_binding_false(self):
        # Without the portbinding extension, finishing a migration is a no-op.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        api._has_port_binding_extension(mox.IgnoreArg(),
                                        refresh_cache=True).AndReturn(False)
        self.mox.ReplayAll()
        api.migrate_instance_finish(self.context, None, None)
    def test_migrate_instance_finish_binding_true(self):
        # With the extension, each instance port is rebound to dest_compute.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        api._has_port_binding_extension(mox.IgnoreArg(),
                                        refresh_cache=True).AndReturn(True)
        neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
            self.moxed_client)
        search_opts = {'device_id': self.instance['uuid'],
                       'tenant_id': self.instance['project_id']}
        ports = {'ports': [{'id': 'test1'}]}
        self.moxed_client.list_ports(**search_opts).AndReturn(ports)
        migration = {'source_compute': self.instance.get('host'),
                     'dest_compute': 'dest_host', }
        port_req_body = {'port':
                         {'binding:host_id': migration['dest_compute']}}
        self.moxed_client.update_port('test1',
                                      port_req_body).AndReturn(None)
        self.mox.ReplayAll()
        api.migrate_instance_finish(self.context, self.instance, migration)
    def test_migrate_instance_finish_binding_true_exception(self):
        # A failed port update during migration must bubble up to the caller.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
        api._has_port_binding_extension(mox.IgnoreArg(),
                                        refresh_cache=True).AndReturn(True)
        neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
            self.moxed_client)
        search_opts = {'device_id': self.instance['uuid'],
                       'tenant_id': self.instance['project_id']}
        ports = {'ports': [{'id': 'test1'}]}
        self.moxed_client.list_ports(**search_opts).AndReturn(ports)
        migration = {'source_compute': self.instance.get('host'),
                     'dest_compute': 'dest_host', }
        port_req_body = {'port':
                         {'binding:host_id': migration['dest_compute']}}
        self.moxed_client.update_port('test1',
                                      port_req_body).AndRaise(
            Exception("fail to update port"))
        self.mox.ReplayAll()
        self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
                          api.migrate_instance_finish,
                          self.context, self.instance, migration)
    def test_associate_not_implemented(self):
        # The Neutron API does not support associate().
        api = neutronapi.API()
        self.assertRaises(NotImplementedError,
                          api.associate,
                          self.context, 'id')
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
    # Tests for allocation with the extra-dhcp-opt Neutron extension.
    def setUp(self):
        super(TestNeutronv2ExtraDhcpOpts, self).setUp()
        neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
            self.moxed_client)
    def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
        self._allocate_for_instance(1, extra_dhcp_opts=False)
    def test_allocate_for_instance_extradhcpopts(self):
        # NOTE(review): '123.123.123.456' is not a valid IPv4 address; it is
        # presumably just an opaque option value for this test -- confirm.
        dhcp_opts = [{'opt_name': 'bootfile-name',
                      'opt_value': 'pxelinux.0'},
                     {'opt_name': 'tftp-server',
                      'opt_value': '123.123.123.123'},
                     {'opt_name': 'server-ip-address',
                      'opt_value': '123.123.123.456'}]
        self._allocate_for_instance(1, dhcp_options=dhcp_opts)
class TestNeutronClientForAdminScenarios(test.TestCase):
    # Verifies that neutronv2.get_client() builds the python-neutronclient
    # Client with admin credentials in the various admin scenarios.
    def _test_get_client_for_admin(self, use_id=False, admin_context=False):
        def client_mock(*args, **kwargs):
            client.Client.httpclient = mock.MagicMock()
        self.flags(auth_strategy=None, group='neutron')
        self.flags(url='http://anyhost/', group='neutron')
        self.flags(url_timeout=30, group='neutron')
        if use_id:
            self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
            self.flags(admin_user_id='admin_user_id', group='neutron')
        if admin_context:
            my_context = context.get_admin_context()
        else:
            my_context = context.RequestContext('userid', 'my_tenantid',
                                                auth_token='token')
        self.mox.StubOutWithMock(client.Client, "__init__")
        # These are the exact constructor kwargs we expect get_client() to
        # derive from the [neutron] config options.
        kwargs = {
            'auth_url': CONF.neutron.admin_auth_url,
            'password': CONF.neutron.admin_password,
            'endpoint_url': CONF.neutron.url,
            'auth_strategy': None,
            'timeout': CONF.neutron.url_timeout,
            'insecure': False,
            'ca_cert': None,
            'token': None}
        if use_id:
            kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
            kwargs['user_id'] = CONF.neutron.admin_user_id
        else:
            kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
            kwargs['username'] = CONF.neutron.admin_username
        client.Client.__init__(**kwargs).WithSideEffects(client_mock)
        self.mox.ReplayAll()
        # clean global
        token_store = neutronv2.AdminTokenStore.get()
        token_store.admin_auth_token = None
        if admin_context:
            # Note that the context does not contain a token but is
            # an admin context which will force an elevation to admin
            # credentials.
            neutronv2.get_client(my_context)
        else:
            # Note that the context is not elevated, but the True is passed in
            # which will force an elevation to admin credentials even though
            # the context has an auth_token.
            neutronv2.get_client(my_context, True)
    def test_get_client_for_admin(self):
        self._test_get_client_for_admin()
    def test_get_client_for_admin_with_id(self):
        self._test_get_client_for_admin(use_id=True)
    def test_get_client_for_admin_context(self):
        self._test_get_client_for_admin(admin_context=True)
    def test_get_client_for_admin_context_with_id(self):
        self._test_get_client_for_admin(use_id=True, admin_context=True)
| redhat-openstack/nova | nova/tests/network/test_neutronv2.py | Python | apache-2.0 | 147,363 |
"""
Copyright 2015 Stefano Terna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import json
import logging
from iottly import rpi_agent
def main():
    """Example iottly agent: periodic loop functions plus an echo callback."""
    #define as many loop functions
    #loop functions are being runned in an infinite loop
    # NOTE: the loop and callback closures reference `agent`, which is only
    # assigned further down; that is fine because they run after it exists.
    def loop1():
        logging.info('loop1')
        #a = 1/0
        #msg is a dictionary (json):
        msg = {"timerevent": {"loop1message":1}}
        agent.send_msg(msg)
        time.sleep(1)
    def loop2():
        logging.info('loop2')
        #msg is a dictionary (json):
        # NOTE(review): the key is "loop1message" here as well -- looks like
        # a copy/paste leftover; confirm whether "loop2message" was intended.
        msg = {"timerevent": {"loop1message":2}}
        agent.send_msg(msg)
        time.sleep(1)
    #define the callback to receive messages from broker:
    def new_message(msg):
        #received message is a dictionary
        logging.info(msg)
        # echo the message back, and shut the agent down on an "ECHO" key
        agent.send_msg(msg)
        if "ECHO" in msg.keys():
            agent.close()
    #instantiate the agent passing:
    # - the message callback
    # - a list with the loop functions
    #agent = rpi_agent.RPiIottlyAgent(new_message, [loop1])
    agent = rpi_agent.RPiIottlyAgent(new_message, [])
    agent.start()
# Run the example agent when executed as a script.
if __name__ == '__main__':
    main()
| iottly/iottly-device-agent-py | iottly-device-agent-py/example.py | Python | apache-2.0 | 1,652 |
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Xevaquor'
__license__ = 'MIT'
from pacman import *
class HumanAgent(object):
    """Agent driven by keyboard input.

    On each turn it scans the legal moves (excluding 'Stop') and picks the
    last one whose key is currently pressed in ``input_state``; if no key
    is pressed it stands still.
    """

    def __init__(self, agent_index):
        # Index identifying this agent within the game.
        self.agent_index = agent_index

    def make_decision(self, state, game, input_state):
        """Return the move to play for the given state.

        :param state: current game state, forwarded to game.get_legal_moves.
        :param game: object providing get_legal_moves(state).
        :param input_state: mapping of move name -> bool (key pressed).
        :return: the chosen move name; 'Stop' when nothing is pressed.
        """
        legal_moves = game.get_legal_moves(state)
        move = 'Stop'
        # Scan in order; the last pressed legal move wins (matches the
        # original behavior when several keys are held down at once).
        for candidate in (m for m in legal_moves if m != 'Stop'):
            if input_state[candidate]:
                move = candidate
        return move
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# roboturtle documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 19 00:46:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# -- General configuration ---------------------------------------------------

# Sphinx extension modules enabled for this project.
extensions = [
    'sphinx.ext.autodoc',
]

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Both reStructuredText and Markdown sources are supported; Markdown is
# parsed by recommonmark's CommonMark parser.
source_suffix = ['.rst', '.md']
source_parsers = {
    '.md': CommonMarkParser,
}

# The document holding the root toctree.
master_doc = 'index'

# Project metadata.
project = 'roboturtle'
copyright = '2016, Nicholas A. Del Grosso'
author = 'Nicholas A. Del Grosso'

# The short X.Y version and the full release string.
version = '0.1'
release = '0.1'

# Language for autogenerated content (None = default/English).
language = None

# Patterns, relative to the source directory, excluded from the build.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Pygments syntax-highlighting style.
pygments_style = 'sphinx'

# Do not render todo / todoList directives in the output.
todo_include_todos = False

# -- Options for HTML output -------------------------------------------------

html_theme = 'sphinx_rtd_theme'  # can also use 'alabaster'

# Custom static files (copied after the builtin static files).
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'roboturtledoc'

# -- Options for LaTeX output ------------------------------------------------

# No LaTeX overrides (paper size, fonts, preamble) are customized.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'roboturtle.tex', 'roboturtle Documentation',
     'Nicholas A. Del Grosso', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'roboturtle', 'roboturtle Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'roboturtle', 'roboturtle Documentation',
     author, 'roboturtle', 'One line description of project.',
     'Miscellaneous'),
]
| erfindergarden/roboturtle | docs/conf.py | Python | mit | 9,619 |
from .question_answer_similarity import QuestionAnswerSimilarity
from .tuple_entailment import MultipleChoiceTupleEntailmentModel
from .tuple_inference import TupleInferenceModel
# Name -> class registry for the multiple-choice QA models in this package.
concrete_models = { # pylint: disable=invalid-name
    'QuestionAnswerSimilarity': QuestionAnswerSimilarity,
    'MultipleChoiceTupleEntailmentModel': MultipleChoiceTupleEntailmentModel,
    'TupleInferenceModel': TupleInferenceModel,
}
| matt-gardner/deep_qa | deep_qa/models/multiple_choice_qa/__init__.py | Python | apache-2.0 | 438 |
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_true
import networkx as nx
import dagology as dag
class TestCubeSpace(object):
    """Unit tests for the cube space model."""

    def test_number_of_nodes(self):
        # The graph must contain exactly N nodes for any edge probability.
        N = 80
        for p in [0.0, 0.1, 0.9, 1.0]:
            G = dag.cube_space_graph(N, 5, p)
            assert_equal(G.number_of_nodes(), N)

    def test_number_of_edges(self):
        # p=0 yields no edges at all; p=1 in one dimension yields the
        # complete DAG on N nodes, i.e. N*(N-1)/2 edges.
        N = 100
        G = dag.cube_space_graph(N, 2, 0.)
        assert_equal(G.number_of_edges(), 0)
        G = dag.cube_space_graph(N, 1, 1.)
        assert_equal(G.number_of_edges(), N * (N - 1) // 2)
| JamesClough/dagology | dagology/generators/tests/test_cube_space.py | Python | mit | 726 |
"""SCons.Tool.gs
Tool-specific initialization for Ghostscript.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gs.py 4369 2009/09/19 15:58:29 scons"
import SCons.Action
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()

# Platforms with a non-standard executable name; everything else uses 'gs'.
_gs_executables = {'os2': 'gsos2', 'win32': 'gswin32c'}
gs = _gs_executables.get(platform, 'gs')

# The Action is created lazily by generate(); see below.
GhostscriptAction = None
def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # Create the shared Action object only once, on first use.
    if GhostscriptAction is None:
        GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
    # Reuse the generic PDF builder and attach the Ghostscript action
    # for .ps sources.
    import pdf
    pdf.generate(env)
    bld = env['BUILDERS']['PDF']
    bld.add_action('.ps', GhostscriptAction)
    # Construction variables consumed by $GSCOM.
    env['GS'] = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
    """Detect whether a usable Ghostscript executable is available."""
    # An explicitly-configured PS2PDF tool takes precedence.
    if 'PS2PDF' in env:
        return env.Detect(env['PS2PDF'])
    # Otherwise look for the platform-specific gs binary, falling back to
    # a plain PATH search.
    return env.Detect(gs) or SCons.Util.WhereIs(gs)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Tool/gs.py | Python | isc | 2,545 |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimization_config.py."""
import tensorflow as tf
from official.modeling.optimization.configs import learning_rate_config as lr_cfg
from official.modeling.optimization.configs import optimization_config
from official.modeling.optimization.configs import optimizer_config as opt_cfg
class OptimizerConfigTest(tf.test.TestCase):
  """Tests for the OptimizationConfig oneof fields."""

  def test_no_optimizer(self):
    # An empty config selects no optimizer member.
    config = optimization_config.OptimizationConfig({})
    self.assertEqual(config.optimizer.get(), None)

  def test_no_lr_schedule(self):
    # An empty config selects no learning-rate schedule.
    config = optimization_config.OptimizationConfig({})
    self.assertEqual(config.learning_rate.get(), None)

  def test_no_warmup_schedule(self):
    # An empty config selects no warmup schedule.
    config = optimization_config.OptimizationConfig({})
    self.assertEqual(config.warmup.get(), None)

  def test_config(self):
    # A fully-specified config resolves each oneof to the default
    # config object of the selected type.
    config_dict = {
        'optimizer': {
            'type': 'sgd',
            'sgd': {}  # default config
        },
        'learning_rate': {
            'type': 'polynomial',
            'polynomial': {}
        },
        'warmup': {
            'type': 'linear'
        }
    }
    opt_config = optimization_config.OptimizationConfig(config_dict)
    self.assertEqual(opt_config.optimizer.get(), opt_cfg.SGDConfig())
    self.assertEqual(opt_config.learning_rate.get(),
                     lr_cfg.PolynomialLrConfig())
    self.assertEqual(opt_config.warmup.get(), lr_cfg.LinearWarmupConfig())
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| tombstone/models | official/modeling/optimization/configs/optimization_config_test.py | Python | apache-2.0 | 2,164 |
from mendeley.exception import MendeleyException
from mendeley.models.catalog import *
from mendeley.resources.base import add_query_params, ListResource, GetByIdResource
class Catalog(GetByIdResource):
"""
Top-level resource for accessing catalog documents.
"""
_url = '/catalog'
def __init__(self, session):
self.session = session
def get(self, id, view=None):
"""
Retrieves a catalog document by ID.
:param id: the ID of the document to get.
:param view: the view to get. One of 'bib', 'client', 'stats', 'all'.
:return: a :class:`CatalogDocument <mendeley.models.catalog.CatalogDocument>`.
"""
return super(Catalog, self).get(id, view=view)
def by_identifier(self, arxiv=None, doi=None, isbn=None, issn=None, pmid=None, scopus=None, filehash=None,
view=None):
"""
Retrieves a catalog document by an external identifier. Only one identifier may be specified.
:param arxiv: ArXiV ID.
:param doi: DOI.
:param isbn: ISBN.
:param issn: ISSN.
:param pmid: PubMed ID.
:param scopus: Scopus ID (EID).
:param filehash: SHA-1 filehash.
:param view: the view to get. One of 'bib', 'client', 'stats', 'all'.
:return: a :class:`CatalogDocument <mendeley.models.catalog.CatalogDocument>`.
"""
url = add_query_params('/catalog', {'arxiv': arxiv, 'doi': doi, 'isbn': isbn, 'issn': issn, 'pmid': pmid,
'scopus': scopus, 'filehash': filehash, 'view': view})
obj_type = view_type(view)
rsp = self.session.get(url, headers={'Accept': obj_type.content_type})
if len(rsp.json()) == 0:
raise MendeleyException('Catalog document not found')
return obj_type(self.session, rsp.json()[0])
def lookup(self, arxiv=None, doi=None, pmid=None, filehash=None, title=None, authors=None, year=None, source=None,
view=None):
"""
Finds the closest matching catalog document to a supplied set of metadata.
:param arxiv: ArXiV ID.
:param doi: DOI.
:param pmid: PubMed ID.
:param filehash: SHA-1 filehash.
:param title: Title.
:param authors: Authors.
:param year: Year.
:param source: Source.
:param view: the view to get. One of 'bib', 'client', 'stats', 'all'.
:return: a :class:`CatalogDocument <mendeley.models.catalog.CatalogDocument>`.
"""
url = add_query_params('/metadata', {'arxiv': arxiv, 'doi': doi, 'pmid': pmid, 'filehash': filehash,
'title': title, 'authors': authors, 'year': year, 'source': source})
obj_type = view_type(view)
rsp = self.session.get(url, headers={'Accept': 'application/vnd.mendeley-document-lookup.1+json'})
return LookupResponse(self.session, rsp.json(), view, obj_type)
def search(self, query, view=None):
"""
Searches the catalog for documents.
:param query: the search query to execute.
:param view: the view to get. One of 'bib', 'client', 'stats', 'all'.
:return: a :class:`CatalogSearch <mendeley.resources.catalog.CatalogSearch>` resource, from which results can be
retrieved.
"""
return CatalogSearch(self.session, query=query, view=view)
def advanced_search(self, title=None, author=None, source=None, abstract=None, min_year=None, max_year=None,
open_access=None, view=None):
"""
Executes an advanced catalog search, where individual fields can be searched on.
:param title: Title.
:param author: Author.
:param source: Source.
:param abstract: Abstract.
:param min_year: Minimum year for documents to return.
:param max_year: Maximum year for documents to return.
:param open_access: If 'true', only returns open access documents.
:param view: the view to get. One of 'bib', 'client', 'stats', 'all'.
:return: a :class:`CatalogSearch <mendeley.resources.catalog.CatalogSearch>` resource, from which results can be
retrieved.
"""
return CatalogSearch(self.session, title=title, author=author, source=source, abstract=abstract,
min_year=min_year, max_year=max_year, open_access=open_access, view=view)
@property
def _session(self):
    # Session handle consumed by the LookupResponse/pagination machinery.
    return self.session
def _obj_type(self, **kwargs):
    # Resolve the catalog document model class for the requested 'view' kwarg.
    return view_type(kwargs.get('view'))
class CatalogSearch(ListResource):
    """Resource exposing the results of a catalog search."""

    def __init__(self, session, **kwargs):
        self.session = session
        self.params = kwargs

    def list(self, page_size=None):
        """Retrieve search results as a paginated collection.

        :param page_size: the number of search results to return on each page.
                          Defaults to 20.
        :return: a :class:`Page <mendeley.pagination.Page>` of
                 :class:`CatalogDocuments <mendeley.models.catalog.CatalogDocument>`.
        """
        return super(CatalogSearch, self).list(page_size)

    def iter(self, page_size=None):
        """Retrieve search results as an iterator.

        :param page_size: the number of search results to retrieve at a time.
                          Defaults to 20.
        :return: an iterator of
                 :class:`CatalogDocuments <mendeley.models.catalog.CatalogDocument>`.
        """
        return super(CatalogSearch, self).iter(page_size)

    @property
    def _url(self):
        # Encode all search parameters into the request URL.
        return add_query_params('/search/catalog', self.params)

    @property
    def _session(self):
        return self.session

    def _obj_type(self, **kwargs):
        # The document model class is fixed by the view chosen at search time.
        return view_type(self.params['view'])
def view_type(view):
    """Map a 'view' name to its catalog document model class.

    Unknown or missing views fall back to the core CatalogDocument.
    """
    return {
        'bib': CatalogBibDocument,
        'client': CatalogClientDocument,
        'stats': CatalogStatsDocument,
        'all': CatalogAllDocument,
        'core': CatalogDocument
    }.get(view, CatalogDocument) | Mendeley/mendeley-python-sdk | mendeley/resources/catalog.py | Python | apache-2.0 | 6,150 |
#!/usr/bin/env python3
# © 2015 James R. Barlow: github.com/jbarlow83
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import atexit
import logging
import re
from collections import defaultdict
from contextlib import ExitStack
from decimal import Decimal
from enum import Enum
from functools import partial
from math import hypot, inf, isclose
from os import PathLike
from pathlib import Path
from typing import (
Container,
Dict,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Tuple,
Union,
)
from warnings import warn
from pikepdf import (
Object,
Pdf,
PdfImage,
PdfInlineImage,
PdfMatrix,
parse_content_stream,
)
from ocrmypdf._concurrent import Executor, SerialExecutor
from ocrmypdf.exceptions import EncryptedPdfError, InputFileError
from ocrmypdf.helpers import Resolution, available_cpu_count, pikepdf_enable_mmap
from ocrmypdf.pdfinfo.layout import get_page_analysis, get_text_boxes
logger = logging.getLogger()

# Closed sets of colorspaces and stream encodings this module can classify.
Colorspace = Enum('Colorspace', 'gray rgb cmyk lab icc index sep devn pattern jpeg2000')

Encoding = Enum(
    'Encoding', 'ccitt jpeg jpeg2000 jbig2 asciihex ascii85 lzw flate runlength'
)

# Map PDF colorspace names to the Colorspace enum.
FRIENDLY_COLORSPACE: Dict[str, Colorspace] = {
    '/DeviceGray': Colorspace.gray,
    '/CalGray': Colorspace.gray,
    '/DeviceRGB': Colorspace.rgb,
    '/CalRGB': Colorspace.rgb,
    '/DeviceCMYK': Colorspace.cmyk,
    '/Lab': Colorspace.lab,
    '/ICCBased': Colorspace.icc,
    '/Indexed': Colorspace.index,
    '/Separation': Colorspace.sep,
    '/DeviceN': Colorspace.devn,
    '/Pattern': Colorspace.pattern,
    '/G': Colorspace.gray,  # Abbreviations permitted in inline images
    '/RGB': Colorspace.rgb,
    '/CMYK': Colorspace.cmyk,
    '/I': Colorspace.index,
}

# Map PDF stream filter names to the Encoding enum.
FRIENDLY_ENCODING: Dict[str, Encoding] = {
    '/CCITTFaxDecode': Encoding.ccitt,
    '/DCTDecode': Encoding.jpeg,
    '/JPXDecode': Encoding.jpeg2000,
    '/JBIG2Decode': Encoding.jbig2,
    '/CCF': Encoding.ccitt,  # Abbreviations permitted in inline images
    '/DCT': Encoding.jpeg,
    '/AHx': Encoding.asciihex,
    '/A85': Encoding.ascii85,
    '/LZW': Encoding.lzw,
    '/Fl': Encoding.flate,
    '/RL': Encoding.runlength,
}

# Number of color components per colorspace (where it is fixed).
FRIENDLY_COMP: Dict[Colorspace, int] = {
    Colorspace.gray: 1,
    Colorspace.rgb: 3,
    Colorspace.cmyk: 4,
    Colorspace.lab: 3,
    Colorspace.index: 1,
}
UNIT_SQUARE = (1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
def _is_unit_square(shorthand):
values = map(float, shorthand)
pairwise = zip(values, UNIT_SQUARE)
return all(isclose(a, b, rel_tol=1e-3) for a, b in pairwise)
class XobjectSettings(NamedTuple):
    """One drawing of an XObject: its resource name, the CTM shorthand in
    effect at the draw site, and the graphics stack depth at that moment."""

    name: str
    shorthand: Tuple[float, float, float, float, float, float]
    stack_depth: int
class InlineSettings(NamedTuple):
    """One drawing of an inline image: the image object, the CTM shorthand in
    effect at the draw site, and the graphics stack depth at that moment."""

    iimage: PdfInlineImage
    shorthand: Tuple[float, float, float, float, float, float]
    stack_depth: int
class ContentsInfo(NamedTuple):
    """Everything found while interpreting one content stream."""

    xobject_settings: List[XobjectSettings]
    inline_images: List[InlineSettings]
    found_vector: bool  # any vector stroke/fill operator seen
    found_text: bool  # any text-showing operator seen
    name_index: Mapping[str, List[XobjectSettings]]  # draws grouped by XObject name
class TextboxInfo(NamedTuple):
    """Reduced, pickleable view of a pdfminer text box."""

    bbox: Tuple[float, float, float, float]
    is_visible: bool
    is_corrupt: bool
class VectorMarker:
    """Sentinel yielded when vector drawing operators are found in a stream."""

    pass
class TextMarker:
    """Sentinel yielded when text-showing operators are found in a stream."""

    pass
def _normalize_stack(graphobjs):
"""Convert runs of qQ's in the stack into single graphobjs"""
for operands, operator in graphobjs:
operator = str(operator)
if re.match(r'Q*q+$', operator): # Zero or more Q, one or more q
for char in operator: # Split into individual
yield ([], char) # Yield individual
else:
yield (operands, operator)
def _interpret_contents(contentstream: Object, initial_shorthand=UNIT_SQUARE):
    """Interpret the PDF content stream.

    The stack represents the state of the PDF graphics stack.  We are only
    interested in the current transformation matrix (CTM) so we only track
    this object; a full implementation would need to track many other items.

    The CTM is initialized to the mapping from user space to device space.
    PDF units are 1/72".  In a PDF viewer or printer this matrix is initialized
    to the transformation to device space.  For example if set to
    (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches.

    Images are always considered to be (0, 0) -> (1, 1).  Before drawing an
    image there should be a 'cm' that sets up an image coordinate system
    where drawing from (0, 0) -> (1, 1) will draw on the desired area of the
    page.

    PDF units suit our needs so we initialize ctm to the identity matrix.

    According to the PDF specification, the maximum stack depth is 32. Other
    viewers tolerate some amount beyond this.  We issue a warning if the
    stack depth exceeds the spec limit and set a hard limit beyond this to
    bound our memory requirements.  If the stack underflows behavior is
    undefined in the spec, but we just pretend nothing happened and leave the
    CTM unchanged.

    :return: a ContentsInfo summarizing draws, inline images, and whether any
        vector or text operators appeared.
    """
    stack = []
    ctm = PdfMatrix(initial_shorthand)
    xobject_settings: List[XobjectSettings] = []
    inline_images: List[InlineSettings] = []
    name_index = defaultdict(lambda: [])
    found_vector = False
    found_text = False
    # Operators that paint vector graphics (stroke/fill variants).
    vector_ops = set('S s f F f* B B* b b*'.split())
    # Operators that actually show text on the page.
    text_showing_ops = set("""TJ Tj " '""".split())
    # Image drawing plus CTM/stack maintenance operators.
    image_ops = set('BI ID EI q Q Do cm'.split())
    # Only these operators are parsed; everything else is skipped for speed.
    operator_whitelist = ' '.join(vector_ops | text_showing_ops | image_ops)

    for n, graphobj in enumerate(
        _normalize_stack(parse_content_stream(contentstream, operator_whitelist))
    ):
        operands, operator = graphobj
        if operator == 'q':
            stack.append(ctm)
            if len(stack) > 32:  # See docstring
                if len(stack) > 128:
                    raise RuntimeError(
                        "PDF graphics stack overflowed hard limit, operator %i" % n
                    )
                warn("PDF graphics stack overflowed spec limit")
        elif operator == 'Q':
            try:
                ctm = stack.pop()
            except IndexError:
                # Keeping the ctm the same seems to be the only sensible thing
                # to do. Just pretend nothing happened, keep calm and carry on.
                warn("PDF graphics stack underflowed - PDF may be malformed")
        elif operator == 'cm':
            # Concatenate the new matrix onto the CTM.
            ctm = PdfMatrix(operands) @ ctm
        elif operator == 'Do':
            # Record a draw of an XObject, with the CTM at the draw site.
            image_name = operands[0]
            settings = XobjectSettings(
                name=image_name, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            xobject_settings.append(settings)
            name_index[str(image_name)].append(settings)
        elif operator == 'INLINE IMAGE':  # BI/ID/EI are grouped into this
            iimage = operands[0]
            inline = InlineSettings(
                iimage=iimage, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            inline_images.append(inline)
        elif operator in vector_ops:
            found_vector = True
        elif operator in text_showing_ops:
            found_text = True

    return ContentsInfo(
        xobject_settings=xobject_settings,
        inline_images=inline_images,
        found_vector=found_vector,
        found_text=found_text,
        name_index=name_index,
    )
def _get_dpi(ctm_shorthand, image_size) -> Resolution:
    """Given the transformation matrix and image size, find the image DPI.

    PDFs do not store image resolution; the content stream describes where an
    image is rasterized, and the effective resolution is the ratio of the
    pixel count to the drawn size. An image may be scaled, cropped, translated
    and rotated; the magnitudes of the transformed width-axis vector (1, 0)
    and height-axis vector (0, 1) give the drawn dimensions in PDF units
    (1/72"), with translation cancelling out algebraically. A drawn size of
    zero maps to infinite DPI. It does not matter if the image is partially
    cropped, or even out of the /MediaBox.
    """
    a, b, c, d = ctm_shorthand[:4]

    # Magnitudes of the transformed axis vectors = drawn size in points.
    drawn_width = hypot(a, b)
    drawn_height = hypot(c, d)

    def axis_dpi(pixels, drawn_pt, points_per_inch=72.0):
        # pixels per point of default user space, scaled to pixels per inch
        if drawn_pt == 0:
            return inf
        return (pixels / drawn_pt) * points_per_inch

    return Resolution(
        axis_dpi(image_size[0], drawn_width),
        axis_dpi(image_size[1], drawn_height),
    )
class ImageInfo:
    """Facts about a single image drawn on a page or Form XObject.

    Built from either a regular XObject image or an inline image, together
    with the CTM shorthand in effect when the image was drawn, from which
    the effective DPI is computed on demand.
    """

    DPI_PREC = Decimal('1.000')

    _comp: Optional[int]  # number of color components, None if undetermined
    _name: str

    def __init__(
        self,
        *,
        name='',
        pdfimage: Optional[Object] = None,
        inline: Optional[PdfInlineImage] = None,
        shorthand=None,
    ):
        self._name = str(name)
        self._shorthand = shorthand

        pim: Union[PdfInlineImage, PdfImage]
        if inline is not None:
            self._origin = 'inline'
            pim = inline
        elif pdfimage is not None:
            self._origin = 'xobject'
            pim = PdfImage(pdfimage)
        else:
            raise ValueError("Either pdfimage or inline must be set")
        self._width = pim.width
        self._height = pim.height

        # If /ImageMask is true, then this image is a stencil mask
        # (Images that draw with this stencil mask will have a reference to
        # it in their /Mask, but we don't actually need that information)
        if pim.image_mask:
            self._type = 'stencil'
        else:
            self._type = 'image'

        self._bpc = int(pim.bits_per_component)
        try:
            # Only the first filter is considered when classifying encoding.
            self._enc = FRIENDLY_ENCODING.get(pim.filters[0])
        except IndexError:
            # No filters at all: uncompressed/unclassified stream.
            self._enc = None

        try:
            self._color = FRIENDLY_COLORSPACE.get(pim.colorspace or '')
        except NotImplementedError:
            self._color = None

        if self._enc == Encoding.jpeg2000:
            # JPEG 2000 carries its own colorspace; classify it as such.
            self._color = Colorspace.jpeg2000

        if self._color == Colorspace.icc:
            # Check the ICC profile to determine actual colorspace
            pim_icc = pim.icc
            if pim_icc.profile.xcolor_space == 'GRAY':
                self._comp = 1
            elif pim_icc.profile.xcolor_space == 'CMYK':
                self._comp = 4
            else:
                self._comp = 3
        else:
            if isinstance(self._color, Colorspace):
                self._comp = FRIENDLY_COMP.get(self._color)
            else:
                self._comp = None

            # Bit of a hack... infer grayscale if component count is uncertain
            # but encoding only supports monochrome.
            if self._comp is None and self._enc in (Encoding.ccitt, Encoding.jbig2):
                self._comp = FRIENDLY_COMP[Colorspace.gray]

    @property
    def name(self):
        # Resource name the image was drawn under ('' for inline images).
        return self._name

    @property
    def type_(self):
        # 'stencil' for image masks, otherwise 'image'.
        return self._type

    @property
    def width(self):
        # Width in pixels.
        return self._width

    @property
    def height(self):
        # Height in pixels.
        return self._height

    @property
    def bpc(self):
        # Bits per component.
        return self._bpc

    @property
    def color(self):
        # Colorspace enum member, or '?' if it could not be determined.
        return self._color if self._color is not None else '?'

    @property
    def comp(self):
        # Component count, or '?' if it could not be determined.
        return self._comp if self._comp is not None else '?'

    @property
    def enc(self):
        # Encoding enum member, or 'image' if unclassified.
        return self._enc if self._enc is not None else 'image'

    @property
    def renderable(self):
        # Finite DPI plus non-negative dimensions means we can raster it.
        return self.dpi.is_finite and self.width >= 0 and self.height >= 0

    @property
    def dpi(self):
        # Effective resolution at the draw site (computed from the CTM).
        return _get_dpi(self._shorthand, (self._width, self._height))

    def __repr__(self):
        # Gather all public attributes/properties for display.
        class_locals = {
            attr: getattr(self, attr, None)
            for attr in dir(self)
            if not attr.startswith('_')
        }
        return (
            "<ImageInfo '{name}' {type_} {width}x{height} {color} "
            "{comp} {bpc} {enc} {dpi}>"
        ).format(**class_locals)
def _find_inline_images(contentsinfo: ContentsInfo) -> Iterator[ImageInfo]:
    """Yield an ImageInfo for every inline image drawn in the content stream."""
    for index, settings in enumerate(contentsinfo.inline_images):
        yield ImageInfo(
            name='inline-%02d' % index,
            shorthand=settings.shorthand,
            inline=settings.iimage,
        )
def _image_xobjects(container) -> Iterator[Tuple[Object, str]]:
    """Search for all XObject-based images in the container.

    Usually the container is a page, but it could also be a Form XObject
    that contains images. Form XObjects themselves are filtered out here
    and dealt with elsewhere.

    Yields (image, name) pairs; the name is reported alongside the object
    because the object does not know its own name.
    """
    if '/Resources' not in container:
        return
    resources = container['/Resources']
    if '/XObject' not in resources:
        return
    xobjects = resources['/XObject'].as_dict()
    for name in xobjects:
        candidate: Object = xobjects[name]
        # Skip entries with no /Subtype; only /Image XObjects are wanted.
        if '/Subtype' in candidate and candidate['/Subtype'] == '/Image':
            yield candidate, name
def _find_regular_images(
    container: Object, contentsinfo: ContentsInfo
) -> Iterator[ImageInfo]:
    """Yield one ImageInfo per *drawing* of each /Resources /XObject image.

    Usually the container is a page, but it could also be a Form XObject
    that contains images. Because the same image may be drawn several times
    at different scales, each draw is reported with its own CTM so the DPI
    at draw time is captured.
    """
    for pdfimage, name in _image_xobjects(container):
        if name not in contentsinfo.name_index:
            continue  # image exists in resources but is never drawn
        for draw in contentsinfo.name_index[name]:
            if draw.stack_depth == 0 and _is_unit_square(draw.shorthand):
                # At least one PDF in the wild (and test suite) draws an image
                # when the graphics stack depth is 0, meaning that the image
                # gets drawn into a square of 1x1 PDF units (or 1/72",
                # or 0.35 mm). The equivalent DPI will be >100,000. Exclude
                # these from our DPI calculation for the page.
                continue
            yield ImageInfo(
                name=draw.name, pdfimage=pdfimage, shorthand=draw.shorthand
            )
def _find_form_xobject_images(pdf: Pdf, container: Object, contentsinfo: ContentsInfo):
    """Find any images that are in Form XObjects in the container.

    The container may be a page, or a parent Form XObject. For each time a
    Form XObject is drawn, recurse into it with the CTM that was in effect
    at the draw site, so nested images get the correct effective resolution.
    """
    if '/Resources' not in container:
        return
    resources = container['/Resources']
    if '/XObject' not in resources:
        return
    xobjs = resources['/XObject'].as_dict()
    for xobj in xobjs:
        candidate = xobjs[xobj]
        # Guard against XObjects that lack a /Subtype key (seen in malformed
        # PDFs); previously this raised KeyError. Mirrors _image_xobjects.
        if '/Subtype' not in candidate or candidate['/Subtype'] != '/Form':
            continue
        form_xobject = candidate
        for settings in contentsinfo.xobject_settings:
            if settings.name != xobj:
                continue

            # Find images once for each time this Form XObject is drawn.
            # This could be optimized to cache the multiple drawing events
            # but in practice both Form XObjects and multiple drawing of the
            # same object are both very rare.
            ctm_shorthand = settings.shorthand
            yield from _process_content_streams(
                pdf=pdf, container=form_xobject, shorthand=ctm_shorthand
            )
def _process_content_streams(
    *, pdf: Pdf, container: Object, shorthand=None
) -> Iterator[Union[VectorMarker, TextMarker, ImageInfo]]:
    """Find all individual instances of images drawn in the container.

    Usually the container is a page, but it may also be a Form XObject.
    On a typical page images are stored inline or as regular images
    in an XObject.

    Form XObjects may include inline images, XObject images,
    and recursively, other Form XObjects; and also vector graphic objects.

    Every instance of an image being drawn somewhere is flattened and
    treated as a unique image, since if the same image is drawn multiple times
    on one page it may be drawn at differing resolutions, and our objective
    is to find the resolution at which the page can be rastered without
    downsampling.
    """
    if container.get('/Type') == '/Page' and '/Contents' in container:
        # Pages start from the supplied CTM, or identity if none given.
        initial_shorthand = shorthand or UNIT_SQUARE
    elif container.get('/Type') == '/XObject' and container['/Subtype'] == '/Form':
        # Set the CTM to the state it was when the "Do" operator was
        # encountered that is drawing this instance of the Form XObject
        ctm = PdfMatrix(shorthand) if shorthand else PdfMatrix.identity()

        # A Form XObject may provide its own matrix to map form space into
        # user space. Get this if one exists
        form_shorthand = container.get('/Matrix', PdfMatrix.identity())
        form_matrix = PdfMatrix(form_shorthand)

        # Concatenate form matrix with CTM to ensure CTM is correct for
        # drawing this instance of the XObject
        ctm = form_matrix @ ctm
        initial_shorthand = ctm.shorthand
    else:
        # Not a drawable container we understand; nothing to report.
        return

    contentsinfo = _interpret_contents(container, initial_shorthand)

    if contentsinfo.found_vector:
        yield VectorMarker()
    if contentsinfo.found_text:
        yield TextMarker()
    yield from _find_inline_images(contentsinfo)
    yield from _find_regular_images(container, contentsinfo)
    yield from _find_form_xobject_images(pdf, container, contentsinfo)
def _page_has_text(text_blocks, page_width, page_height) -> bool:
"""Smarter text detection that ignores text in margins"""
pw, ph = float(page_width), float(page_height)
margin_ratio = 0.125
interior_bbox = (
margin_ratio * pw, # left
(1 - margin_ratio) * ph, # top
(1 - margin_ratio) * pw, # right
margin_ratio * ph, # bottom (first quadrant: bottom < top)
)
def rects_intersect(a, b) -> bool:
"""
Where (a,b) are 4-tuple rects (left-0, top-1, right-2, bottom-3)
https://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
Formula assumes all boxes are in first quadrant
"""
return a[0] < b[2] and a[2] > b[0] and a[1] > b[3] and a[3] < b[1]
has_text = False
for bbox in text_blocks:
if rects_intersect(bbox, interior_bbox):
has_text = True
break
return has_text
def simplify_textboxes(miner, textbox_getter) -> Iterator[TextboxInfo]:
    """Reduce pdfminer text boxes to only the fields we need.

    We do this to save memory and ensure that our objects are pickleable
    for transfer between worker processes.
    """
    for box in textbox_getter(miner):
        first_char = box._objs[0]._objs[0]
        yield TextboxInfo(
            bbox=box.bbox,
            is_visible=first_char.rendermode != 3,  # render mode 3 = invisible
            is_corrupt=first_char.get_text() == '\ufffd',
        )
# Per-worker-process Pdf handle; populated by _pdf_pageinfo_sync_init when a
# worker process has to open its own copy of the input file.
worker_pdf = None


def _pdf_pageinfo_sync_init(pdf: Pdf, infile: Path, pdfminer_loglevel):
    """Worker initializer: set up mmap/logging and open a private Pdf if needed."""
    global worker_pdf  # pylint: disable=global-statement
    pikepdf_enable_mmap()
    logging.getLogger('pdfminer').setLevel(pdfminer_loglevel)

    # If the pdf is not opened, open a copy for our worker process to use
    if pdf is None:
        worker_pdf = Pdf.open(infile)

        def on_process_close():
            worker_pdf.close()

        # Close when this process exits
        atexit.register(on_process_close)
def _pdf_pageinfo_sync(args):
    """Worker task: build and return the PageInfo for a single page."""
    pageno, thread_pdf, infile, check_pages, detailed_analysis = args
    # Prefer the Pdf passed in (threaded mode); fall back to the per-process one.
    pdf = thread_pdf if thread_pdf is not None else worker_pdf
    with ExitStack() as stack:
        if not pdf:  # When called with SerialExecutor
            pdf = stack.enter_context(Pdf.open(infile))
        return PageInfo(pdf, pageno, infile, check_pages, detailed_analysis)
def _pdf_pageinfo_concurrent(
    pdf,
    executor: Executor,
    infile,
    progbar,
    max_workers,
    check_pages,
    detailed_analysis=False,
):
    """Gather PageInfo for all pages, using worker processes when worthwhile.

    :param pdf: an already-open Pdf; reused directly when running threaded.
    :param executor: Executor that dispatches the per-page tasks.
    :param infile: path to the PDF so worker processes can open their own copy.
    :param progbar: whether to display a progress bar.
    :param max_workers: worker limit; defaults to the available CPU count.
    :param check_pages: container of page numbers to analyze in detail.
    :param detailed_analysis: whether to perform detailed text analysis.
    :return: list of PageInfo objects, indexed by page number.
    """
    pages = [None] * len(pdf.pages)

    def update_pageinfo(result, pbar):
        # Collect a finished PageInfo into its slot and advance the bar.
        page = result
        if not page:
            # Fixed typo: message previously read "Could read a page".
            raise InputFileError("Could not read a page in the PDF")
        pages[page.pageno] = page
        pbar.update()

    if max_workers is None:
        max_workers = available_cpu_count()

    total = len(pdf.pages)
    use_threads = False  # No performance gain if threaded due to GIL
    n_workers = min(1 + len(pages) // 4, max_workers)
    if n_workers == 1:
        # But if we decided on only one worker, there is no point in using
        # a separate process.
        use_threads = True

    # If we use a thread, we can pass the already-open Pdf for them to use
    # If we use processes, we pass a None which tells the init function to open its
    # own
    initial_pdf = pdf if use_threads else None

    contexts = (
        (n, initial_pdf, infile, check_pages, detailed_analysis) for n in range(total)
    )
    assert n_workers == 1 if use_threads else n_workers >= 1, "Not multithreadable"
    executor(
        use_threads=use_threads,
        max_workers=n_workers,
        tqdm_kwargs=dict(
            total=total, desc="Scanning contents", unit='page', disable=not progbar
        ),
        worker_initializer=partial(
            _pdf_pageinfo_sync_init,
            initial_pdf,
            infile,
            logging.getLogger('pdfminer').level,
        ),
        task=_pdf_pageinfo_sync,
        task_arguments=contexts,
        task_finished=update_pageinfo,
    )
    return pages
class PageInfo:
    """Per-page facts relevant to OCR: size, rotation, text/vector/image content."""

    _has_text: Optional[bool]  # None means "not checked"
    _has_vector: Optional[bool]  # None means "not checked"
    _images: List[ImageInfo]

    def __init__(
        self,
        pdf: Pdf,
        pageno: int,
        infile: PathLike,
        check_pages: Container[int],
        detailed_analysis: bool = False,
    ):
        self._pageno = pageno
        self._infile = infile
        self._detailed_analysis = detailed_analysis
        self._gather_pageinfo(pdf, pageno, infile, check_pages, detailed_analysis)

    def _gather_pageinfo(
        self,
        pdf: Pdf,
        pageno: int,
        infile: PathLike,
        check_pages: Container[int],
        detailed_analysis: bool,
    ):
        page = pdf.pages[pageno]
        mediabox = [Decimal(d) for d in page.MediaBox.as_list()]
        width_pt = mediabox[2] - mediabox[0]
        height_pt = mediabox[3] - mediabox[1]

        check_this_page = pageno in check_pages

        if check_this_page and detailed_analysis:
            # PScript5-generated files need a pdfminer workaround mode.
            pscript5_mode = str(pdf.docinfo.get('/Creator')).startswith('PScript5')
            miner = get_page_analysis(infile, pageno, pscript5_mode)
            self._textboxes = list(simplify_textboxes(miner, get_text_boxes))
            bboxes = (box.bbox for box in self._textboxes)
            self._has_text = _page_has_text(bboxes, width_pt, height_pt)
        else:
            self._textboxes = []
            self._has_text = None  # i.e. "no information"

        userunit = page.get('/UserUnit', Decimal(1.0))
        if not isinstance(userunit, Decimal):
            userunit = Decimal(userunit)
        self._userunit = userunit
        self._width_inches = width_pt * userunit / Decimal(72.0)
        self._height_inches = height_pt * userunit / Decimal(72.0)

        try:
            self._rotate = int(page['/Rotate'])
        except KeyError:
            self._rotate = 0

        userunit_shorthand = (userunit, 0, 0, userunit, 0, 0)

        if check_this_page:
            self._has_vector = False
            # NOTE(review): this overwrites the detailed-analysis result set
            # above — content-stream markers take precedence; confirm intended.
            self._has_text = False
            self._images = []
            for ci in _process_content_streams(
                pdf=pdf, container=page, shorthand=userunit_shorthand
            ):
                if isinstance(ci, VectorMarker):
                    self._has_vector = True
                elif isinstance(ci, TextMarker):
                    self._has_text = True
                elif isinstance(ci, ImageInfo):
                    self._images.append(ci)
                else:
                    raise NotImplementedError()
        else:
            self._has_vector = None  # i.e. "no information"
            self._has_text = None
            self._images = []

        self._dpi = None
        if self._images:
            # Page DPI is the max over all renderable images on the page.
            dpi = Resolution(0.0, 0.0).take_max(
                image.dpi for image in self._images if image.renderable
            )
            self._dpi = dpi
            self._width_pixels = int(round(dpi.x * float(self._width_inches)))
            self._height_pixels = int(round(dpi.y * float(self._height_inches)))

    @property
    def pageno(self) -> int:
        # Zero-based page number.
        return self._pageno

    @property
    def has_text(self) -> bool:
        # Coerces None ("not checked") to False.
        return bool(self._has_text)

    @property
    def has_corrupt_text(self) -> bool:
        if not self._detailed_analysis:
            raise NotImplementedError('Did not do detailed analysis')
        return any(tbox.is_corrupt for tbox in self._textboxes)

    @property
    def has_vector(self) -> bool:
        # Coerces None ("not checked") to False.
        return bool(self._has_vector)

    @property
    def width_inches(self) -> Decimal:
        return self._width_inches

    @property
    def height_inches(self) -> Decimal:
        return self._height_inches

    @property
    def width_pixels(self) -> int:
        # Recomputed from inches and current DPI (zero DPI => 0 pixels).
        return int(round(float(self.width_inches) * self.dpi.x))

    @property
    def height_pixels(self) -> int:
        return int(round(float(self.height_inches) * self.dpi.y))

    @property
    def rotation(self) -> int:
        # /Rotate value of the page (degrees).
        return self._rotate

    @rotation.setter
    def rotation(self, value):
        if value in (0, 90, 180, 270, 360, -90, -180, -270):
            self._rotate = value
        else:
            raise ValueError("rotation must be a cardinal angle")

    @property
    def images(self):
        # ImageInfo for every image drawn on this page (one per drawing).
        return self._images

    def get_textareas(
        self, visible: Optional[bool] = None, corrupt: Optional[bool] = None
    ):
        """Yield bboxes of text boxes, optionally filtered by visibility/corruption."""

        def predicate(obj, want_visible, want_corrupt):
            result = True
            if want_visible is not None:
                if obj.is_visible != want_visible:
                    result = False
            if want_corrupt is not None:
                if obj.is_corrupt != want_corrupt:
                    result = False
            return result

        if not self._textboxes:
            if visible is not None and corrupt is not None:
                raise NotImplementedError('Incomplete information on textboxes')
            return self._textboxes
        return (obj.bbox for obj in self._textboxes if predicate(obj, visible, corrupt))

    @property
    def dpi(self) -> Resolution:
        # Zero resolution when no renderable images were found.
        if self._dpi is None:
            return Resolution(0.0, 0.0)
        return self._dpi

    @property
    def userunit(self) -> Decimal:
        return self._userunit

    @property
    def min_version(self) -> str:
        # /UserUnit requires PDF 1.6.
        if self.userunit is not None:
            return '1.6'
        else:
            return '1.5'

    def __repr__(self):
        return (
            f'<PageInfo '
            f'pageno={self.pageno} {self.width_inches}"x{self.height_inches}" '
            f'rotation={self.rotation} dpi={self.dpi} has_text={self.has_text}>'
        )
# Default executor runs page analysis serially, in-process.
DEFAULT_EXECUTOR = SerialExecutor()


class PdfInfo:
    """Get summary information about a PDF"""

    def __init__(
        self,
        infile,
        *,
        detailed_analysis: bool = False,
        progbar: bool = False,
        max_workers: int = None,
        check_pages=None,
        executor: Executor = DEFAULT_EXECUTOR,
    ):
        self._infile = infile
        if check_pages is None:
            check_pages = range(0, 1_000_000_000)  # effectively "all pages"
        with Pdf.open(infile) as pdf:
            if pdf.is_encrypted:
                raise EncryptedPdfError()  # Triggered by encryption with empty passwd
            self._pages = _pdf_pageinfo_concurrent(
                pdf,
                executor,
                infile,
                progbar,
                max_workers,
                check_pages=check_pages,
                detailed_analysis=detailed_analysis,
            )
            self._needs_rendering = pdf.Root.get('/NeedsRendering', False)
            # An AcroForm is present if it declares fields or uses XFA.
            self._has_acroform = False
            if '/AcroForm' in pdf.Root:
                if len(pdf.Root.AcroForm.get('/Fields', [])) > 0:
                    self._has_acroform = True
                elif '/XFA' in pdf.Root.AcroForm:
                    self._has_acroform = True

    @property
    def pages(self):
        # List of PageInfo, indexed by page number.
        return self._pages

    @property
    def min_version(self) -> str:
        # The minimum PDF is the maximum version that any particular page needs
        return max(page.min_version for page in self.pages)

    @property
    def has_userunit(self) -> bool:
        # True if any page scales its units with /UserUnit.
        return any(page.userunit != 1.0 for page in self.pages)

    @property
    def has_acroform(self) -> bool:
        return self._has_acroform

    @property
    def filename(self) -> Union[str, Path]:
        if not isinstance(self._infile, (str, Path)):
            raise NotImplementedError("can't get filename from stream")
        return self._infile

    @property
    def needs_rendering(self) -> bool:
        # True when the document requests XFA rendering (/NeedsRendering).
        return self._needs_rendering

    def __getitem__(self, item) -> PageInfo:
        return self._pages[item]

    def __len__(self):
        return len(self._pages)

    def __repr__(self):
        return f"<PdfInfo('...'), page count={len(self)}>"
def main():
    """CLI entry point: print a summary of every page and image in a PDF."""
    import argparse  # pylint: disable=import-outside-toplevel
    from pprint import pprint  # pylint: disable=import-outside-toplevel

    parser = argparse.ArgumentParser()
    parser.add_argument('infile')
    args = parser.parse_args()

    info = PdfInfo(args.infile)
    pprint(info)
    for page in info.pages:
        pprint(page)
        for image in page.images:
            pprint(image)
if __name__ == '__main__':
main()
| jbarlow83/OCRmyPDF | src/ocrmypdf/pdfinfo/info.py | Python | gpl-3.0 | 31,669 |
# Example: regrid MERRA 3-hourly rain onto an ERA-Interim/ROMS grid with
# brushcutter, using both bilinear and conservative remapping, writing one
# obc output file per time frame.
from brushcutter import *
import numpy as np

direxamples='/Volumes/P4/workdir/raphael/work_conserve_remap/'

merrafile = direxamples + 'MERRA_rain_3hours_1980_2days_conserve.nc'
eraigrd = direxamples + 'lsm_erainterim_roms_conserve.nc'

# ---------- define segments on ERAint grid -----------------------
domain = obc_segment('domain', eraigrd,target_model='regular',istart=0,iend=511,jstart=0, jend=255)
# as a test we can do MERRA to MERRA too
#domain = obc_segment('domain', merrafile,target_model='regular',istart=0,iend=539,jstart=0, jend=360)

# ---------- define variables on each segment ------------------
rain_domain = obc_variable(domain,'rain',geometry='line')

# ---------- list segments and variables to be written -------
list_segments = [domain]
list_variables = [rain_domain]
list_vectvariables = []

# ---------- bilinear interpolation from MERRA file -------------------
# Frame 0 builds the interpolator; later frames reuse the computed weights.
interp_t2s_bilin = rain_domain.interpolate_from(merrafile,'rain',timename='rain_time',frame=0,coord_names=['lon','lat'],method='bilinear',drown='no',autocrop=False)
lib_ioncdf.write_obc_file(list_segments,list_variables,list_vectvariables,rain_domain.timesrc,output='merra_rain_bilin_fr0000.nc')

for kt in np.arange(1,16):
    rain_domain.interpolate_from(merrafile,'rain',timename='rain_time',frame=kt,coord_names=['lon','lat'],method='bilinear',drown='no',interpolator=interp_t2s_bilin,autocrop=False)
    lib_ioncdf.write_obc_file(list_segments,list_variables,list_vectvariables,rain_domain.timesrc,output='merra_rain_bilin_fr' + str(kt).zfill(4) + '.nc')

# ---------- conservative interpolation from MERRA file -------------------
# Same pattern with first-order conservative remapping (gridspec-based).
interp_t2s_conserve = rain_domain.interpolate_from(merrafile,'rain',timename='rain_time',frame=0,coord_names=['lon','lat'],method='conserve',drown='no',use_gridspec=True)
lib_ioncdf.write_obc_file(list_segments,list_variables,list_vectvariables,rain_domain.timesrc,output='merra_rain_conserve_fr0000.nc')

for kt in np.arange(1,16):
    rain_domain.interpolate_from(merrafile,'rain',timename='rain_time',frame=kt,coord_names=['lon','lat'],method='conserve',drown='no',interpolator=interp_t2s_conserve,use_gridspec=True)
    lib_ioncdf.write_obc_file(list_segments,list_variables,list_vectvariables,rain_domain.timesrc,output='merra_rain_conserve_fr' + str(kt).zfill(4) + '.nc')
| raphaeldussin/brushcutter | examples/forcing/example_precip_MERRA.py | Python | gpl-3.0 | 2,297 |
class Solution:
    # @param a list of integers
    # @return an integer
    def removeDuplicates(self, A):
        """Compact sorted list A in place so each value appears once.

        Returns the new logical length; A[:length] holds the unique values.
        """
        if not A:
            return 0
        write = 1  # next slot to receive a new unique value
        for value in A[1:]:
            if value != A[write - 1]:
                A[write] = value
                write += 1
        return write
| kunth/Leetcode | Python/remove-duplicates-from-sorted-array.py | Python | gpl-2.0 | 316 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
from M2Crypto import X509
class Certificate:
    """X.509 certificate wrapper as stored in CVMFS repositories."""

    def __init__(self, certificate_file):
        self._certificate_file = certificate_file
        self.openssl_certificate = X509.load_cert_string(
            self._certificate_file.read())

    def __str__(self):
        return "<Certificate " + self.get_fingerprint() + ">"

    def __repr__(self):
        return self.__str__()

    def get_openssl_certificate(self):
        """Return the certificate as an M2Crypto.X509 object."""
        return self.openssl_certificate

    def get_fingerprint(self, algorithm='sha1'):
        """Return the certificate fingerprint as colon-separated hex pairs."""
        raw = self.openssl_certificate.get_fingerprint(algorithm)
        return ':'.join(map(''.join, zip(raw[0::2], raw[1::2])))

    def verify(self, signature, message):
        """Verify a given signature against the expected 'message' string."""
        public_key = self.openssl_certificate.get_pubkey()
        public_key.reset_context(md='sha1')
        public_key.verify_init()
        public_key.verify_update(message)
        return public_key.verify_final(signature)
| reneme/python-cvmfsutils | cvmfs/certificate.py | Python | bsd-3-clause | 1,323 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import sys
from hashlib import sha1
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.target import Target
from pants.invalidation.build_invalidator import BuildInvalidator, CacheKeyGenerator
from pants.util.dirutil import safe_mkdir
class VersionedTargetSet(object):
  """Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
  list of targets is currently valid.
  When invalidating a single target, this can be used to represent that target as a singleton.
  When checking the artifact cache, this can also be used to represent a list of targets that are
  built together into a single artifact.
  """
  @staticmethod
  def from_versioned_targets(versioned_targets):
    """Combine several VersionedTargets (which must share one CacheManager) into a single set."""
    first_target = versioned_targets[0]
    cache_manager = first_target._cache_manager
    # Quick sanity check; all the versioned targets should have the same cache manager.
    # TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
    # feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
    for versioned_target in versioned_targets:
      if versioned_target._cache_manager != cache_manager:
        raise ValueError("Attempting to combine versioned targets {} and {} with different"
                         " CacheManager instances: {} and {}".format(first_target, versioned_target,
                                                                     cache_manager,
                                                                     versioned_target._cache_manager))
    return VersionedTargetSet(cache_manager, versioned_targets)
  def __init__(self, cache_manager, versioned_targets):
    self._cache_manager = cache_manager
    self.versioned_targets = versioned_targets
    self.targets = [vt.target for vt in versioned_targets]
    # The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
    self.cache_key = CacheKeyGenerator.combine_cache_keys([vt.cache_key
                                                           for vt in versioned_targets])
    # NB: previous_cache_key may be None on the first build of a target.
    self.previous_cache_key = cache_manager.previous_key(self.cache_key)
    # Valid iff the combined key matches what the invalidator recorded on the last success.
    self.valid = self.previous_cache_key == self.cache_key
    self.num_chunking_units = self.cache_key.num_chunking_units
    if cache_manager.invalidation_report:
      cache_manager.invalidation_report.add_vts(cache_manager, self.targets, self.cache_key,
                                                self.valid, phase='init')
    self._results_dir = None
    self._previous_results_dir = None
    # True if the results_dir for this VT was created incrementally via clone of the
    # previous results_dir.
    self.is_incremental = False
  def update(self):
    """Mark this set as successfully processed (delegates to the cache manager)."""
    self._cache_manager.update(self)
  def force_invalidate(self):
    """Force this set to be considered invalid on the next check."""
    self._cache_manager.force_invalidate(self)
  @property
  def has_results_dir(self):
    # True once create_results_dir() has assigned a results directory.
    return self._results_dir is not None
  @property
  def results_dir(self):
    """The directory that stores results for this version of these targets."""
    if self._results_dir is None:
      raise ValueError('No results_dir was created for {}'.format(self))
    return self._results_dir
  @property
  def previous_results_dir(self):
    """The directory that stores results for the previous version of these targets.
    Only valid if is_incremental is true.
    TODO: Exposing old results is a bit of an abstraction leak, because ill-behaved Tasks could
    mutate them.
    """
    if self._previous_results_dir is None:
      raise ValueError('There is no previous_results_dir for: {}'.format(self))
    return self._previous_results_dir
  def __repr__(self):
    return 'VTS({}, {})'.format(','.join(target.address.spec for target in self.targets),
                                'valid' if self.valid else 'invalid')
class VersionedTarget(VersionedTargetSet):
  """This class represents a singleton VersionedTargetSet, and has links to VersionedTargets that
  the wrapped target depends on (after having resolved through any "alias" targets.
  """
  def __init__(self, cache_manager, target, cache_key):
    if not isinstance(target, Target):
      raise ValueError("The target {} must be an instance of Target but is not.".format(target.id))
    self.target = target
    self.cache_key = cache_key
    # Must come after the assignments above, as they are used in the parent's __init__.
    super(VersionedTarget, self).__init__(cache_manager, [self])
    self.id = target.id
  def create_results_dir(self, root_dir, allow_incremental):
    """Ensures that a results_dir exists under the given root_dir for this versioned target.
    If incremental=True, attempts to clone the results_dir for the previous version of this target
    to the new results dir. Otherwise, simply ensures that the results dir exists.
    """
    def dirname(key):
      # Directory layout: <root>/<key.id>/<first 12 hex chars of sha1(key.hash)>.
      # TODO: Shorten cache_key hashes in general?
      return os.path.join(root_dir, key.id, sha1(key.hash).hexdigest()[:12])
    new_dir = dirname(self.cache_key)
    self._results_dir = new_dir
    if self.valid:
      # A valid VT keeps its existing results; nothing to create or clone.
      return
    if allow_incremental and self.previous_cache_key:
      self.is_incremental = True
      old_dir = dirname(self.previous_cache_key)
      self._previous_results_dir = old_dir
      # Only clone when the old results exist and the new dir hasn't been made yet.
      if os.path.isdir(old_dir) and not os.path.isdir(new_dir):
        shutil.copytree(old_dir, new_dir)
    else:
      safe_mkdir(new_dir)
  def __repr__(self):
    return 'VT({}, {})'.format(self.target.id, 'valid' if self.valid else 'invalid')
class InvalidationCheck(object):
  """The result of calling check() on a CacheManager.
  Each member is a list of VersionedTargetSet objects. Sorting of the targets depends
  on how you order the InvalidationCheck from the InvalidationCacheManager.
  Tasks may need to perform no, some or all operations on either of these, depending on how they
  are implemented.
  """
  @classmethod
  def _partition_versioned_targets(cls, versioned_targets, partition_size_hint, vt_colors=None):
    """Groups versioned targets so that each group has roughly the same number of sources.
    versioned_targets is a list of VersionedTarget objects [vt1, vt2, vt3, vt4, vt5, vt6, ...].
    Returns a list of VersionedTargetSet objects, e.g., [VT1, VT2, VT3, ...] representing the
    same underlying targets. E.g., VT1 is the combination of [vt1, vt2, vt3], VT2 is the combination
    of [vt4, vt5] and VT3 is [vt6].
    The new versioned targets are chosen to have roughly partition_size_hint sources.
    If vt_colors is specified, it must be a map from VersionedTarget -> opaque 'color' values.
    Two VersionedTargets will be in the same partition only if they have the same color.
    This is useful as a compromise between flat mode, where we build all targets in a
    single compiler invocation, and non-flat mode, where we invoke a compiler for each target,
    which may lead to lots of compiler startup overhead. A task can choose instead to build one
    group at a time.
    """
    res = []
    # Hack around the python outer scope problem.
    class VtGroup(object):
      def __init__(self):
        self.vts = []
        self.total_chunking_units = 0
    current_group = VtGroup()
    def add_to_current_group(vt):
      current_group.vts.append(vt)
      current_group.total_chunking_units += vt.num_chunking_units
    def close_current_group():
      # Flush the accumulated vts into one combined VersionedTargetSet and reset.
      if len(current_group.vts) > 0:
        new_vt = VersionedTargetSet.from_versioned_targets(current_group.vts)
        res.append(new_vt)
        current_group.vts = []
        current_group.total_chunking_units = 0
    current_color = None
    for vt in versioned_targets:
      if vt_colors:
        # A color change forces a partition boundary.
        color = vt_colors.get(vt, current_color)
        if current_color is None:
          current_color = color
        if color != current_color:
          close_current_group()
          current_color = color
      add_to_current_group(vt)
      if current_group.total_chunking_units > 1.5 * partition_size_hint and len(current_group.vts) > 1:
        # Too big. Close the current group without this vt and add it to the next one.
        current_group.vts.pop()
        close_current_group()
        add_to_current_group(vt)
      elif current_group.total_chunking_units > partition_size_hint:
        close_current_group()
    close_current_group()  # Close the last group, if any.
    return res
  def __init__(self, all_vts, invalid_vts, partition_size_hint=None, target_colors=None):
    # target_colors is specified by Target. We need it by VersionedTarget.
    vt_colors = {}
    if target_colors:
      for vt in all_vts:
        if vt.target in target_colors:
          vt_colors[vt] = target_colors[vt.target]
    # All the targets, valid and invalid.
    self.all_vts = all_vts
    # All the targets, partitioned if so requested.
    self.all_vts_partitioned = \
      self._partition_versioned_targets(all_vts, partition_size_hint, vt_colors) \
      if (partition_size_hint or vt_colors) else all_vts
    # Just the invalid targets.
    self.invalid_vts = invalid_vts
    # Just the invalid targets, partitioned if so requested.
    self.invalid_vts_partitioned = \
      self._partition_versioned_targets(invalid_vts, partition_size_hint, vt_colors) \
      if (partition_size_hint or vt_colors) else invalid_vts
class InvalidationCacheManager(object):
  """Manages cache checks, updates and invalidation keeping track of basic change
  and invalidation statistics.
  Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
  """
  class CacheValidationError(Exception):
    """Indicates a problem accessing the cache."""
  def __init__(self,
               cache_key_generator,
               build_invalidator_dir,
               invalidate_dependents,
               fingerprint_strategy=None,
               invalidation_report=None,
               task_name=None):
    self._cache_key_generator = cache_key_generator
    self._task_name = task_name or 'UNKNOWN'
    self._invalidate_dependents = invalidate_dependents
    self._invalidator = BuildInvalidator(build_invalidator_dir)
    self._fingerprint_strategy = fingerprint_strategy
    self.invalidation_report = invalidation_report
  def update(self, vts):
    """Mark a changed or invalidated VersionedTargetSet as successfully processed."""
    # Record each constituent key first, then the combined key, so subsequent
    # checks see every level of the set as valid.
    for vt in vts.versioned_targets:
      self._invalidator.update(vt.cache_key)
      vt.valid = True
    self._invalidator.update(vts.cache_key)
    vts.valid = True
  def force_invalidate(self, vts):
    """Force invalidation of a VersionedTargetSet."""
    for vt in vts.versioned_targets:
      self._invalidator.force_invalidate(vt.cache_key)
      vt.valid = False
    self._invalidator.force_invalidate(vts.cache_key)
    vts.valid = False
  def check(self,
            targets,
            partition_size_hint=None,
            target_colors=None,
            topological_order=False):
    """Checks whether each of the targets has changed and invalidates it if so.
    Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
    'cover' the input targets, possibly partitioning them, with one caveat: if the FingerprintStrategy
    opted out of fingerprinting a target because it doesn't contribute to invalidation, then that
    target will be excluded from all_vts, invalid_vts, and the partitioned VTS.
    Callers can inspect these vts and rebuild the invalid ones, for example.
    If target_colors is specified, it must be a map from Target -> opaque 'color' values.
    Two Targets will be in the same partition only if they have the same color.
    """
    all_vts = self.wrap_targets(targets, topological_order=topological_order)
    # NOTE(review): this file is Python 2 (see the 3-arg raise in _key_for), so
    # `filter` returns a list here; under Python 3 it would be a lazy iterator.
    invalid_vts = filter(lambda vt: not vt.valid, all_vts)
    return InvalidationCheck(all_vts, invalid_vts, partition_size_hint, target_colors)
  @property
  def task_name(self):
    return self._task_name
  def wrap_targets(self, targets, topological_order=False):
    """Wrap targets and their computed cache keys in VersionedTargets.
    If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will not
    have an associated VersionedTarget returned.
    Returns a list of VersionedTargets, each representing one input target.
    """
    def vt_iter():
      if topological_order:
        # Dependency order (reversed topological sort), restricted to the requested targets.
        sorted_targets = [t for t in reversed(sort_targets(targets)) if t in targets]
      else:
        sorted_targets = sorted(targets)
      for target in sorted_targets:
        target_key = self._key_for(target)
        if target_key is not None:
          yield VersionedTarget(self, target, target_key)
    return list(vt_iter())
  def previous_key(self, cache_key):
    # Delegates to the invalidator's record of the last successful key.
    return self._invalidator.previous_key(cache_key)
  def _key_for(self, target):
    try:
      return self._cache_key_generator.key_for_target(target,
                                                      transitive=self._invalidate_dependents,
                                                      fingerprint_strategy=self._fingerprint_strategy)
    except Exception as e:
      # This is a catch-all for problems we haven't caught up with and given a better diagnostic.
      # TODO(Eric Ayers): If you see this exception, add a fix to catch the problem earlier.
      exc_info = sys.exc_info()
      new_exception = self.CacheValidationError("Problem validating target {} in {}: {}"
                                                .format(target.id, target.address.spec_path, e))
      # Python 2 three-argument raise: re-raise the wrapped error with the original traceback.
      raise self.CacheValidationError, new_exception, exc_info[2]
| jtrobec/pants | src/python/pants/invalidation/cache_manager.py | Python | apache-2.0 | 14,023 |
#!/usr/bin/env python
"""
Seshat
Web App/API framework built on top of gevent
Main framework app
For more information, see: https://github.com/JoshAshby/
http://xkcd.com/353/
Josh Ashby
2012
http://joshashby.com
[email protected]
"""
from gevent import monkey; monkey.patch_all()
import gevent
#To use fastCGI
#from gevent_fastcgi.server import WSGIServer
#To use plain WSGI
from gevent.pywsgi import WSGIServer
import logging
from seshat.dispatch import dispatch
import traceback
import config.config as c
logger = logging.getLogger(c.general.logName+".seshat")
def main():
    """
    Build and return the WSGI server bound to the configured address/port.

    Reads ``serverAddress`` and ``serverPort`` from the config module ``c``,
    falling back to 127.0.0.1:8000 when unset.

    Bug fix: the original only honored ``serverPort`` when it was a ``str``
    (``type(...) is str``), silently falling back to 8000 for integer config
    values. Now any truthy port value is converted with ``int()``.
    """
    # Accept both string and integer port values from the config.
    if c.general.serverPort:
        port = int(c.general.serverPort)
    else:
        port = 8000
    address = c.general.serverAddress or "127.0.0.1"
    server = WSGIServer((address, port), dispatch, log=None)
    logger.info("""Now serving py as a fastcgi server at %(address)s:%(port)i
    Press Ctrl+c if running as non daemon mode, or send a stop signal
    """ % {"address": address, "port": port})
    return server
def serveForever():
    """
    Start the server and block until shutdown, logging why we stopped.

    Bug fix: the original referenced ``gevent.shutdown`` without calling it
    (missing parentheses) in every branch, so the intended shutdown hook was
    a no-op attribute access. It is now actually invoked.
    """
    server = main()
    try:
        server.serve_forever()
        logger.warn("Shutdown py operations.")
    except Exception as exc:
        logger.critical("""Shutdown py operations, here's why: %s""" % exc)
        gevent.shutdown()
    except KeyboardInterrupt:
        # NB: KeyboardInterrupt is a BaseException, so the Exception clause
        # above does not swallow it.
        logger.critical("""Shutdown py operations for a KeyboardInterrupt. Bye!""")
        gevent.shutdown()
    except:
        # Catch-all for remaining BaseExceptions (e.g. SystemExit).
        logger.critical(traceback.format_exc())
    else:
        logger.critical("""Shutdown py operations for unknown reason, possibly a KeyboardInterrupt...""")
        gevent.shutdown()
| JoshAshby/Fla.gr | app/seshat/framework.py | Python | mit | 1,849 |
from urllib import urlopen
import datetime
import pickle
import os
import json
import httpretty
class ScraperTestMakerMixin(object):
    """Mixin that records a scraper's HTTP traffic and parse() results as test fixtures."""
    def testmaker_url_get(self, url):
        # Fetch the URL for real and remember the response body keyed by URL,
        # so the same responses can later be replayed by httpretty.
        data = urlopen(url).read()
        self.mock_http_data[url] = data
        return data
    def make_test(self):
        '''
        Stores results of all http requests and the results of parse
        '''
        if not self.mockable:
            raise NotImplementedError
        # Swap in the recording fetcher before running parse().
        self.url_get = self.testmaker_url_get
        self.mock_http_data = {}
        results = self.parse()
        test_data = {
            'created_at' :datetime.datetime.now(),
            'url_data' : self.mock_http_data,
            'results' : results
        }
        # NB: despite the variable name, this is a pickle file (.dat), not JSON.
        with open('tests/%s.dat' % self.__class__.__name__, 'wb') as jsonfile:
            pickle.dump(test_data, jsonfile)
    def make_mocks(self):
        # Like make_test, but dumps only the recorded HTTP bodies as JSON.
        # NOTE(review): this path does not replace url_get, so it presumably
        # relies on the subclass already using testmaker_url_get — verify.
        self.mock_http_data = {}
        data = self.parse()
        with open('tests/%s.json' % self.__class__.__name__, 'wb') as jsonfile:
            json.dump(self.mock_http_data, jsonfile)
        return data
class ScraperTestRunnerMixin(object):
    """Mixin that replays recorded HTTP fixtures and checks parse() output against them."""
    def has_test(self):
        # A fixture exists if make_test() previously wrote tests/<ClassName>.dat.
        test_filepath = 'tests/%s.dat' % self.__class__.__name__
        return os.path.isfile(test_filepath)
    def run_test(self):
        '''
        Mocks http responses and compares parse() results to expected
        values. A 'False' results does not necessarily mean the code is wrong,
        just that results did not match exactly
        '''
        # NB: despite the variable name, the .dat fixture is a pickle file.
        json_filepath = 'tests/%s.dat' % self.__class__.__name__
        if os.path.isfile(json_filepath):
            with open(json_filepath, 'rb') as json_file:
                test_data = pickle.load(json_file)
            # Register every recorded response so urlopen() is served from the fixture.
            httpretty.enable()
            url_data = test_data['url_data']
            for url in url_data:
                httpretty.register_uri(httpretty.GET,
                                       url,
                                       body = url_data[url])
            results = self.parse()
            httpretty.disable()
            httpretty.reset()
            # Sort both sides by title so comparison is order-insensitive.
            sort_key = lambda event: event['title']
            results.sort(key=sort_key)
            expected_results = sorted(test_data['results'], key=sort_key)
            same = False
            if len(results) == len(expected_results):
                same = True
                for result, expected in zip(results, expected_results):
                    same = same and result == expected
            return same, results, expected_results
| bhitov/boston_events_scraper | lib/test.py | Python | gpl-3.0 | 2,619 |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager

# Active Revit document, supplied by RevitServices inside Dynamo.
doc = DocumentManager.Instance.CurrentDBDocument

# IN[0]: list of Dynamo-wrapped design options; unwrap to native API elements.
desopts = UnwrapElement(IN[0])

elementlist = list()
for item in desopts:
    # Resolve each design option's parent option set via its OPTION_SET_ID parameter.
    elementlist.append(doc.GetElement(item.get_Parameter(BuiltInParameter.OPTION_SET_ID).AsElementId()))

# OUT is Dynamo's output port. (Restored: the original trailing line was
# corrupted by extraction metadata fused onto `OUT = elementlist`.)
OUT = elementlist
# -*- coding: utf-8 -*-
import time
import typing
import queue
import multiprocessing.queues
from abc import ABCMeta, abstractmethod
from .runner import TestRunner
from .loader import load_test_from_dict
from .suite import TestSuite
import logging
logger = logging.getLogger(__name__)
class BaseTestConsumer(TestRunner, metaclass=ABCMeta):
    """
    Base consumer class.
    Sub-class must implement method: _consume()
    Parameters
    ----------
    *args, **kwargs:
        pass-through to TestRunner
    """
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._testsuite = TestSuite()
        # Redirect the suite's internal run loop so that "running the suite"
        # means pulling tests from the subclass's _consume() instead.
        self._testsuite._run_tests = lambda result: self._consume()
        self.add_testsuite(self._testsuite)
    @abstractmethod
    def _consume(self):
        # Subclasses fetch test descriptions from their source (queue, socket, ...)
        # and feed them through load_and_test().
        pass
    def load_and_test(self, data: dict) -> None:
        """Deserialize a test description dict, register it in the suite and run it."""
        test = load_test_from_dict(data, validate=True)
        if test:
            self._testsuite.add_test(test)
            self._testsuite.run_test(test, self.result)
class QueueTestConsumer(BaseTestConsumer):
    """
    Consume test from Queue.
    Parameters
    ----------
    queue: queue.Queue or multiprocessing.queues.JoinableQueue
        The queue consume from.
    auto_stop: bool, optional
        Auto stop when queue is empty.
    *args, **kwargs:
        pass-through to TestRunner
    """
    def __init__(self, queue: typing.Union[queue.Queue, multiprocessing.queues.JoinableQueue],
                 auto_stop: bool = True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = queue
        self.auto_stop = auto_stop
    def _consume(self) -> None:
        # Main loop: pop test dicts off the queue and execute them until the
        # queue drains (if auto_stop) or the result asks us to abort.
        while True:
            try:
                data = self.queue.get_nowait()
            except queue.Empty:
                if self.auto_stop:
                    break
            else:
                try:
                    self.load_and_test(data)
                except Exception as err:
                    # A bad test description must not kill the consumer loop.
                    logger.exception(err)
                finally:
                    # Always balance the get() so JoinableQueue.join() can return.
                    self.queue.task_done()
            finally:
                # Honor abort/pause signals after every iteration (even on Empty).
                if self.result.should_abort:
                    break
                else:
                    while self.result.should_pause:
                        time.sleep(self.result.PAUSE_INTERVAL)
| yyang179/ngta | ngta/consumer.py | Python | mit | 2,323 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE.TXT file)
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Sanity check: integer addition behaves as expected (1 + 1 == 2).
        """
        total = 1 + 1
        self.assertEqual(total, 2)
| F483/bikesurf.org | apps/station/tests.py | Python | mit | 532 |
# -*- coding: utf-8 -*-
#
# This file is part of the Christine project
#
# Copyright (c) 2006-2007 Marco Antonio Islas Cruz
#
# Christine is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Christine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @category Pattern
# @package Singleton
# @author Miguel Vazquez Gocobachi <[email protected]>
# @author Marco Antonio Islas Cruz <[email protected]>
# @copyright 2007 Christine Development Group
# @license http://www.gnu.org/licenses/gpl.txt
# @version $Id: Singleton.py,v 1.1 2009/12/07 17:50:14 scksrd Exp $
#
# Singleton implements
#
class Singleton(object):
    """Classic singleton base: every instantiation returns one shared instance.

    After the first construction, ``__init__`` is swapped for a no-op so the
    state established on first use is never re-initialized.
    """

    # The one shared instance (class attribute, name-mangled).
    __Instance = None
    # Human-readable name of the singleton.
    __Name = 'Singleton'

    def __new__(cls, *args):
        # First call: build and cache the instance.
        # Later calls: neutralize __init__ and hand back the cached object.
        if not cls.__Instance:
            cls.__Instance = super(Singleton, cls).__new__(cls, *args)
        else:
            cls.__init__ = cls.__doNothing
        return cls.__Instance

    def __doNothing(self, *args):
        """No-op replacement for __init__ used after the first construction.

        Installed so that re-declaring values in __init__ cannot clobber
        state set on the first (real) initialization.
        """
        pass

    def setName(self, value='Singleton'):
        """Set the name of this singleton."""
        self.__Name = value

    def getName(self):
        """Return the name of this singleton.

        :return: string
        """
        return self.__Name

    def getId(self):
        """Return the id() of the shared instance.

        :return: integer
        """
        return id(self)
| dksr/REMIND | python/base/utils/Singleton.py | Python | mit | 2,080 |
"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
"""
import pytest
import textwrap
import difflib
import re
import sys
import contextlib
import platform
import gc
# Matches Python 2 unicode reprs like u'...' so the u prefix can be dropped.
_unicode_marker = re.compile(r'u(\'[^\']*\')')
# Matches Python 2 long literals like 123L so the trailing L can be dropped.
_long_marker = re.compile(r'([0-9])L')
# Matches hexadecimal ids/addresses (e.g. 0x7f3a...) so they can be normalized.
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip())
def _split_and_sort(s):
    """Return the normalized lines of *s*, sorted — for comparing output
    whose line order is unspecified."""
    lines = _strip_and_dedent(s).splitlines()
    return sorted(lines)
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
class Output(object):
    """Wraps captured output for comparison against an expected triple-quoted
    string, ignoring constructor/destructor tracer lines prefixed with "###"."""

    def __init__(self, string):
        self.string = string
        self.explanation = []

    def __str__(self):
        return self.string

    def __eq__(self, other):
        # Drop "###"-prefixed ctor/dtor lines before comparing.
        actual = [line for line in self.string.strip().splitlines()
                  if not line.startswith("###")]
        expected = _strip_and_dedent(other).splitlines()
        if actual != expected:
            self.explanation = _make_explanation(actual, expected)
            return False
        return True
class Unordered(Output):
    """Output comparison that ignores the ordering of lines."""

    def __eq__(self, other):
        actual = _split_and_sort(self.string)
        expected = _split_and_sort(other)
        if actual != expected:
            self.explanation = _make_explanation(actual, expected)
            return False
        return True
class Capture(object):
    """Context manager around pytest's capfd/capsys with custom comparison helpers."""
    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""
    def __enter__(self):
        # Flush anything already captured so the with-block only sees its own output.
        self.capfd.readouterr()
        return self
    def __exit__(self, *_):
        self.out, self.err = self.capfd.readouterr()
    def __eq__(self, other):
        # Compare captured stdout ("###" lines ignored via Output) to the expected text.
        a = Output(self.out)
        b = other
        if a == b:
            return True
        else:
            self.explanation = a.explanation
            return False
    def __str__(self):
        return self.out
    def __contains__(self, item):
        return item in self.out
    @property
    def unordered(self):
        # stdout comparison that ignores line ordering.
        return Unordered(self.out)
    @property
    def stderr(self):
        # Captured stderr wrapped for comparison.
        return Output(self.err)
@pytest.fixture
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    # Usage in tests: `with capture: ...` then `assert capture == "expected"`.
    return Capture(capsys)
class SanitizedString(object):
    """Callable wrapper that sanitizes a value and compares the result to an
    expected triple-quoted string, recording a diff explanation on mismatch."""

    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []

    def __call__(self, thing):
        # Sanitize and return self so the result can be compared directly.
        self.string = self.sanitizer(thing)
        return self

    def __eq__(self, other):
        expected = _strip_and_dedent(other)
        if self.string == expected:
            return True
        self.explanation = _make_explanation(self.string.splitlines(),
                                             expected.splitlines())
        return False
def _sanitize_general(s):
    """Normalize output so Python 2 and 3 runs compare equal: strip whitespace,
    shorten the test-module name, and drop py2 long/unicode literal markers."""
    s = s.strip()
    s = s.replace("pybind11_tests.", "m.").replace("unicode", "str")
    s = _long_marker.sub(r"\1", s)
    return _unicode_marker.sub(r"\1", s)
def _sanitize_docstring(thing):
    """Return the sanitized ``__doc__`` of *thing*."""
    return _sanitize_general(thing.__doc__)
@pytest.fixture
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    # Usage: assert doc(obj) == """expected docstring"""
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    """Sanitize ``str(thing)`` and collapse hexadecimal addresses/ids to 0."""
    return _hexadecimal.sub("0", _sanitize_general(str(thing)))
@pytest.fixture
def msg():
    """Sanitize messages and add custom failure explanation"""
    # Usage: assert msg(excinfo.value) == """expected message"""
    return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
    """Pytest hook: surface a comparison object's stored `explanation` (if any)
    as the assertion-failure message."""
    return getattr(left, 'explanation', None)
class suppress(object):
    """Context manager that silences the given exception type."""

    def __init__(self, exception):
        self._exception = exception

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Returning True tells Python to swallow the in-flight exception.
        return exc_type is not None and issubclass(exc_type, self._exception)
def gc_collect():
    """Run the garbage collector twice (reference-counting tests on PyPy need
    more than one pass for objects to actually be reclaimed)."""
    for _ in range(2):
        gc.collect()
def pytest_namespace():
    """Add import suppression and test requirements to `pytest` namespace.

    Probes the optional dependencies (numpy, scipy, Eigen support) once and
    publishes skipif markers for tests that need them.

    Bug fix: 'requires_scipy' previously tested `not np` instead of
    `not scipy`, so scipy-requiring tests were NOT skipped (and failed on
    import) whenever numpy was installed but scipy was missing.
    """
    try:
        import numpy as np
    except ImportError:
        np = None
    try:
        import scipy
    except ImportError:
        scipy = None
    try:
        from pybind11_tests.eigen import have_eigen
    except ImportError:
        have_eigen = False
    pypy = platform.python_implementation() == "PyPy"
    skipif = pytest.mark.skipif
    return {
        'suppress': suppress,
        'requires_numpy': skipif(not np, reason="numpy is not installed"),
        'requires_scipy': skipif(not scipy, reason="scipy is not installed"),
        'requires_eigen_and_numpy': skipif(not have_eigen or not np,
                                           reason="eigen and/or numpy are not installed"),
        'requires_eigen_and_scipy': skipif(not have_eigen or not scipy,
                                           reason="eigen and/or scipy are not installed"),
        'unsupported_on_pypy': skipif(pypy, reason="unsupported on PyPy"),
        'unsupported_on_py2': skipif(sys.version_info.major < 3,
                                     reason="unsupported on Python 2.x"),
        'gc_collect': gc_collect
    }
def _test_import_pybind11():
    """Early diagnostic for test module initialization errors
    When there is an error during initialization, the first import will report the
    real error while all subsequent imports will report nonsense. This import test
    is done early (in the pytest configuration file, before any tests) in order to
    avoid the noise of having all tests fail with identical error messages.
    Any possible exception is caught here and reported manually *without* the stack
    trace. This further reduces noise since the trace would only show pytest internals
    which are not useful for debugging pybind11 module issues.
    """
    # noinspection PyBroadException
    try:
        import pybind11_tests  # noqa: F401 imported but unused
    except Exception as e:
        # Report only the exception type and message, then abort the whole session.
        print("Failed to import pybind11_tests from pytest:")
        print("  {}: {}".format(type(e).__name__, e))
        sys.exit(1)
_test_import_pybind11()  # run at conftest import time, before test collection
| google/aistreams | third_party/pybind11/tests/conftest.py | Python | apache-2.0 | 6,671 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.