code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
""" Validators for wx widgets.
Copyright (c) Karol Będkowski, 2006-2013
This file is part of wxGTD
This is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, version 2.
"""
__author__ = "Karol Będkowski"
__copyright__ = "Copyright (c) Karol Będkowski, 2006-2013"
__version__ = '2013-04-21'
__all__ = ['ValidatorDv', 'Validator', 'ValidatorDate', 'ValidatorTime',
'ValidatorColorStr']
from .validator import Validator, ValidatorDv, ValidatorDate, ValidatorTime, \
ValidatorColorStr
| KarolBedkowski/wxgtd | wxgtd/wxtools/validators/__init__.py | Python | gpl-2.0 | 638 |
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from _Gaffer import _UndoContext
class UndoContext() :
    """Context manager wrapping Gaffer's C++ _UndoContext.

    Entering the "with" block constructs the underlying _UndoContext and
    leaving it destroys it, so that all edits made inside the block are
    grouped into a single undoable step on the script.
    """

    # Re-export the enablement states of the wrapped _UndoContext.
    State = _UndoContext.State

    def __init__( self, script, state=_UndoContext.State.Enabled, mergeGroup="" ) :
        # Only record the constructor arguments here - the real
        # _UndoContext is created lazily when the "with" block is entered.
        self.__args = ( script, state, mergeGroup )

    def __enter__( self ) :
        self.__context = _UndoContext( *self.__args )

    def __exit__( self, excType, excValue, excTraceback ) :
        # Dropping the reference destroys the C++ object, closing the
        # undo entry (exceptions are deliberately not suppressed).
        del self.__context
| cedriclaunay/gaffer | python/Gaffer/UndoContext.py | Python | bsd-3-clause | 2,289 |
#!/usr/bin/env python
#
# Raspberry Pi Garage door controller
# Main controller software
# $Id: ggpiod.py,v 1.2 2014/06/18 17:07:36 bob Exp $
#
# Author : Bob Rathbone
# Site : http://www.bobrathbone.com
#
# This interprets and executes garage door commands
#
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutly no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
import os
import RPi.GPIO as GPIO
import logging
import signal
import subprocess
import sys
import time
import shutil
from ggpiod_daemon import Daemon
VERSION = "1.1"
# Maximum seconds allowed for a door travel — presumably used by callers; not referenced in this file (TODO confirm)
TIMEOUT = 18
# Seconds the door relay is held energised (see the run() loop)
RELAYTIME = 2
# Door position definitions
OPEN = 0
CLOSED = 1
TRANSIT = 2
ERROR=3
# Command flags shared between the signal handlers and the daemon main loop
CloseRelay = False
OpenDoor = False
CloseDoor = False
class MyDaemon(Daemon):
    """Garage door controller daemon.

    run() polls the door position switches on the GPIO pins and pulses the
    door relay when an open/close/relay command arrives via a UNIX signal.
    The open/close/relay methods are invoked by a second, short-lived
    process and deliver those signals to the running daemon.
    """
    def run(self):
        global OPEN
        global CLOSED
        global TRANSIT
        global OpenDoor
        global CloseDoor
        global CloseRelay
        OpenDoor = False
        CloseDoor = False
        CloseRelay = False
        last_position = -1  # forces the first poll to log and publish the position
        logmsg('ggpiod running pid ' + str(os.getpid()), logging.INFO)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(18, GPIO.OUT) # Door relay
        GPIO.setup(22, GPIO.IN) # Door closed switch
        GPIO.setup(4, GPIO.IN) # Door open switch
        GPIO.setup(24, GPIO.IN) # Permanent +3.3V
        # Commands arrive as signals sent by open()/close()/relay() below.
        signal.signal(signal.SIGUSR1, receive_signal)
        signal.signal(signal.SIGUSR2, receive_signal)
        signal.signal(signal.SIGHUP, receive_signal)
        # Main loop check door switches and waits for open/close door signals
        while True:
            # Get door position
            door_closed = GPIO.input(22)
            door_open = GPIO.input(4)
            if door_closed:
                position = CLOSED
            elif door_open:
                position = OPEN
            elif not door_open and not door_closed:
                position = TRANSIT
            if position != last_position:
                # Position changed: log it and publish the matching photo.
                if position == OPEN:
                    logmsg('Door open switch ' + str(door_open), logging.INFO)
                elif position == CLOSED:
                    logmsg('Door closed switch ' + str(door_closed), logging.INFO)
                elif position == TRANSIT:
                    logmsg('Door in transit ', logging.INFO)
                setup_photo(position)
                last_position = position
            # A pending open (close) command needs the relay only while the
            # door is not already open (closed).
            if OpenDoor and position != OPEN:
                CloseRelay = True
            if CloseDoor and position != CLOSED:
                CloseRelay = True
            if CloseRelay:
                # Pulse the door relay for RELAYTIME seconds, then clear
                # all pending command flags.
                GPIO.output(18, True)
                logmsg('Operate door relay', logging.INFO)
                time.sleep(RELAYTIME)
                GPIO.output(18, False)
                OpenDoor = False
                CloseDoor = False
                CloseRelay = False
            time.sleep(1)  # polling interval

    def status(self):
        """Report (log and print) whether the daemon is running."""
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "ggpiod status: not running"
            logmsg(message, logging.INFO)
            print message
        else:
            message = "ggpiod running pid " + str(pid)
            logmsg(message, logging.INFO)
            print message
        return

    def open(self):
        """Ask the running daemon (via SIGUSR1) to open the door."""
        logmsg('Open door', logging.INFO)
        os.system("/usr/bin/sudo kill -SIGUSR1 `cat /var/run/ggpiod.pid`")
        return

    def close(self):
        """Ask the running daemon (via SIGUSR2) to close the door."""
        logmsg('Close door', logging.INFO)
        os.system("/usr/bin/sudo kill -SIGUSR2 `cat /var/run/ggpiod.pid`")
        return

    def relay(self):
        """Ask the running daemon (via SIGHUP) to pulse the relay."""
        logmsg('Operate relay', logging.INFO)
        os.system("/usr/bin/sudo kill -SIGHUP `cat /var/run/ggpiod.pid`")
        return

    def version(self):
        """Print and log the program version."""
        msg = 'Version ' + VERSION
        logmsg(msg, logging.INFO)
        print msg
        return
# End of class overrides
# Signal routines
def receive_signal(signum, stack):
    """Signal handler: translate a received signal into a command flag.

    SIGUSR1 -> open door, SIGUSR2 -> close door, SIGHUP -> pulse relay.
    The flags are consumed by the daemon main loop in MyDaemon.run().
    """
    global OpenDoor
    global CloseDoor
    global CloseRelay
    logmsg('Received signal ' + str(signum), logging.DEBUG)
    if signum == signal.SIGUSR1:
        logmsg('Open door command received ' + str(signum), logging.INFO)
        OpenDoor = True
    elif signum == signal.SIGUSR2:
        logmsg('Close door command received ' + str(signum), logging.INFO)
        CloseDoor = True
    elif signum == signal.SIGHUP:
        logmsg('Close relay command received ' + str(signum), logging.INFO)
        CloseRelay = True
# Logging routine
def logmsg(message, level):
    """Append message to /var/log/ggpiod.log at the given logging level.

    level is a stdlib logging level (logging.INFO, logging.WARNING,
    logging.DEBUG, logging.ERROR).  A FileHandler is attached and removed
    on every call, as in the original, so the log file can be rotated
    externally between calls.
    """
    logger = logging.getLogger('gipiod')  # NOTE(review): 'gipiod' looks like a typo for 'ggpiod'; kept for log continuity
    hdlr = logging.FileHandler('/var/log/ggpiod.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(level)
    try:
        # Logger.log() dispatches on the numeric level, replacing the
        # original chain of four level-specific if statements.
        logger.log(level, message)
    finally:
        # Always detach and close the handler, even if emitting the record
        # fails, so repeated calls do not leak open file handles.
        logger.removeHandler(hdlr)
        hdlr.close()
    return
# Setup the new door position
def setup_photo(position):
    """Publish the photo that matches the current door position.

    Copies the per-position image over garage_door_position.jpg in the
    web server directory so the web UI always shows the current state.
    """
    logmsg('position ' + str(position), logging.DEBUG)
    WWW = "/var/www/garage"
    # Source image for each known door position.
    source_images = {
        OPEN: WWW + "/" + "garage_door_open.jpg",
        CLOSED: WWW + "/" + "garage_door_closed.jpg",
        TRANSIT: WWW + "/" + "garage_door_transit.jpg",
    }
    target = WWW + "/" + "garage_door_position.jpg"
    # Unknown positions (e.g. ERROR) leave the published photo unchanged,
    # exactly like the original if/elif chain.
    if position in source_images:
        shutil.copyfile(source_images[position], target)
### Main routine ###
if __name__ == "__main__":
daemon = MyDaemon('/var/run/ggpiod.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'status' == sys.argv[1]:
daemon.status()
elif 'open' == sys.argv[1]:
daemon.open()
elif 'close' == sys.argv[1]:
daemon.close()
elif 'relay' == sys.argv[1]:
daemon.relay()
elif 'version' == sys.argv[1]:
daemon.version()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|status|open|close|relay" % sys.argv[0]
sys.exit(2)
| bobrathbone/ggpiod | cgi-bin/ggpiod.py | Python | gpl-3.0 | 6,261 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
# BUG FIX: readlines() returns a *list*, which has no .replace() method —
# the original crashed with AttributeError before processing anything.
# Read the whole transcript as one string instead, then split into lines.
texto = open('/Users/lflrocha/Desktop/t_voz_transcricao.txt','r').read()
saida = open('/Users/lflrocha/Desktop/t_voz_transcricao2.txt','w')
conteudo = {}
# Join the physical lines, then turn each '<br> ' marker into a real line break.
texto = texto.replace('\n',' ')
texto = texto.replace('<br> ','\n')
for linha in texto.split('\n'):
    # Each record is 'id###"date"###something###text...'
    a = linha.split('###',1)
    b = a[1].split('###')
    data = b[0].replace('"','')
    texto_linha = b[2]
    conteudo[data] = texto_linha
out = ''
# Emit records sorted by date key.
for key in sorted(conteudo.iterkeys()):
    out = out + key + "###" + conteudo[key]
saida.write(out)
saida.close()
| lflrocha/ebc.scripts | conteudos/tiraQuebraLinha.py | Python | unlicense | 581 |
import os, sys, subprocess, time, datetime
from neuralfingerprint import (build_morgan_deep_net, build_conv_deep_net,
normalize_array, adam, build_batched_grad,
mean_squared_error, binary_classification_nll,
load_data_slices, build_mean_predictor)
import json
from autograd import grad
import autograd.numpy as np
import autograd.numpy.random as npr
log = sys.stderr.write
"""Reads params, train_data, test_data as a line_pickle from stdin. Here's an example:
params = dict(num_records = 20,
model = dict(net_type = 'morgan', # 'morgan' | 'conv'
fp_length = 512,
fp_depth = 5,
conv_width = 20, # conv net only
h1_size = 300,
L2_reg = np.exp(-6),),
train = dict(num_iters = 100,
batch_size = 100,
init_scale = np.exp(-4),
step_size = np.exp(-5),
seed = 0,)
task = dict(name = 'delaney',
train_slices = [[0, 800]],
test_slices = [[800, 1000]],))
"""
# Per-dataset configuration: the loss function to optimise, the CSV column
# that holds the regression/classification target, and the processed data
# file path (relative to ../data/, see load_task_data()).
datasets_info = dict(
    delaney = dict(
        nll_func = mean_squared_error,
        target_name = 'measured log solubility in mols per litre',
        data_file = '2015-05-24-delaney/delaney-processed.csv'),
    toxin = dict(
        nll_func = binary_classification_nll,
        target_name = 'target',
        data_file = '2015-05-22-tox/sr-mmp.smiles-processed.csv'),
    malaria = dict(
        nll_func = mean_squared_error,
        target_name = 'activity',
        data_file = '2015-06-03-malaria/malaria-processed.csv'),
    cep = dict(
        nll_func = mean_squared_error,
        target_name = 'PCE',
        data_file = '2015-06-02-cep-pce/cep-processed.csv'))
def main(params):
    """Train the predictor described by params and return (params, stats).

    params is the nested dict shown in the module docstring: 'task' picks
    the dataset and train/test slices, 'model' the architecture, 'train'
    the optimiser settings.  stats holds the recorded training curve plus
    timing and host metadata.
    """
    train_data, test_data, nll_func = load_task_data(**params['task'])
    log('Loaded {} train data points and {} test data points. Running'
        .format(len(train_data[0]), len(test_data[0])))
    net_objects = build_predictor(nll_func=nll_func, **params['model'])
    def compute_nll(predictor, inputs, targets):
        # Loss of the current predictor on a (inputs, targets) pair.
        return nll_func(predictor(inputs), targets)
    num_iters, num_records = params['train']['num_iters'], params['num_records']
    # Iterations at which train/test losses are sampled (evenly spread).
    record_idxs = set(map(int, np.linspace(num_iters - 1, 0, num_records)))
    training_curve = []
    def callback(predictor, i):
        if i in record_idxs:
            log(".")
            training_curve.append( (i, compute_nll(predictor, *train_data),
                                    compute_nll(predictor, *test_data )) )
    start_time = time.time()
    # Outputs are normalised only for regression (MSE) tasks.
    train_nn(net_objects, train_data[0], train_data[1], callback,
             normalize_outputs = (nll_func == mean_squared_error), **params['train'])
    stats = dict(minutes_duration = (time.time() - start_time) / 60.0,
                 timestamp = str(datetime.datetime.now()),
                 host_name = subprocess.check_output(['hostname'])[:-1],
                 training_curve = training_curve)
    log("Done!\n")
    return params, stats
def build_predictor(net_type, fp_length, fp_depth, conv_width, h1_size, L2_reg, nll_func):
    """Build (loss_fun, pred_fun, parser) for the requested architecture.

    net_type selects among: 'mean' (constant predictor), conv/morgan
    fingerprints followed by either a linear layer ('*_plus_linear') or a
    one-hidden-layer net ('*_plus_net').  Raises for unknown net_type.
    """
    def vanilla_params(layer_sizes):
        # Shared settings for the on-top vanilla network.
        return dict(layer_sizes=layer_sizes, normalize=True,
                    L2_reg=L2_reg, nll_func=nll_func)
    # Convolutional fingerprint architecture (used by the conv variants).
    conv_arch = dict(num_hidden_features=[conv_width] * fp_depth,
                     fp_length=fp_length)
    if net_type == 'mean':
        return build_mean_predictor(nll_func)
    if net_type == 'conv_plus_linear':
        return build_conv_deep_net(conv_arch, vanilla_params([fp_length]))
    if net_type == 'morgan_plus_linear':
        return build_morgan_deep_net(fp_length, fp_depth, vanilla_params([fp_length]))
    if net_type == 'conv_plus_net':
        return build_conv_deep_net(conv_arch, vanilla_params([fp_length, h1_size]))
    if net_type == 'morgan_plus_net':
        return build_morgan_deep_net(fp_length, fp_depth, vanilla_params([fp_length, h1_size]))
    raise Exception("Unknown network type.")
def train_nn(net_objects, smiles, raw_targets, callback, normalize_outputs,
             seed, init_scale, batch_size, num_iters, **opt_params):
    """Train the network with Adam and return the trained weight vector.

    callback receives (predict_fn, iteration) each step; predict_fn maps
    raw smiles to predictions in the original (un-normalised) target scale.
    """
    loss_fun, pred_fun, net_parser = net_objects
    # Small random initial weights, reproducible via the seed.
    init_weights = init_scale * npr.RandomState(seed).randn(len(net_parser))
    if normalize_outputs:
        targets, undo_norm = normalize_array(raw_targets)
    else:
        targets = raw_targets
        undo_norm = lambda x: x
    def opt_callback(weights, i):
        # Hand the callback a predictor bound to the current weights.
        predict = lambda new_smiles: undo_norm(pred_fun(weights, new_smiles))
        callback(predict, i)
    grad_fun = build_batched_grad(grad(loss_fun), batch_size, smiles, targets)
    return adam(grad_fun, init_weights, callback=opt_callback,
                num_iters=num_iters, **opt_params)
def load_task_data(name, train_slices, test_slices):
    """Load train/test splits for the named dataset.

    train_slices/test_slices are lists of [start, stop] bounds into the
    processed CSV.  Returns (train_data, test_data, nll_func).
    """
    info = datasets_info[name]
    data_dir = os.path.join(os.path.dirname(__file__), '../data/')
    data_path = os.path.join(data_dir, info['data_file'])
    def to_slices(bounds_list):
        return [slice(*bounds) for bounds in bounds_list]
    train_data, test_data = load_data_slices(
        data_path,
        [to_slices(train_slices), to_slices(test_slices)],
        input_name='smiles',
        target_name=info['target_name'])
    return train_data, test_data, info['nll_func']
if __name__ == '__main__':
    # Takes in serialized hyperparameters, and outputs serialized training and test statistics.
    # (params dict as JSON on stdin; (params, stats) as pretty-printed JSON on stdout.)
    json.dump(main(json.load(sys.stdin)), sys.stdout, indent=4, sort_keys=True)
| HIPS/neural-fingerprint | experiment_scripts/eval_methods.py | Python | mit | 6,419 |
#!/usr/bin/env python
'''
Run brat using the built-in Python CGI server for testing purposes.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2012-07-01
'''
from BaseHTTPServer import HTTPServer, test as simple_http_server_test
from CGIHTTPServer import CGIHTTPRequestHandler
# Note: It is a terrible idea to import the function below, but we don't have
# a choice if we want to emulate the super-class is_cgi method.
from CGIHTTPServer import _url_collapse_path_split
from sys import stderr
from urlparse import urlparse
# Note: The only reason that we sub-class in order to pull is the stupid
# is_cgi method that assumes the usage of specific CGI directories, I simply
# refuse to play along with this kind of non-sense.
class BRATCGIHTTPRequestHandler(CGIHTTPRequestHandler):
    """CGIHTTPRequestHandler that treats every *.cgi path as a CGI script,
    regardless of which directory it lives in (the stock is_cgi only
    accepts scripts inside the configured CGI directories)."""
    def is_cgi(self):
        # Having a CGI suffix is really a big hint of being a CGI script.
        request_path = urlparse(self.path).path
        if request_path.endswith('.cgi'):
            self.cgi_info = _url_collapse_path_split(self.path)
            return True
        # Otherwise defer to the directory-based check of the super-class.
        return CGIHTTPRequestHandler.is_cgi(self)
def main(args):
    """Start the CGI test server; optional args[1] is the port (default 8000).

    Returns -1 (after printing an error) for a non-numeric port argument.
    """
    # BaseHTTPServer will look for the port in argv[1] or default to 8000
    if len(args) > 1:
        try:
            port = int(args[1])
        except ValueError:
            print >> stderr, '%s is not a valid port number' % args[1]
            return -1
    else:
        port = 8000
    print >> stderr, 'WARNING: This server is for testing purposes only!'
    print >> stderr, (' You can also use it for trying out brat before '
            'deploying on a "real" web server such as Apache.')
    print >> stderr, (' Using this web server to run brat on an open '
            'network is a security risk!')
    print >> stderr
    print >> stderr, 'You can access the test server on:'
    print >> stderr
    print >> stderr, ' http://localhost:%s/' % port
    print >> stderr
    simple_http_server_test(BRATCGIHTTPRequestHandler, HTTPServer)
if __name__ == '__main__':
    # Delegate to main() with the raw argv; its return value becomes the
    # process exit status.
    from sys import argv
    exit(main(argv))
| wolfe-pack/moro | public/javascripts/brat/testserver.py | Python | bsd-2-clause | 2,109 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
PUBLIC_APPIDS = '''
smartladder8|sandaojushi3|ftencentuck|baidufirefoxtieba|chromeichi|aitaiyokani|smartladder3|mzmzmz001|smartladder4|chrome360q|smartladder6|goagent-dup001|kawaiiushioplus|smartladdercanada|gongongid02|goagent-dup003|goagent-dup002|gonggongid03|ippotsukobeta|gonggongid01|gonggongid07|gonggongid06|kawaiiushionoserve|gonggongid04|kawaiiushio2|chromelucky|gonggongid09|yanlun001|smartladderchina|smartladder1|kawaiiushio1|kawaiiushio6|kawaiiushio7|saosaiko|kawaiiushio5|smartladderjapan|bakajing600|sekaiwakerei|yugongxisaiko|gonggongid08|smartladder2|baiduchrometieba|kawaiiushio4|gonggongid05|bakabaka300|fangbingxingtodie|f360uck|chromesaiko|chromeqq|kawaiiushio|ilovesmartladder|smartladder7|gongmin700|qq325862401|kawaiiushio8|smartladderkoera|gonggongid10|kawaiiushio9|smartladderuk|smartladderhongkong|chrometieba|flowerwakawaii|feijida600|window8saiko|gfwdies|smartladdertaiwan|akb48daisukilove|smartladderus|diaoyudaobelongtochinasaiko|jianiwoxiangni
'''
import ConfigParser, os, re, urlparse, os.path as ospath, random
from cStringIO import StringIO
rulefiles = lambda v:[v.replace(r'\n','\n') if v.startswith('string://') else v for v in v.split('|')]
class Common(object):
    """Parsed proxy configuration built from an ini file (see __init__)."""
    # Template used to generate forgiving wrappers around the ConfigParser
    # methods: each generated method forwards all but its last argument to
    # self.CONFIG.<name>(...) and returns the last argument as a default
    # whenever the underlying lookup raises (missing section/option, bad
    # value type, ...).
    v = '''def %s(self, *a):
    try:
        return self.CONFIG.%s(*a[:-1])
    except:
        return a[-1]
'''
    # Generate get/getint/getfloat/getboolean/items/remove_option.
    for k in ('get', 'getint', 'getfloat', 'getboolean', 'items', 'remove_option'):
        exec(v % (k, k))
    del k, v  # keep the class namespace clean
    def parse_pac_config(self):
        """Parse the [pac] section into PY/PAC default targets and rule lists.

        Targets are '|'-separated triples, padded/truncated to length 3.
        """
        v = self.get('pac', 'py_default', '') or 'FORWARD'
        self.PY_DEFAULT = (v.split('|') * 3)[:3]
        if self.PAC_FILE:
            v = self.get('pac', 'default', '') or self._PAC_DEFAULT
            self.PAC_DEFAULT = (v.split('|') * 3)[:3]
        else:
            self.PAC_DEFAULT = self.PY_DEFAULT
        def get_rule_cfg(key, default):
            # A '!name' value refers to another config section whose items
            # map rule files -> handler; otherwise the value is a plain
            # rule-file list handled by the default proxy target.
            PAC_RULELIST = v = self.get('pac', key, default)
            if v.startswith('!'):
                if self.PAC_FILE:
                    v = self.items(v.lstrip('!').strip(), ())
                    v = [(rulefiles(v),k.upper()) for k,v in v if k and v]
                else:
                    v = self.items('py_'+v.lstrip('!').strip(), ())
                    # Special py-mode handler names map to python literals.
                    sp = {'FORBID':'False', 'WEB':'None'}
                    v = [(rulefiles(v),sp.get(k.upper()) or k.upper()) for k,v in v if k and v]
                PAC_RULELIST = v
            elif v:
                TARGET_PAC = self.TARGET_PAAS
                if self.PAC_FILE:
                    # PAC mode: build a 'PROXY host:port;DIRECT' directive
                    # from the listen address of the first enabled upstream.
                    TARGET_PAC = self.TARGET_LISTEN
                    if not TARGET_PAC:
                        TARGET_PAC = '*:*'
                    elif ':' not in TARGET_PAC:
                        TARGET_PAC = '*:' + TARGET_PAC
                    TARGET_PAC = 'PROXY %s;DIRECT' % TARGET_PAC
                PAC_RULELIST = [(rulefiles(v), TARGET_PAC)]
            return PAC_RULELIST
        self.PAC_RULELIST = get_rule_cfg('rulelist', '')
        self.PAC_IPLIST = get_rule_cfg('iplist', '')
    def __init__(self, INPUT):
        """Parse the ini file INPUT and expose every option as attributes."""
        # Loosen OPTCRE so only '=' (not ':') separates option and value.
        ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\s][^=]*)\s*(?P<vi>[=])\s*(?P<value>.*)$')
        CONFIG = self.CONFIG = ConfigParser.ConfigParser()
        try:
            CONFIG.read(INPUT)
        except ConfigParser.MissingSectionHeaderError:
            # Strip any junk before the first [section] and retry; writing
            # the cleaned file back is best effort only.
            with open(INPUT, 'rb') as fp: v = fp.read()
            v = v[v.find('['):]
            try:
                with open(INPUT, 'wb') as fp: fp.write(v)
            except IOError:
                pass
            CONFIG.readfp(StringIO(v), INPUT)
        # [listen]: local proxy endpoint and authentication.
        self.LISTEN_IP = self.get('listen', 'ip', '127.0.0.1')
        self.LISTEN_PORT = self.getint('listen', 'port', 8086)
        self.USERNAME = self.get('listen', 'username', None)
        self.WEB_USERNAME = self.get('listen', 'web_username', 'admin')
        self.WEB_PASSWORD = self.get('listen', 'web_password', 'admin')
        self.WEB_AUTHLOCAL = self.getboolean('listen', 'web_authlocal', False)
        if self.USERNAME is not None:
            self.PASSWORD = self.get('listen', 'password', '')
            self.BASIC_AUTH = self.getboolean('listen', 'basic_auth', True)
        self.DISABLE_SOCKS4 = self.getboolean('listen', 'disable_socks4', False)
        self.DISABLE_SOCKS5 = self.getboolean('listen', 'disable_socks5', False)
        self.CERT_WILDCARD = self.getboolean('listen', 'cert_wildcard', True)
        self.TASKS_DELAY = self.getint('listen', 'tasks_delay', 0)
        # [urlfetch]: remote fetch tuning (negative means "unset").
        self.FETCH_KEEPALIVE = self.getboolean('urlfetch', 'keep_alive', True)
        self.FETCH_TIMEOUT = self.getfloat('urlfetch', 'timeout', -1)
        self.FORWARD_TIMEOUT = self.getfloat('urlfetch', 'fwd_timeout', -1)
        self.FETCH_ARGS = v = {}
        k = self.getfloat('urlfetch', 'gae_timeout', -1)
        if k >= 0: v['timeout'] = k or None
        k = self.getint('urlfetch', 'gae_crlf', 0)
        if k > 0: v['crlf'] = k
        self.DEBUG_LEVEL = self.getint('urlfetch', 'debug', -1)
        # [gae]: Google App Engine upstream.
        GAE_PROFILE = 'gae'; self.GAE_HANDLER = False
        self.GAE_ENABLE = self.getboolean('gae', 'enable', CONFIG.has_section('gae'))
        if self.GAE_ENABLE:
            self.GAE_LISTEN = self.get('gae', 'listen', '8087')
            if self.LISTEN_PORT == 8087 and self.GAE_LISTEN == '8087':
                self.LISTEN_PORT = 8086
            v = self.get('gae', 'appid', '').replace('.appspot.com', '')
            if not v or v == 'appid1|appid2':
                # No private appids configured: use the shuffled public pool.
                self.GAE_APPIDS = v = re.sub(r'\s+', '', PUBLIC_APPIDS).split('|')
                random.shuffle(v)
            else:
                self.GAE_APPIDS = v.split('|')
            self.GAE_PASSWORD = self.get('gae', 'password', '')
            self.GAE_PATH = self.get('gae', 'path', '/fetch.py')
            GAE_PROFILE = self.get('gae', 'profile', GAE_PROFILE)
            self.GAE_MAXTHREADS = self.getint('gae', 'max_threads', 0)
            v = self.getint('gae', 'fetch_mode', 0)
            self.GAE_FETCHMOD = 0 if v <= 0 else (2 if v >= 2 else 1)  # clamp 0..2
            self.GAE_PROXY = self.get('gae', 'proxy', 'default')
            self.GAE_HANDLER = self.GAE_LISTEN and self.getboolean('gae', 'find_handler', True)
        # [paas]: generic PaaS upstream.
        self.PAAS_ENABLE = self.getboolean('paas', 'enable', CONFIG.has_section('paas'))
        if self.PAAS_ENABLE:
            self.PAAS_LISTEN = self.get('paas', 'listen', '')
            self.PAAS_PASSWORD = self.get('paas', 'password', '')
            self.PAAS_FETCHSERVER = CONFIG.get('paas', 'fetchserver').split('|')
            self.PAAS_PROXY = self.get('paas', 'proxy', 'default')
        # [socks5]: socks5 upstream.
        self.SOCKS5_ENABLE = self.getboolean('socks5', 'enable', CONFIG.has_section('socks5'))
        if self.SOCKS5_ENABLE:
            self.SOCKS5_LISTEN = self.get('socks5', 'listen', '')
            self.SOCKS5_PASSWORD = self.get('socks5', 'password', '')
            self.SOCKS5_FETCHSERVER = CONFIG.get('socks5', 'fetchserver')
            self.SOCKS5_PROXY = self.get('socks5', 'proxy', 'default')
        # Legacy plugin sections (gaeproxy/forold/goagent/simple/simple2).
        OLD_PLUGIN = []
        d = {'gaeproxy':'OGAE', 'forold':'OOLD', 'goagent':'OGA', 'simple':'OSP', 'simple2':'OSP2'}
        for k in d:
            if self.getboolean(k, 'enable', CONFIG.has_section(k)):
                url = self.get(k, 'url', '')
                if url: url = url.split('|')
                else:
                    url = self.get(k, 'appid', '')
                    if not url: continue
                    url = ['https://%s.appspot.com/%s.py' % (i,k) for i in url.split('|')]
                # Per-server options are '|'-separated; padding with extra
                # separators keeps zip() below from truncating the url list.
                crypto = (self.get(k, 'crypto', '') + '|'*20).split('|')
                key = self.get(k, 'password', '').decode('string-escape')
                key = (key + ('|'+key)*20).split('|')
                proxy = [v.split(',') if ',' in v else v for v in (self.get(k, 'proxy', 'default')+'|'*20).split('|')]
                configs = []
                for url,crypto,key,proxy in zip(url,crypto,key,proxy):
                    config = {'url':url, 'key':key}
                    if crypto: config['crypto'] = crypto
                    if proxy == 'none':
                        config['proxy'] = None
                    elif proxy:
                        config['proxy'] = proxy
                    configs.append(config)
                # Shared numeric options live on the first server config.
                for v in ('max_threads', 'range0', 'range'):
                    configs[0][v] = self.getint(k, v, 0)
                OLD_PLUGIN.append((d[k], k, configs, self.get(k, 'listen', '')))
        self.OLD_PLUGIN = OLD_PLUGIN or False
        # First enabled upstream decides the default target and listen port.
        self.TARGET_PAAS = self.GAE_ENABLE and 'GAE' or self.PAAS_ENABLE and 'PAAS' or self.SOCKS5_ENABLE and 'SOCKS5' or self.OLD_PLUGIN and self.OLD_PLUGIN[0][0]
        self.TARGET_LISTEN = self.GAE_ENABLE and self.GAE_LISTEN or self.PAAS_ENABLE and self.PAAS_LISTEN or self.SOCKS5_ENABLE and self.SOCKS5_LISTEN or self.OLD_PLUGIN and self.OLD_PLUGIN[0][3]
        # [proxy]: optional upstream (global) proxies.
        v = self.getint('proxy', 'enable', 0)
        self._PAC_DEFAULT = 'DIRECT'; self.GLOBAL_PROXY = None
        if v > 0:
            PROXIES = []
            for i in xrange(1,v+1):
                v = self.get('proxy', 'proxy%d'%i, '')
                if not v: break
                PROXIES.append(v)
            if not PROXIES:
                # Fall back to the single host/port style configuration.
                PROXY_HOST = CONFIG.get('proxy', 'host')
                PROXY_PORT = CONFIG.getint('proxy', 'port')
                PROXY_USERNAME = self.get('proxy', 'username', '')
                PROXY_PASSWROD = self.get('proxy', 'password', '')
                self._PAC_DEFAULT= 'PROXY %s:%s;DIRECT' % (PROXY_HOST, PROXY_PORT)
                if PROXY_USERNAME:
                    PROXY_HOST = '%s:%s@%s' % (PROXY_USERNAME, PROXY_PASSWROD, PROXY_HOST)
                PROXIES.append('http://%s:%s' % (PROXY_HOST, PROXY_PORT))
            self.GLOBAL_PROXY = PROXIES[0] if len(PROXIES) == 1 else tuple(PROXIES)
        # [forward]: direct CONNECT targets; quoted values stay expressions.
        self.HTTPS_TARGET = {}
        if self.getboolean('forward', 'enable', CONFIG.has_section('forward')):
            self.remove_option('forward', 'enable', '')
            for k,v in self.items('forward', ()):
                self.HTTPS_TARGET[k.upper()] = '(%s)'%v if '"' in v or "'" in v else repr(v)
        # [pac]: proxy auto-config generation.
        self.PAC_ENABLE = self.getboolean('pac', 'enable', True)
        v = self.getint('pac', 'https_mode', 2)
        self.PAC_HTTPSMODE = 0 if v <= 0 else (2 if v >= 2 else 1)  # clamp 0..2
        v = self.get('pac', 'file', '').replace('goagent', 'proxy')
        self.PAC_FILE = v and v.split('|')
        self.parse_pac_config()
        # Google front-end profile (section name may be overridden above).
        self.GOOGLE_MODE = self.get(GAE_PROFILE, 'mode', 'http')
        v = self.get(GAE_PROFILE, 'hosts', '')
        self.GOOGLE_HOSTS = ' '.join(v and tuple(v.split('|')) or ())
        v = self.get(GAE_PROFILE, 'sites', '')
        self.GOOGLE_SITES = v and tuple(v.split('|')) or ()
        # forcehttps/noforcehttps entries become adblock-style URL rules;
        # '@@' marks exceptions.
        v = self.get(GAE_PROFILE, 'forcehttps', ''); v = v and v.split('|') or ()
        GOOGLE_FORCEHTTPS = [(i if '/' in i else ('http://%s/'%('*'+i if i.startswith('.') else i))) for i in v]
        v = self.get(GAE_PROFILE, 'noforcehttps', ''); v = v and v.split('|') or ()
        GOOGLE_FORCEHTTPS.extend(['@@%s'%(i if '/' in i else ('http://%s/'%('*'+i if i.startswith('.') else i))) for i in v])
        self.GOOGLE_FORCEHTTPS = ' \n '.join(GOOGLE_FORCEHTTPS)
        v = self.get(GAE_PROFILE, 'withgae', '')
        GOOGLE_WITHGAE = v and tuple(v.split('|')) or ()
        self.TRUE_HTTPS = self.TARGET_PAAS and self.get(GAE_PROFILE, 'truehttps', '').replace('|', ' ').strip()
        self.NOTRUE_HTTPS = self.TRUE_HTTPS and self.get(GAE_PROFILE, 'notruehttps', '').replace('|', ' ').strip()
        self.FETCHMAX_LOCAL = self.getint('fetchmax', 'local', 3)
        self.FETCHMAX_SERVER = self.getint('fetchmax', 'server', 0)
        # [autorange]: ranged download of large responses.
        self.AUTORANGE_ENABLE = self.getboolean('autorange', 'enable', False)
        def get_rules(opt, key, d=''):
            # '!' prefix -> list of rule files; otherwise the value itself
            # is an inline rule string with escaped newlines.
            v = self.get(opt, key, d)
            if v.startswith('!'):
                v = v.lstrip('!').strip()
                return v and rulefiles(v)
            else:
                return v.replace(r'\n', '\n').strip()
        self.AUTORANGE_RULES = get_rules('autorange', 'rules')
        v = self.get('autorange', 'hosts', ''); v = v and v.split('|') or ()
        v = ' \n '.join([(i if '/' in i else ('||%s'%i.lstrip('.') if i.startswith('.') else 'http*://%s/'%i)) for i in v])
        if isinstance(self.AUTORANGE_RULES, list):
            self.AUTORANGE_RULES.append('string://' + v)
        elif v:
            self.AUTORANGE_RULES = ' \n '.join((v, self.AUTORANGE_RULES))
        self.AUTORANGE_MAXSIZE = self.getint('autorange', 'maxsize', 1000000)
        self.AUTORANGE_WAITSIZE = self.getint('autorange', 'waitsize', 500000)
        self.AUTORANGE_BUFSIZE = self.getint('autorange', 'bufsize', 8192)
        assert self.AUTORANGE_BUFSIZE <= self.AUTORANGE_WAITSIZE <= self.AUTORANGE_MAXSIZE
        # [hosts]: static DNS mappings plus related CRLF/host rules.
        self.REMOTE_DNS = self.DNS_RESOLVE = self.CRLF_RULES = self.HOSTS_RULES = ''; self.HOSTS = {}
        if self.getboolean('hosts', 'enable', CONFIG.has_section('hosts')):
            self.REMOTE_DNS = v = self.get('hosts', 'dns', '')
            if v: self.REMOTE_DNS = v if ',' in v else repr(v)
            self.DNS_RESOLVE = self.get('hosts', 'resolve', '').replace('|', ' ').strip()
            self.HOSTS_CRLF = self.getint('hosts', 'crlf', 0)
            self.CRLF_RULES = self.HOSTS_CRLF > 0 and get_rules('hosts', 'crlf_rules')
            self.HOSTS_RULES = self.TARGET_PAAS and get_rules('hosts', 'rules')
            # Every remaining option in [hosts] is a host -> address map;
            # 'profile:x' and '[sec]opt' values are indirections into other
            # config options.
            for v in ('enable', 'rules', 'crlf', 'crlf_rules', 'dns', 'resolve'):
                self.remove_option('hosts', v, '')
            for k,v in self.items('hosts', ()):
                if v.startswith('profile:'):
                    v = self.get(GAE_PROFILE, v[8:], '')
                else:
                    m = re.match(r'\[(\w+)\](\w+)', v)
                    if m:
                        v = v.replace(m.group(0), self.get(m.group(1), m.group(2), ''))
                v = v.replace('|', ' ').strip()
                if k and v: self.HOSTS[k] = v
        # [third]: external helper applications to launch.
        self.THIRD_APPS = []
        if self.getboolean('third', 'enable', CONFIG.has_section('third')):
            self.remove_option('third', 'enable', '')
            self.THIRD_APPS = [(k,v if v[0] in ('"',"'") else repr(v)) for k,v in self.items('third', ()) if v]
        # [useragent]: User-Agent rewriting rules.
        self.USERAGENT_STRING = self.getboolean('useragent', 'enable', True) and self.get('useragent', 'string', '')
        self.USERAGENT_MATCH = self.USERAGENT_STRING and self.get('useragent', 'match', '')
        self.USERAGENT_RULES = self.USERAGENT_MATCH and get_rules('useragent', 'rules')
        # URLs matching this rule never fall back (default: private hosts).
        self.FALLBACK_RULES = self.TARGET_PAAS and get_rules('urlfetch', 'nofallback',
                r'/^https?:\/\/(?:[\w-]+|127(?:\.\d+){3}|10(?:\.\d+){3}|192\.168(?:\.\d+){2}|172\.[1-3]\d(?:\.\d+){2}|\[.+?\])(?::\d+)?\//')
        # Final feature toggles derived from the enabled upstreams.
        self.AUTORANGE_RULES = (self.GAE_ENABLE or self.OLD_PLUGIN) and self.AUTORANGE_ENABLE and self.AUTORANGE_RULES
        self.PAC_ENABLE = (self.PAC_RULELIST or self.PAC_IPLIST) and self.PAC_ENABLE and 'PAC_ENABLE'
        self.GOOGLE_WITHGAE = False
        if self.TARGET_PAAS and self.GOOGLE_SITES and not self.GLOBAL_PROXY:
            # Route Google sites through the upstream and extend the host
            # rules with the configured site list.
            self.GOOGLE_WITHGAE = ' \n '.join([(i if '/' in i else '||%s'%i.lstrip('.')) for i in GOOGLE_WITHGAE])
            v = ' \n '.join(['||%s'%i.lstrip('.') for i in self.GOOGLE_SITES])
            if isinstance(self.HOSTS_RULES, basestring):
                self.HOSTS_RULES = ' \n '.join((self.HOSTS_RULES, v))
            else:
                self.HOSTS_RULES.append('string://' + v)
        # True when any feature below needs the PAC/py rule machinery.
        self.NEED_PAC = self.GOOGLE_FORCEHTTPS or self.USERAGENT_RULES or self.FALLBACK_RULES or self.AUTORANGE_RULES or self.CRLF_RULES or self.HOSTS_RULES or self.GOOGLE_WITHGAE or self.PAC_ENABLE
def tob(s, enc='utf-8'):
    """Coerce *s* to a byte string (Python 2 str), encoding unicode with *enc*."""
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf-8', err='strict'):
    """Coerce *s* to unicode, decoding byte strings with *enc*/*err*."""
    if isinstance(s, str):
        return s.decode(enc, err)
    return unicode(s)
class SimpleTemplate(object):
"""SimpleTemplate from bottle"""
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
re_pytokens = re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def __init__(self, source, encoding='utf-8'):
self.source = source
self.encoding = encoding
self._str = lambda x: touni(repr(x), encoding)
self._escape = lambda x: touni(x, encoding)
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@property
def co(self):
# print self.code
return compile(self.code, '<string>', 'exec')
@property
def code(self):
    """Translate the template source into python source text.

    Text chunks and ``{{...}}`` expressions are collected into
    ``_printlist([...])`` calls; ``%``-prefixed lines become real python
    statements, with indentation tracked via ``stack``.
    """
    stack = []       # Current Code indentation
    lineno = 0       # Current line of code
    ptrbuffer = []   # Buffer for printable strings and token tuple instances
    codebuffer = []  # Buffer for generated python code
    multiline = dedent = oneline = False
    template = self.source
    def yield_tokens(line):
        # Split a text line into ('TXT'|'RAW'|'CMD', value) tokens.
        # Odd-indexed parts are the insides of {{...}} expressions.
        for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
            if i % 2:
                if part.startswith('!'): yield 'RAW', part[1:]
                else: yield 'CMD', part
            else: yield 'TXT', part
    def flush(): # Flush the ptrbuffer
        # Emit the buffered text/expression tokens as one _printlist call.
        if not ptrbuffer: return
        cline = ''
        for line in ptrbuffer:
            for token, value in line:
                if token == 'TXT': cline += repr(value)
                elif token == 'RAW': cline += '_str(%s)' % value
                elif token == 'CMD': cline += '_escape(%s)' % value
                cline += ', '
            cline = cline[:-2] + '\\\n'
        cline = cline[:-2]
        if cline[:-1].endswith('\\\\\\\\\\n'):
            cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
        cline = '_printlist([' + cline + '])'
        del ptrbuffer[:] # Do this before calling code() again
        code(cline)
    def code(stmt):
        # Append statement lines at the current indentation depth.
        for line in stmt.splitlines():
            codebuffer.append(' ' * len(stack) + line.strip())
    for line in template.splitlines(True):
        lineno += 1
        line = touni(line, self.encoding)
        sline = line.lstrip()
        if lineno <= 2:
            # A PEP 263 style coding cookie may appear on the first two
            # lines; honour it, then neutralise it in the output.
            m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
            if m: self.encoding = m.group(1)
            if m: line = line.replace('coding','coding (removed)')
        if sline and sline[0] == '%' and sline[:2] != '%%':
            line = line.split('%',1)[1].lstrip() # Full line following the %
            cline = self.split_comment(line).strip()
            cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
            flush() # You are actually reading this? Good luck, it's a mess :)
            if cmd in self.blocks or multiline:
                cmd = multiline or cmd
                dedent = cmd in self.dedent_blocks # "else:"
                if dedent and not oneline and not multiline:
                    cmd = stack.pop()
                code(line)
                oneline = not cline.endswith(':') # "if 1: pass"
                multiline = cmd if cline.endswith('\\') else False
                if not oneline and not multiline:
                    stack.append(cmd)
            elif cmd == 'end' and stack:
                # %end closes the innermost open block; keep a marker comment.
                code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
            else:
                code(line)
        else: # Line starting with text (not '%') or '%%' (escaped)
            if line.strip().startswith('%%'):
                line = line.replace('%%', '%', 1)
            ptrbuffer.append(yield_tokens(line))
    flush()
    return '\n'.join(codebuffer) + '\n'
def execute(self, _stdout, *args, **kwargs):
    """Run the compiled template; output fragments go into *_stdout*.

    Positional dict arguments are merged into the keyword namespace.
    Returns the environment dict after execution.
    """
    for dictarg in args: kwargs.update(dictarg)
    env = {}
    # ``get``/``setdefault``/``defined`` are bound methods of *env*
    # itself, so the template can introspect its own namespace.
    env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
                '_str': self._str, '_escape': self._escape, 'get': env.get,
                'setdefault': env.setdefault, 'defined': env.__contains__})
    env.update(kwargs)
    eval(self.co, env)
    return env
def render(self, *args, **kwargs):
    """ Render the template using keyword arguments as local variables. """
    for extra in args:
        kwargs.update(extra)
    fragments = []
    # execute() appends the rendered chunks to the list we hand it.
    self.execute(fragments, kwargs)
    return ''.join(fragments)
template = r"""# -*- coding: utf-8 -*-
# 是否使用ini作为配置文件,0不使用
ini_config = {{MTIME}}
# 监听ip
listen_ip = '{{LISTEN_IP}}'
# 监听端口
listen_port = {{LISTEN_PORT}}
# 是否使用通配符证书
cert_wildcard = {{int(CERT_WILDCARD)}}
# 更新PAC时也许还没联网,等待tasks_delay秒后才开始更新
tasks_delay = {{!TASKS_DELAY}}
# WEB界面是否对本机也要求认证
web_authlocal = {{int(WEB_AUTHLOCAL)}}
# 登录WEB界面的用户名
web_username = {{!WEB_USERNAME}}
# 登录WEB界面的密码
web_password = {{!WEB_PASSWORD}}
# 全局代理
global_proxy = {{!GLOBAL_PROXY}}
# URLFetch参数
fetch_keepalive = {{int(FETCH_KEEPALIVE)}}
%if FETCH_TIMEOUT >= 0:
fetch_timeout = {{!FETCH_TIMEOUT or None}}
%end
%if FORWARD_TIMEOUT >= 0:
forward_timeout = {{!FORWARD_TIMEOUT or None}}
%end
%if DEBUG_LEVEL >= 0:
debuglevel = {{!DEBUG_LEVEL}}
%end
check_update = 0
def config():
Forward, set_dns, set_resolve, set_hosts, check_auth, redirect_https = import_from('util')
%for k,v in HTTPS_TARGET.iteritems():
{{k}} = Forward({{v}})
%HTTPS_TARGET[k] = k
%end
RAW_FORWARD = FORWARD = Forward()
%if REMOTE_DNS:
set_dns({{REMOTE_DNS}})
%end
%if DNS_RESOLVE:
set_resolve({{!DNS_RESOLVE}})
%end
google_sites = {{!GOOGLE_SITES}}
google_hosts = {{!GOOGLE_HOSTS}}
set_hosts(google_sites, google_hosts)
%for k,v in HOSTS.iteritems():
%if k and v:
set_hosts({{!k}}, {{repr(v) if v != GOOGLE_HOSTS else 'google_hosts'}})
%end
%end
from plugins import misc; misc = install('misc', misc)
PAGE = misc.Page('page.html')
%HTTPS_TARGET.update({'FORWARD':'FORWARD', 'RAW_FORWARD':'RAW_FORWARD', 'False':'False', 'None':'None','PAGE':'None'})
%if TARGET_PAAS:
from plugins import paas; paas = install('paas', paas)
%end #TARGET_PAAS
%if GAE_ENABLE:
%HTTPS_TARGET['GAE'] = 'None'
GAE = paas.GAE(appids={{!GAE_APPIDS}}\\
%if GAE_LISTEN:
, listen={{!GAE_LISTEN}}\\
%end
%if GAE_PASSWORD:
, password={{!GAE_PASSWORD}}\\
%end
%if GAE_PATH:
, path={{!GAE_PATH}}\\
%end
%if GOOGLE_MODE == 'https':
, scheme='https'\\
%end
%if GAE_PROXY != 'default':
, proxy={{!GAE_PROXY}}\\
%end
, hosts=google_hosts\\
%if AUTORANGE_MAXSIZE and AUTORANGE_MAXSIZE != 1000000:
, maxsize={{!AUTORANGE_MAXSIZE}}\\
%end
%if AUTORANGE_WAITSIZE and AUTORANGE_WAITSIZE != 500000:
, waitsize={{!AUTORANGE_WAITSIZE}}\\
%end
%if AUTORANGE_BUFSIZE and AUTORANGE_BUFSIZE != 8192:
, bufsize={{!AUTORANGE_BUFSIZE}}\\
%end
%if FETCHMAX_LOCAL and FETCHMAX_LOCAL != 3:
, local_times={{!FETCHMAX_LOCAL}}\\
%end
%if FETCHMAX_SERVER and FETCHMAX_SERVER != 3:
, server_times={{!FETCHMAX_SERVER}}\\
%end
%if GAE_MAXTHREADS:
, max_threads={{!GAE_MAXTHREADS}}\\
%end
%if GAE_FETCHMOD:
, fetch_mode={{!GAE_FETCHMOD}}\\
%end
%if FETCH_ARGS:
, fetch_args={{!FETCH_ARGS}}\\
%end
)
%end #GAE_ENABLE
%if PAAS_ENABLE:
%HTTPS_TARGET['PAAS'] = 'None'
%for i,k in enumerate(PAAS_FETCHSERVER):
PAAS{{i+1 if len(PAAS_FETCHSERVER) > 1 else ''}} = paas.PAAS(url={{!k}}\\
%if PAAS_LISTEN and i == 0:
, listen={{!PAAS_LISTEN}}\\
%end
%if PAAS_PASSWORD:
, password={{!PAAS_PASSWORD}}\\
%end
%if PAAS_PROXY != 'default':
, proxy={{!PAAS_PROXY}}\\
%end
%if FETCH_ARGS:
, fetch_args={{!FETCH_ARGS}}\\
%end
)
%end
%if len(PAAS_FETCHSERVER) > 1:
%k = ['PAAS%d'%i for i in xrange(1, len(PAAS_FETCHSERVER)+1)]
%HTTPS_TARGET.update(dict.fromkeys(k,'None'))
PAASS = ({{', '.join(k)}})
from random import choice
PAAS = lambda req: choice(PAASS)(req)
server = paas.data.get('PAAS_server')
if server:
def find_handler(req):
if req.proxy_type.endswith('http'):
return PAAS
server.find_handler = find_handler
%end
%end #PAAS_ENABLE
%if SOCKS5_ENABLE:
%HTTPS_TARGET['SOCKS5'] = 'SOCKS5'
SOCKS5 = paas.SOCKS5(url={{!SOCKS5_FETCHSERVER}}\\
%if SOCKS5_LISTEN:
, listen={{!SOCKS5_LISTEN}}\\
%end
%if SOCKS5_PASSWORD:
, password={{!SOCKS5_PASSWORD}}\\
%end
%if SOCKS5_PROXY != 'default':
, proxy={{!SOCKS5_PROXY}}\\
%end
)
%end #SOCKS5_ENABLE
%if OLD_PLUGIN:
from old import old; old = install('old', old)
%for n,k,c,p in OLD_PLUGIN:
{{n}} = old.{{k}}({{!c}}, {{!p}})
%HTTPS_TARGET[n] = 'None'
%end
%end #OLD_PLUGIN
%if NEED_PAC:
PacFile, RuleList, HostList = import_from('pac')
%end #NEED_PAC
%if GOOGLE_FORCEHTTPS:
forcehttps_sites = RuleList({{!GOOGLE_FORCEHTTPS}})
%end
%if AUTORANGE_RULES:
autorange_rules = RuleList({{!AUTORANGE_RULES}})
%if GAE_ENABLE:
_GAE = GAE; GAE = lambda req: _GAE(req, autorange_rules.match(req.url, req.proxy_host[0]))
%end
%if OLD_PLUGIN:
%for n,k,c,p in OLD_PLUGIN:
_{{n}} = {{n}}; {{n}} = lambda req: _{{n}}(req, autorange_rules.match(req.url, req.proxy_host[0]))
%end
%end #OLD_PLUGIN
%end
%if USERAGENT_RULES:
import re; useragent_match = re.compile({{!USERAGENT_MATCH}}).search
useragent_rules = RuleList({{!USERAGENT_RULES}})
%end
%if GOOGLE_WITHGAE:
withgae_sites = RuleList({{!GOOGLE_WITHGAE}})
%end #GOOGLE_WITHGAE
%if TRUE_HTTPS:
%if NOTRUE_HTTPS:
notruehttps_sites = HostList({{!NOTRUE_HTTPS}})
%end
truehttps_sites = HostList({{!TRUE_HTTPS}})
%end #TRUE_HTTPS
%if CRLF_RULES:
crlf_rules = RuleList({{!CRLF_RULES}})
%end #CRLF_RULES
%if HOSTS_RULES:
hosts_rules = RuleList({{!HOSTS_RULES}})
%end #HOSTS_RULES
%if TARGET_PAAS:
_HttpsFallback = ({{TARGET_PAAS}},)
%if FALLBACK_RULES:
nofallback_rules = RuleList({{!FALLBACK_RULES}})
def FORWARD(req):
if req.proxy_type.endswith('http'):
if nofallback_rules.match(req.url, req.proxy_host[0]):
return RAW_FORWARD(req)
return RAW_FORWARD(req, {{TARGET_PAAS}})
url = build_fake_url(req.proxy_type, req.proxy_host)
if nofallback_rules.match(url, req.proxy_host[0]):
return RAW_FORWARD(req)
return RAW_FORWARD(req, _HttpsFallback)
%else:
def FORWARD(req):
if req.proxy_type.endswith('http'):
return RAW_FORWARD(req, {{TARGET_PAAS}})
return RAW_FORWARD(req, _HttpsFallback)
%end
%end
%PY_DEFAULT = (([v for v in PY_DEFAULT if v in HTTPS_TARGET] or ['FORWARD']) * 3)[:3]
%if PAC_ENABLE:
%if PAC_FILE:
%NEED_PAC = NEED_PAC != 'PAC_ENABLE'
rulelist = (
%for k,v in PAC_RULELIST:
({{!k}}, {{!v}}),
%end #PAC_RULELIST
)
iplist = (
%for k,v in PAC_IPLIST:
({{!k}}, {{!v}}),
%end #PAC_IPLIST
)
PacFile(rulelist, iplist, {{!PAC_FILE}}, {{!PAC_DEFAULT}})
%else:
%PAC_DEFAULT = PY_DEFAULT
%PAC_RULELIST = [(k,v) for k,v in PAC_RULELIST if v in HTTPS_TARGET]
%PAC_IPLIST = [(k,v) for k,v in PAC_IPLIST if v in HTTPS_TARGET]
%PAC_ENABLE = PAC_RULELIST or PAC_IPLIST
%NEED_PAC = NEED_PAC != 'PAC_ENABLE' or PAC_ENABLE
%if PAC_RULELIST:
rulelist = (
%for k,v in PAC_RULELIST:
(RuleList({{!k}}), {{v}}),
%end #PAC_RULELIST
)
%if PAC_HTTPSMODE == 2:
httpslist = (
%for i,k in enumerate(PAC_RULELIST):
(rulelist[{{i}}][0], {{HTTPS_TARGET[k[1]]}}),
%end #PAC_RULELIST
)
unparse_netloc = import_from('utils')
def build_fake_url(type, host):
if type == 'https': port = 443
elif host[1] % 1000 == 443: type, port = 'https', 443
else: type, port = 'http', 80
return '%s://%s/' % (type, unparse_netloc(host, port))
%end #PAC_HTTPSMODE
%end #PAC_RULELIST
%if PAC_IPLIST:
IpList, makeIpFinder = import_from('pac')
iplist = (
%for k,v in PAC_IPLIST:
(IpList({{!k}}), {{v}}),
%end #PAC_IPLIST
)
findHttpProxyByIpList = makeIpFinder(iplist, [{{', '.join(PAC_DEFAULT)}}])
findHttpsProxyByIpList = makeIpFinder(iplist, [{{', '.join([HTTPS_TARGET[v] for v in PAC_DEFAULT])}}])
%end #PAC_IPLIST
%end #PAC_FILE
%end #PAC_ENABLE
%if THIRD_APPS:
from plugins import third; third = install('third', third)
%for k,v in THIRD_APPS:
third.run({{v}}) #{{k}}
%end
%end
%if USERNAME:
auth_checker = check_auth({{!USERNAME}}, {{!PASSWORD}}\\
%if DISABLE_SOCKS4:
, socks4=False\\
%end
%if DISABLE_SOCKS5 and not SOCKS5_ENABLE:
, socks5=False\\
%end
%if BASIC_AUTH:
, digest=False\\
%end
)
%end #USERNAME
%if GAE_ENABLE:
%if GAE_HANDLER:
%if USERNAME:
@auth_checker
%end
def find_gae_handler(req):
proxy_type = req.proxy_type
host, port = req.proxy_host
if proxy_type.endswith('http'):
url = req.url
%if USERAGENT_RULES:
if useragent_match(req.headers.get('User-Agent','')) and useragent_rules.match(url, host):
req.headers['User-Agent'] = {{!USERAGENT_STRING}}
%end
%if GOOGLE_WITHGAE:
if withgae_sites.match(url, host):
return GAE
%end
%if GOOGLE_FORCEHTTPS:
needhttps = req.scheme == 'http' and forcehttps_sites.match(url, host) and req.content_length == 0
if needhttps and getattr(req, '_r', '') != url:
req._r = url
return redirect_https
%end
%if CRLF_RULES:
if crlf_rules.match(url, host):
req.crlf = {{HOSTS_CRLF}}
return FORWARD
%end
%if HOSTS_RULES:
if \\
%if GOOGLE_FORCEHTTPS:
not needhttps and \\
%end
hosts_rules.match(url, host):
return FORWARD
%end
return GAE
%if TRUE_HTTPS:
%if NOTRUE_HTTPS:
if notruehttps_sites.match(host): return
%end
if truehttps_sites.match(host): return FORWARD
%end
%else:
def find_gae_handler(req):
if req.proxy_type.endswith('http'): return GAE
%end #GAE_HANDLER
paas.data['GAE_server'].find_handler = find_gae_handler
%end #GAE_ENABLE
%if USERNAME:
@auth_checker
%end
def find_proxy_handler(req):
%if TARGET_PAAS or NEED_PAC:
proxy_type = req.proxy_type
host, port = req.proxy_host
if proxy_type.endswith('http'):
url = req.url
%if USERAGENT_RULES:
if useragent_match(req.headers.get('User-Agent','')) and useragent_rules.match(url, host):
req.headers['User-Agent'] = {{!USERAGENT_STRING}}
%end
%if GOOGLE_WITHGAE:
if withgae_sites.match(url, host):
return {{TARGET_PAAS}}
%end
%if GOOGLE_FORCEHTTPS:
needhttps = req.scheme == 'http' and forcehttps_sites.match(url, host) and req.content_length == 0
if needhttps and getattr(req, '_r', '') != url:
req._r = url
return redirect_https
%end
%if CRLF_RULES:
if crlf_rules.match(url, host):
req.crlf = {{HOSTS_CRLF}}
return FORWARD
%end
%if HOSTS_RULES:
if \\
%if GOOGLE_FORCEHTTPS:
not needhttps and \\
%end
hosts_rules.match(url, host):
return FORWARD
%end
%if PAC_ENABLE and not PAC_FILE:
%if PAC_RULELIST:
for rule,target in rulelist:
if rule.match(url, host):
return target
%end
%if PAC_IPLIST:
return findHttpProxyByIpList(host)
%else:
return {{PY_DEFAULT[0]}}
%end
%elif TARGET_PAAS:
return {{TARGET_PAAS}}
%else:
return FORWARD
%end
%if TRUE_HTTPS:
%if NOTRUE_HTTPS:
if notruehttps_sites.match(host): return
%end
if truehttps_sites.match(host): return FORWARD
%end
%if PAC_ENABLE and not PAC_FILE and PAC_HTTPSMODE == 2:
%if PAC_RULELIST:
url = build_fake_url(proxy_type, (host, port))
for rule,target in httpslist:
if rule.match(url, host):
return target
%end
%if PAC_IPLIST:
return findHttpsProxyByIpList(host)
%else:
return {{HTTPS_TARGET[PY_DEFAULT[0]]}}
%end
%elif PAC_HTTPSMODE == 0:
return {{HTTPS_TARGET[PY_DEFAULT[0]]}}
%end
%else:
return FORWARD
%end
return find_proxy_handler
"""
def make_config(INPUT=None, OUTPUT=None):
    """Render the config template against the parsed ini file.

    Derives whichever of *INPUT*/*OUTPUT* is missing from the other (or
    from the script/zip location when both are absent) and returns
    ``(config_bytes, output_path)``.
    """
    if not (INPUT and OUTPUT):
        if INPUT:
            OUTPUT = ospath.join(ospath.dirname(INPUT), 'config.py')
        elif OUTPUT:
            INPUT = ospath.join(ospath.dirname(OUTPUT), 'proxy.ini')
        else:
            # Running from a zip archive puts the base dir on __loader__.
            loader = globals().get('__loader__')
            if loader:
                base = ospath.dirname(loader.archive)
            else:
                base = ospath.dirname(__file__)
            INPUT = ospath.join(base, 'proxy.ini')
            OUTPUT = ospath.join(base, 'config.py')
    config = Common(INPUT).__dict__
    # Record the ini mtime so the generated config can detect staleness.
    config['MTIME'] = int(os.stat(INPUT).st_mtime)
    rendered = SimpleTemplate(template).render(**config)
    return tob(rendered), OUTPUT
if __name__ == '__main__':
    # Script entry point: render the config and write it as bytes next
    # to the ini file.
    code, OUTPUT = make_config()
    with open(OUTPUT, 'wb') as fp:
        fp.write(code)
| ivyswen/gargoyle | package/base-files/files/etc/wallproxy/src.zip/make_config.py | Python | gpl-2.0 | 34,828 |
from .. import api
from .. import mysql
| modulo-/knoydart | api/api_0/apiRequest/__init__.py | Python | apache-2.0 | 40 |
from django.contrib import admin
from partners.models import Partners
# Register your models here.
admin.site.register(Partners) | zqsun/Buybuybiz_pinax | partners/admin.py | Python | mit | 128 |
"""Extended file operations available in POSIX.
f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
f = posixfile.fileopen(fileobject)
will create a posixfile object from a builtin file object
f.file()
will return the original builtin file object
f.dup()
will return a new file object based on a new filedescriptor
f.dup2(fd)
will return a new file object based on the given filedescriptor
f.flags(mode)
will turn on the associated flag (merge)
mode can contain the following characters:
(character representing a flag)
a append only flag
c close on exec flag
n no delay flag
s synchronization flag
(modifiers)
! turn flags 'off' instead of default 'on'
= copy flags 'as is' instead of default 'merge'
? return a string in which the characters represent the flags
that are set
note: - the '!' and '=' modifiers are mutually exclusive.
- the '?' modifier will return the status of the flags after they
have been changed by other characters in the mode string
f.lock(mode [, len [, start [, whence]]])
will (un)lock a region
mode can contain the following characters:
(character representing type of lock)
u unlock
r read lock
w write lock
(modifiers)
| wait until the lock can be granted
? return the first lock conflicting with the requested lock
or 'None' if there is no conflict. The lock returned is in the
format (mode, len, start, whence, pid) where mode is a
character representing the type of lock ('r' or 'w')
note: - the '?' modifier prevents a region from being locked; it is
query only
"""
import warnings
# stacklevel=2 attributes the warning to the module importing posixfile,
# not to this module itself.
warnings.warn("The posixfile module is deprecated; "
              "fcntl.lockf() provides better locking", DeprecationWarning, 2)
class _posixfile_:
"""File wrapper class that provides extra POSIX file routines."""
states = ['open', 'closed']
#
# Internal routines
#
def __repr__(self):
file = self._file_
return "<%s posixfile '%s', mode '%s' at %s>" % \
(self.states[file.closed], file.name, file.mode, \
hex(id(self))[2:])
#
# Initialization routines
#
def open(self, name, mode='r', bufsize=-1):
import __builtin__
return self.fileopen(__builtin__.open(name, mode, bufsize))
def fileopen(self, file):
import types
if repr(type(file)) != "<type 'file'>":
raise TypeError, 'posixfile.fileopen() arg must be file object'
self._file_ = file
# Copy basic file methods
for maybemethod in dir(file):
if not maybemethod.startswith('_'):
attr = getattr(file, maybemethod)
if isinstance(attr, types.BuiltinMethodType):
setattr(self, maybemethod, attr)
return self
#
# New methods
#
def file(self):
return self._file_
def dup(self):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
def dup2(self, fd):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
posix.dup2(self._file_.fileno(), fd)
return posix.fdopen(fd, self._file_.mode)
def flags(self, *which):
import fcntl, os
if which:
if len(which) > 1:
raise TypeError, 'Too many arguments'
which = which[0]
else: which = '?'
l_flags = 0
if 'n' in which: l_flags = l_flags | os.O_NDELAY
if 'a' in which: l_flags = l_flags | os.O_APPEND
if 's' in which: l_flags = l_flags | os.O_SYNC
file = self._file_
if '=' not in which:
cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if '!' in which: l_flags = cur_fl & ~ l_flags
else: l_flags = cur_fl | l_flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)
if 'c' in which:
arg = ('!' not in which) # 0 is don't, 1 is do close on exec
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)
if '?' in which:
which = '' # Return current flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if os.O_APPEND & l_flags: which = which + 'a'
if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
which = which + 'c'
if os.O_NDELAY & l_flags: which = which + 'n'
if os.O_SYNC & l_flags: which = which + 's'
return which
def lock(self, how, *args):
import struct, fcntl
if 'w' in how: l_type = fcntl.F_WRLCK
elif 'r' in how: l_type = fcntl.F_RDLCK
elif 'u' in how: l_type = fcntl.F_UNLCK
else: raise TypeError, 'no type of lock specified'
if '|' in how: cmd = fcntl.F_SETLKW
elif '?' in how: cmd = fcntl.F_GETLK
else: cmd = fcntl.F_SETLK
l_whence = 0
l_start = 0
l_len = 0
if len(args) == 1:
l_len = args[0]
elif len(args) == 2:
l_len, l_start = args
elif len(args) == 3:
l_len, l_start, l_whence = args
elif len(args) > 3:
raise TypeError, 'too many arguments'
# Hack by [email protected] to get locking to go on freebsd;
# additions for AIX by [email protected]
import sys, os
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'freebsd6', 'freebsd7', 'freebsd8',
'bsdos2', 'bsdos3', 'bsdos4'):
flock = struct.pack('lxxxxlxxxxlhh', \
l_start, l_len, os.getpid(), l_type, l_whence)
elif sys.platform in ('aix3', 'aix4'):
flock = struct.pack('hhlllii', \
l_type, l_whence, l_start, l_len, 0, 0, 0)
else:
flock = struct.pack('hhllhh', \
l_type, l_whence, l_start, l_len, 0, 0)
flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
if '?' in how:
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'bsdos2', 'bsdos3', 'bsdos4'):
l_start, l_len, l_pid, l_type, l_whence = \
struct.unpack('lxxxxlxxxxlhh', flock)
elif sys.platform in ('aix3', 'aix4'):
l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
struct.unpack('hhlllii', flock)
elif sys.platform == "linux2":
l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
struct.unpack('hhllhh', flock)
else:
l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
struct.unpack('hhllhh', flock)
if l_type != fcntl.F_UNLCK:
if l_type == fcntl.F_RDLCK:
return 'r', l_len, l_start, l_whence, l_pid
else:
return 'w', l_len, l_start, l_whence, l_pid
def open(name, mode='r', bufsize=-1):
    """Public routine to open a file as a posixfile object."""
    wrapper = _posixfile_()
    return wrapper.open(name, mode, bufsize)
def fileopen(file):
    """Public routine to get a posixfile object from a Python file object."""
    wrapper = _posixfile_()
    return wrapper.fileopen(file)
#
# Constants
#

# seek() whence values (mirror os.SEEK_SET / os.SEEK_CUR / os.SEEK_END).
SEEK_SET = 0  # seek relative to the start of the file
SEEK_CUR = 1  # seek relative to the current position
SEEK_END = 2  # seek relative to the end of the file

#
# End of posixfile.py
#
| huran2014/huran.github.io | wot_gateway/usr/lib/python2.7/posixfile.py | Python | gpl-2.0 | 8,003 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
# Register nova.utils' password_length option so the generated-password
# length assertion in test_evacuate_not_shared_pass_generated resolves.
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
    """Stub standing in for compute API actions; always reports success."""
    succeeded = True
    return succeeded
def fake_compute_api_get(self, context, instance_id):
    """Stub for compute_api.API.get returning a minimal active instance."""
    return dict(
        id=1,
        uuid=instance_id,
        vm_state=vm_states.ACTIVE,
        task_state=None,
        host='host1',
    )
class EvacuateTest(test.TestCase):
    """Tests for the os-evacuate server action.

    The admin-context/app/request boilerplate previously copy-pasted in
    every test is factored into ``_post_evacuate``.
    """

    _methods = ('resize', 'evacuate')

    def setUp(self):
        super(EvacuateTest, self).setUp()
        self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
        self.UUID = uuid.uuid4()
        for _method in self._methods:
            self.stubs.Set(compute_api.API, _method, fake_compute_api)

    def _post_evacuate(self, body, stub_update=True):
        """POST an evacuate action as admin; return the webob response.

        *body* is the value of the request's ``evacuate`` key.  When
        *stub_update* is true, compute_api.API.update is stubbed to a
        no-op (as most tests need).
        """
        ctxt = context.get_admin_context()
        ctxt.user_id = 'fake'
        ctxt.project_id = 'fake'
        ctxt.is_admin = True
        app = fakes.wsgi_app(fake_auth_context=ctxt)
        if stub_update:
            def fake_update(inst, context, instance,
                            task_state, expected_task_state):
                return None
            self.stubs.Set(compute_api.API, 'update', fake_update)
        req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps({'evacuate': body})
        req.content_type = 'application/json'
        return req.get_response(app)

    def test_evacuate_instance_with_no_target(self):
        # Missing 'host' must be rejected.
        res = self._post_evacuate({'onSharedStorage': 'False',
                                   'adminPass': 'MyNewPass'},
                                  stub_update=False)
        self.assertEqual(res.status_int, 400)

    def test_evacuate_instance_with_target(self):
        res = self._post_evacuate({'host': 'my_host',
                                   'onSharedStorage': 'false',
                                   'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 200)
        resp_json = jsonutils.loads(res.body)
        # The supplied password is echoed back.
        self.assertEqual("MyNewPass", resp_json['adminPass'])

    def test_evacuate_shared_and_pass(self):
        # adminPass is invalid together with shared storage.
        res = self._post_evacuate({'host': 'my_host',
                                   'onSharedStorage': 'True',
                                   'adminPass': 'MyNewPass'})
        self.assertEqual(res.status_int, 400)

    def test_evacuate_not_shared_pass_generated(self):
        # Without shared storage and no adminPass, one is generated.
        res = self._post_evacuate({'host': 'my_host',
                                   'onSharedStorage': 'False'})
        self.assertEqual(res.status_int, 200)
        resp_json = jsonutils.loads(res.body)
        self.assertEqual(CONF.password_length, len(resp_json['adminPass']))

    def test_evacuate_shared(self):
        res = self._post_evacuate({'host': 'my_host',
                                   'onSharedStorage': 'True'})
        self.assertEqual(res.status_int, 200)
| maheshp/novatest | nova/tests/api/openstack/compute/contrib/test_evacuate.py | Python | apache-2.0 | 5,920 |
from queue import Queue
from bears.python.PEP8Bear import PEP8Bear
from tests.LocalBearTestHelper import LocalBearTestHelper
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
class PEP8BearTest(LocalBearTestHelper):
    """Validity checks for PEP8Bear under various section settings."""

    def setUp(self):
        # Fresh section per test; an 80-char line limit is the baseline.
        self.section = Section('name')
        self.section.append(Setting('max_line_length', '80'))
        self.uut = PEP8Bear(self.section, Queue())

    def test_valid(self):
        self.check_validity(self.uut, ["import sys"])
        self.check_validity(self.uut, ["a = 1 + 1"])

    def test_line_length(self):
        self.check_validity(self.uut, ["a = 1 + 1 + 1 + 1 + 1 + 1 + 1"])
        # Lowering the limit turns the same line into a violation.
        self.section.append(Setting('max_line_length', '10'))
        self.check_validity(self.uut,
                            ["a = 1 + 1 + 1 + 1 + 1 + 1 + 1"],
                            valid=False)

    def test_indent_level(self):
        test_code = ['def func():\n',
                     ' pass\n']
        self.check_validity(self.uut, test_code)
        # With tab_width set, the indentation above no longer conforms.
        self.section.append(Setting('tab_width', '2'))
        self.check_validity(self.uut, test_code, valid=False)
        self.check_validity(self.uut, ['def func():\n', ' pass\n'])

    def test_disable_warnings(self):
        test_code = ['def func():\n',
                     ' pass\n',
                     'def func2():\n',
                     ' pass\n']
        self.check_validity(self.uut, test_code, valid=False)
        # Ignoring E302 (expected 2 blank lines) makes the snippet pass.
        self.section.append(Setting('pep_ignore', 'E302'))
        self.check_validity(self.uut, test_code)

    def test_invalid(self):
        self.check_validity(self.uut, [""], valid=False)
        self.check_validity(self.uut, ["a=1+1"], valid=False)
#!/bin/python
"""
4.6 Write a program to prompt the user for hours and rate per hour using
raw_input to compute gross pay. Award time-and-a-half for the hourly
rate for all hours worked above 40 hours. Put the logic to do the
computation of time-and-a-half in a function called computepay() and use
the function to do the computation. The function should return a value.
Use 45 hours and a rate of 10.50 per hour to test the program (the pay
should be 498.75). You should use raw_input to read a string and float()
to convert the string to a number. Do not worry about error checking the
user input unless you want to - you can assume the user types numbers
properly.
"""
def computepay(h, rate):
    """Return gross pay for *h* hours at *rate* per hour.

    Hours beyond 40 are paid at time-and-a-half.  Both arguments may be
    numbers or numeric strings (the prompt below reads raw strings).

    BUGFIX: the original ignored its ``rate`` parameter and read the
    module-level ``pay_rate`` global instead, so it raised NameError if
    called before that global existed.
    """
    hours = float(h)
    hourly = float(rate)
    if hours <= 40:
        return hourly * hours
    # First 40 hours at the base rate, the remainder at 1.5x.
    return 40 * hourly + 1.5 * hourly * (hours - 40)
# Actual execution start
hrs = raw_input("Enter Hours:")
rate = raw_input("Enter pay rate:")
try:
    # Both inputs arrive as strings; reject anything non-numeric.
    h = float(hrs)
    pay_rate = float(rate)
except:
    print "Invalid hours or hourly rate"
    exit()
# get the total pay
# NOTE: ``rate`` is passed as the raw string here; the parsed float
# lives in the module-level ``pay_rate``.
tot_pay = computepay(h, rate)
print tot_pay
| Shashaankar/C_programs | src/Python/coursera/ass_4_6.py | Python | gpl-2.0 | 1,145 |
# Resource object code
#
# Created: Fri Jan 6 09:39:04 2006
# by: The Resource Compiler for PyQt (Qt v4.1.0)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = "\
\x00\x00\x03\x4c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3d\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x71\x02\xf0\x8c\x31\x00\x00\x00\x8e\x05\x93\x08\xe5\
\x00\x00\x00\xaf\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x25\x0e\x9f\xe7\x05\x00\x00\x01\x40\x69\x00\x00\x01\
\x87\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x75\x00\x65\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x10\x00\x26\x00\x46\x00\x69\x00\x63\
\x00\x68\x00\x69\x00\x65\x00\x72\x05\x00\x2a\xd0\x25\x01\x03\x00\
\x00\x00\x10\x00\x26\x00\x51\x00\x75\x00\x69\x00\x74\x00\x74\x00\
\x65\x00\x72\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0e\x00\x50\
\x00\x72\x00\x65\x00\x6d\x00\x69\x00\x65\x00\x72\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x12\x00\x54\x00\x72\x00\x6f\x00\x69\x00\
\x73\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x16\x00\x4c\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x65\
\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\
\x75\x00\x65\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x44\
\x00\x65\x00\x75\x00\x78\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\xe9\x00\x74\x00\x72\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\x00\x76\x00\x65\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x00\x46\x00\x72\x00\
\x61\x00\x6e\x00\xe7\x00\x61\x00\x69\x00\x73\x05\x0c\x4e\x30\xd8\
\x01\x03\x00\x00\x00\x3c\x00\x45\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x6c\x00\x65\x00\x20\x00\x64\x00\x27\x00\x69\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xb2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x33\x00\x4d\x09\xa4\x00\x00\x00\x46\x00\x5a\xf0\x84\
\x00\x00\x00\x57\x02\xf0\x8c\x31\x00\x00\x00\x68\x05\x93\x08\xe5\
\x00\x00\x00\x81\x05\x9b\xa6\x44\x00\x00\x00\x90\x06\x3c\xe8\x53\
\x00\x00\x00\xa1\x06\xec\x79\x65\x00\x00\x00\xb2\x0c\x4e\x30\xd8\
\x00\x00\x00\xc5\x0e\x9f\xe7\x05\x00\x00\x00\xd6\x69\x00\x00\x00\
\xed\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\xbc\xf4\xae\x30\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x08\xd3\x0c\xc7\x7c\x00\x26\x00\x46\x05\x00\
\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\xc8\x85\xb8\xcc\x00\x26\x00\
\x58\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\xcc\xab\xbc\x88\
\xc9\xf8\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\xc1\x38\xbc\
\x88\xc9\xf8\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0e\xc5\xb8\
\xc5\xb4\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x04\xbe\x57\xac\x01\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x06\xb4\x50\xbc\x88\xc9\xf8\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x06\xb4\xf1\xce\x21\xb3\xc4\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x08\xc6\xd0\xad\xfc\xd6\x54\xbc\x95\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x06\xd5\x5c\xad\x6d\xc5\
\xb4\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x0c\xad\x6d\xc8\x1c\
\xd6\x54\x00\x20\xc6\x08\xc8\x1c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x4a\x00\x5a\xf0\x84\
\x00\x00\x00\x61\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x93\x05\x9b\xa6\x44\x00\x00\x00\xaa\x06\x3c\xe8\x53\
\x00\x00\x00\xc1\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x04\x12\x04\x38\x04\x34\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x04\x24\x04\x30\x04\x39\x04\x3b\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x04\x12\x04\x4b\x04\
\x45\x04\x3e\x04\x34\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\
\x04\x1f\x04\x35\x04\x40\x04\x32\x04\x4b\x04\x39\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x0c\x04\x22\x04\x40\x04\x35\x04\x42\x04\
\x38\x04\x39\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x10\x04\x2f\
\x04\x37\x04\x4b\x04\x3a\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\
\xf0\x8c\x31\x01\x03\x00\x00\x00\x0c\x04\x1a\x04\x43\x04\x40\x04\
\x41\x04\x38\x04\x32\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\
\x04\x12\x04\x42\x04\x3e\x04\x40\x04\x3e\x04\x39\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x1c\x04\x18\x04\x37\x04\x3e\x04\x3c\x04\
\x35\x04\x42\x04\x40\x04\x38\x04\x47\x04\x35\x04\x41\x04\x3a\x04\
\x38\x04\x39\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x04\x1f\
\x04\x35\x04\x40\x04\x41\x04\x3f\x04\x35\x04\x3a\x04\x42\x04\x38\
\x04\x32\x04\x30\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x04\
\x20\x04\x43\x04\x41\x04\x41\x04\x3a\x04\x38\x04\x39\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x34\x04\x1f\x04\x40\x04\x38\x04\x3c\
\x04\x35\x04\x40\x00\x20\x04\x38\x04\x3d\x04\x42\x04\x35\x04\x40\
\x04\x3d\x04\x30\x04\x46\x04\x38\x04\x3d\x04\x3e\x04\x30\x04\x3b\
\x04\x38\x04\x37\x04\x30\x04\x46\x04\x38\x04\x38\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2e\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x84\x05\x93\x08\xe5\
\x00\x00\x00\xa1\x05\x9b\xa6\x44\x00\x00\x00\xb6\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x0d\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x69\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x73\x00\x61\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x41\x00\x72\
\x00\x6b\x00\x69\x00\x76\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x10\x00\x26\x00\x41\x00\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\
\x61\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf6\
\x00\x72\x00\x73\x00\x74\x00\x61\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0c\x00\x54\x00\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\
\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\
\x00\xe5\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0a\x00\x53\x00\x6b\x00\x65\x00\x76\x00\
\x74\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\
\x00\x64\x00\x72\x00\x61\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x73\x00\x6b\x00\x74\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x16\x00\x50\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\
\x00\x74\x00\x69\x00\x76\x00\x74\x05\x06\xec\x79\x65\x01\x03\x00\
\x00\x00\x0e\x00\x53\x00\x76\x00\x65\x00\x6e\x00\x73\x00\x6b\x00\
\x61\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\
\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\x00\x72\x00\x69\
\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\
\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x50\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x5c\x00\x5a\xf0\x84\
\x00\x00\x00\x75\x02\xf0\x8c\x31\x00\x00\x00\x90\x05\x93\x08\xe5\
\x00\x00\x00\xb1\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x2b\x0e\x9f\xe7\x05\x00\x00\x01\x44\x69\x00\x00\x01\
\x8b\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x6e\x00\x73\x00\x69\x00\
\x63\x00\x68\x00\x74\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\
\x00\x26\x00\x44\x00\x61\x00\x74\x00\x65\x00\x69\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x10\x00\x42\x00\x65\x00\x26\x00\x65\x00\
\x6e\x00\x64\x00\x65\x00\x6e\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x0e\x00\x45\x00\x72\x00\x73\x00\x74\x00\x65\x00\x6e\x00\x73\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x10\x00\x44\x00\x72\x00\
\x69\x00\x74\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x16\x00\x53\x00\x70\x00\x72\x00\x61\x00\x63\
\x00\x68\x00\x65\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x63\x00\x68\x00\x69\x00\
\x65\x00\x66\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x5a\
\x00\x77\x00\x65\x00\x69\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\x63\x00\x68\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x1c\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x69\
\x00\x73\x00\x63\x00\x68\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x0e\x00\x44\x00\x65\x00\x75\x00\x74\x00\x73\x00\x63\x00\x68\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3c\x00\x49\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x69\x00\x65\x00\x72\x00\x75\
\x00\x6e\x00\x67\x00\x73\x00\x62\x00\x65\x00\x69\x00\x73\x00\x70\
\x00\x69\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xbc\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x37\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x5f\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x87\x05\x9b\xa6\x44\x00\x00\x00\x98\x06\x3c\xe8\x53\
\x00\x00\x00\xa9\x06\xec\x79\x65\x00\x00\x00\xbc\x0c\x4e\x30\xd8\
\x00\x00\x00\xcf\x0e\x9f\xe7\x05\x00\x00\x00\xe2\x69\x00\x00\x00\
\xf7\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\x89\xc6\x56\xfe\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x0c\x65\x87\x4e\xf6\x00\x5b\x00\x26\x00\x46\
\x00\x5d\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0c\x90\x00\x51\
\xfa\x00\x5b\x00\x26\x00\x78\x00\x5d\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x06\x7b\x2c\x4e\x00\x4e\x2a\x05\x00\x4d\x09\xa4\x01\
\x03\x00\x00\x00\x06\x7b\x2c\x4e\x09\x4e\x2a\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x0c\x8b\xed\x8a\x00\x00\x3a\x00\x20\x00\x25\
\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x06\x65\x9c\x62\
\x95\x5f\x71\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\
\x4e\x8c\x4e\x2a\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x08\x7b\
\x49\x89\xd2\x62\x95\x5f\x71\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x08\x90\x0f\x89\xc6\x62\x95\x5f\x71\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x08\x7b\x80\x4f\x53\x4e\x2d\x65\x87\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x0a\x56\xfd\x96\x45\x53\x16\x83\x03\
\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xe0\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4c\x00\x5a\xf0\x84\
\x00\x00\x00\x5d\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x8d\x05\x9b\xa6\x44\x00\x00\x00\xa0\x06\x3c\xe8\x53\
\x00\x00\x00\xb3\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdf\x0e\x9f\xe7\x05\x00\x00\x00\xf8\x69\x00\x00\x01\
\x1b\x03\x00\x00\x00\x06\x00\x52\x00\x54\x00\x4c\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x06\x45\x06\x31\x06\x26\x06\x49\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x06\x27\x06\x44\x06\x45\
\x06\x44\x06\x41\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\x06\
\x23\x06\x2e\x06\x31\x06\x2c\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x06\x06\x23\x06\x48\x06\x44\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x06\x2b\x06\x27\x06\x44\x06\x2b\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x12\x06\x27\x06\x44\x06\x44\x06\x3a\x06\x29\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x08\x06\x45\x06\x35\x06\x45\x06\x2a\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x08\x06\x2b\x06\x27\x06\x46\x06\x49\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x0c\x06\x45\x06\x2a\x06\x45\x06\
\x27\x06\x2b\x06\x44\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x0a\
\x06\x45\x06\x46\x06\x38\x06\x48\x06\x31\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x0e\x06\x27\x06\x44\x06\x39\x06\x31\x06\x28\x06\
\x4a\x06\x29\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x18\x06\x45\
\x06\x2b\x06\x27\x06\x44\x00\x20\x06\x27\x06\x44\x06\x2a\x06\x2f\
\x06\x48\x06\x4a\x06\x44\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x1c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x28\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x82\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xbc\x06\x3c\xe8\x53\
\x00\x00\x00\xd1\x06\xec\x79\x65\x00\x00\x00\xf2\x0c\x4e\x30\xd8\
\x00\x00\x01\x15\x0e\x9f\xe7\x05\x00\x00\x01\x2a\x69\x00\x00\x01\
\x57\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0c\x00\x50\x00\x6f\x00\x68\x00\x6c\x00\
\x65\x00\x64\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\
\x00\x53\x00\x6f\x00\x75\x00\x62\x00\x6f\x00\x72\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x4b\x00\x6f\x00\x6e\x00\
\x65\x00\x63\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x00\x50\
\x00\x72\x00\x76\x00\x6e\x00\xed\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0a\x00\x54\x01\x59\x00\x65\x00\x74\x00\xed\x05\x00\x5a\
\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x4a\x00\x61\x00\x79\x00\x7a\
\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x12\x00\x4e\x00\x61\x00\x6b\x00\x6c\x00\x6f\x00\
\x6e\x01\x1b\x00\x6e\x00\xfd\x05\x05\x93\x08\xe5\x01\x03\x00\x00\
\x00\x0a\x00\x44\x00\x72\x00\x75\x00\x68\x00\xfd\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\
\x65\x00\x74\x00\x72\x00\x69\x00\x63\x00\x6b\x00\xfd\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x18\x00\x50\x00\x65\x00\x72\x00\x73\
\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x6e\x00\xed\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x01\x0c\x00\x65\x00\
\x73\x00\x6b\x00\xfd\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x22\
\x00\x55\x00\x6b\x00\xe1\x00\x7a\x00\x6b\x00\x61\x00\x20\x00\x6c\
\x00\x6f\x00\x6b\x00\x61\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x63\
\x00\x65\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x28\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x26\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x65\x02\xf0\x8c\x31\x00\x00\x00\x7a\x05\x93\x08\xe5\
\x00\x00\x00\x99\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xea\x0c\x4e\x30\xd8\
\x00\x00\x01\x0b\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x63\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0a\x00\x56\x00\x69\x00\x73\x00\x74\x00\
\x61\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\
\x00\x69\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0a\x00\x26\x00\x45\x00\x73\x00\x63\x00\x69\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0a\x00\x50\x00\x72\x00\x69\x00\x6d\x00\x6f\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x65\x00\
\x72\x00\x7a\x00\x6f\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x14\
\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x3a\x00\x20\
\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x0e\x00\
\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x61\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0e\x00\x53\x00\x65\x00\x63\x00\x6f\
\x00\x6e\x00\x64\x00\x6f\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x14\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x00\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\
\x00\x50\x00\x72\x00\x6f\x00\x73\x00\x70\x00\x65\x00\x74\x00\x74\
\x00\x69\x00\x63\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x10\x00\x49\x00\x74\x00\x61\x00\x6c\x00\x69\x00\x61\x00\x6e\x00\
\x6f\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x69\x00\x6f\x00\x20\x00\x64\x00\x69\
\x00\x20\x00\x6c\x00\x6f\x00\x63\x00\x61\x00\x6c\x00\x69\x00\x7a\
\x00\x7a\x00\x61\x00\x7a\x00\x69\x00\x6f\x00\x6e\x00\x65\x05\x0e\
\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\
\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xc7\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x05\x0e\x9f\xe7\x05\x00\x00\x01\x1a\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x69\x00\x73\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x00\x26\x00\x46\x00\x69\x00\x6c\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x10\x00\x26\x00\x41\x00\
\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\x74\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf8\x00\x72\x00\x73\x00\x74\
\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0c\x00\x54\x00\
\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\x00\xe5\x00\x6b\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0c\x00\x53\x00\x6b\x00\x6a\x00\x65\x00\x76\x00\x74\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\x00\x64\x00\x72\
\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x14\x00\x49\x00\
\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\
\x6b\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x14\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x00\x4e\x00\x6f\x00\
\x72\x00\x73\x00\x6b\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3a\
\x00\x49\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x73\
\x00\x6a\x00\x6f\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\
\x00\x72\x00\x69\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x6b\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x54\x00\x5a\xf0\x84\
\x00\x00\x00\x69\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9d\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcd\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x03\x8c\x03\xc8\x03\xb7\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\x03\x91\x03\xc1\x03\xc7\
\x03\xb5\x03\xaf\x03\xbf\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0e\x03\x88\x00\x26\x03\xbe\x03\xbf\x03\xb4\x03\xbf\x03\xc2\x05\
\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x03\xa0\x03\xc1\x03\xce\
\x03\xc4\x03\xbf\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x03\
\xa4\x03\xc1\x03\xaf\x03\xc4\x03\xbf\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x14\x03\x93\x03\xbb\x03\xce\x03\xc3\x03\xc3\x03\xb1\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x0c\x03\xa0\x03\xbb\x03\xac\x03\xb3\x03\xb9\x03\xb1\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0e\x03\x94\x03\xb5\x03\xcd\
\x03\xc4\x03\xb5\x03\xc1\x03\xbf\x05\x05\x9b\xa6\x44\x01\x03\x00\
\x00\x00\x14\x03\x99\x03\xc3\x03\xbf\x03\xbc\x03\xb5\x03\xc4\x03\
\xc1\x03\xb9\x03\xba\x03\xae\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x12\x03\xa0\x03\xc1\x03\xbf\x03\xbf\x03\xc0\x03\xc4\x03\xb9\
\x03\xba\x03\xae\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x03\
\x95\x03\xbb\x03\xbb\x03\xb7\x03\xbd\x03\xb9\x03\xba\x03\xac\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x30\x03\xa0\x03\xb1\x03\xc1\
\x03\xac\x03\xb4\x03\xb5\x03\xb9\x03\xb3\x03\xbc\x03\xb1\x00\x20\
\x03\xb4\x03\xb9\x03\xb5\x03\xb8\x03\xbd\x03\xbf\x03\xc0\x03\xbf\
\x03\xaf\x03\xb7\x03\xc3\x03\xb7\x03\xc2\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x45\x00\x4d\x09\xa4\x00\x00\x00\x5a\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x80\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xb8\x06\x3c\xe8\x53\
\x00\x00\x00\xc9\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x07\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x73\x00\x70\x00\x65\x00\
\x6b\x00\x74\x00\x6f\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\
\x00\x26\x00\x44\x00\x6f\x00\x73\x00\x69\x00\x65\x00\x72\x00\x6f\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\
\x69\x00\x6e\x00\x69\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x08\
\x00\x55\x00\x6e\x00\x75\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x00\x54\x00\x72\x00\x69\x00\x65\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x14\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x76\
\x00\x6f\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x6b\x00\
\x76\x00\x61\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x00\x44\
\x00\x75\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x12\x00\
\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\
\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x12\x00\x45\x00\
\x73\x00\x70\x00\x65\x00\x72\x00\x61\x00\x6e\x00\x74\x00\x6f\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x6b\x00\x7a\
\x00\x65\x00\x6d\x00\x70\x00\x6c\x00\x6f\x00\x20\x00\x70\x00\x72\
\x00\x69\x00\x20\x00\x69\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\
\x00\x61\x00\x63\x00\x69\x00\x69\x00\x67\x00\x6f\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2a\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x63\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x65\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x65\x00\x77\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\x69\
\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\
\x45\x00\x26\x00\x78\x00\x69\x00\x74\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x0a\x00\x46\x00\x69\x00\x72\x00\x73\x00\x74\x05\x00\
\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x68\x00\x69\x00\
\x72\x00\x64\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x18\x00\x4c\
\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x67\x00\x65\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x65\x00\x63\
\x00\x6f\x00\x6e\x00\x64\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x12\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\
\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\
\x00\x76\x00\x65\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x00\
\x45\x00\x6e\x00\x67\x00\x6c\x00\x69\x00\x73\x00\x68\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\x00\x74\x00\x65\
\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x61\
\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x20\x00\x45\x00\x78\x00\x61\x00\x6d\x00\x70\x00\x6c\x00\x65\
\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\
\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xd2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3f\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x8f\x05\x9b\xa6\x44\x00\x00\x00\xa4\x06\x3c\xe8\x53\
\x00\x00\x00\xb5\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdb\x0e\x9f\xe7\x05\x00\x00\x00\xec\x69\x00\x00\x01\
\x0d\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x88\x68\x79\x3a\x65\xb9\x5f\x0f\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\x30\xd5\x30\xa1\x30\xa4\
\x30\xeb\x00\x28\x00\x26\x00\x46\x00\x29\x05\x00\x2a\xd0\x25\x01\
\x03\x00\x00\x00\x0c\x7d\x42\x4e\x86\x00\x28\x00\x26\x00\x58\x00\
\x29\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x00\
\x88\x4c\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\
\x09\x88\x4c\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0c\x8a\x00\
\x8a\x9e\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0a\x65\x9c\x30\x81\x62\x95\x5f\x71\x6c\xd5\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x8c\x88\x4c\
\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x0a\x7b\x49\x89\xd2\x62\
\x95\x5f\x71\x6c\xd5\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x06\
\x90\x60\x8f\xd1\x6c\xd5\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x06\x65\xe5\x67\x2c\x8a\x9e\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\
\x00\x16\x56\xfd\x96\x9b\x53\x16\x00\x28\x00\x69\x00\x31\x00\x38\
\x00\x6e\x00\x29\x30\x6e\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
"
# Auto-generated by the PySide resource compiler (pyside-rcc) from the
# project's .qrc file: UTF-16-encoded resource names (translations/i18n_*.qm)
# plus their hash values.  Do not edit by hand; regenerate instead.
qt_resource_name = "\
\x00\x0c\
\x0d\xfc\x11\x13\
\x00\x74\
\x00\x72\x00\x61\x00\x6e\x00\x73\x00\x6c\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\
\x00\x0a\
\x04\x50\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x66\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6f\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6b\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x65\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x72\x00\x75\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x67\x1c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x73\x00\x76\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x58\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x64\x00\x65\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x7d\x3c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x7a\x00\x68\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x55\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x57\xec\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x63\x00\x73\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6d\xfc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x69\x00\x74\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x68\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6e\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x56\x7c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6c\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\x9c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6e\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6c\xbc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6a\x00\x70\x00\x2e\x00\x71\x00\x6d\
"
# Auto-generated resource tree: offsets into qt_resource_name/qt_resource_data
# for each embedded .qm file.  Do not edit by hand.
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x02\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xba\x00\x00\x00\x00\x00\x01\x00\x00\x12\x76\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xce\
\x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x15\x5a\
\x00\x00\x00\x86\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x62\
\x00\x00\x01\x56\x00\x00\x00\x00\x00\x01\x00\x00\x25\x20\
\x00\x00\x01\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x21\xf6\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x06\x06\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x09\x30\
\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xa6\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x28\x4e\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x18\x7a\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x03\x50\
\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x0f\xb6\
"
def qInitResources():
    """Register the embedded translation resources with Qt's resource system."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded translation resources from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
| Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/tools/i18n/i18n_rc.py | Python | epl-1.0 | 48,776 |
#
# Copyright (C) 2012-2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import utils
from copy import copy
class Packet:
    """One message recorded in flight between two processes.

    Attributes:
        time -- timestamp at which the packet was sent
        size -- size of the message
        edge_id -- id of the net edge the message travels along
    """

    def __init__(self, time, size, edge_id):
        self.edge_id = edge_id
        self.size = size
        self.time = time
class RunInstance:
    """Reconstructed state of one traced run of a net.

    The object is driven by a tracelog replay: for every recorded event the
    tracelog calls ``pre_event`` and then one of the ``event_*`` /
    ``transition_*`` methods.  The instance keeps:

    - per-process token state in ``net_instances`` (process_id -> NetInstance),
    - the activity currently running on each thread in ``activites``
      (indexed by process_id * threads_count + thread_id),
    - in-flight packets in ``packets`` (indexed by
      target_id * process_count + origin_id),
    - details of the most recent event in the ``last_event*`` attributes.
    """
    def __init__(self, project, process_count, threads_count):
        self.project = project
        self.process_count = process_count
        self.threads_count = threads_count
        self.net = None
        self.net_instances = {}
        self.activites = [None] * (self.process_count * self.threads_count)
        self.last_event = None # "spawn" / "quit" / "idle" / "receive" / "fire" / "finish" / None
        self.last_event_activity = None
        self.last_event_instance = None
        self.last_event_time = None
        self.packets = [ [] for i in xrange(self.process_count * self.process_count)]
    def add_token(self, place_id, token_pointer, token_value, send_time=None):
        # Delegates to the net instance of the process of the current event.
        self.last_event_instance.add_token(place_id, token_pointer, token_value, send_time)
    def remove_token(self, place_id, token_pointer):
        # Delegates to the net instance of the process of the current event.
        self.last_event_instance.remove_token(place_id, token_pointer)
    def clear_removed_and_new_tokens(self):
        """Commit pending token changes in every net instance."""
        for i in self.net_instances:
            self.net_instances[i].clear_removed_and_new_tokens()
    def add_enabled_transition(self, transition_id):
        self.last_event_instance.add_enabled_transition(transition_id)
    def set_activity(self, process_id, thread_id, activity):
        """Record what the given thread is doing now (also as last activity)."""
        index = process_id * self.threads_count + thread_id
        self.activites[index] = activity
        self.last_event_activity = activity
    def pre_event(self):
        """ This method is called by tracelog before each event_* """
        self.clear_removed_and_new_tokens()
    def reset_last_event_info(self):
        """Forget everything about the most recent event."""
        self.last_event = None
        self.last_event_activity = None
        self.last_event_instance = None
        self.last_event_time = None
    def event_spawn(self, process_id, thread_id, time, net_id):
        """A process spawned an instance of the net with id `net_id`."""
        self.net = self.project.find_net(net_id)
        assert self.net.id == net_id
        self.last_event = "spawn"
        if thread_id is not None:
            self.set_activity(process_id, thread_id, None)
        instance = NetInstance(process_id)
        self.net_instances[process_id] = instance
        self.last_event_instance = instance
        self.last_event_process = process_id
        self.last_event_time = time
    def event_quit(self, process_id, thread_id, time):
        """A thread quit; marks its current activity (if any) as quit."""
        self.last_event = "quit"
        self.last_event_process = process_id
        self.last_event_thread = thread_id
        self.last_event_time = time
        index = process_id * self.threads_count + thread_id
        self.last_event_activity = self.activites[index]
        if self.last_event_activity is not None:
            # None can occur when we are logging
            # "quit" but not transition fire
            self.last_event_activity.quit = True
        self.last_event_instance = self.net_instances[process_id]
    def event_idle(self, process_id, thread_id, time):
        """A thread became idle."""
        self.last_event = "idle"
        self.last_event_process = process_id
        self.last_event_thread = thread_id
        self.last_event_time = time
        self.last_event_activity = None
        self.last_event_instance = self.net_instances[process_id]
    def event_send(self, process_id, thread_id, time, target_id, size, edge_id):
        """A message was sent; queue it on the (target, origin) packet list."""
        packet = Packet(time, size, edge_id)
        self.packets[target_id * self.process_count + process_id].append(packet)
    def event_end(self, process_id, thread_id, time):
        pass
    def event_receive(self, process_id, thread_id, time, origin_id):
        """A message from `origin_id` was received.

        Pops the oldest queued packet from that origin and returns the
        transfer duration (receive time minus send time).
        """
        self.last_event = "receive"
        self.last_event_process = process_id
        self.last_event_thread = thread_id
        self.last_event_time = time
        packets = self.packets[process_id * self.process_count + origin_id]
        packet = packets[0]
        del packets[0]
        self.last_event_instance = self.net_instances[process_id]
        self.set_activity(process_id,
                          thread_id,
                          Receive(time, process_id, thread_id, origin_id))
        return time - packet.time
    def transition_fired(self, process_id, thread_id, time, transition_id, values):
        """A transition started firing with the given binding `values`.

        The activity is remembered on the thread only when the transition
        has user code (i.e. a matching transition_finished is expected).
        """
        self.last_event = "fire"
        self.last_event_instance = self.net_instances[process_id]
        self.last_event_process = process_id
        self.last_event_thread = thread_id
        self.last_event_time = time
        transition = self.net.item_by_id(transition_id)
        self.last_event_activity = \
            TransitionFire(time, process_id, thread_id, transition, values)
        if transition.has_code():
            index = process_id * self.threads_count + thread_id
            self.activites[index] = self.last_event_activity
    def transition_finished(self, process_id, thread_id, time):
        """The transition running on the given thread finished."""
        self.last_event = "finish"
        self.last_event_process = process_id
        self.last_event_thread = thread_id
        self.last_event_time = time
        index = process_id * self.threads_count + thread_id
        self.last_event_activity = self.activites[index]
        self.last_event_instance = self.net_instances[process_id]
        self.activites[index] = None
    def copy(self):
        """Return a copy with duplicated net instances and activities.

        NOTE(review): in-flight ``packets`` are not carried over -- the copy
        starts with empty packet queues; confirm this is intended by callers.
        """
        runinstance = RunInstance(self.project,
                                  self.process_count,
                                  self.threads_count)
        for i in self.net_instances:
            n = self.net_instances[i].copy()
            runinstance.net_instances[i] = n
        runinstance.activites = self.activites[:]
        return runinstance
    def get_perspectives(self):
        """Return the "All" perspective plus one per process, sorted by id."""
        perspectives = [ Perspective("All", self, self.net_instances) ]
        v = self.net_instances.keys()
        v.sort()
        for i in v:
            perspectives.append(
                Perspective(str(i),
                            self, { i : self.net_instances[i] } ))
        return perspectives
    def get_packets_info(self, edge_id, process_id):
        """Describe packets queued for `process_id` that travel along `edge_id`.

        Returns tuples (process_id, origin_id, top, text) where `top` is True
        when the matching packet is at the head of the origin's queue; the
        text summarizes the first packet's size plus a count/total of the rest.
        """
        results = []
        for i in xrange(self.process_count):
            packets = self.packets[process_id * self.process_count + i]
            ps = [ p for p in packets if p.edge_id == edge_id ]
            if ps:
                itr = iter(ps)
                first = next(itr)
                text = "{0} -> {1} | {2}".format(i, process_id, first.size)
                if len(ps) > 1:
                    size = 0
                    for p in itr:
                        size += p.size
                    text += " ({0}, {1})".format(len(ps) - 1, size)
                if packets[0].edge_id != edge_id:
                    top = False
                    text += " *"
                else:
                    top = True
                results.append((process_id, i, top, text))
        return results
    def get_packets_count(self, origin_id, target_id):
        """Number of packets queued from `origin_id` to `target_id`."""
        return len(self.packets[target_id * self.process_count + origin_id])
class ThreadActivity:
    """Base record of what a worker thread was doing at a point in time."""

    def __init__(self, time, process_id, thread_id):
        self.thread_id = thread_id
        self.process_id = process_id
        self.time = time
class TransitionFire(ThreadActivity):
    """Activity of a thread that is executing (firing) a transition."""

    name = "fire"
    # Flipped to True by RunInstance.event_quit while the fire is still running.
    quit = False

    def __init__(self, time, process_id, thread_id, transition, values):
        ThreadActivity.__init__(self, time, process_id, thread_id)
        self.values = values
        self.transition = transition
class Receive(ThreadActivity):
    """Activity of a thread that is receiving a message from another process."""

    name = "receive"

    def __init__(self, time, process_id, thread_id, origin_id):
        ThreadActivity.__init__(self, time, process_id, thread_id)
        self.origin_id = origin_id
class NetInstance:
    """Token state of the net instance owned by one process.

    Token containers map place_id to a list of tuples
    (token_pointer, token_value, send_time):

    - tokens         -- tokens currently present,
    - new_tokens     -- tokens added since the last commit,
    - removed_tokens -- tokens removed since the last commit.
    """

    def __init__(self, process_id, tokens=None):
        self.process_id = process_id
        self.enabled_transitions = None
        self.new_tokens = {}
        self.removed_tokens = {}
        self.tokens = {} if tokens is None else tokens

    def add_token(self, place_id, token_pointer, token_value, send_time):
        """Queue a token for the given place in 'new_tokens'."""
        bucket = self.new_tokens.setdefault(place_id, [])
        # A single-element value is stored unwrapped.
        if len(token_value) == 1:
            token_value = token_value[0]
        bucket.append((token_pointer, token_value, send_time))

    def clear_removed_and_new_tokens(self):
        """
        'new_tokens' are moved into regular list of tokens and
        'removed_tokens' tokens are emptied
        """
        if self.new_tokens:
            for place_id, fresh in self.new_tokens.items():
                existing = self.tokens.get(place_id)
                # The slot may be missing or explicitly None
                # (after remove_all_tokens); start a fresh list then.
                if existing is None:
                    existing = []
                    self.tokens[place_id] = existing
                existing.extend(fresh)
            self.new_tokens = {}
        if self.removed_tokens:
            self.removed_tokens = {}

    def remove_token(self, place_id, token_pointer):
        """Move the first token with the given pointer into 'removed_tokens'."""
        tokens_here = self.tokens.get(place_id)
        if tokens_here is None:
            return
        # The removed-bucket is (re)created even when nothing ends up removed.
        graveyard = self.removed_tokens.get(place_id)
        if graveyard is None:
            graveyard = []
            self.removed_tokens[place_id] = graveyard
        for position, token in enumerate(tokens_here):
            if token[0] == token_pointer:
                graveyard.append(token)
                del tokens_here[position]
                return

    def remove_all_tokens(self, place_id):
        """Move every token of the given place into 'removed_tokens'."""
        current = self.tokens.get(place_id)
        self.tokens[place_id] = None
        self.removed_tokens[place_id] = current

    def add_enabled_transition(self, transition_id):
        if self.enabled_transitions is None:
            self.enabled_transitions = [transition_id]
        else:
            self.enabled_transitions.append(transition_id)

    def copy(self):
        """Return a copy sharing token tuples but not the top-level containers."""
        duplicate = NetInstance(self.process_id, copy(self.tokens))
        duplicate.enabled_transitions = copy(self.enabled_transitions)
        return duplicate
class Perspective(utils.EqMixin):
    """A view of a RunInstance restricted to a subset of net instances.

    RunInstance.get_perspectives builds one perspective named "All" covering
    every process and one per process named by its id.  All getters aggregate
    over ``net_instances`` (process_id -> NetInstance).
    """
    def __init__(self, name, runinstance, net_instances):
        self.name = name
        self.runinstance = runinstance
        self.net_instances = net_instances
    def get_tokens(self, place):
        """Return "value@process_id" strings for tokens currently in `place`."""
        tokens = []
        for net_instance in self.net_instances.values():
            t = net_instance.tokens.get(place.id)
            if t is not None:
                for token_pointer, token_value, token_time in t:
                    tokens.append("{0}@{1}".format(token_value, net_instance.process_id))
        return tokens
    def get_new_tokens(self, place):
        """Like get_tokens but for uncommitted tokens; appends the send time
        in parentheses when one was recorded."""
        tokens = []
        for net_instance in self.net_instances.values():
            t = net_instance.new_tokens.get(place.id)
            if t is not None:
                for token_pointer, token_value, token_time in t:
                    if token_time:
                        tokens.append("{0}@{1} ({2})".format(
                            token_value,
                            net_instance.process_id,
                            utils.time_to_string(token_time, seconds=True)))
                    else:
                        tokens.append("{0}@{1}".format(token_value, net_instance.process_id))
        return tokens
    def get_packets_info(self, edge_id):
        """Aggregate RunInstance.get_packets_info over the viewed processes."""
        results = []
        for net_instance in self.net_instances.values():
            results += self.runinstance.get_packets_info(edge_id, net_instance.process_id)
        return results
    def get_removed_tokens(self, place):
        """Return "value@process_id" strings for recently removed tokens."""
        tokens = []
        for net_instance in self.net_instances.values():
            t = net_instance.removed_tokens.get(place.id)
            if t is not None:
                for token_pointer, token_value, token_time in t:
                    tokens.append("{0}@{1}".format(token_value, net_instance.process_id))
        return tokens
    def get_transition_trace_values(self, transition):
        """For every thread currently firing `transition`, return a
        "process/thread -> v1; v2;" string; None when no net is loaded."""
        if self.runinstance.net is None:
            return None
        values = []
        runinstance = self.runinstance
        for i in range(runinstance.threads_count * runinstance.process_count):
            activity = runinstance.activites[i]
            if isinstance(activity, TransitionFire) \
               and activity.transition.id == transition.id:
                run_on = "{0}/{1} -> ".format(i // runinstance.threads_count,
                                              i % runinstance.threads_count)
                values.append(run_on + "; ".join(map(str, activity.values)) + ";")
        return values
    def get_enabled_transitions(self):
        """Union of enabled transition ids across the viewed net instances."""
        enabled = set()
        enabled.update(*[ net_instance.enabled_transitions
                          for net_instance in self.net_instances.values()
                          if net_instance.enabled_transitions is not None ])
        return enabled
    def is_transition_enabled(self, transition):
        return transition.id in self.get_enabled_transitions()
    def get_activations_values(self, transition):
        """Return (label, rgba_color, (process, thread, transition)) for
        threads firing `transition`.

        Colors: green when the fire event just happened, gray when the firing
        activity was marked quit, red otherwise for the last-event activity,
        and yellow for other threads currently firing this transition.
        NOTE(review): the `break` stops scanning a process at the first thread
        that is not firing this transition, so only a leading run of threads
        is reported per process -- confirm this is intended.
        """
        runinstance = self.runinstance
        result = []
        for p in range(runinstance.process_count):
            for t in range(runinstance.threads_count):
                activity = runinstance.activites[ p * runinstance.threads_count + t ]
                if (runinstance.last_event_activity and
                    runinstance.last_event_activity.name == "fire" and
                    runinstance.last_event_activity.transition == transition and
                    runinstance.last_event_activity.process_id == p and
                    runinstance.last_event_activity.thread_id == t):
                    if runinstance.last_event == "fire":
                        color = (0, 1, 0, 0.8)
                    elif runinstance.last_event_activity.quit:
                        color = (0.45, 0.45, 0.45, 0.8)
                    else:
                        color = (1, 0, 0, 0.8)
                    activity = runinstance.last_event_activity
                elif isinstance(activity, TransitionFire) and \
                     activity.transition == transition:
                    color = (1.0, 1.0, 0, 0.8)
                else:
                    break
                text = "{0.process_id}/{0.thread_id}".format(activity)
                result.append((text, color, (p, t, transition)))
        return result
    def get_process_ids(self):
        """Ids of the processes covered by this perspective."""
        return [ net_instance.process_id
                 for net_instance in self.net_instances.values() ]
| Palasekm/Kaira | gui/runinstance.py | Python | gpl-3.0 | 15,134 |
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import virtual_interface as vif_obj
from nova.tests.unit.objects import test_objects
# Canonical db-layer virtual_interface row; the mocked db calls below return
# this dict and the tests verify the object layer mirrors every field of it.
fake_vif = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': 1,
    'address': '00:00:00:00:00:00',
    'network_id': 123,
    'instance_uuid': 'fake-uuid',
    'uuid': 'fake-uuid-2',
}
class _TestVirtualInterface(object):
    """Test cases shared by the local and remote VirtualInterface suites.

    Each test stubs out the corresponding nova.db call and verifies that the
    object layer forwards the call and mirrors the returned row.
    """

    @staticmethod
    def _compare(test, db, obj):
        # Every field of the db row must be reflected on the object.
        for field, value in db.items():
            test.assertEqual(value, obj[field])

    def test_get_by_id(self):
        with mock.patch.object(db, 'virtual_interface_get') as mock_get:
            mock_get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
            self._compare(self, fake_vif, vif)

    def test_get_by_uuid(self):
        with mock.patch.object(db, 'virtual_interface_get_by_uuid') as mock_get:
            mock_get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
                                                       'fake-uuid-2')
            self._compare(self, fake_vif, vif)

    def test_get_by_address(self):
        with mock.patch.object(db,
                               'virtual_interface_get_by_address') as mock_get:
            mock_get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_address(self.context,
                                                          '00:00:00:00:00:00')
            self._compare(self, fake_vif, vif)

    def test_get_by_instance_and_network(self):
        target = 'virtual_interface_get_by_instance_and_network'
        with mock.patch.object(db, target) as mock_get:
            mock_get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_instance_and_network(
                self.context, 'fake-uuid', 123)
            self._compare(self, fake_vif, vif)

    def test_create(self):
        vif = vif_obj.VirtualInterface()
        vif.address = '00:00:00:00:00:00'
        vif.network_id = 123
        vif.instance_uuid = 'fake-uuid'
        vif.uuid = 'fake-uuid-2'
        with mock.patch.object(db, 'virtual_interface_create') as mock_create:
            mock_create.return_value = fake_vif
            vif.create(self.context)
        # create() must attach the context it was called with.
        self.assertEqual(self.context, vif._context)
        vif._context = None
        self._compare(self, fake_vif, vif)

    def test_delete_by_instance_uuid(self):
        with mock.patch.object(
                db, 'virtual_interface_delete_by_instance') as mock_delete:
            vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
                                                             'fake-uuid')
            mock_delete.assert_called_with(self.context, 'fake-uuid')
class TestVirtualInterfaceObject(test_objects._LocalTest,
                                 _TestVirtualInterface):
    # Runs the shared VirtualInterface tests against the local (in-process)
    # object backend; all test cases are inherited.
    pass
class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
                                       _TestVirtualInterface):
    # Runs the shared VirtualInterface tests through the remote (RPC-backed)
    # object backend; all test cases are inherited.
    pass
class _TestVirtualInterfaceList(object):
    """List-query test cases shared by the local and remote suites."""

    def test_get_all(self):
        with mock.patch.object(db, 'virtual_interface_get_all') as mock_get:
            mock_get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])

    def test_get_by_instance_uuid(self):
        with mock.patch.object(
                db, 'virtual_interface_get_by_instance') as mock_get:
            mock_get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
                self.context, 'fake-uuid')
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])
class TestVirtualInterfaceList(test_objects._LocalTest,
                               _TestVirtualInterfaceList):
    """Run the shared list tests through test_objects._LocalTest."""
    pass
class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
                                     _TestVirtualInterfaceList):
    """Run the shared list tests through test_objects._RemoteTest."""
    pass
| silenceli/nova | nova/tests/unit/objects/test_virtual_interface.py | Python | apache-2.0 | 4,623 |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""generated automatically by auto_dao.py"""
from __future__ import division
from vistrails.core.system import get_elementtree_library
from xml_dao import XMLDAO
from vistrails.db.versions.v0_9_5.domain import *
ElementTree = get_elementtree_library()
class DBPortSpecXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBPortSpec objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBPortSpec parsed from *node*, or None on tag mismatch."""
        if node.tag != 'portSpec':
            return None
        def attr(name, kind):
            # A missing attribute decodes from None, like node.get(name, None).
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBPortSpec(id=attr('id', 'long'),
                         name=attr('name', 'str'),
                         type=attr('type', 'str'),
                         optional=attr('optional', 'int'),
                         sort_key=attr('sortKey', 'int'),
                         sigstring=attr('sigstring', 'str'))
        obj.is_dirty = False
        return obj

    def toXML(self, portSpec, node=None):
        """Serialize *portSpec* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('portSpec')
        for name, value, kind in (('id', portSpec.db_id, 'long'),
                                  ('name', portSpec.db_name, 'str'),
                                  ('type', portSpec.db_type, 'str'),
                                  ('optional', portSpec.db_optional, 'int'),
                                  ('sortKey', portSpec.db_sort_key, 'int'),
                                  ('sigstring', portSpec.db_sigstring, 'str')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBModuleXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'module':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('namespace', None)
namespace = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('tag', None)
tag = self.convertFromStr(data, 'str')
location = None
functions = []
annotations = []
portSpecs = []
# read children
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
portSpecs.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModule(id=id,
cache=cache,
name=name,
namespace=namespace,
package=package,
version=version,
tag=tag,
location=location,
functions=functions,
annotations=annotations,
portSpecs=portSpecs)
obj.is_dirty = False
return obj
def toXML(self, module, node=None):
if node is None:
node = ElementTree.Element('module')
# set attributes
node.set('id',self.convertToStr(module.db_id, 'long'))
node.set('cache',self.convertToStr(module.db_cache, 'int'))
node.set('name',self.convertToStr(module.db_name, 'str'))
node.set('namespace',self.convertToStr(module.db_namespace, 'str'))
node.set('package',self.convertToStr(module.db_package, 'str'))
node.set('version',self.convertToStr(module.db_version, 'str'))
node.set('tag',self.convertToStr(module.db_tag, 'str'))
# set elements
location = module.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = module.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = module.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
portSpecs = module.db_portSpecs
for portSpec in portSpecs:
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(portSpec, childNode)
return node
class DBModuleDescriptorXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'moduleDescriptor':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('namespace', None)
namespace = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('baseDescriptorId', None)
base_descriptor_id = self.convertFromStr(data, 'long')
portSpecs = []
# read children
for child in node.getchildren():
if child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
portSpecs.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModuleDescriptor(id=id,
name=name,
package=package,
namespace=namespace,
version=version,
base_descriptor_id=base_descriptor_id,
portSpecs=portSpecs)
obj.is_dirty = False
return obj
def toXML(self, module_descriptor, node=None):
if node is None:
node = ElementTree.Element('moduleDescriptor')
# set attributes
node.set('id',self.convertToStr(module_descriptor.db_id, 'long'))
node.set('name',self.convertToStr(module_descriptor.db_name, 'str'))
node.set('package',self.convertToStr(module_descriptor.db_package, 'str'))
node.set('namespace',self.convertToStr(module_descriptor.db_namespace, 'str'))
node.set('version',self.convertToStr(module_descriptor.db_version, 'str'))
node.set('baseDescriptorId',self.convertToStr(module_descriptor.db_base_descriptor_id, 'long'))
# set elements
portSpecs = module_descriptor.db_portSpecs
for portSpec in portSpecs:
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(portSpec, childNode)
return node
class DBTagXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBTag objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBTag parsed from *node*, or None on tag mismatch."""
        if node.tag != 'tag':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBTag(id=attr('id', 'long'),
                    name=attr('name', 'str'))
        obj.is_dirty = False
        return obj

    def toXML(self, tag, node=None):
        """Serialize *tag* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('tag')
        for name, value, kind in (('id', tag.db_id, 'long'),
                                  ('name', tag.db_name, 'str')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBPortXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBPort objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBPort parsed from *node*, or None on tag mismatch."""
        if node.tag != 'port':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBPort(id=attr('id', 'long'),
                     type=attr('type', 'str'),
                     moduleId=attr('moduleId', 'long'),
                     moduleName=attr('moduleName', 'str'),
                     name=attr('name', 'str'),
                     signature=attr('signature', 'str'))
        obj.is_dirty = False
        return obj

    def toXML(self, port, node=None):
        """Serialize *port* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('port')
        for name, value, kind in (('id', port.db_id, 'long'),
                                  ('type', port.db_type, 'str'),
                                  ('moduleId', port.db_moduleId, 'long'),
                                  ('moduleName', port.db_moduleName, 'str'),
                                  ('name', port.db_name, 'str'),
                                  ('signature', port.db_signature, 'str')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBGroupXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'group':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('namespace', None)
namespace = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('tag', None)
tag = self.convertFromStr(data, 'str')
workflow = None
location = None
functions = []
annotations = []
# read children
for child in node.getchildren():
if child.tag == 'workflow':
_data = self.getDao('workflow').fromXML(child)
workflow = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBGroup(id=id,
workflow=workflow,
cache=cache,
name=name,
namespace=namespace,
package=package,
version=version,
tag=tag,
location=location,
functions=functions,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, group, node=None):
if node is None:
node = ElementTree.Element('group')
# set attributes
node.set('id',self.convertToStr(group.db_id, 'long'))
node.set('cache',self.convertToStr(group.db_cache, 'int'))
node.set('name',self.convertToStr(group.db_name, 'str'))
node.set('namespace',self.convertToStr(group.db_namespace, 'str'))
node.set('package',self.convertToStr(group.db_package, 'str'))
node.set('version',self.convertToStr(group.db_version, 'str'))
node.set('tag',self.convertToStr(group.db_tag, 'str'))
# set elements
workflow = group.db_workflow
if workflow is not None:
childNode = ElementTree.SubElement(node, 'workflow')
self.getDao('workflow').toXML(workflow, childNode)
location = group.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = group.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = group.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
class DBLogXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'log':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('vistrail_id', None)
vistrail_id = self.convertFromStr(data, 'long')
workflow_execs = []
machines = []
# read children
for child in node.getchildren():
if child.tag == 'workflowExec':
_data = self.getDao('workflow_exec').fromXML(child)
workflow_execs.append(_data)
elif child.tag == 'machine':
_data = self.getDao('machine').fromXML(child)
machines.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBLog(id=id,
version=version,
name=name,
workflow_execs=workflow_execs,
machines=machines,
vistrail_id=vistrail_id)
obj.is_dirty = False
return obj
def toXML(self, log, node=None):
if node is None:
node = ElementTree.Element('log')
# set attributes
node.set('id',self.convertToStr(log.db_id, 'long'))
node.set('version',self.convertToStr(log.db_version, 'str'))
node.set('name',self.convertToStr(log.db_name, 'str'))
node.set('vistrail_id',self.convertToStr(log.db_vistrail_id, 'long'))
# set elements
workflow_execs = log.db_workflow_execs
for workflow_exec in workflow_execs:
childNode = ElementTree.SubElement(node, 'workflowExec')
self.getDao('workflow_exec').toXML(workflow_exec, childNode)
machines = log.db_machines
for machine in machines:
childNode = ElementTree.SubElement(node, 'machine')
self.getDao('machine').toXML(machine, childNode)
return node
class DBMachineXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBMachine objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBMachine parsed from *node*, or None on tag mismatch."""
        if node.tag != 'machine':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBMachine(id=attr('id', 'long'),
                        name=attr('name', 'str'),
                        os=attr('os', 'str'),
                        architecture=attr('architecture', 'str'),
                        processor=attr('processor', 'str'),
                        ram=attr('ram', 'int'))
        obj.is_dirty = False
        return obj

    def toXML(self, machine, node=None):
        """Serialize *machine* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('machine')
        for name, value, kind in (
                ('id', machine.db_id, 'long'),
                ('name', machine.db_name, 'str'),
                ('os', machine.db_os, 'str'),
                ('architecture', machine.db_architecture, 'str'),
                ('processor', machine.db_processor, 'str'),
                ('ram', machine.db_ram, 'int')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBAddXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'add':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('what', None)
what = self.convertFromStr(data, 'str')
data = node.get('objectId', None)
objectId = self.convertFromStr(data, 'long')
data = node.get('parentObjId', None)
parentObjId = self.convertFromStr(data, 'long')
data = node.get('parentObjType', None)
parentObjType = self.convertFromStr(data, 'str')
data = None
# read children
for child in node.getchildren():
if child.tag == 'module':
_data = self.getDao('module').fromXML(child)
data = _data
elif child.tag == 'location':
_data = self.getDao('location').fromXML(child)
data = _data
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
data = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
data = _data
elif child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
data = _data
elif child.tag == 'port':
_data = self.getDao('port').fromXML(child)
data = _data
elif child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
data = _data
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
data = _data
elif child.tag == 'abstraction':
_data = self.getDao('abstraction').fromXML(child)
data = _data
elif child.tag == 'group':
_data = self.getDao('group').fromXML(child)
data = _data
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
data = _data
elif child.tag == 'plugin_data':
_data = self.getDao('plugin_data').fromXML(child)
data = _data
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAdd(data=data,
id=id,
what=what,
objectId=objectId,
parentObjId=parentObjId,
parentObjType=parentObjType)
obj.is_dirty = False
return obj
def toXML(self, add, node=None):
if node is None:
node = ElementTree.Element('add')
# set attributes
node.set('id',self.convertToStr(add.db_id, 'long'))
node.set('what',self.convertToStr(add.db_what, 'str'))
node.set('objectId',self.convertToStr(add.db_objectId, 'long'))
node.set('parentObjId',self.convertToStr(add.db_parentObjId, 'long'))
node.set('parentObjType',self.convertToStr(add.db_parentObjType, 'str'))
# set elements
data = add.db_data
if data is not None:
if data.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(data, childNode)
elif data.vtType == 'location':
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(data, childNode)
elif data.vtType == 'annotation':
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(data, childNode)
elif data.vtType == 'function':
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(data, childNode)
elif data.vtType == 'connection':
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(data, childNode)
elif data.vtType == 'port':
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(data, childNode)
elif data.vtType == 'parameter':
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(data, childNode)
elif data.vtType == 'portSpec':
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(data, childNode)
elif data.vtType == 'abstraction':
childNode = ElementTree.SubElement(node, 'abstraction')
self.getDao('abstraction').toXML(data, childNode)
elif data.vtType == 'group':
childNode = ElementTree.SubElement(node, 'group')
self.getDao('group').toXML(data, childNode)
elif data.vtType == 'other':
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(data, childNode)
elif data.vtType == 'plugin_data':
childNode = ElementTree.SubElement(node, 'plugin_data')
self.getDao('plugin_data').toXML(data, childNode)
return node
class DBOtherXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'other':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('key', None)
key = self.convertFromStr(data, 'str')
value = None
# read children
for child in node.getchildren():
if child.tag == 'value':
_data = self.convertFromStr(child.text,'')
value = _data
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBOther(id=id,
key=key,
value=value)
obj.is_dirty = False
return obj
def toXML(self, other, node=None):
if node is None:
node = ElementTree.Element('other')
# set attributes
node.set('id',self.convertToStr(other.db_id, 'long'))
node.set('key',self.convertToStr(other.db_key, 'str'))
# set elements
return node
class DBLocationXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBLocation objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBLocation parsed from *node*, or None on tag mismatch."""
        if node.tag != 'location':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBLocation(id=attr('id', 'long'),
                         x=attr('x', 'float'),
                         y=attr('y', 'float'))
        obj.is_dirty = False
        return obj

    def toXML(self, location, node=None):
        """Serialize *location* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('location')
        for name, value, kind in (('id', location.db_id, 'long'),
                                  ('x', location.db_x, 'float'),
                                  ('y', location.db_y, 'float')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBParameterXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBParameter objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBParameter parsed from *node*, or None on tag mismatch."""
        if node.tag != 'parameter':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBParameter(id=attr('id', 'long'),
                          pos=attr('pos', 'long'),
                          name=attr('name', 'str'),
                          type=attr('type', 'str'),
                          val=attr('val', 'str'),
                          alias=attr('alias', 'str'))
        obj.is_dirty = False
        return obj

    def toXML(self, parameter, node=None):
        """Serialize *parameter* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('parameter')
        for name, value, kind in (('id', parameter.db_id, 'long'),
                                  ('pos', parameter.db_pos, 'long'),
                                  ('name', parameter.db_name, 'str'),
                                  ('type', parameter.db_type, 'str'),
                                  ('val', parameter.db_val, 'str'),
                                  ('alias', parameter.db_alias, 'str')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBPluginDataXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBPluginData objects."""

    def __init__(self, daoList):
        self.daoList = daoList

    def getDao(self, dao):
        return self.daoList[dao]

    def fromXML(self, node):
        """Return a DBPluginData parsed from *node*, or None on tag mismatch."""
        if node.tag != 'plugin_data':
            return None
        def attr(name, kind):
            return self.convertFromStr(node.get(name, None), kind)
        obj = DBPluginData(id=attr('id', 'long'),
                           data=attr('data', 'str'))
        obj.is_dirty = False
        return obj

    def toXML(self, plugin_data, node=None):
        """Serialize *plugin_data* into *node* (created when omitted)."""
        if node is None:
            node = ElementTree.Element('plugin_data')
        for name, value, kind in (('id', plugin_data.db_id, 'long'),
                                  ('data', plugin_data.db_data, 'str')):
            node.set(name, self.convertToStr(value, kind))
        return node
class DBFunctionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'function':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('pos', None)
pos = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
parameters = []
# read children
for child in node.getchildren():
if child.tag == 'parameter':
_data = self.getDao('parameter').fromXML(child)
parameters.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBFunction(id=id,
pos=pos,
name=name,
parameters=parameters)
obj.is_dirty = False
return obj
def toXML(self, function, node=None):
if node is None:
node = ElementTree.Element('function')
# set attributes
node.set('id',self.convertToStr(function.db_id, 'long'))
node.set('pos',self.convertToStr(function.db_pos, 'long'))
node.set('name',self.convertToStr(function.db_name, 'str'))
# set elements
parameters = function.db_parameters
for parameter in parameters:
childNode = ElementTree.SubElement(node, 'parameter')
self.getDao('parameter').toXML(parameter, childNode)
return node
class DBAbstractionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'abstraction':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('namespace', None)
namespace = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('internalVersion', None)
internal_version = self.convertFromStr(data, 'str')
data = node.get('tag', None)
tag = self.convertFromStr(data, 'str')
location = None
functions = []
annotations = []
# read children
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBAbstraction(id=id,
cache=cache,
name=name,
namespace=namespace,
package=package,
version=version,
internal_version=internal_version,
tag=tag,
location=location,
functions=functions,
annotations=annotations)
obj.is_dirty = False
return obj
def toXML(self, abstraction, node=None):
if node is None:
node = ElementTree.Element('abstraction')
# set attributes
node.set('id',self.convertToStr(abstraction.db_id, 'long'))
node.set('cache',self.convertToStr(abstraction.db_cache, 'int'))
node.set('name',self.convertToStr(abstraction.db_name, 'str'))
node.set('namespace',self.convertToStr(abstraction.db_namespace, 'str'))
node.set('package',self.convertToStr(abstraction.db_package, 'str'))
node.set('version',self.convertToStr(abstraction.db_version, 'str'))
node.set('internalVersion',self.convertToStr(abstraction.db_internal_version, 'str'))
node.set('tag',self.convertToStr(abstraction.db_tag, 'str'))
# set elements
location = abstraction.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = abstraction.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = abstraction.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
return node
class DBWorkflowXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'workflow':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('vistrail_id', None)
vistrail_id = self.convertFromStr(data, 'long')
connections = []
annotations = []
plugin_datas = []
others = []
modules = []
# read children
for child in node.getchildren():
if child.tag == 'connection':
_data = self.getDao('connection').fromXML(child)
connections.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'plugin_data':
_data = self.getDao('plugin_data').fromXML(child)
plugin_datas.append(_data)
elif child.tag == 'other':
_data = self.getDao('other').fromXML(child)
others.append(_data)
elif child.tag == 'module':
_data = self.getDao('module').fromXML(child)
modules.append(_data)
elif child.tag == 'abstraction':
_data = self.getDao('abstraction').fromXML(child)
modules.append(_data)
elif child.tag == 'group':
_data = self.getDao('group').fromXML(child)
modules.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBWorkflow(modules=modules,
id=id,
name=name,
version=version,
connections=connections,
annotations=annotations,
plugin_datas=plugin_datas,
others=others,
vistrail_id=vistrail_id)
obj.is_dirty = False
return obj
def toXML(self, workflow, node=None):
if node is None:
node = ElementTree.Element('workflow')
# set attributes
node.set('id',self.convertToStr(workflow.db_id, 'long'))
node.set('name',self.convertToStr(workflow.db_name, 'str'))
node.set('version',self.convertToStr(workflow.db_version, 'str'))
node.set('vistrail_id',self.convertToStr(workflow.db_vistrail_id, 'long'))
# set elements
connections = workflow.db_connections
for connection in connections:
childNode = ElementTree.SubElement(node, 'connection')
self.getDao('connection').toXML(connection, childNode)
annotations = workflow.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
plugin_datas = workflow.db_plugin_datas
for plugin_data in plugin_datas:
childNode = ElementTree.SubElement(node, 'plugin_data')
self.getDao('plugin_data').toXML(plugin_data, childNode)
others = workflow.db_others
for other in others:
childNode = ElementTree.SubElement(node, 'other')
self.getDao('other').toXML(other, childNode)
modules = workflow.db_modules
for module in modules:
if module.vtType == 'module':
childNode = ElementTree.SubElement(node, 'module')
self.getDao('module').toXML(module, childNode)
elif module.vtType == 'abstraction':
childNode = ElementTree.SubElement(node, 'abstraction')
self.getDao('abstraction').toXML(module, childNode)
elif module.vtType == 'group':
childNode = ElementTree.SubElement(node, 'group')
self.getDao('group').toXML(module, childNode)
return node
class DBRegistryXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBRegistry objects (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBRegistry from a <registry> element; None on tag mismatch."""
        if node.tag != 'registry':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('version', None)
        version = self.convertFromStr(data, 'str')
        data = node.get('rootDescriptorId', None)
        root_descriptor_id = self.convertFromStr(data, 'long')
        packages = []
        # read children
        # NOTE(review): Element.getchildren() is deprecated (removed in
        # Python 3.9); iterate the element directly when regenerating.
        for child in node.getchildren():
            if child.tag == 'package':
                _data = self.getDao('package').fromXML(child)
                packages.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBRegistry(id=id,
                         version=version,
                         root_descriptor_id=root_descriptor_id,
                         packages=packages)
        obj.is_dirty = False
        return obj
    def toXML(self, registry, node=None):
        """Serialize *registry* into *node* (a new <registry> element if None)."""
        if node is None:
            node = ElementTree.Element('registry')
        # set attributes
        node.set('id',self.convertToStr(registry.db_id, 'long'))
        node.set('version',self.convertToStr(registry.db_version, 'str'))
        node.set('rootDescriptorId',self.convertToStr(registry.db_root_descriptor_id, 'long'))
        # set elements
        packages = registry.db_packages
        for package in packages:
            childNode = ElementTree.SubElement(node, 'package')
            self.getDao('package').toXML(package, childNode)
        return node
class DBAnnotationXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBAnnotation key/value pairs."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Parse an <annotation> element into a DBAnnotation, or None."""
        if node.tag != 'annotation':
            return None
        # Annotations carry attributes only -- no child elements to walk.
        obj = DBAnnotation(
            id=self.convertFromStr(node.get('id', None), 'long'),
            key=self.convertFromStr(node.get('key', None), 'str'),
            value=self.convertFromStr(node.get('value', None), 'str'))
        obj.is_dirty = False
        return obj
    def toXML(self, annotation, node=None):
        """Serialize *annotation* into *node* (a new element when omitted)."""
        if node is None:
            node = ElementTree.Element('annotation')
        for attr, raw, typ in (('id', annotation.db_id, 'long'),
                               ('key', annotation.db_key, 'str'),
                               ('value', annotation.db_value, 'str')):
            node.set(attr, self.convertToStr(raw, typ))
        return node
class DBChangeXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBChange operations (auto-generated DAO).

    A <change> element carries at most one child -- the new payload object --
    whose concrete type is dispatched on the child tag / vtType.
    """
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBChange from a <change> element; None on tag mismatch."""
        if node.tag != 'change':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('what', None)
        what = self.convertFromStr(data, 'str')
        data = node.get('oldObjId', None)
        oldObjId = self.convertFromStr(data, 'long')
        data = node.get('newObjId', None)
        newObjId = self.convertFromStr(data, 'long')
        data = node.get('parentObjId', None)
        parentObjId = self.convertFromStr(data, 'long')
        data = node.get('parentObjType', None)
        parentObjType = self.convertFromStr(data, 'str')
        data = None
        # read children
        # Note: 'data' is re-bound to whichever payload child appears;
        # if several appear, the last one wins.
        for child in node.getchildren():
            if child.tag == 'module':
                _data = self.getDao('module').fromXML(child)
                data = _data
            elif child.tag == 'location':
                _data = self.getDao('location').fromXML(child)
                data = _data
            elif child.tag == 'annotation':
                _data = self.getDao('annotation').fromXML(child)
                data = _data
            elif child.tag == 'function':
                _data = self.getDao('function').fromXML(child)
                data = _data
            elif child.tag == 'connection':
                _data = self.getDao('connection').fromXML(child)
                data = _data
            elif child.tag == 'port':
                _data = self.getDao('port').fromXML(child)
                data = _data
            elif child.tag == 'parameter':
                _data = self.getDao('parameter').fromXML(child)
                data = _data
            elif child.tag == 'portSpec':
                _data = self.getDao('portSpec').fromXML(child)
                data = _data
            elif child.tag == 'abstraction':
                _data = self.getDao('abstraction').fromXML(child)
                data = _data
            elif child.tag == 'group':
                _data = self.getDao('group').fromXML(child)
                data = _data
            elif child.tag == 'other':
                _data = self.getDao('other').fromXML(child)
                data = _data
            elif child.tag == 'plugin_data':
                _data = self.getDao('plugin_data').fromXML(child)
                data = _data
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBChange(data=data,
                       id=id,
                       what=what,
                       oldObjId=oldObjId,
                       newObjId=newObjId,
                       parentObjId=parentObjId,
                       parentObjType=parentObjType)
        obj.is_dirty = False
        return obj
    def toXML(self, change, node=None):
        """Serialize *change* into *node* (a new <change> element if None)."""
        if node is None:
            node = ElementTree.Element('change')
        # set attributes
        node.set('id',self.convertToStr(change.db_id, 'long'))
        node.set('what',self.convertToStr(change.db_what, 'str'))
        node.set('oldObjId',self.convertToStr(change.db_oldObjId, 'long'))
        node.set('newObjId',self.convertToStr(change.db_newObjId, 'long'))
        node.set('parentObjId',self.convertToStr(change.db_parentObjId, 'long'))
        node.set('parentObjType',self.convertToStr(change.db_parentObjType, 'str'))
        # set elements
        # Dispatch the single payload child on its vtType.
        data = change.db_data
        if data is not None:
            if data.vtType == 'module':
                childNode = ElementTree.SubElement(node, 'module')
                self.getDao('module').toXML(data, childNode)
            elif data.vtType == 'location':
                childNode = ElementTree.SubElement(node, 'location')
                self.getDao('location').toXML(data, childNode)
            elif data.vtType == 'annotation':
                childNode = ElementTree.SubElement(node, 'annotation')
                self.getDao('annotation').toXML(data, childNode)
            elif data.vtType == 'function':
                childNode = ElementTree.SubElement(node, 'function')
                self.getDao('function').toXML(data, childNode)
            elif data.vtType == 'connection':
                childNode = ElementTree.SubElement(node, 'connection')
                self.getDao('connection').toXML(data, childNode)
            elif data.vtType == 'port':
                childNode = ElementTree.SubElement(node, 'port')
                self.getDao('port').toXML(data, childNode)
            elif data.vtType == 'parameter':
                childNode = ElementTree.SubElement(node, 'parameter')
                self.getDao('parameter').toXML(data, childNode)
            elif data.vtType == 'portSpec':
                childNode = ElementTree.SubElement(node, 'portSpec')
                self.getDao('portSpec').toXML(data, childNode)
            elif data.vtType == 'abstraction':
                childNode = ElementTree.SubElement(node, 'abstraction')
                self.getDao('abstraction').toXML(data, childNode)
            elif data.vtType == 'group':
                childNode = ElementTree.SubElement(node, 'group')
                self.getDao('group').toXML(data, childNode)
            elif data.vtType == 'other':
                childNode = ElementTree.SubElement(node, 'other')
                self.getDao('other').toXML(data, childNode)
            elif data.vtType == 'plugin_data':
                childNode = ElementTree.SubElement(node, 'plugin_data')
                self.getDao('plugin_data').toXML(data, childNode)
        return node
class DBGroupExecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'groupExec':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('tsStart', None)
ts_start = self.convertFromStr(data, 'datetime')
data = node.get('tsEnd', None)
ts_end = self.convertFromStr(data, 'datetime')
data = node.get('cached', None)
cached = self.convertFromStr(data, 'int')
data = node.get('moduleId', None)
module_id = self.convertFromStr(data, 'long')
data = node.get('groupName', None)
group_name = self.convertFromStr(data, 'str')
data = node.get('groupType', None)
group_type = self.convertFromStr(data, 'str')
data = node.get('completed', None)
completed = self.convertFromStr(data, 'int')
data = node.get('error', None)
error = self.convertFromStr(data, 'str')
data = node.get('machine_id', None)
machine_id = self.convertFromStr(data, 'long')
annotations = []
loop_execs = []
module_execs = []
group_execs = []
# read children
for child in node.getchildren():
if child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'loopExec':
_data = self.getDao('loop_exec').fromXML(child)
loop_execs.append(_data)
elif child.tag == 'moduleExec':
_data = self.getDao('module_exec').fromXML(child)
module_execs.append(_data)
elif child.tag == 'groupExec':
_data = self.getDao('group_exec').fromXML(child)
group_execs.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBGroupExec(id=id,
ts_start=ts_start,
ts_end=ts_end,
cached=cached,
module_id=module_id,
group_name=group_name,
group_type=group_type,
completed=completed,
error=error,
machine_id=machine_id,
annotations=annotations,
loop_execs=loop_execs,
module_execs=module_execs,
group_execs=group_execs)
obj.is_dirty = False
return obj
def toXML(self, group_exec, node=None):
if node is None:
node = ElementTree.Element('groupExec')
# set attributes
node.set('id',self.convertToStr(group_exec.db_id, 'long'))
node.set('tsStart',self.convertToStr(group_exec.db_ts_start, 'datetime'))
node.set('tsEnd',self.convertToStr(group_exec.db_ts_end, 'datetime'))
node.set('cached',self.convertToStr(group_exec.db_cached, 'int'))
node.set('moduleId',self.convertToStr(group_exec.db_module_id, 'long'))
node.set('groupName',self.convertToStr(group_exec.db_group_name, 'str'))
node.set('groupType',self.convertToStr(group_exec.db_group_type, 'str'))
node.set('completed',self.convertToStr(group_exec.db_completed, 'int'))
node.set('error',self.convertToStr(group_exec.db_error, 'str'))
node.set('machine_id',self.convertToStr(group_exec.db_machine_id, 'long'))
# set elements
annotations = group_exec.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
loop_execs = group_exec.db_loop_execs
for loop_exec in loop_execs:
childNode = ElementTree.SubElement(node, 'loopExec')
self.getDao('loop_exec').toXML(loop_exec, childNode)
module_execs = group_exec.db_module_execs
for module_exec in module_execs:
childNode = ElementTree.SubElement(node, 'moduleExec')
self.getDao('module_exec').toXML(module_exec, childNode)
group_execs = group_exec.db_group_execs
for group_exec in group_execs:
childNode = ElementTree.SubElement(node, 'groupExec')
self.getDao('group_exec').toXML(group_exec, childNode)
return node
class DBPackageXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBPackage objects (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBPackage from a <package> element; None on tag mismatch."""
        if node.tag != 'package':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('name', None)
        name = self.convertFromStr(data, 'str')
        data = node.get('identifier', None)
        identifier = self.convertFromStr(data, 'str')
        data = node.get('codepath', None)
        codepath = self.convertFromStr(data, 'str')
        data = node.get('loadConfiguration', None)
        load_configuration = self.convertFromStr(data, 'int')
        data = node.get('version', None)
        version = self.convertFromStr(data, 'str')
        data = node.get('description', None)
        description = self.convertFromStr(data, 'str')
        module_descriptors = []
        # read children
        for child in node.getchildren():
            if child.tag == 'moduleDescriptor':
                _data = self.getDao('module_descriptor').fromXML(child)
                module_descriptors.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBPackage(id=id,
                        name=name,
                        identifier=identifier,
                        codepath=codepath,
                        load_configuration=load_configuration,
                        version=version,
                        description=description,
                        module_descriptors=module_descriptors)
        obj.is_dirty = False
        return obj
    def toXML(self, package, node=None):
        """Serialize *package* into *node* (a new <package> element if None)."""
        if node is None:
            node = ElementTree.Element('package')
        # set attributes
        node.set('id',self.convertToStr(package.db_id, 'long'))
        node.set('name',self.convertToStr(package.db_name, 'str'))
        node.set('identifier',self.convertToStr(package.db_identifier, 'str'))
        node.set('codepath',self.convertToStr(package.db_codepath, 'str'))
        node.set('loadConfiguration',self.convertToStr(package.db_load_configuration, 'int'))
        node.set('version',self.convertToStr(package.db_version, 'str'))
        node.set('description',self.convertToStr(package.db_description, 'str'))
        # set elements
        module_descriptors = package.db_module_descriptors
        for module_descriptor in module_descriptors:
            childNode = ElementTree.SubElement(node, 'moduleDescriptor')
            self.getDao('module_descriptor').toXML(module_descriptor, childNode)
        return node
class DBWorkflowExecXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBWorkflowExec log entries (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBWorkflowExec from a <workflowExec> element; None on tag mismatch."""
        if node.tag != 'workflowExec':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('user', None)
        user = self.convertFromStr(data, 'str')
        data = node.get('ip', None)
        ip = self.convertFromStr(data, 'str')
        data = node.get('session', None)
        session = self.convertFromStr(data, 'long')
        data = node.get('vtVersion', None)
        vt_version = self.convertFromStr(data, 'str')
        data = node.get('tsStart', None)
        ts_start = self.convertFromStr(data, 'datetime')
        data = node.get('tsEnd', None)
        ts_end = self.convertFromStr(data, 'datetime')
        data = node.get('parentId', None)
        parent_id = self.convertFromStr(data, 'long')
        data = node.get('parentType', None)
        parent_type = self.convertFromStr(data, 'str')
        data = node.get('parentVersion', None)
        parent_version = self.convertFromStr(data, 'long')
        data = node.get('completed', None)
        completed = self.convertFromStr(data, 'int')
        data = node.get('name', None)
        name = self.convertFromStr(data, 'str')
        items = []
        # read children
        # Module and group executions are interleaved into a single 'items'
        # list, preserving document order.
        for child in node.getchildren():
            if child.tag == 'moduleExec':
                _data = self.getDao('module_exec').fromXML(child)
                items.append(_data)
            elif child.tag == 'groupExec':
                _data = self.getDao('group_exec').fromXML(child)
                items.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBWorkflowExec(items=items,
                             id=id,
                             user=user,
                             ip=ip,
                             session=session,
                             vt_version=vt_version,
                             ts_start=ts_start,
                             ts_end=ts_end,
                             parent_id=parent_id,
                             parent_type=parent_type,
                             parent_version=parent_version,
                             completed=completed,
                             name=name)
        obj.is_dirty = False
        return obj
    def toXML(self, workflow_exec, node=None):
        """Serialize *workflow_exec* into *node* (a new <workflowExec> element if None)."""
        if node is None:
            node = ElementTree.Element('workflowExec')
        # set attributes
        node.set('id',self.convertToStr(workflow_exec.db_id, 'long'))
        node.set('user',self.convertToStr(workflow_exec.db_user, 'str'))
        node.set('ip',self.convertToStr(workflow_exec.db_ip, 'str'))
        node.set('session',self.convertToStr(workflow_exec.db_session, 'long'))
        node.set('vtVersion',self.convertToStr(workflow_exec.db_vt_version, 'str'))
        node.set('tsStart',self.convertToStr(workflow_exec.db_ts_start, 'datetime'))
        node.set('tsEnd',self.convertToStr(workflow_exec.db_ts_end, 'datetime'))
        node.set('parentId',self.convertToStr(workflow_exec.db_parent_id, 'long'))
        node.set('parentType',self.convertToStr(workflow_exec.db_parent_type, 'str'))
        node.set('parentVersion',self.convertToStr(workflow_exec.db_parent_version, 'long'))
        node.set('completed',self.convertToStr(workflow_exec.db_completed, 'int'))
        node.set('name',self.convertToStr(workflow_exec.db_name, 'str'))
        # set elements
        # Dispatch each item on its vtType to the matching child tag.
        items = workflow_exec.db_items
        for item in items:
            if item.vtType == 'module_exec':
                childNode = ElementTree.SubElement(node, 'moduleExec')
                self.getDao('module_exec').toXML(item, childNode)
            elif item.vtType == 'group_exec':
                childNode = ElementTree.SubElement(node, 'groupExec')
                self.getDao('group_exec').toXML(item, childNode)
        return node
class DBLoopExecXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBLoopExec log entries (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBLoopExec from a <loopExec> element; None on tag mismatch."""
        if node.tag != 'loopExec':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('tsStart', None)
        ts_start = self.convertFromStr(data, 'datetime')
        data = node.get('tsEnd', None)
        ts_end = self.convertFromStr(data, 'datetime')
        data = node.get('completed', None)
        completed = self.convertFromStr(data, 'int')
        data = node.get('error', None)
        error = self.convertFromStr(data, 'str')
        module_execs = []
        group_execs = []
        # read children
        for child in node.getchildren():
            if child.tag == 'moduleExec':
                _data = self.getDao('module_exec').fromXML(child)
                module_execs.append(_data)
            elif child.tag == 'groupExec':
                _data = self.getDao('group_exec').fromXML(child)
                group_execs.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBLoopExec(id=id,
                         ts_start=ts_start,
                         ts_end=ts_end,
                         completed=completed,
                         error=error,
                         module_execs=module_execs,
                         group_execs=group_execs)
        obj.is_dirty = False
        return obj
    def toXML(self, loop_exec, node=None):
        """Serialize *loop_exec* into *node* (a new <loopExec> element if None)."""
        if node is None:
            node = ElementTree.Element('loopExec')
        # set attributes
        node.set('id',self.convertToStr(loop_exec.db_id, 'long'))
        node.set('tsStart',self.convertToStr(loop_exec.db_ts_start, 'datetime'))
        node.set('tsEnd',self.convertToStr(loop_exec.db_ts_end, 'datetime'))
        node.set('completed',self.convertToStr(loop_exec.db_completed, 'int'))
        node.set('error',self.convertToStr(loop_exec.db_error, 'str'))
        # set elements
        module_execs = loop_exec.db_module_execs
        for module_exec in module_execs:
            childNode = ElementTree.SubElement(node, 'moduleExec')
            self.getDao('module_exec').toXML(module_exec, childNode)
        group_execs = loop_exec.db_group_execs
        for group_exec in group_execs:
            childNode = ElementTree.SubElement(node, 'groupExec')
            self.getDao('group_exec').toXML(group_exec, childNode)
        return node
class DBConnectionXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'connection':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
ports = []
# read children
for child in node.getchildren():
if child.tag == 'port':
_data = self.getDao('port').fromXML(child)
ports.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBConnection(id=id,
ports=ports)
obj.is_dirty = False
return obj
def toXML(self, connection, node=None):
if node is None:
node = ElementTree.Element('connection')
# set attributes
node.set('id',self.convertToStr(connection.db_id, 'long'))
# set elements
ports = connection.db_ports
for port in ports:
childNode = ElementTree.SubElement(node, 'port')
self.getDao('port').toXML(port, childNode)
return node
class DBActionXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBAction version-tree nodes (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBAction from an <action> element; None on tag mismatch."""
        if node.tag != 'action':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('prevId', None)
        prevId = self.convertFromStr(data, 'long')
        data = node.get('date', None)
        date = self.convertFromStr(data, 'datetime')
        data = node.get('session', None)
        session = self.convertFromStr(data, 'long')
        data = node.get('user', None)
        user = self.convertFromStr(data, 'str')
        data = node.get('prune', None)
        prune = self.convertFromStr(data, 'int')
        annotations = []
        operations = []
        # read children
        # add/delete/change children are interleaved into one 'operations'
        # list, preserving document order.
        for child in node.getchildren():
            if child.tag == 'annotation':
                _data = self.getDao('annotation').fromXML(child)
                annotations.append(_data)
            elif child.tag == 'add':
                _data = self.getDao('add').fromXML(child)
                operations.append(_data)
            elif child.tag == 'delete':
                _data = self.getDao('delete').fromXML(child)
                operations.append(_data)
            elif child.tag == 'change':
                _data = self.getDao('change').fromXML(child)
                operations.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBAction(operations=operations,
                       id=id,
                       prevId=prevId,
                       date=date,
                       session=session,
                       user=user,
                       prune=prune,
                       annotations=annotations)
        obj.is_dirty = False
        return obj
    def toXML(self, action, node=None):
        """Serialize *action* into *node* (a new <action> element if None)."""
        if node is None:
            node = ElementTree.Element('action')
        # set attributes
        node.set('id',self.convertToStr(action.db_id, 'long'))
        node.set('prevId',self.convertToStr(action.db_prevId, 'long'))
        node.set('date',self.convertToStr(action.db_date, 'datetime'))
        node.set('session',self.convertToStr(action.db_session, 'long'))
        node.set('user',self.convertToStr(action.db_user, 'str'))
        node.set('prune',self.convertToStr(action.db_prune, 'int'))
        # set elements
        annotations = action.db_annotations
        for annotation in annotations:
            childNode = ElementTree.SubElement(node, 'annotation')
            self.getDao('annotation').toXML(annotation, childNode)
        # Dispatch each operation on its vtType to the matching child tag.
        operations = action.db_operations
        for operation in operations:
            if operation.vtType == 'add':
                childNode = ElementTree.SubElement(node, 'add')
                self.getDao('add').toXML(operation, childNode)
            elif operation.vtType == 'delete':
                childNode = ElementTree.SubElement(node, 'delete')
                self.getDao('delete').toXML(operation, childNode)
            elif operation.vtType == 'change':
                childNode = ElementTree.SubElement(node, 'change')
                self.getDao('change').toXML(operation, childNode)
        return node
class DBDeleteXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBDelete operations."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Parse a <delete> element into a DBDelete, or None."""
        if node.tag != 'delete':
            return None
        # Deletes carry attributes only -- no child elements to walk.
        obj = DBDelete(
            id=self.convertFromStr(node.get('id', None), 'long'),
            what=self.convertFromStr(node.get('what', None), 'str'),
            objectId=self.convertFromStr(node.get('objectId', None), 'long'),
            parentObjId=self.convertFromStr(node.get('parentObjId', None), 'long'),
            parentObjType=self.convertFromStr(node.get('parentObjType', None), 'str'))
        obj.is_dirty = False
        return obj
    def toXML(self, delete, node=None):
        """Serialize *delete* into *node* (a new element when omitted)."""
        if node is None:
            node = ElementTree.Element('delete')
        for attr, raw, typ in (('id', delete.db_id, 'long'),
                               ('what', delete.db_what, 'str'),
                               ('objectId', delete.db_objectId, 'long'),
                               ('parentObjId', delete.db_parentObjId, 'long'),
                               ('parentObjType', delete.db_parentObjType, 'str')):
            node.set(attr, self.convertToStr(raw, typ))
        return node
class DBVistrailXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBVistrail objects (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBVistrail from a <vistrail> element; None on tag mismatch."""
        if node.tag != 'vistrail':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('version', None)
        version = self.convertFromStr(data, 'str')
        data = node.get('name', None)
        name = self.convertFromStr(data, 'str')
        actions = []
        tags = []
        annotations = []
        # read children
        for child in node.getchildren():
            if child.tag == 'action':
                _data = self.getDao('action').fromXML(child)
                actions.append(_data)
            elif child.tag == 'tag':
                _data = self.getDao('tag').fromXML(child)
                tags.append(_data)
            elif child.tag == 'annotation':
                _data = self.getDao('annotation').fromXML(child)
                annotations.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBVistrail(id=id,
                         version=version,
                         name=name,
                         actions=actions,
                         tags=tags,
                         annotations=annotations)
        obj.is_dirty = False
        return obj
    def toXML(self, vistrail, node=None):
        """Serialize *vistrail* into *node* (a new <vistrail> element if None)."""
        if node is None:
            node = ElementTree.Element('vistrail')
        # set attributes
        node.set('id',self.convertToStr(vistrail.db_id, 'long'))
        node.set('version',self.convertToStr(vistrail.db_version, 'str'))
        node.set('name',self.convertToStr(vistrail.db_name, 'str'))
        # set elements
        actions = vistrail.db_actions
        for action in actions:
            childNode = ElementTree.SubElement(node, 'action')
            self.getDao('action').toXML(action, childNode)
        tags = vistrail.db_tags
        for tag in tags:
            childNode = ElementTree.SubElement(node, 'tag')
            self.getDao('tag').toXML(tag, childNode)
        annotations = vistrail.db_annotations
        for annotation in annotations:
            childNode = ElementTree.SubElement(node, 'annotation')
            self.getDao('annotation').toXML(annotation, childNode)
        return node
class DBModuleExecXMLDAOBase(XMLDAO):
    """XML (de)serializer for DBModuleExec log entries (auto-generated DAO)."""
    def __init__(self, daoList):
        # daoList maps vtType names to sibling DAO instances for delegation.
        self.daoList = daoList
    def getDao(self, dao):
        """Return the sibling DAO registered under *dao*."""
        return self.daoList[dao]
    def fromXML(self, node):
        """Build a DBModuleExec from a <moduleExec> element; None on tag mismatch."""
        if node.tag != 'moduleExec':
            return None
        # read attributes
        data = node.get('id', None)
        id = self.convertFromStr(data, 'long')
        data = node.get('tsStart', None)
        ts_start = self.convertFromStr(data, 'datetime')
        data = node.get('tsEnd', None)
        ts_end = self.convertFromStr(data, 'datetime')
        data = node.get('cached', None)
        cached = self.convertFromStr(data, 'int')
        data = node.get('moduleId', None)
        module_id = self.convertFromStr(data, 'long')
        data = node.get('moduleName', None)
        module_name = self.convertFromStr(data, 'str')
        data = node.get('completed', None)
        completed = self.convertFromStr(data, 'int')
        data = node.get('error', None)
        error = self.convertFromStr(data, 'str')
        data = node.get('abstraction_id', None)
        abstraction_id = self.convertFromStr(data, 'long')
        data = node.get('abstraction_version', None)
        abstraction_version = self.convertFromStr(data, 'long')
        data = node.get('machine_id', None)
        machine_id = self.convertFromStr(data, 'long')
        annotations = []
        loop_execs = []
        # read children
        for child in node.getchildren():
            if child.tag == 'annotation':
                _data = self.getDao('annotation').fromXML(child)
                annotations.append(_data)
            elif child.tag == 'loopExec':
                _data = self.getDao('loop_exec').fromXML(child)
                loop_execs.append(_data)
            elif child.text is None or child.text.strip() == '':
                pass
            else:
                print '*** ERROR *** tag = %s' % child.tag
        obj = DBModuleExec(id=id,
                           ts_start=ts_start,
                           ts_end=ts_end,
                           cached=cached,
                           module_id=module_id,
                           module_name=module_name,
                           completed=completed,
                           error=error,
                           abstraction_id=abstraction_id,
                           abstraction_version=abstraction_version,
                           machine_id=machine_id,
                           annotations=annotations,
                           loop_execs=loop_execs)
        obj.is_dirty = False
        return obj
    def toXML(self, module_exec, node=None):
        """Serialize *module_exec* into *node* (a new <moduleExec> element if None)."""
        if node is None:
            node = ElementTree.Element('moduleExec')
        # set attributes
        node.set('id',self.convertToStr(module_exec.db_id, 'long'))
        node.set('tsStart',self.convertToStr(module_exec.db_ts_start, 'datetime'))
        node.set('tsEnd',self.convertToStr(module_exec.db_ts_end, 'datetime'))
        node.set('cached',self.convertToStr(module_exec.db_cached, 'int'))
        node.set('moduleId',self.convertToStr(module_exec.db_module_id, 'long'))
        node.set('moduleName',self.convertToStr(module_exec.db_module_name, 'str'))
        node.set('completed',self.convertToStr(module_exec.db_completed, 'int'))
        node.set('error',self.convertToStr(module_exec.db_error, 'str'))
        node.set('abstraction_id',self.convertToStr(module_exec.db_abstraction_id, 'long'))
        node.set('abstraction_version',self.convertToStr(module_exec.db_abstraction_version, 'long'))
        node.set('machine_id',self.convertToStr(module_exec.db_machine_id, 'long'))
        # set elements
        annotations = module_exec.db_annotations
        for annotation in annotations:
            childNode = ElementTree.SubElement(node, 'annotation')
            self.getDao('annotation').toXML(annotation, childNode)
        loop_execs = module_exec.db_loop_execs
        for loop_exec in loop_execs:
            childNode = ElementTree.SubElement(node, 'loopExec')
            self.getDao('loop_exec').toXML(loop_exec, childNode)
        return node
"""generated automatically by auto_dao.py"""
class XMLDAOListBase(dict):
    """Registry mapping vtType names to their XML DAO instances.

    Caller-supplied DAOs (via *daos*) take precedence; every missing entry is
    filled with the default auto-generated DAO class, each constructed with a
    back-reference to this registry so the DAOs can delegate to one another.
    """

    # (key, default DAO class) pairs; same keys and same registration order
    # as the original 28-branch if-chain this replaces.
    _DEFAULT_DAOS = (
        ('portSpec', DBPortSpecXMLDAOBase),
        ('module', DBModuleXMLDAOBase),
        ('module_descriptor', DBModuleDescriptorXMLDAOBase),
        ('tag', DBTagXMLDAOBase),
        ('port', DBPortXMLDAOBase),
        ('group', DBGroupXMLDAOBase),
        ('log', DBLogXMLDAOBase),
        ('machine', DBMachineXMLDAOBase),
        ('add', DBAddXMLDAOBase),
        ('other', DBOtherXMLDAOBase),
        ('location', DBLocationXMLDAOBase),
        ('parameter', DBParameterXMLDAOBase),
        ('plugin_data', DBPluginDataXMLDAOBase),
        ('function', DBFunctionXMLDAOBase),
        ('abstraction', DBAbstractionXMLDAOBase),
        ('workflow', DBWorkflowXMLDAOBase),
        ('registry', DBRegistryXMLDAOBase),
        ('annotation', DBAnnotationXMLDAOBase),
        ('change', DBChangeXMLDAOBase),
        ('group_exec', DBGroupExecXMLDAOBase),
        ('package', DBPackageXMLDAOBase),
        ('workflow_exec', DBWorkflowExecXMLDAOBase),
        ('loop_exec', DBLoopExecXMLDAOBase),
        ('connection', DBConnectionXMLDAOBase),
        ('action', DBActionXMLDAOBase),
        ('delete', DBDeleteXMLDAOBase),
        ('vistrail', DBVistrailXMLDAOBase),
        ('module_exec', DBModuleExecXMLDAOBase),
    )

    def __init__(self, daos=None):
        if daos is not None:
            dict.update(self, daos)
        for key, dao_class in self._DEFAULT_DAOS:
            if key not in self:
                self[key] = dao_class(self)
| VisTrails/VisTrails | vistrails/db/versions/v0_9_5/persistence/xml/auto_gen.py | Python | bsd-3-clause | 80,893 |
############# ############# ############# ############# #############
# precomputeIDF
# by JAG3
# maintained by JAG3 and ekimbrel
# v1.0 - precompute IDF vectors for a dataset(s)
#
############# ############# ############# ############# #############
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
import sys
import argparse
import codecs
sys.path.insert(0, './lib/')
sys.path.insert(0, 'geqe-ml/lib/') # allow running from project root
import fspLib
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("jobNm", help="Application name, default = 'Find Similar Events'",default='findEvents')
parser.add_argument("datasets",help="comma separated list of datasets in hdfs")
parser.add_argument("--dap", "--dontApplyStop", dest="bUseStopFilter", action="store_false", help="Specified such that stop words are not filtered out.",default=True)
parser.add_argument("-partitions", help="repartition the input data set before processing.",type=int,default=-1)
parser.add_argument("-sCustStop", help="Comma seperated list of stop words to add include on this run",default='')
parser.add_argument("--stopWordsFile",help="File path to a stop words list. One word per line. default=inputFiles/stopWordList.txt",default="inputFiles/stopWordList.txt")
args = parser.parse_args()
inputPartitions = args.partitions
jobNm = args.jobNm
bUseStopFilter = args.bUseStopFilter
stopWordsPath = args.stopWordsFile
sCustStop=args.sCustStop
datasets = args.datasets.split(',')
conf = SparkConf().setAppName(jobNm)
sc = SparkContext(conf = conf)
sqlContext = SQLContext(sc)
#Read in stop word list early to get notified of issues early in process
bc_lStopWords = fspLib.load_stopwords(sc,stopWordsPath,sCustStop)
t0 = time.time()
records = None
for file in datasets:
print 'reading file: ',file
if records == None:
records = sqlContext.parquetFile(file)
else:
newRec = sqlContext.parquetFile(file)
records = records.unionAll(newRec)
if inputPartitions > 0: records = records.repartition(inputPartitions)
#Find the word document frequency for the corpus
#this is used for an idf score used in feature vector formation
t1 = time.time()
goodRecords = records.map(lambda x: fspLib.uniqueWords(x.text, bUseStopFilter, bc_lStopWords))
goodRecords = goodRecords.filter(lambda x: len(x) > 0).cache()
nGoodTweets = goodRecords.count()
t2 = time.time()
print "Number of good tweets:",nGoodTweets
diff = t2-t1
print "Time to read in and filter nonscorable words", diff
t1 = time.time()
dIDF = goodRecords.flatMap(lambda x: [(w,1) for w in x]).reduceByKey(lambda x,y: x+y)
dIDF.cache()
nTerms = dIDF.count()
nThresh = int(float(nGoodTweets)/1000000.)
final = dIDF.filter(lambda x: x[1]>nThresh).cache()
nFinal = final.count()
t2 = time.time()
diff = t2-t1
print "Time to perform idf calc: ", diff
print "Number of terms:", nTerms, ", number that pass thresh: ", nFinal
retDict = final.collectAsMap()
fDict = codecs.open("dictFiles/dict_"+jobNm, encoding="utf-8", mode="w")
pos = 0
for t, v in retDict.iteritems():
buffer = [t, pos, v]
buffer = map(lambda x: x if type(x) == unicode else unicode(x), buffer)
fDict.write(u'\t'.join(buffer)+u'\n')
pos = pos + 1
diff = time.time() - t0
print "<-------------Done------------>"
print "<-- Total time:", diff
print "<-- Threshold:", nThresh
print "<-- Num tweets:", nGoodTweets
print "<-- Num terms:", pos
print "<----------------------------->" | Sotera/GEQE | geqe-ml/precomputeIDF.py | Python | unlicense | 3,753 |
# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Availability Zone
"""
class Zone:
    """A single EC2 availability zone, populated from EC2's XML responses.

    Instances are filled in by boto's SAX parser: ``startElement`` /
    ``endElement`` are invoked for each XML node, and recognised tags are
    stored on well-known attributes (``name``, ``state``).
    """

    # XML tag name -> attribute name for the fields we care about.
    _TAG_TO_ATTR = {'zoneName': 'name', 'zoneState': 'state'}

    def __init__(self, connection=None):
        self.connection = connection
        self.name = None
        self.state = None

    def __repr__(self):
        return 'Zone:%s' % self.name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Known tags map onto name/state; anything else is stored verbatim
        # under its own tag name, matching boto's permissive convention.
        setattr(self, self._TAG_TO_ATTR.get(name, name), value)
| carlgao/lenga | images/lenny64-peon/usr/share/python-support/python-boto/boto/ec2/zone.py | Python | mit | 1,667 |
"""
Django settings for learn_models project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the directory containing the project package
# (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here; rotate it and load it
# from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'm!c6vi)5k0l^sf-p!d(ssh$m9g0*4*5h3)9n7btn5!bfbkms#('

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is fine while DEBUG is True; must list served hostnames in production.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'people',
    # NOTE(review): 'south' predates Django 1.7's built-in migrations and is
    # not compatible with Django 1.9 (which this project was generated for) —
    # confirm whether it can be removed.
    'south',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'learn_models.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # No extra template directories; app templates are found via APP_DIRS.
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'learn_models.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# SQLite file-backed database in the project root (development only).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| lichengshuang/createvhost | python/others/django/learn_models/learn_models/settings.py | Python | apache-2.0 | 3,211 |
import hashlib
import hmac
from time import time
from typing import Dict, Optional, Union
class Clock:
    """Source of "now"; injectable so tests can freeze or shift time."""

    @staticmethod
    def now() -> float:
        """Return the current Unix timestamp in seconds."""
        return time()


class SignatureVerifier:
    def __init__(self, signing_secret: str, clock: Clock = Clock()):
        """Slack request signature verifier

        Slack signs its requests using a secret that's unique to your app.
        With the help of signing secrets, your app can more confidently verify
        whether requests from us are authentic.
        https://api.slack.com/authentication/verifying-requests-from-slack

        Parameters
        ----------
        signing_secret : the app's signing secret from the Slack admin console
        clock : time source; override in tests to control "now"
        """
        self.signing_secret = signing_secret
        self.clock = clock

    def is_valid_request(
        self,
        body: Union[str, bytes],
        headers: Dict[str, str],
    ) -> bool:
        """Verifies if the given signature is valid

        Convenience wrapper that pulls the timestamp and signature out of the
        (case-insensitively matched) request headers.
        """
        if headers is None:
            return False
        # Header lookup must be case-insensitive per HTTP semantics.
        normalized_headers = {k.lower(): v for k, v in headers.items()}
        return self.is_valid(
            body=body,
            timestamp=normalized_headers.get("x-slack-request-timestamp", None),
            signature=normalized_headers.get("x-slack-signature", None),
        )

    def is_valid(
        self,
        body: Union[str, bytes],
        timestamp: str,
        signature: str,
    ) -> bool:
        """Verifies if the given signature is valid

        Returns False (never raises) for missing, malformed, stale, or
        mismatched inputs.
        """
        if timestamp is None or signature is None:
            return False
        try:
            request_time = int(timestamp)
        except ValueError:
            # Bug fix: the timestamp header is attacker-controlled; a
            # non-numeric value used to escape as ValueError instead of
            # being rejected as an invalid request.
            return False
        # Reject requests older (or newer) than 5 minutes to limit replay.
        if abs(self.clock.now() - request_time) > 60 * 5:
            return False
        calculated_signature = self.generate_signature(timestamp=timestamp, body=body)
        if calculated_signature is None:
            return False
        # Constant-time comparison to avoid leaking signature prefixes.
        return hmac.compare_digest(calculated_signature, signature)

    def generate_signature(
        self, *, timestamp: str, body: Union[str, bytes]
    ) -> Optional[str]:
        """Generates a signature

        Computes Slack's v0 signature: ``"v0=" + HMAC-SHA256(secret,
        "v0:{timestamp}:{body}")``. Returns None only when timestamp is None.
        """
        if timestamp is None:
            return None
        if body is None:
            body = ""
        if isinstance(body, bytes):
            body = body.decode("utf-8")
        format_req = str.encode(f"v0:{timestamp}:{body}")
        encoded_secret = str.encode(self.signing_secret)
        request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
        calculated_signature = f"v0={request_hash}"
        return calculated_signature
| slackhq/python-slackclient | slack/signature/verifier.py | Python | mit | 2,373 |
"""templates.unix.ui.Shutdown Module"""
import cairn
from cairn import Options
def getClass():
    """Module factory hook: return this module's sysdef step instance."""
    return Shutdown()
class Shutdown(object):
    """Sysdef step that queues the shutdown of whichever UI is configured."""

    def run(self, sysdef):
        # Headless runs have nothing to tear down.
        if Options.get("no-ui") or Options.get("ui") == "none":
            return True
        # Only the curses UI has a dedicated shutdown module; queue it
        # immediately after this step.
        if Options.get("ui") == "curses":
            sysdef.moduleList.insertAfterMe("ui.curses.Shutdown")
            return True
| redshodan/cairn | src/python/cairn/sysdefs/templates/unix/ui/Shutdown.py | Python | gpl-2.0 | 354 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# wf-api-client documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 3 13:38:22 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules; only doctest support is enabled.
extensions = [
    'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'wf-api-client'
copyright = '2015, David J. Cox'
author = 'David J. Cox'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are maintained by hand here — keep them in
# sync with the package's own version when releasing.
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Output file base name for HTML help builder.
htmlhelp_basename = 'wf-api-clientdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX knobs are left at Sphinx defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'wf-api-client.tex', 'wf-api-client Documentation',
     'David J. Cox', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'wf-api-client', 'wf-api-client Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'wf-api-client', 'wf-api-client Documentation',
     author, 'wf-api-client', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# Reuse the general project metadata defined above for the epub build.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
# The HTML search page is useless inside an epub reader.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| davidjcox/wf-api-client | wf-api-client/docs/conf.py | Python | bsd-3-clause | 11,287 |
# Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
import json
from tousix_manager.Rules_Generation.Production.manager import Manager as Production
from tousix_manager.Rules_Generation.configuration import Peer
from django.conf import settings
from tousix_manager.Database.models import Hote, Regles
from tousix_manager.Rules_Generation.Statistics.manager import Manager as Statistics
class Manager(object):
"""
Main class for managing the core of Generate_rues app
"""
def get_peers(self, switch):
"""
This method creates a list of peers seen by the given switch (only valid peers)
:param switch: Switch model
:return list(Peer): Peer array
"""
hotes = Hote.objects.filter(valid=True)
peers = []
for hote in hotes:
peer = Peer()
peer.idPeer = hote.idhote
peer.Name = hote.nomhote
peer.Mac = hote.machote
peer.IPv6 = hote.ipv6hote
peer.IPv4 = hote.ipv4hote
if hote.idport.idswitch == switch:
peer.Egress = True
peer.outputPort = hote.idport.numport
else:
# TODO parcours chemin à étudier ?
peer.Egress = False
peer.nextHop = hote.idport.idswitch_id
peers.append(peer)
return peers
def create_rules(self, switches):
"""
Insert into the database a new set of rules generated .
:param switches: list(Switch)
:return:
"""
for switch in switches:
peers = self.get_peers(switch)
rules = self.call_managers(switch.idswitch, peers)
# Remove existing rules for this switch
Regles.objects.filter(idswitch=switch.idswitch).filter(etat="Production").delete()
db_rules = []
for rule in rules:
db_rules.append(Regles(idswitch=switch, typeregle=rule.get("module"), regle=json.dumps(rule.get("rule")),
source_id=rule.get("source"), destination_id=rule.get("destination")))
Regles.objects.bulk_create(db_rules)
# Copy raw group rules into database
groups_switch = settings.RULES_GENERATION_GROUPS_DEFINITION[switch.idswitch]
db_groups = []
for group in groups_switch:
db_groups.append(Regles(idswitch=switch, typeregle="Group", regle=json.dumps(group)))
Regles.objects.bulk_create(db_groups)
def call_managers(self, dpid, peers):
"""
Method for calling managgers of other modules.
:param dpid: Target DPID
:param peers: LIst Peer
:return:
"""
rules = []
production = Production()
statistics = Statistics()
rules.extend(production.create_rules_members(dpid, peers))
rules.extend(statistics.create_rules_members(dpid, peers))
return rules
| Baloc/TouSIX-Manager | tousix_manager/Rules_Generation/manager.py | Python | gpl-3.0 | 3,739 |
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| regisf/yablog | blog/templatetags/__init__.py | Python | bsd-3-clause | 1,525 |
##
# This file is part of Overkill-writers.
#
# Overkill-writers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Overkill-writers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Overkill-writers. If not, see <http://www.gnu.org/licenses/>.
##
from ..base import Subprocess
import subprocess
class Writer:
    """Abstract base for line sinks; subclasses decide where lines go."""

    def write(self, line):
        """Consume one line of text. Concrete writers must override this."""
        raise NotImplementedError()
class StdoutWriter:
    """Writer that echoes each line to standard output."""

    def write(self, line):
        # print supplies the trailing newline.
        print(line)
class PipeWriter(Subprocess, Writer):
    """Writer that feeds lines to a child process via its stdin pipe."""

    # Ask Subprocess to create the child with a writable stdin pipe.
    stdin = subprocess.PIPE

    def write(self, line):
        """Send *line* (plus a newline, UTF-8 encoded) to the child's stdin.

        Raises RuntimeError if the child process is not running.
        """
        # NOTE(review): _state_lock, running and proc are presumably provided
        # by the Subprocess base class (not visible here) — confirm.
        with self._state_lock:
            if not self.running:
                raise RuntimeError("Not Running")
            self.proc.stdin.write((line+'\n').encode("utf-8"))
            # Flush so the child sees the line immediately rather than when
            # the pipe buffer fills.
            self.proc.stdin.flush()
| Stebalien/overkill-writers | overkill/extra/writers.py | Python | gpl-3.0 | 1,232 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
__all__ = ["RDD"]
class PythonEvalType:
    """
    Evaluation type of python rdd.

    These values are internal to PySpark.

    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """

    # Plain (non-UDF) Python RDD evaluation.
    NON_UDF = 0

    # Row-at-a-time SQL UDF.
    SQL_BATCHED_UDF = 100

    # Pandas/Arrow-based UDF variants (2xx range).
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
    SQL_GROUPED_AGG_PANDAS_UDF = 202
    SQL_WINDOW_AGG_PANDAS_UDF = 203
    SQL_SCALAR_PANDAS_ITER_UDF = 204
    SQL_MAP_PANDAS_ITER_UDF = 205
    SQL_COGROUPED_MAP_PANDAS_UDF = 206
    SQL_MAP_ARROW_ITER_UDF = 207
def portable_hash(x):
    """Return a hash code that is consistent across interpreter runs.

    Builtin ``hash`` is randomized for strings unless PYTHONHASHSEED is set,
    so this refuses to run without it. ``None`` and tuples (including tuples
    containing ``None``) are hashed with a CPython-2.7-style algorithm;
    everything else defers to ``hash``.

    Examples
    --------
    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    if "PYTHONHASHSEED" not in os.environ:
        raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)
    # Tuple hashing: fold each element into the accumulator, clamped to the
    # platform's positive int range at every step.
    acc = 0x345678
    for element in x:
        acc = ((acc ^ portable_hash(element)) * 1000003) & sys.maxsize
    acc ^= len(x)
    # CPython reserves -1 as an error sentinel for hashes.
    return -2 if acc == -1 else int(acc)
class BoundedFloat(float):
    """A float produced by an approximate job, annotated with its confidence
    interval: the value itself is the mean, with ``low``/``high`` bounds at
    the given ``confidence`` level.

    Examples
    --------
    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """

    def __new__(cls, mean, confidence, low, high):
        instance = super().__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _create_local_socket(sock_info):
    """Open an authenticated local socket to the JVM and return its file object.

    Parameters
    ----------
    sock_info : tuple
        ``(port, auth_secret, ...)`` — only the first two elements are used,
        so longer tuples are accepted.

    Returns
    -------
    file descriptor wrapping the connected local socket
    """
    port, auth_secret = sock_info[0], sock_info[1]
    sockfile, sock = local_connect_and_auth(port, auth_secret)
    # RDD materialization time is unpredictable; a read timeout here would
    # fail spuriously (SPARK-18281), so block indefinitely instead.
    sock.settimeout(None)
    return sockfile
def _load_from_socket(sock_info, serializer):
    """Connect to a local JVM socket and deserialize the data it serves.

    Parameters
    ----------
    sock_info : tuple
        Port number and authentication secret for the local socket.
    serializer : :py:class:`Serializer`
        The PySpark serializer used to decode the stream.

    Returns
    -------
    result of :py:meth:`Serializer.load_stream` — typically a generator of
    deserialized records. The underlying socket is closed when the file
    object is garbage-collected.
    """
    return serializer.load_stream(_create_local_socket(sock_info))
def _local_iterator_from_socket(sock_info, serializer):
    """Return an iterator over partition data served by the JVM.

    Wire protocol (per partition): Python writes ``1`` to request the next
    partition, then reads a status int — ``1`` means a partition stream
    follows, ``0`` means fully consumed, ``-1`` means the JVM hit an error.
    """
    class PyLocalIterable:
        """Create a synchronous local iterable over a socket"""

        def __init__(self, _sock_info, _serializer):
            # sock_info here is a 3-tuple: the auth server handle is kept so
            # JVM-side exceptions can be surfaced via getResult().
            port, auth_secret, self.jsocket_auth_server = _sock_info
            self._sockfile = _create_local_socket((port, auth_secret))
            self._serializer = _serializer
            self._read_iter = iter([])  # Initialize as empty iterator
            self._read_status = 1

        def __iter__(self):
            while self._read_status == 1:
                # Request next partition data from Java
                write_int(1, self._sockfile)
                self._sockfile.flush()
                # If response is 1 then there is a partition to read, if 0 then fully consumed
                self._read_status = read_int(self._sockfile)
                if self._read_status == 1:
                    # Load the partition data as a stream and read each item
                    self._read_iter = self._serializer.load_stream(self._sockfile)
                    for item in self._read_iter:
                        yield item
                # An error occurred, join serving thread and raise any exceptions from the JVM
                elif self._read_status == -1:
                    self.jsocket_auth_server.getResult()

        def __del__(self):
            # If local iterator is not fully consumed,
            # drain the current partition and tell the JVM to stop, so the
            # serving thread can exit cleanly.
            if self._read_status == 1:
                try:
                    # Finish consuming partition data stream
                    for _ in self._read_iter:
                        pass
                    # Tell Java to stop sending data and close connection
                    write_int(0, self._sockfile)
                    self._sockfile.flush()
                except Exception:
                    # Ignore any errors, socket is automatically closed when garbage-collected
                    pass

    return iter(PyLocalIterable(sock_info, serializer))
class Partitioner:
    """Maps a key to a partition index via ``partitionFunc(key) % numPartitions``.

    Two partitioners compare equal when they have the same partition count
    and the same partition function, which lets Spark skip shuffles for
    co-partitioned RDDs.
    """

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        if not isinstance(other, Partitioner):
            return False
        return (
            self.numPartitions == other.numPartitions
            and self.partitionFunc == other.partitionFunc
        )

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD:
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(CPickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        # The actual checkpointing is performed by the underlying Scala RDD.
        self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
    def localCheckpoint(self):
        """
        Mark this RDD for local checkpointing using Spark's existing caching layer.
        This method is for users who wish to truncate RDD lineages while skipping the expensive
        step of replicating the materialized data in a reliable distributed file system. This is
        useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
        Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
        data is written to ephemeral local storage in the executors instead of to a reliable,
        fault-tolerant storage. The effect is that if an executor fails during the computation,
        the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
        This is NOT safe to use with dynamic allocation, which removes executors along
        with their cached blocks. If you must use both features, you are advised to set
        `spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
        The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
        """
        # Delegated entirely to the Scala-side RDD; no Python-side state changes.
        self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.
        .. deprecated:: 0.9.0
            use :py:meth:`RDD.mapPartitionsWithIndex` instead.
        Examples
        --------
        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # Deprecated thin alias; warn, then delegate unchanged.
        warnings.warn(
            "mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
            FutureWarning,
            stacklevel=2,
        )
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this RDD.
        Parameters
        ----------
        withReplacement : bool
            can elements be sampled multiple times (replaced when sampled out)
        fraction : float
            expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be [0, 1]
            with replacement: expected number of times each element is chosen; fraction must be >= 0
        seed : int, optional
            seed for the random number generator
        Notes
        -----
        This is not guaranteed to provide exactly the fraction specified of the total
        count of this :class:`RDD`.
        Examples
        --------
        >>> rdd = sc.parallelize(range(100), 4)
        >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
        True
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        # RDDSampler encapsulates the per-partition random draws; partitioning
        # is preserved since sampling only drops/repeats elements in place.
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
    def randomSplit(self, weights, seed=None):
        """
        Randomly splits this RDD with the provided weights.
        Parameters
        ----------
        weights : list
            weights for splits, will be normalized if they don't sum to 1
        seed : int, optional
            random seed
        Returns
        -------
        list
            split RDDs in a list
        Examples
        --------
        >>> rdd = sc.parallelize(range(500), 1)
        >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
        >>> len(rdd1.collect() + rdd2.collect())
        500
        >>> 150 < rdd1.count() < 250
        True
        >>> 250 < rdd2.count() < 350
        True
        """
        # Build normalized cumulative weights; each adjacent pair becomes the
        # [lower, upper) probability range assigned to one split.
        s = float(sum(weights))
        cweights = [0.0]
        for w in weights:
            cweights.append(cweights[-1] + w / s)
        if seed is None:
            seed = random.randint(0, 2 ** 32 - 1)
        # All splits share one seed so each element lands in exactly one range.
        return [
            self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
            for lb, ub in zip(cweights, cweights[1:])
        ]
# this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.
        Notes
        -----
        This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
        Examples
        --------
        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0
        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []
        initialCount = self.count()
        if initialCount == 0:
            return []
        rand = random.Random(seed)
        if (not withReplacement) and num >= initialCount:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples
        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
        # Oversample so that one pass almost always yields at least `num` rows.
        fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()
        # Shuffle before truncating so the kept prefix is itself a uniform sample.
        rand.shuffle(samples)
        return samples[0:num]
    @staticmethod
    def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
        """
        Returns a sampling rate that guarantees a sample of
        size >= sampleSizeLowerBound 99.99% of the time.
        How the sampling rate is determined:
        Let p = num / total, where num is the sample size and total is the
        total number of data points in the RDD. We're trying to compute
        q > p such that
        - when sampling with replacement, we're drawing each data point
        with prob_i ~ Pois(q), where we want to guarantee
        Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
        total), i.e. the failure rate of not having a sufficiently large
        sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
        to guarantee 0.9999 success rate for num > 12, but we need a
        slightly larger q (9 empirically determined).
        - when sampling without replacement, we're drawing each data point
        with prob_i ~ Binomial(total, fraction) and our choice of q
        guarantees 1-delta, or 0.9999 success rate, where success rate is
        defined the same as in sampling with replacement.
        """
        fraction = float(sampleSizeLowerBound) / total
        if withReplacement:
            # Poisson tail bound: widen by numStDev standard deviations.
            numStDev = 5
            if sampleSizeLowerBound < 12:
                numStDev = 9
            return fraction + numStDev * sqrt(fraction / total)
        else:
            # Binomial tail bound derived from a Chernoff-style inequality.
            delta = 0.00005
            gamma = -log(delta) / total
            return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
    def union(self, other):
        """
        Return the union of this RDD and another one.
        Examples
        --------
        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
        # If both inputs share a partitioner and no partitions were added by
        # the union, the result is still partitioned the same way.
        if (
            self.partitioner == other.partitioner
            and self.getNumPartitions() == rdd.getNumPartitions()
        ):
            rdd.partitioner = self.partitioner
        return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
    def _reserialize(self, serializer=None):
        """
        Return this RDD encoded with `serializer` (defaults to the context's
        serializer); returns self unchanged if already encoded that way.
        """
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            # An identity map pushes the data through the new serializer while
            # keeping the partitioning intact.
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
    def repartitionAndSortWithinPartitions(
        self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x
    ):
        """
        Repartition the RDD according to the given partitioner and, within each resulting partition,
        sort records by their keys.
        Examples
        --------
        >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
        >>> rdd2.glom().collect()
        [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # Use 90% of the memory budget as headroom; ExternalSorter spills
            # to disk beyond that.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
        return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.
        Examples
        --------
        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # 90% of the memory budget; ExternalSorter spills to disk beyond it.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
        if numPartitions == 1:
            # Single output partition: no range partitioning needed, just
            # coalesce and sort locally.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)
        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
        samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [
            samples[int(len(samples) * (i + 1) / numPartitions)]
            for i in range(0, numPartitions - 1)
        ]
        def rangePartitioner(k):
            # Binary-search the sampled boundaries to pick a partition.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p
        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.
        Parameters
        ----------
        command : str
            command to run.
        env : dict, optional
            environment variables to set.
        checkCode : bool, optional
            whether or not to check the return value of the shell command.
        Examples
        --------
        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        ['1', '2', '', '3']
        """
        if env is None:
            env = dict()
        def func(iterator):
            # shlex.split avoids shell=True, so no shell injection via `command`.
            pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
            def pipe_objs(out):
                # Write one line per element, UTF-8 encoded, then close stdin
                # so the child sees EOF.
                for obj in iterator:
                    s = str(obj).rstrip("\n") + "\n"
                    out.write(s.encode("utf-8"))
                out.close()
            # Feed the child from a separate thread so reading its stdout
            # below cannot deadlock against a full stdin buffer.
            Thread(target=pipe_objs, args=[pipe.stdin]).start()
            def check_return_code():
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise RuntimeError(
                        "Pipe function `%s' exited "
                        "with error code %d" % (command, pipe.returncode)
                    )
                else:
                    # Yield nothing: the empty loop just makes this function a
                    # generator, so the exit-code check runs lazily after
                    # stdout has been fully consumed.
                    for i in range(0):
                        yield i
            return (
                x.rstrip(b"\n").decode("utf-8")
                for x in chain(iter(pipe.stdout.readline, b""), check_return_code())
            )
        return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.
        Notes
        -----
        This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
        """
        # Record the Python call site so it appears in the Spark UI, then
        # stream the results back from the JVM over a local socket.
        with SCCallSiteSync(self.context):
            sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(sock_info, self._jrdd_deserializer))
    def collectWithJobGroup(self, groupId, description, interruptOnCancel=False):
        """
        When collect rdd, use this method to specify job group.
        .. versionadded:: 3.0.0
        .. deprecated:: 3.1.0
            Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
        """
        warnings.warn(
            "Deprecated in 3.1, Use pyspark.InheritableThread with "
            "the pinned thread mode enabled.",
            FutureWarning,
        )
        # Same mechanics as collect(), but the JVM entry point additionally
        # tags the job with the given group id and description.
        with SCCallSiteSync(self.context):
            sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
                self._jrdd.rdd(), groupId, description, interruptOnCancel
            )
        return list(_load_from_socket(sock_info, self._jrdd_deserializer))
    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator. Currently reduces partitions locally.
        Examples
        --------
        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        >>> sc.parallelize([]).reduce(add)
        Traceback (most recent call last):
            ...
        ValueError: Can not reduce() empty RDD
        """
        f = fail_on_stopiteration(f)
        def func(iterator):
            # Reduce each partition locally; an empty partition yields nothing.
            iterator = iter(iterator)
            try:
                initial = next(iterator)
            except StopIteration:
                return
            yield reduce(f, iterator, initial)
        vals = self.mapPartitions(func).collect()
        if vals:
            # Combine the per-partition results on the driver.
            return reduce(f, vals)
        raise ValueError("Can not reduce() empty RDD")
    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.
        Parameters
        ----------
        f : function
        depth : int, optional
            suggested depth of the tree (default: 2)
        Examples
        --------
        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        # Reduce has no zero element, so aggregate with a tagged dummy value:
        # the boolean marks "no real value seen yet".
        zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.
        def op(x, y):
            # Dummy operands are absorbed; two real values get combined by f.
            if x[1]:
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False
        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            # Only the dummy survived: every partition was empty.
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]
    def fold(self, zeroValue, op):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given associative function and a neutral "zero value."
        The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
        as its result value to avoid object allocation; however, it should not
        modify ``t2``.
        This behaves somewhat differently from fold operations implemented
        for non-distributed collections in functional languages like Scala.
        This fold operation may be applied to partitions individually, and then
        fold those results into the final result, rather than apply the fold
        to each element sequentially in some defined ordering. For functions
        that are not commutative, the result may differ from that of a fold
        applied to a non-distributed collection.
        Examples
        --------
        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
        15
        """
        op = fail_on_stopiteration(op)
        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = op(acc, obj)
            yield acc
        # collecting result of mapPartitions here ensures that the copy of
        # zeroValue provided to each partition is unique from the one provided
        # to the final reduce call
        vals = self.mapPartitions(func).collect()
        return reduce(op, vals, zeroValue)
    def aggregate(self, zeroValue, seqOp, combOp):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given combine functions and a neutral "zero
        value."
        The functions ``op(t1, t2)`` is allowed to modify ``t1`` and return it
        as its result value to avoid object allocation; however, it should not
        modify ``t2``.
        The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        an U and one operation for merging two U
        Examples
        --------
        >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
        >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
        >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
        (10, 4)
        >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
        (0, 0)
        """
        seqOp = fail_on_stopiteration(seqOp)
        combOp = fail_on_stopiteration(combOp)
        def func(iterator):
            # Fold the partition's elements into the zero value with seqOp.
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        # collecting result of mapPartitions here ensures that the copy of
        # zeroValue provided to each partition is unique from the one provided
        # to the final reduce call
        vals = self.mapPartitions(func).collect()
        return reduce(combOp, vals, zeroValue)
    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.
        Parameters
        ----------
        depth : int, optional
            suggested depth of the tree (default: 2)
        Examples
        --------
        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        if self.getNumPartitions() == 0:
            return zeroValue
        def aggregatePartition(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            numPartitions /= scale
            curNumPartitions = int(numPartitions)
            def mapPartition(i, iterator):
                # Spread the partial results over fewer partitions for the
                # next level of combining.
                for obj in iterator:
                    yield (i % curNumPartitions, obj)
            partiallyAggregated = (
                partiallyAggregated.mapPartitionsWithIndex(mapPartition)
                .reduceByKey(combOp, curNumPartitions)
                .values()
            )
        return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
    def histogram(self, buckets):
        """
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
        element (where n is the number of buckets).
        Buckets must be sorted, not contain any duplicates, and have
        at least two elements.
        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given `buckets`
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
        be at least 1. An exception is raised if the RDD contains infinity.
        If the elements in the RDD do not vary (max == min), a single bucket
        will be used.
        The return value is a tuple of buckets and histogram.
        Examples
        --------
        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """
        if isinstance(buckets, int):
            # Case 1: a bucket count -- derive evenly spaced bucket boundaries
            # from the data's min/max.
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")
            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True
            filtered = self.filter(comparable)
            # faster than stats()
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise
            if minv == maxv or buckets == 1:
                return [minv, maxv], [filtered.count()]
            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")
            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")
            # keep them as integer if possible
            inc = int(inc)
            if inc * buckets != maxv - minv:
                inc = (maxv - minv) * 1.0 / buckets
            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True
        elif isinstance(buckets, (list, tuple)):
            # Case 2: explicit boundaries -- validate them, and detect whether
            # they are evenly spaced (enables O(1) bucket lookup below).
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")
            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")
            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")
            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")
            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)
        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")
        def histogram(iterator):
            # Per-partition counting; `even` selects O(1) arithmetic lookup
            # over O(log n) binary search.
            counters = [0] * len(buckets)
            for i in iterator:
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                t = int((i - minv) / inc) if even else bisect.bisect_right(buckets, i) - 1
                counters[t] += 1
            # add last two together
            last = counters.pop()
            counters[-1] += last
            return [counters]
        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]
        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
    def take(self, num):
        """
        Take the first num elements of the RDD.
        It works by first scanning one partition, and use the results from
        that partition to estimate the number of additional partitions needed
        to satisfy the limit.
        Translated from the Scala implementation in RDD#take().
        Notes
        -----
        This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
        Examples
        --------
        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
        [91, 92, 93]
        """
        items = []
        totalParts = self.getNumPartitions()
        partsScanned = 0
        # Keep launching jobs over progressively more partitions until we
        # have `num` items or the whole RDD has been scanned.
        while len(items) < num and partsScanned < totalParts:
            # The number of partitions to try in this iteration.
            # It is ok for this number to be greater than totalParts because
            # we actually cap it at totalParts in runJob.
            numPartsToTry = 1
            if partsScanned > 0:
                # If we didn't find any rows after the previous iteration,
                # quadruple and retry. Otherwise, interpolate the number of
                # partitions we need to try, but overestimate it by 50%.
                # We also cap the estimation in the end.
                if len(items) == 0:
                    numPartsToTry = partsScanned * 4
                else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
                    numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                    numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
            left = num - len(items)
            def takeUpToNumLeft(iterator):
                # Per-task closure: yield at most `left` items from this
                # partition, stopping early once the quota is reached.
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    try:
                        yield next(iterator)
                    except StopIteration:
                        return
                    taken += 1
            # Run the job only over the next slice of partition indices.
            p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
            res = self.context.runJob(self, takeUpToNumLeft, p)
            items += res
            partsScanned += numPartsToTry
        # May have over-collected from the last batch of partitions; trim.
        return items[:num]
def first(self):
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
    def saveAsNewAPIHadoopFile(
        self,
        path,
        outputFormatClass,
        keyClass=None,
        valueClass=None,
        keyConverter=None,
        valueConverter=None,
        conf=None,
    ):
        """
        Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
        `conf` is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
        Parameters
        ----------
        path : str
            path to Hadoop file
        outputFormatClass : str
            fully qualified classname of Hadoop OutputFormat
            (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        keyClass : str, optional
            fully qualified classname of key Writable class
            (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        valueClass : str, optional
            fully qualified classname of value Writable class
            (e.g. "org.apache.hadoop.io.Text", None by default)
        keyConverter : str, optional
            fully qualified classname of key converter (None by default)
        valueConverter : str, optional
            fully qualified classname of value converter (None by default)
        conf : dict, optional
            Hadoop job configuration (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # The leading `True` tells the JVM helper the RDD is batch-pickled.
        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
            pickledRDD._jrdd,
            True,
            path,
            outputFormatClass,
            keyClass,
            valueClass,
            keyConverter,
            valueConverter,
            jconf,
        )
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
    def saveAsHadoopFile(
        self,
        path,
        outputFormatClass,
        keyClass=None,
        valueClass=None,
        keyConverter=None,
        valueConverter=None,
        conf=None,
        compressionCodecClass=None,
    ):
        """
        Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
        `conf` is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
        Parameters
        ----------
        path : str
            path to Hadoop file
        outputFormatClass : str
            fully qualified classname of Hadoop OutputFormat
            (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        keyClass : str, optional
            fully qualified classname of key Writable class
            (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        valueClass : str, optional
            fully qualified classname of value Writable class
            (e.g. "org.apache.hadoop.io.Text", None by default)
        keyConverter : str, optional
            fully qualified classname of key converter (None by default)
        valueConverter : str, optional
            fully qualified classname of value converter (None by default)
        conf : dict, optional
            Hadoop job configuration (None by default)
        compressionCodecClass : str
            fully qualified classname of the compression codec class
            i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # The leading `True` tells the JVM helper the RDD is batch-pickled.
        self.ctx._jvm.PythonRDD.saveAsHadoopFile(
            pickledRDD._jrdd,
            True,
            path,
            outputFormatClass,
            keyClass,
            valueClass,
            keyConverter,
            valueConverter,
            jconf,
            compressionCodecClass,
        )
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (str, bytes)):
x = str(x)
if isinstance(x, str):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
Default partitioner is hash-partition.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
    def fullOuterJoin(self, other, numPartitions=None):
        """
        Perform a full outer join of `self` and `other`.
        For each element (k, v) in `self`, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in `other`, or the pair
        (k, (v, None)) if no elements in `other` have key k.
        Similarly, for each element (k, w) in `other`, the resulting RDD will
        either contain all pairs (k, (v, w)) for v in `self`, or the pair
        (k, (None, w)) if no elements in `self` have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        Examples
        --------
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("c", 8)])
        >>> sorted(x.fullOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
        """
        return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.

        Parameters
        ----------
        numPartitions : int or None
            number of output partitions; None selects the default reduce
            parallelism
        partitionFunc : callable
            maps a key to an int; the bucket is ``partitionFunc(k) % numPartitions``

        Examples
        --------
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> len(set(sets[0]).intersection(set(sets[1])))
        0
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned exactly this way: nothing to do.
        if self.partitioner == partitioner:
            return self
        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer
        limit = self._memory_limit() / 2
        def add_shuffle_key(split, iterator):
            # Emits an alternating stream of pack_long(bucket) markers and
            # serialized chunks, which PairwiseRDD on the JVM side re-pairs.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)
            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1
                # check used memory and avg size of chunk of objects
                if c % 1000 == 0 and get_used_memory() > limit or c > batch:
                    n, size = len(buckets), 0
                    for split in list(buckets.keys()):
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)
                    avg = int(size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch = min(sys.maxsize, batch * 1.5)
                    elif avg > 10:
                        batch = max(int(batch / 1.5), 1)
                    c = 0
            # Flush whatever is left at the end of the partition.
            for split, items in buckets.items():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        # The stream is already raw bytes; skip Python-side serialization.
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context):
            pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
            jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        rdd.partitioner = partitioner
        return rdd
# TODO: add control over map-side aggregation
    def combineByKey(
        self,
        createCombiner,
        mergeValue,
        mergeCombiners,
        numPartitions=None,
        partitionFunc=portable_hash,
    ):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.
        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.
        Users provide three functions:
        - `createCombiner`, which turns a V into a C (e.g., creates
          a one-element list)
        - `mergeValue`, to merge a V into a C (e.g., adds it to the end of
          a list)
        - `mergeCombiners`, to combine two C's into a single one (e.g., merges
          the lists)
        To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
        modify and return their first argument instead of creating a new C.
        In addition, users can control the partitioning of the output RDD.
        Notes
        -----
        V and C can be different -- for example, one might group an RDD of type
        (Int, Int) into an RDD of type (Int, List[Int]).
        Examples
        --------
        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
        >>> def to_list(a):
        ...     return [a]
        ...
        >>> def append(a, b):
        ...     a.append(b)
        ...     return a
        ...
        >>> def extend(a, b):
        ...     a.extend(b)
        ...     return a
        ...
        >>> sorted(x.combineByKey(to_list, append, extend).collect())
        [('a', [1, 2]), ('b', [1])]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        serializer = self.ctx.serializer
        memory = self._memory_limit()
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
        def combineLocally(iterator):
            # Map-side combine: 0.9 leaves headroom under the worker limit.
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()
        locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
        def _mergeCombiners(iterator):
            # Reduce-side: merge the per-partition combiners after the shuffle.
            merger = ExternalMerger(agg, memory, serializer)
            merger.mergeCombiners(iterator)
            return merger.items()
        return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash
):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD second element in each RDD, etc. Assumes
        that the two RDDs have the same number of partitions and the same
        number of elements in each partition (e.g. one was made through
        a map on the other).
        Examples
        --------
        >>> x = sc.parallelize(range(0,5))
        >>> y = sc.parallelize(range(1000, 1005))
        >>> x.zip(y).collect()
        [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
        """
        def get_batch_size(ser):
            # Batch size of a serializer; 1 means record-at-a-time.
            if isinstance(ser, BatchedSerializer):
                return ser.batchSize
            return 1  # not batched
        def batch_as(rdd, batchSize):
            return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
        my_batch = get_batch_size(self._jrdd_deserializer)
        other_batch = get_batch_size(other._jrdd_deserializer)
        # Both sides must use identical fixed batching so that the JVM-side
        # zip pairs up the same logical elements.
        if my_batch != other_batch or not my_batch:
            # use the smallest batchSize for both of them
            batchSize = min(my_batch, other_batch)
            if batchSize <= 0:
                # auto batched or unlimited
                batchSize = 100
            other = batch_as(other, batchSize)
            self = batch_as(self, batchSize)
        if self.getNumPartitions() != other.getNumPartitions():
            raise ValueError("Can only zip with RDD which has the same number of partitions")
        # There will be an Exception in JVM if there are different number
        # of items in each partitions.
        pairRDD = self._jrdd.zip(other._jrdd)
        deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    :meth:`zipWithIndex`.

    Examples
    --------
    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_partitions = self.getNumPartitions()

    def assign(split, iterator):
        # Stride by the partition count so ids never collide across partitions.
        for position, value in enumerate(iterator):
            yield value, position * num_partitions + split

    return self.mapPartitionsWithIndex(assign)
def name(self):
    """
    Return the name of this RDD, or None if it has no name.
    """
    # The JVM side returns an empty value for unnamed RDDs; normalize to None.
    return self._jrdd.name() or None
def setName(self, name):
    """
    Assign a name to this RDD and return the RDD itself (fluent style).

    Examples
    --------
    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    'RDD1'
    """
    # Delegate the bookkeeping to the JVM-side RDD.
    self._jrdd.setName(name)
    return self
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for debugging.

    Returns UTF-8 encoded bytes, or None when no description is available.
    """
    description = self._jrdd.toDebugString()
    return description.encode("utf-8") if description else None
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    Examples
    --------
    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    >>> print(rdd1.getStorageLevel())
    Serialized 1x Replicated
    """
    # Mirror the JVM-side storage level into the Python StorageLevel type.
    jlevel = self._jrdd.getStorageLevel()
    return StorageLevel(
        jlevel.useDisk(),
        jlevel.useMemory(),
        jlevel.useOffHeap(),
        jlevel.deserialized(),
        jlevel.replication(),
    )
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
    """
    Return the list of values in the RDD for key `key`. This operation
    is done efficiently if the RDD has a known partitioner by only
    searching the partition that the key maps to.

    Examples
    --------
    >>> l = range(1000)
    >>> rdd = sc.parallelize(zip(l, l), 10)
    >>> rdd.lookup(42)  # slow
    [42]
    >>> sorted = rdd.sortByKey()
    >>> sorted.lookup(42)  # fast
    [42]
    >>> sorted.lookup(1024)
    []
    >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
    >>> list(rdd2.lookup(('a', 'b'))[0])
    ['c']
    """
    matching = self.filter(lambda kv: kv[0] == key).values()
    if self.partitioner is None:
        # No partitioner: every partition may hold the key, scan them all.
        return matching.collect()
    # Known partitioner: run a job over the single partition the key maps to.
    return self.ctx.runJob(matching, lambda x: x, [self.partitioner(key)])
def _to_java_object_rdd(self):
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pickle, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
    """
    Approximate version of count() that returns a potentially incomplete
    result within a timeout, even if not all tasks have finished.

    Examples
    --------
    >>> rdd = sc.parallelize(range(1000), 10)
    >>> rdd.countApprox(1000, 1.0)
    1000
    """
    # Reduce counting to an approximate sum of per-partition sizes.
    per_partition = self.mapPartitions(lambda part: [float(sum(1 for _ in part))])
    return int(per_partition.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
    """
    Approximate operation to return the sum within a timeout
    or meet the confidence.

    Examples
    --------
    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000))
    >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
    True
    """
    # Pre-sum each partition in Python, then hand the partials to the
    # JVM's approximate-sum machinery.
    partials = self.mapPartitions(lambda part: [float(sum(part))])
    jdouble_rdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(partials._to_java_object_rdd().rdd())
    bounded = jdouble_rdd.sumApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(bounded.mean(), bounded.confidence(), bounded.low(), bounded.high())
def meanApprox(self, timeout, confidence=0.95):
    """
    Approximate operation to return the mean within a timeout
    or meet the confidence.

    Examples
    --------
    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000)) / 1000.0
    >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
    True
    """
    # Coerce every element to float, then delegate to the JVM's
    # approximate-mean machinery.
    as_doubles = self.map(float)._to_java_object_rdd()
    jdouble_rdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(as_doubles.rdd())
    bounded = jdouble_rdd.meanApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(bounded.mean(), bounded.confidence(), bounded.low(), bounded.high())
def countApproxDistinct(self, relativeSD=0.05):
    """
    Return approximate number of distinct elements in the RDD.

    Parameters
    ----------
    relativeSD : float, optional
        Relative accuracy. Smaller values create
        counters that require more space.
        It must be greater than 0.000017.

    Notes
    -----
    The algorithm used is based on streamlib's implementation of
    `"HyperLogLog in Practice: Algorithmic Engineering of a State
    of The Art Cardinality Estimation Algorithm", available here
    <https://doi.org/10.1145/2452376.2452456>`_.

    Examples
    --------
    >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
    >>> 900 < n < 1100
    True
    >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
    >>> 16 < n < 24
    True
    """
    if relativeSD < 0.000017:
        raise ValueError("relativeSD should be greater than 0.000017")
    # Java's hash space is 2^32, so fold Python hashes into 32 bits first.
    hashed = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
    return hashed._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self, prefetchPartitions=False):
    """
    Return an iterator that contains all of the elements in this RDD.
    The iterator will consume as much memory as the largest partition in this RDD.
    With prefetch it may consume up to the memory of the 2 largest partitions.

    Parameters
    ----------
    prefetchPartitions : bool, optional
        If Spark should pre-fetch the next partition
        before it is needed.

    Examples
    --------
    >>> rdd = sc.parallelize(range(10))
    >>> [x for x in rdd.toLocalIterator()]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    # Record the Python call site for the JVM job, then stream the
    # partitions back over a local socket.
    with SCCallSiteSync(self.context):
        server_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
            self._jrdd.rdd(), prefetchPartitions
        )
    return _local_iterator_from_socket(server_info, self._jrdd_deserializer)
def barrier(self):
    """
    Marks the current stage as a barrier stage, where Spark must launch all tasks together.
    In case of a task failure, instead of only restarting the failed task, Spark will abort the
    entire stage and relaunch all tasks for this stage.
    The barrier execution mode feature is experimental and it only handles limited scenarios.
    Please read the linked SPIP and design docs to understand the limitations and future plans.

    .. versionadded:: 2.4.0

    Returns
    -------
    :class:`RDDBarrier`
        instance that provides actions within a barrier stage.

    See Also
    --------
    pyspark.BarrierTaskContext

    Notes
    -----
    For additional information see

    - `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
    - `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_

    This API is experimental
    """
    # Thin wrapper: all barrier-specific behavior lives on RDDBarrier.
    return RDDBarrier(self)
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self, profile):
    """
    Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
    This is only supported on certain cluster managers and currently requires dynamic
    allocation to be enabled. It will result in new executors with the resources specified
    being acquired to calculate the RDD.

    .. versionadded:: 3.1.0

    Notes
    -----
    This API is experimental
    """
    self.has_resource_profile = True
    jrp = profile._java_resource_profile
    if jrp is None:
        # No JVM-side profile yet: build one from the Python-side requests.
        jvm = self.ctx._jvm
        builder = jvm.org.apache.spark.resource.ResourceProfileBuilder()
        exec_reqs = ExecutorResourceRequests(jvm, profile._executor_resource_requests)
        task_reqs = TaskResourceRequests(jvm, profile._task_resource_requests)
        builder.require(exec_reqs._java_executor_resource_requests)
        builder.require(task_reqs._java_task_resource_requests)
        jrp = builder.build()
    self._jrdd.withResources(jrp)
    return self
def getResourceProfile(self):
    """
    Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
    if it wasn't specified.

    .. versionadded:: 3.1.0

    Returns
    -------
    :py:class:`pyspark.resource.ResourceProfile`
        The user specified profile or None if none were specified

    Notes
    -----
    This API is experimental
    """
    jrp = self._jrdd.getResourceProfile()
    return ResourceProfile(_java_resource_profile=jrp) if jrp is not None else None
def _prepare_for_python_RDD(sc, command):
    """Serialize a command for shipment to Python workers.

    Returns the pickled command (possibly wrapped in a broadcast when it
    exceeds the JVM broadcast threshold), the accumulated broadcast
    variables, the worker environment, and the python includes.
    """
    ser = CloudPickleSerializer()
    payload = ser.dumps(command)
    if len(payload) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc):  # Default 1M
        # Large commands ride a broadcast, which shares the PythonRDD's
        # life cycle (the serialized command is compressed by broadcast).
        payload = ser.dumps(sc.broadcast(payload))
    broadcast_vars = [bc._jbroadcast for bc in sc._pickled_broadcast_vars]
    sc._pickled_broadcast_vars.clear()
    return payload, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """Package a Python function and its serializers as a JVM PythonFunction."""
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    pickled, broadcast_vars, env, includes = _prepare_for_python_RDD(
        sc, (func, profiler, deserializer, serializer)
    )
    return sc._jvm.PythonFunction(
        bytearray(pickled),
        env,
        includes,
        sc.pythonExec,
        sc.pythonVer,
        broadcast_vars,
        sc._javaAccumulator,
    )
class RDDBarrier:
    """
    Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
    :class:`RDDBarrier` instances are created by :func:`RDD.barrier`.

    .. versionadded:: 2.4.0

    Notes
    -----
    This API is experimental
    """

    def __init__(self, rdd):
        self.rdd = rdd

    def mapPartitions(self, f, preservesPartitioning=False):
        """
        Returns a new RDD by applying a function to each partition of the wrapped RDD,
        where tasks are launched together in a barrier stage.
        The interface is the same as :func:`RDD.mapPartitions`.
        Please see the API doc there.

        .. versionadded:: 2.4.0

        Notes
        -----
        This API is experimental
        """
        # Adapt the partition-only function to the (index, iterator)
        # signature PipelinedRDD expects, discarding the index.
        def indexed(split, iterator):
            return f(iterator)

        return PipelinedRDD(self.rdd, indexed, preservesPartitioning, isFromBarrier=True)

    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Returns a new RDD by applying a function to each partition of the wrapped RDD, while
        tracking the index of the original partition. And all tasks are launched together
        in a barrier stage.
        The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
        Please see the API doc there.

        .. versionadded:: 3.0.0

        Notes
        -----
        This API is experimental
        """
        return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD):

    """
    Examples
    --------
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:

    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
        # Fuse consecutive Python transformations into one stage so a single
        # Python worker round-trip evaluates the whole chain.
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            prev_func = prev.func  # bind now so the closure is not late-bound

            def pipeline_func(split, iterator):
                # Compose this step on top of everything pipelined so far.
                return func(split, prev_func(split, iterator))

            self.func = pipeline_func
            self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.has_resource_profile = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None  # lazily built by the _jrdd property below
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None
        self.is_barrier = isFromBarrier or prev._is_barrier()

    def getNumPartitions(self):
        # Pipelined (narrow) transformations never change the partition count.
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Build (once) and cache the JavaRDD backing this pipeline.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(
            self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
        )
        python_rdd = self.ctx._jvm.PythonRDD(
            self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
        )
        self._jrdd_val = python_rdd.asJavaRDD()
        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        # Forces materialization of the JavaRDD if not already done.
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching, checkpointing, or a resource profile pins the stage
        # boundary, so further transformations must start a new pipeline.
        return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)

    def _is_barrier(self):
        return self.is_barrier
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest

    from pyspark.context import SparkContext

    globs = globals().copy()
    # A small local cluster ensures the doctests exercise multiple
    # partitions and batches.
    globs["sc"] = SparkContext("local[4]", "PythonTest")
    failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs["sc"].stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| holdenk/spark | python/pyspark/rdd.py | Python | apache-2.0 | 109,178 |
from django.test import RequestFactory
from test_plus.test import TestCase
from ..views import (
UserRedirectView,
UserUpdateView
)
class BaseUserTestCase(TestCase):
    """Shared fixtures for user view tests: a user and a request factory."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = self.make_user()
class TestUserRedirectView(BaseUserTestCase):

    def test_get_redirect_url(self):
        # Instantiating the view directly is only acceptable in tests.
        view = UserRedirectView()
        # Build a fake request and attach the test user to it.
        request = self.factory.get('/fake-url')
        request.user = self.user
        view.request = request
        # make_user() defaults the username to '[email protected]'.
        self.assertEqual(view.get_redirect_url(), '/users/[email protected]/')
class TestUserUpdateView(BaseUserTestCase):

    def setUp(self):
        super(TestUserUpdateView, self).setUp()
        # Instantiating the view directly is only acceptable in tests.
        self.view = UserUpdateView()
        # Build a fake request, attach the test user, and hand it to the view.
        request = self.factory.get('/fake-url')
        request.user = self.user
        self.view.request = request

    def test_get_success_url(self):
        # make_user() defaults the username to '[email protected]'.
        self.assertEqual(self.view.get_success_url(), '/users/[email protected]/')

    def test_get_object(self):
        # The view should edit the user attached to the request.
        self.assertEqual(self.view.get_object(), self.user)
| HandyCodeJob/hcj-django-temp | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/tests/test_views.py | Python | bsd-3-clause | 1,876 |
"""Signomial, Posynomial, Monomial, Constraint, & MonoEQCOnstraint classes"""
from collections import defaultdict
import numpy as np
from .core import Nomial
from .array import NomialArray
from .. import units
from ..constraints import SingleEquationConstraint
from ..globals import SignomialsEnabled
from ..small_classes import Numbers
from ..small_classes import HashVector, EMPTY_HV
from ..varkey import VarKey
from ..small_scripts import mag
from ..exceptions import (InvalidGPConstraint, InvalidPosynomial,
PrimalInfeasible, DimensionalityError)
from .map import NomialMap
from .substitution import parse_subs
class Signomial(Nomial):
    """A representation of a Signomial.

    Arguments
    ---------
    exps: tuple of dicts
        Exponent dicts for each monomial term
    cs: tuple
        Coefficient values for each monomial term
    require_positive: bool
        If True and Signomials not enabled, c <= 0 will raise ValueError

    Returns
    -------
    Signomial
    Posynomial (if the input has only positive cs)
    Monomial (if the input has one term and only positive cs)
    """
    _c = _exp = None  # pylint: disable=invalid-name
    __hash__ = Nomial.__hash__

    def __init__(self, hmap=None, cs=1, require_positive=True):  # pylint: disable=too-many-statements,too-many-branches
        if not isinstance(hmap, NomialMap):
            if hasattr(hmap, "hmap"):
                hmap = hmap.hmap
            elif isinstance(hmap, Numbers):
                hmap_ = NomialMap([(EMPTY_HV, mag(hmap))])
                hmap_.units_of_product(hmap)
                hmap = hmap_
            elif isinstance(hmap, dict):
                exp = HashVector({VarKey(k): v for k, v in hmap.items() if v})
                hmap = NomialMap({exp: mag(cs)})
                hmap.units_of_product(cs)
            else:
                raise ValueError("Nomial construction accepts only NomialMaps,"
                                 " objects with an .hmap attribute, numbers,"
                                 " or *(exp dict of strings, number).")
        super().__init__(hmap)
        # Downcast to the most specific subclass the coefficients allow.
        if self.any_nonpositive_cs:
            if require_positive and not SignomialsEnabled:
                raise InvalidPosynomial("each c must be positive.")
            self.__class__ = Signomial
        elif len(self.hmap) == 1:
            self.__class__ = Monomial
        else:
            self.__class__ = Posynomial

    def diff(self, var):
        """Derivative of this with respect to a Variable

        Arguments
        ---------
        var : Variable key
            Variable to take derivative with respect to

        Returns
        -------
        Signomial (or Posynomial or Monomial)
        """
        var = var.key
        if var not in self.vks:
            # Derivative with respect to an absent variable is zero.
            diff = NomialMap({EMPTY_HV: 0.0})
            diff.units = None
        else:
            diff = self.hmap.diff(var)
        return Signomial(diff, require_positive=False)

    def posy_negy(self):
        """Get the positive and negative parts, both as Posynomials

        Returns
        -------
        Posynomial, Posynomial:
            p_pos and p_neg in (self = p_pos - p_neg) decomposition,
        """
        py, ny = NomialMap(), NomialMap()
        py.units, ny.units = self.units, self.units
        for exp, c in self.hmap.items():
            if c > 0:
                py[exp] = c
            elif c < 0:
                ny[exp] = -c  # -c to keep it a posynomial
        return Posynomial(py) if py else 0, Posynomial(ny) if ny else 0

    def mono_approximation(self, x0):
        """Monomial approximation about a point x0

        Arguments
        ---------
        x0 (dict):
            point to monomialize about

        Returns
        -------
        Monomial (unless self(x0) < 0, in which case a Signomial is returned)
        """
        x0, _, _ = parse_subs(self.vks, x0)  # use only varkey keys
        psub = self.hmap.sub(x0, self.vks, parsedsubs=True)
        if EMPTY_HV not in psub or len(psub) > 1:
            raise ValueError("Variables %s remained after substituting x0=%s"
                             " into %s" % (psub, x0, self))
        c0, = psub.values()
        c, exp = c0, HashVector()
        for vk in self.vks:
            val = float(x0[vk])
            diff, = self.hmap.diff(vk).sub(x0, self.vks,
                                           parsedsubs=True).values()
            # Log-space sensitivity of self to vk at x0.
            e = val*diff/c0
            if e:
                exp[vk] = e
            try:
                c /= val**e
            except OverflowError:
                raise OverflowError(
                    "While approximating the variable %s with a local value of"
                    " %s, %s/(%s**%s) overflowed. Try reducing the variable's"
                    " value by changing its unit prefix, or specify x0 values"
                    " for any free variables it's multiplied or divided by in"
                    " the posynomial %s whose expected value is far from 1."
                    % (vk, val, c, val, e, self))
        hmap = NomialMap({exp: c})
        hmap.units = self.units
        return Monomial(hmap)

    def sub(self, substitutions, require_positive=True):
        """Returns a nomial with substitued values.

        Usage
        -----
        3 == (x**2 + y).sub({'x': 1, y: 2})
        3 == (x).gp.sub(x, 3)

        Arguments
        ---------
        substitutions : dict or key
            Either a dictionary whose keys are strings, Variables, or VarKeys,
            and whose values are numbers, or a string, Variable or Varkey.
        val : number (optional)
            If the substitutions entry is a single key, val holds the value
        require_positive : boolean (optional, default is True)
            Controls whether the returned value can be a Signomial.

        Returns
        -------
        Returns substituted nomial.
        """
        return Signomial(self.hmap.sub(substitutions, self.vks),
                         require_positive=require_positive)

    def __le__(self, other):
        if isinstance(other, (Numbers, Signomial)):
            return SignomialInequality(self, "<=", other)
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, (Numbers, Signomial)):
            return SignomialInequality(self, ">=", other)
        return NotImplemented

    def __add__(self, other, rev=False):
        other_hmap = getattr(other, "hmap", None)
        if isinstance(other, Numbers):
            if other == 0:
                return Signomial(self.hmap)
            other_hmap = NomialMap({EMPTY_HV: mag(other)})
            other_hmap.units_of_product(other)
        if other_hmap:
            astorder = (self, other)
            if rev:
                astorder = tuple(reversed(astorder))
            out = Signomial(self.hmap + other_hmap)
            out.ast = ("add", astorder)
            return out
        return NotImplemented

    def __mul__(self, other, rev=False):
        astorder = (self, other)
        if rev:
            astorder = tuple(reversed(astorder))
        if isinstance(other, np.ndarray):
            s = NomialArray(self)
            s.ast = self.ast
            return s*other
        if isinstance(other, Numbers):
            if not other:  # other is zero
                return other
            hmap = mag(other)*self.hmap
            hmap.units_of_product(self.hmap.units, other)
            out = Signomial(hmap)
            out.ast = ("mul", astorder)
            return out
        if isinstance(other, Signomial):
            hmap = NomialMap()
            for exp_s, c_s in self.hmap.items():
                for exp_o, c_o in other.hmap.items():
                    exp = exp_s + exp_o
                    new, accumulated = c_s*c_o, hmap.get(exp, 0)
                    # Drop terms whose coefficients cancel exactly.
                    if new != -accumulated:
                        hmap[exp] = accumulated + new
                    elif accumulated:
                        del hmap[exp]
            hmap.units_of_product(self.hmap.units, other.hmap.units)
            out = Signomial(hmap)
            out.ast = ("mul", astorder)
            return out
        return NotImplemented

    def __truediv__(self, other):
        "Support the / operator: divide by a number or a Monomial."
        if isinstance(other, Numbers):
            out = self*other**-1
            out.ast = ("div", (self, other))
            return out
        if isinstance(other, Monomial):
            return other.__rtruediv__(self)
        return NotImplemented

    def __pow__(self, expo):
        # Only non-negative integer powers are supported for general
        # Signomials; computed by repeated multiplication.
        if isinstance(expo, int) and expo >= 0:
            p = 1
            for _ in range(expo):
                p *= self
            # Bug fix: the previous code decremented expo to 0 before
            # recording it in the ast, and crashed with an AttributeError
            # for expo == 0 (when p is the plain integer 1).
            if isinstance(p, Signomial):
                p.ast = ("pow", (self, expo))
            return p
        return NotImplemented

    def __neg__(self):
        if SignomialsEnabled:  # pylint: disable=using-constant-test
            out = -1*self
            out.ast = ("neg", self)
            return out
        return NotImplemented

    def __sub__(self, other):
        return self + -other if SignomialsEnabled else NotImplemented  # pylint: disable=using-constant-test

    def __rsub__(self, other):
        return other + -self if SignomialsEnabled else NotImplemented  # pylint: disable=using-constant-test

    def chop(self):
        "Returns a list of monomials in the signomial."
        monmaps = [NomialMap({exp: c}) for exp, c in self.hmap.items()]
        for monmap in monmaps:
            monmap.units = self.hmap.units
        return [Monomial(monmap) for monmap in sorted(monmaps, key=str)]
class Posynomial(Signomial):
    "A Signomial whose coefficients are all strictly positive."
    __hash__ = Signomial.__hash__

    def __le__(self, other):
        if not isinstance(other, Numbers + (Monomial,)):
            return NotImplemented
        return PosynomialInequality(self, "<=", other)

    # Posynomial.__ge__ falls back on Signomial.__ge__

    def mono_lower_bound(self, x0):
        """Monomial lower bound at a point x0.

        Arguments
        ---------
        x0 (dict):
            point at which the bound is formed

        Returns
        -------
        Monomial
        """
        # Delegates to the monomial approximation about x0.
        return self.mono_approximation(x0)
class Monomial(Posynomial):
    "A Posynomial with only one term"
    __hash__ = Posynomial.__hash__

    @property
    def exp(self):
        "Creates exp or returns a cached exp"
        if not self._exp:
            self._exp, = self.hmap.keys()  # pylint: disable=attribute-defined-outside-init
        return self._exp

    @property
    def c(self):  # pylint: disable=invalid-name
        "Creates c or returns a cached c"
        if not self._c:
            self._c, = self.cs  # pylint: disable=attribute-defined-outside-init, invalid-name
        return self._c

    def __rtruediv__(self, other):
        "Divide other by this Monomial"
        if isinstance(other, Numbers + (Signomial,)):
            out = other * self**-1
            out.ast = ("div", (other, self))
            return out
        return NotImplemented

    def __pow__(self, expo):
        # Unlike Signomial.__pow__, a single-term nomial supports arbitrary
        # numeric powers (including negative and fractional).
        if isinstance(expo, Numbers):
            (exp, c), = self.hmap.items()
            # expo == 0 collapses to the dimensionless constant c**0.
            exp = exp*expo if expo else EMPTY_HV
            hmap = NomialMap({exp: c**expo})
            if expo and self.hmap.units:
                hmap.units = self.hmap.units**expo
            else:
                hmap.units = None
            out = Monomial(hmap)
            out.ast = ("pow", (self, expo))
            return out
        return NotImplemented

    def __eq__(self, other):
        # Comparing two monomials forms a MonomialEquality constraint
        # rather than returning a boolean.
        if isinstance(other, MONS):
            try:  # if both are monomials, return a constraint
                return MonomialEquality(self, other)
            except (DimensionalityError, ValueError) as e:
                print("Infeasible monomial equality: %s" % e)
                return False
        return super().__eq__(other)

    def __ge__(self, other):
        if isinstance(other, Numbers + (Posynomial,)):
            return PosynomialInequality(self, ">=", other)
        # elif isinstance(other, np.ndarray):
        #     return other.__le__(self, rev=True)
        return NotImplemented

    # Monomial.__le__ falls back on Posynomial.__le__

    def mono_approximation(self, x0):
        # A monomial is its own monomial approximation at any point.
        return self
# Types against which Monomial.__eq__ may form a MonomialEquality.
MONS = Numbers + (Monomial,)


#######################################################
####### CONSTRAINTS ###################################
#######################################################
class ScalarSingleEquationConstraint(SingleEquationConstraint):
    "A SingleEquationConstraint with scalar left and right sides."
    generated_by = v_ss = parent = None
    bounded = meq_bounded = {}

    def __init__(self, left, oper, right):
        # Coerce both sides to Signomials and collect their variable keys.
        lr = [left, right]
        self.vks = set()
        for i, sig in enumerate(lr):
            if isinstance(sig, Signomial):
                for exp in sig.hmap:
                    self.vks.update(exp)
            else:
                lr[i] = Signomial(sig)
        from .. import NamedVariables
        self.lineage = tuple(NamedVariables.lineage)
        super().__init__(lr[0], oper, lr[1])

    def relaxed(self, relaxvar):
        "Returns the relaxation of the constraint in a list."
        if self.oper == ">=":
            return [relaxvar*self.left >= self.right]
        if self.oper == "<=":
            return [self.left <= relaxvar*self.right]
        if self.oper == "=":
            return [self.left <= relaxvar*self.right,
                    relaxvar*self.left >= self.right]
        # Bug fix: the format string previously received a single argument
        # for two placeholders (and passed `self` as a second ValueError
        # argument), raising TypeError instead of the intended ValueError.
        raise ValueError(
            "Constraint %s had unknown operator %s." % (self, self.oper))
# pylint: disable=too-many-instance-attributes, invalid-unary-operand-type
class PosynomialInequality(ScalarSingleEquationConstraint):
    """A constraint of the general form monomial >= posynomial

    Stored in the .unsubbed attribute as a list of Posynomials, each (self <= 1)
    Usually initialized via operator overloading, e.g. cc = (y**2 >= 1 + x)
    """
    feastol = 1e-3
    # NOTE: follows .check_result's max default, but 1e-3 seems a bit lax...

    def __init__(self, left, oper, right):
        ScalarSingleEquationConstraint.__init__(self, left, oper, right)
        if self.oper == "<=":
            p_lt, m_gt = self.left, self.right
        elif self.oper == ">=":
            m_gt, p_lt = self.left, self.right
        else:
            raise ValueError("operator %s is not supported." % self.oper)
        self.unsubbed = self._gen_unsubbed(p_lt, m_gt)
        self.bounded = set()
        # In a posy <= 1 form, a positive exponent bounds that variable
        # above and a negative exponent bounds it below.
        for p in self.unsubbed:
            for exp in p.hmap:
                for vk, x in exp.items():
                    self.bounded.add((vk, "upper" if x > 0 else "lower"))

    def _simplify_posy_ineq(self, hmap, pmap=None, fixed=None):
        "Simplify a posy <= 1 by moving constants to the right side."
        if EMPTY_HV not in hmap:
            return hmap
        coeff = 1 - hmap[EMPTY_HV]
        if pmap is not None:  # note constant term's mmap
            const_idx = list(hmap.keys()).index(EMPTY_HV)
            self.const_mmap = self.pmap.pop(const_idx)  # pylint: disable=attribute-defined-outside-init
            self.const_coeff = coeff  # pylint: disable=attribute-defined-outside-init
        if coeff >= -self.feastol and len(hmap) == 1:
            return None  # a tautological monomial!
        if coeff < -self.feastol:
            msg = "'%s' is infeasible by %.2g%%" % (self, -coeff*100)
            if fixed:
                msg += " after substituting %s." % fixed
            raise PrimalInfeasible(msg)
        # Divide the remaining terms by the constant slack on the right.
        scaled = hmap/coeff
        scaled.units = hmap.units
        del scaled[EMPTY_HV]
        return scaled

    def _gen_unsubbed(self, p_lt, m_gt):
        """Returns the unsubstituted posys <= 1.

        Parameters
        ----------
        p_lt : posynomial
            the left-hand side of (posynomial < monomial)

        m_gt : monomial
            the right-hand side of (posynomial < monomial)

        """
        try:
            m_exp, = m_gt.hmap.keys()
            m_c, = m_gt.hmap.values()
        except ValueError:
            raise TypeError("greater-than side '%s' is not monomial." % m_gt)
        m_c *= units.of_division(m_gt, p_lt)
        hmap = p_lt.hmap.copy()
        # Divide every term by the greater-than monomial.
        for exp in list(hmap):
            hmap[exp-m_exp] = hmap.pop(exp)/m_c
        hmap = self._simplify_posy_ineq(hmap)
        return [Posynomial(hmap)] if hmap else []

    def as_hmapslt1(self, substitutions):
        "Returns the posys <= 1 representation of this constraint."
        out = []
        for posy in self.unsubbed:
            fixed, _, _ = parse_subs(posy.vks, substitutions, clean=True)
            hmap = posy.hmap.sub(fixed, posy.vks, parsedsubs=True)
            # pmap records how substituted monomials map back onto the
            # unsubstituted ones, for sensitivity bookkeeping later.
            self.pmap = hmap.mmap(posy.hmap)  # pylint: disable=attribute-defined-outside-init
            del hmap.expmap, hmap.csmap  # needed only for the mmap call above
            hmap = self._simplify_posy_ineq(hmap, self.pmap, fixed)
            if hmap is not None:
                if any(c <= 0 for c in hmap.values()):
                    raise InvalidGPConstraint("'%s' became Signomial after sub"
                                              "stituting %s" % (self, fixed))
                hmap.parent = self
                out.append(hmap)
        return out

    def sens_from_dual(self, la, nu, _):
        "Returns the variable/constraint sensitivities from lambda/nu"
        presub, = self.unsubbed
        if hasattr(self, "pmap"):
            # Remap nus from the substituted back to the unsubstituted
            # monomials, including the share of any folded-in constant term.
            nu_ = np.zeros(len(presub.hmap))
            for i, mmap in enumerate(self.pmap):
                for idx, percentage in mmap.items():
                    nu_[idx] += percentage*nu[i]
            del self.pmap  # not needed after dual has been derived
            if hasattr(self, "const_mmap"):
                scale = (1-self.const_coeff)/self.const_coeff
                for idx, percentage in self.const_mmap.items():
                    nu_[idx] += percentage * la*scale
                del self.const_mmap  # not needed after dual has been derived
            nu = nu_
        self.v_ss = HashVector()
        if self.parent:
            self.parent.v_ss = self.v_ss
        if self.generated_by:
            self.generated_by.v_ss = self.v_ss
        # Variable sensitivity is the exponent-weighted sum of monomial nus.
        for nu_i, exp in zip(nu, presub.hmap):
            for vk, x in exp.items():
                self.v_ss[vk] = nu_i*x + self.v_ss.get(vk, 0)
        return self.v_ss, la
class MonomialEquality(PosynomialInequality):
    "A Constraint of the form Monomial == Monomial."
    oper = "="

    def __init__(self, left, right):
        # pylint: disable=super-init-not-called,non-parent-init-called
        ScalarSingleEquationConstraint.__init__(self, left, self.oper, right)
        self.unsubbed = self._gen_unsubbed(self.left, self.right)
        self.bounded = set()
        self.meq_bounded = {}
        self._las = []  # lambdas accumulate over the two <= directions
        if self.unsubbed and len(self.vks) > 1:
            exp, = self.unsubbed[0].hmap
            for key, e in exp.items():
                # Each variable is bounded in both directions, conditional
                # on all the *other* variables being bounded; the bound
                # direction flips with the relative sign of the exponents.
                s_e = np.sign(e)
                # NOTE: the comprehensions deliberately shadow `e`.
                ubs = frozenset((k, "upper" if np.sign(e) != s_e else "lower")
                                for k, e in exp.items() if k != key)
                lbs = frozenset((k, "lower" if np.sign(e) != s_e else "upper")
                                for k, e in exp.items() if k != key)
                self.meq_bounded[(key, "upper")] = frozenset([ubs])
                self.meq_bounded[(key, "lower")] = frozenset([lbs])

    def _gen_unsubbed(self, left, right):  # pylint: disable=arguments-differ
        "Returns the unsubstituted posys <= 1."
        # An equality is the pair of inequalities left/right <= 1 and
        # right/left <= 1.
        unsubbed = PosynomialInequality._gen_unsubbed
        l_over_r = unsubbed(self, left, right)
        r_over_l = unsubbed(self, right, left)
        return l_over_r + r_over_l

    def as_hmapslt1(self, substitutions):
        "Tags posynomials for dual feasibility checking"
        out = super().as_hmapslt1(substitutions)
        for h in out:
            h.from_meq = True  # pylint: disable=attribute-defined-outside-init
        return out

    def __bool__(self):
        'A constraint not guaranteed to be satisfied evaluates as "False".'
        return bool(self.left.c == self.right.c
                    and self.left.exp == self.right.exp)

    def sens_from_dual(self, la, nu, _):
        "Returns the variable/constraint sensitivities from lambda/nu"
        # Called once per direction; the net sensitivity is the difference
        # of the two lambdas and emerges on the second call.
        self._las.append(la)
        if len(self._las) == 1:
            return {}, 0
        la = self._las[0] - self._las[1]
        self._las = []
        exp, = self.unsubbed[0].hmap
        self.v_ss = exp*la
        return self.v_ss, la
class SignomialInequality(ScalarSingleEquationConstraint):
    """A constraint of the general form posynomial >= posynomial
    Stored at .unsubbed[0] as a single Signomial (0 >= self)"""
    def __init__(self, left, oper, right):
        ScalarSingleEquationConstraint.__init__(self, left, oper, right)
        if not SignomialsEnabled:
            raise TypeError("Cannot initialize SignomialInequality"
                            " outside of a SignomialsEnabled environment.")
        # Normalize to "plt <= pgt" regardless of the operator's direction.
        if self.oper == "<=":
            plt, pgt = self.left, self.right
        elif self.oper == ">=":
            pgt, plt = self.left, self.right
        else:
            raise ValueError("operator %s is not supported." % self.oper)
        # Stored as the single signomial "plt - pgt <= 0".
        self.unsubbed = [plt - pgt]
        self.bounded = self.as_gpconstr({}).bounded
    def as_hmapslt1(self, substitutions):
        "Returns the posys <= 1 representation of this constraint."
        siglt0, = self.unsubbed
        siglt0 = siglt0.sub(substitutions, require_positive=False)
        posy, negy = siglt0.posy_negy()
        # NOTE: identity comparison with the literal 0 is deliberate (see the
        # pylint disables): posy_negy() returns the int 0 itself for an empty
        # side, while Posynomials overload __eq__.
        if posy is 0:  # pylint: disable=literal-comparison
            print("Warning: SignomialConstraint %s became the tautological"
                  " constraint 0 <= %s after substitution." % (self, negy))
            return []
        if negy is 0:  # pylint: disable=literal-comparison
            raise ValueError("%s became the infeasible constraint %s <= 0"
                             " after substitution." % (self, posy))
        if hasattr(negy, "cs") and len(negy.cs) > 1:
            # More than one negative term left: not reducible to a posynomial
            # inequality, so a sequential (local) solve is required.
            raise InvalidGPConstraint(
                "%s did not simplify to a PosynomialInequality; try calling"
                " `.localsolve` instead of `.solve` to form your Model as a"
                " SequentialGeometricProgram." % self)
        # all but one of the negy terms becomes compatible with the posy
        p_ineq = PosynomialInequality(posy, "<=", negy)
        p_ineq.parent = self
        # Cache the pieces of the unsubstituted signomial that
        # sens_from_dual needs to apply the chain rule later on.
        siglt0_us, = self.unsubbed
        siglt0_hmap = siglt0_us.hmap.sub(substitutions, siglt0_us.vks)
        negy_hmap = NomialMap()
        posy_hmaps = defaultdict(NomialMap)
        # Partition the original exponents into the negy term and the
        # coefficient maps of each resulting monomial (exp - negy.exp).
        for o_exp, exp in siglt0_hmap.expmap.items():
            if exp == negy.exp:
                negy_hmap[o_exp] = -siglt0_us.hmap[o_exp]
            else:
                posy_hmaps[exp-negy.exp][o_exp] = siglt0_us.hmap[o_exp]
        # pylint: disable=attribute-defined-outside-init
        self._mons = [Monomial(NomialMap({k: v}))
                      for k, v in (posy/negy).hmap.items()]
        self._negysig = Signomial(negy_hmap, require_positive=False)
        self._coeffsigs = {exp: Signomial(hmap, require_positive=False)
                           for exp, hmap in posy_hmaps.items()}
        self._sigvars = {exp: (list(self._negysig.vks)
                               + list(sig.vks))
                         for exp, sig in self._coeffsigs.items()}
        return p_ineq.as_hmapslt1(substitutions)
    def sens_from_dual(self, la, nu, result):
        """ We want to do the following chain:
               dlog(Obj)/dlog(monomial[i])    = nu[i]
               * dlog(monomial)/d(monomial)   = 1/(monomial value)
               * d(monomial)/d(var)           = see below
               * d(var)/dlog(var)             = var
               = dlog(Obj)/dlog(var)
            each final monomial is really
               (coeff signomial)/(negy signomial)
            and by the chain rule d(monomial)/d(var) =
               d(coeff)/d(var)*1/negy + d(1/negy)/d(var)*coeff
               = d(coeff)/d(var)*1/negy - d(negy)/d(var)*coeff*1/negy**2
        """
        # pylint: disable=too-many-locals, attribute-defined-outside-init
        # pylint: disable=no-member
        def subval(posy):
            "Substitute solution into a posynomial and return the result"
            hmap = posy.sub(result["variables"],
                            require_positive=False).hmap
            (key, value), = hmap.items()
            assert not key  # constant
            return value
        self.v_ss = {}
        invnegy_val = 1/subval(self._negysig)
        for i, nu_i in enumerate(nu):
            # Each dual value nu_i corresponds to one cached monomial.
            mon = self._mons[i]
            inv_mon_val = 1/subval(mon)
            coeff = self._coeffsigs[mon.exp]
            for var in self._sigvars[mon.exp]:
                # Quotient rule: d((coeff)/(negy))/d(var), evaluated at the
                # solution point.
                d_mon_d_var = (subval(coeff.diff(var))*invnegy_val
                               - (subval(self._negysig.diff(var))
                                  * subval(coeff) * invnegy_val**2))
                var_val = result["variables"][var]
                sens = (nu_i*inv_mon_val*d_mon_d_var*var_val)
                assert isinstance(sens, float)
                self.v_ss[var] = sens + self.v_ss.get(var, 0)
        return self.v_ss, la
    def as_gpconstr(self, x0):
        "Returns GP-compatible approximation at x0"
        siglt0, = self.unsubbed
        posy, negy = siglt0.posy_negy()
        # default guess of 1.0 for unspecified negy variables
        x0 = {vk: x0.get(vk, 1) for vk in negy.vks}
        pconstr = PosynomialInequality(posy, "<=", negy.mono_lower_bound(x0))
        pconstr.generated_by = self
        return pconstr
class SingleSignomialEquality(SignomialInequality):
    """A constraint of the general form posynomial == posynomial."""

    def __init__(self, left, right):
        SignomialInequality.__init__(self, left, "<=", right)
        self.oper = "="
        self.meq_bounded = self.as_gpconstr({}).meq_bounded

    def as_hmapslt1(self, substitutions):
        """Signomial equalities are never considered GP-compatible."""
        raise InvalidGPConstraint(self)

    def as_gpconstr(self, x0):
        """Return a GP-compatible (monomial equality) approximation at x0."""
        signomial, = self.unsubbed
        posy, negy = signomial.posy_negy()
        # Variables without an initial guess default to 1.0.
        guess = {vk: x0.get(vk, 1) for vk in signomial.vks}
        approx = posy.mono_lower_bound(guess) == negy.mono_lower_bound(guess)
        approx.generated_by = self
        return approx
| hoburg/gpkit | gpkit/nomials/math.py | Python | mit | 26,707 |
# region gplv3preamble
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow
#
# MSML has been developed in the framework of 'SFB TRR 125 Cognition-Guided Surgery'
#
# If you use this software in academic work, please cite the paper:
# S. Suwelack, M. Stoll, S. Schalck, N.Schoch, R. Dillmann, R. Bendl, V. Heuveline and S. Speidel,
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow,
# Medicine Meets Virtual Reality (MMVR) 2014
#
# Copyright (C) 2013-2014 see Authors.txt
#
# If you have any questions please feel free to contact us at [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# endregion
__author__ = 'Alexander Weigl'
| CognitionGuidedSurgery/msml | src/msmllab/__init__.py | Python | gpl-3.0 | 1,332 |
from django.conf.urls import url, include
from core.views import views
# URL routing table: the landing page plus one include per app area, each
# of which keeps its own routes in a core.urlincludes module.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^projects/', include('core.urlincludes.projects')),
    url(r'^experiments/', include('core.urlincludes.experiments')),
    url(r'^samples/', include('core.urlincludes.samples')),
    url(r'^datafiles/', include('core.urlincludes.data_files')),
]
| slohr/paperlims | paperlims/core/urls.py | Python | mit | 402 |
"""
Print elements of a linked list in reverse order as standard output
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def Reverse(head):
    """Return the head of a new linked list with the elements reversed.

    The input list is not modified: a fresh Node is prepended for each
    original element, so the last original element becomes the new head.
    Returns None (the given head) for an empty list.

    Fixes the original implementation, which returned a two-node list for
    a single-element input: head.next was pre-seeded with a copy of the
    same element and the fix-up loop (``while i > 0``) never ran to clear it.
    """
    if not head:
        return head
    reversed_head = None
    node = head
    while node:
        # Prepend a copy of the current element onto the reversed list.
        reversed_head = Node(data=node.data, next_node=reversed_head)
        node = node.next
    return reversed_head
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import re
import urlparse
class LanguageStripper(object):
    """Strips language markers (codes and names) out of URIs.

    Used for finding candidate parallel pages: two URIs that differ only
    in their language markers usually map to the same stripped URI.
    Matching is case-insensitive and only fires at non-alphanumeric
    boundaries, so e.g. 'en' is removed from '/en/about' but not from
    '/generic/'.
    """

    def __init__(self, languages=None, strip_query_variables=False):
        """languages: optional list of target language codes (e.g. ['fr'])
        to restrict matching to; None matches all known languages.
        strip_query_variables: also drop well-known language-selecting
        query keys such as 'lang' or 'locale'.
        """
        self._strip_query_variables = []
        if strip_query_variables:
            self._strip_query_variables = [
                'lang', 'clang', 'language', 'locale', 'selectedLocale']
        self.code_to_language = {}
        # These should all be lower-case, matching is case-insensitive
        for code in ['arabic', 'ara', 'ar']:
            self.code_to_language[code] = 'ar'
        for code in ['bulgarian', 'bul', 'bg']:
            self.code_to_language[code] = 'bg'
        for code in ['czech', 'cze', 'cz', 'cs']:
            self.code_to_language[code] = 'cs'
        for code in ['deutsch', 'german', 'ger', 'deu', 'de']:
            self.code_to_language[code] = 'de'
        for code in ['english', 'eng', 'en']:
            self.code_to_language[code] = 'en'
        for code in ['espanol', 'spanish', 'spa', 'esp', 'es']:
            self.code_to_language[code] = 'es'
        for code in ['french', 'francais', 'fran', 'fra', 'fre', 'fr']:
            self.code_to_language[code] = 'fr'
        for code in ['chinese', 'chi', 'zh']:
            self.code_to_language[code] = 'zh'
        # new, not in 'Dirt-Cheap'-paper
        for code in ['tedesco', 'de-de', 'de-ch', 'de-at', 'de-li', 'de-lu',
                     'allemand']:
            self.code_to_language[code] = 'de'
        for code in ['fr-be', 'fr-ca', 'fr-fr', 'fr-lu', 'fr-ch', 'f']:
            self.code_to_language[code] = 'fr'
        for code in ['italian', 'italiano', 'ital', 'ita', 'it-it', 'it-ch',
                     'it']:
            self.code_to_language[code] = 'it'
        for code in ['portuguese', 'portugues', 'pt-pt', 'pt-br', 'ptg', 'ptb', 'pt']:
            self.code_to_language[code] = 'pt'
        for code in ['russian', 'russkiy', 'ru-ru', 'rus', 'ru']:
            self.code_to_language[code] = 'ru'
        for code in ['dutch', 'nederlands', 'nl-nl', 'nld', 'dut', 'nl']:
            self.code_to_language[code] = 'nl'
        for code in ['en-en', 'en-us', 'en-uk', 'en-ca', 'en-bz', 'en-ab',
                     'en-in', 'en-ie', 'en-jm', 'en-nz', 'en-ph', 'en-za',
                     'en-tt', 'gb', 'en-gb', 'inglese', 'englisch', 'us', 'e']:
            self.code_to_language[code] = 'en'
        for code in ['romanian', 'romana', 'romlang', 'rom', 'ro-ro', 'ro']:
            self.code_to_language[code] = 'ro'
        for code in ['soma', 'som', 'so', 'somal', 'somali', 'so-so',
                     'af-soomaali', 'soomaali']:
            self.code_to_language[code] = 'so'
        for code in ['turkish', 'tur', 'turkic', 'tr-tr', 'tr']:
            self.code_to_language[code] = 'tr'
        for code in ['finnish', 'finnisch', 'fin', 'suomi', 'suomeksi',
                     'suominen', 'suomija', 'fi-fi', 'fi']:
            self.code_to_language[code] = 'fi'
        if languages is not None:
            kv_pairs = [(k, v) for k, v in self.code_to_language.items()
                        if v in languages]
            self.code_to_language = dict(kv_pairs)
        # add de_de from de-de; iterate over a snapshot because we mutate
        # the dict while looping (list() keeps this valid on Python 3 too).
        for code, lang in list(self.code_to_language.items()):
            self.code_to_language[code.replace('-', '_')] = lang
        # Longest codes first, so e.g. 'en-us' wins over 'en' in alternation.
        keys = sorted(self.code_to_language, key=len, reverse=True)
        regexp_string = '(?<![a-zA-Z0-9])(?:%s)(?![a-zA-Z0-9])' % (
            '|'.join(keys))
        self.re_code = re.compile(regexp_string, re.IGNORECASE)
        # remove '-eng' including the hyphen but not -fr from fr-fr
        keys = [key for key in keys if '-' not in key and '_' not in key]
        regexp_string = '[-_](?:%s)(?![a-zA-Z0-9])' % (
            '|'.join(keys))
        self.re_strip = re.compile(regexp_string, re.IGNORECASE)
        self.re_punct_at_start = re.compile(r'^[^a-zA-Z0-9]+')
        self.re_punct_at_end = re.compile(r'[^a-zA-Z0-9]+$')

    def strip_path(self, path):
        """Strip language markers from each path component, dropping any
        component that consisted only of a language marker."""
        components = []
        for c in path.split('/'):
            if not c:
                components.append(c)
                continue
            stripped = self.re_strip.sub('', c)
            stripped = self.re_code.sub('', stripped)
            if stripped:
                # Stripping must not introduce leading punctuation that the
                # original component didn't have (e.g. 'en-x' -> '-x').
                if not self.re_punct_at_start.match(c) and \
                        self.re_punct_at_start.match(stripped):
                    stripped = self.re_punct_at_start.sub('', stripped)
            if stripped:
                # Same for trailing punctuation (e.g. 'x-en' -> 'x-').
                if not self.re_punct_at_end.match(c) and \
                        self.re_punct_at_end.match(stripped):
                    stripped = self.re_punct_at_end.sub('', stripped)
            if stripped:
                components.append(stripped)
        return '/'.join(components)

    def strip_query(self, query):
        """Drop language-selecting query variables and pairs whose key is
        nothing but a language marker; re-encode the remainder."""
        result = []
        for k, v in urlparse.parse_qsl(query, keep_blank_values=True):
            k_lower = k.lower()
            ignore = False
            # BUGFIX: the inner loop variable used to be named 'v',
            # shadowing the query value above, so every kept pair was
            # appended as (k, 'selectedLocale') when stripping was enabled.
            for strip_var in self._strip_query_variables:
                # NOTE(review): testing the strip variable's prefix/suffix
                # against the key (rather than the reverse) looks inverted;
                # preserved as-is — confirm the intended direction.
                if strip_var.endswith(k_lower) or strip_var.startswith(k_lower):
                    ignore = True
            if ignore:
                continue
            stripped_k = self.re_code.sub('', k)
            if not stripped_k:
                continue
            stripped_v = self.re_code.sub('', v)
            if stripped_v == v or stripped_v:
                result.append((k, v))
        return urllib.urlencode(result)

    def stripn(self, uri):
        """Return (stripped_uri, number_of_substitutions)."""
        return self.re_code.subn('', uri)

    def strip(self, uri):
        """Return uri with all language markers removed."""
        return self.re_code.sub('', uri)

    def match(self, uri):
        """Return the language code of the first marker found in uri,
        or '' if none matches."""
        for match in self.re_code.findall(uri):
            match = match.lower()
            assert match in self.code_to_language, \
                'Unknown match: %s\n' % match
            return self.code_to_language[match]
        return ''

    def strip_uri(self, uri, expected_language=None,
                  remove_index=False):
        ''' Returns (stripped_uri, success) '''
        normalized_uri = urlparse.urlunsplit(urlparse.urlsplit(uri))
        parsed_uri = urlparse.urlparse(normalized_uri)
        matched_languages = [self.match(parsed_uri.path),
                             self.match(parsed_uri.query)]
        if (expected_language is not None) and \
                (expected_language not in matched_languages):
            # we removed a bit of the URL but is does not support our
            # hope to find expected_language, e.g. removed /fr/ when we were
            # looking for Italian pages.
            return '', False
        stripped_path = self.strip_path(parsed_uri.path)
        if len(stripped_path) < len(parsed_uri.path):
            # repair some stripping artifacts
            stripped_path = re.sub(r'//+', '/', stripped_path)
            stripped_path = re.sub(r'__+', '_', stripped_path)
            stripped_path = re.sub(r'/_+', '/', stripped_path)
            stripped_path = re.sub(r'_/', '/', stripped_path)
            stripped_path = re.sub(r'--+', '-', stripped_path)
        # remove new trailing /
        if stripped_path and stripped_path[-1] == '/' \
                and parsed_uri.path and parsed_uri.path[-1] != '/':
            stripped_path = stripped_path[:-1]
        # add removed trailing /
        if not stripped_path.endswith('/') and \
                parsed_uri.path.endswith('/'):
            stripped_path += '/'
        stripped_query = self.strip_query(parsed_uri.query)
        # remove index files from tail of path if query empty
        if remove_index and not stripped_query:
            if stripped_path.split('/')[-1].startswith('index'):
                stripped_path = '/'.join(stripped_path.split('/')[:-1]) + '/'
        # NOTE(review): the cleaned netloc below (auth/port removed) is only
        # used for the emptiness check; the result still carries
        # parsed_uri.netloc — confirm that is intended.
        netloc = parsed_uri.netloc
        if '@' in netloc:
            netloc = netloc.split('@')[1]
        if ':' in netloc:
            netloc = netloc.split(':')[0]
        if not netloc:
            return '', False
        stripped_uri = urlparse.ParseResult(scheme='http',
                                            netloc=parsed_uri.netloc,
                                            path=stripped_path,
                                            params='',
                                            query=stripped_query,
                                            fragment='').geturl()
        return stripped_uri, True
if __name__ == '__main__':
    # Filter mode: read tab-separated URIs from stdin and, for lines where
    # stripping changed at least one URI, echo the original line followed
    # by the stripped URIs.
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('-language', help='language code')
    args = parser.parse_args()
    language_stripper = LanguageStripper(languages=[args.language])
    for line in sys.stdin:
        stripped = []
        changed = False
        for uri in line.strip().split('\t'):
            stripped_uri, success = language_stripper.strip_uri(
                uri, expected_language=args.language)
            if success:
                stripped.append(stripped_uri)
                if stripped_uri != uri:
                    changed = True
        if changed:
            # Python 2 print statement (this module is Python 2 only).
            print line.strip(), '\t'.join(stripped)
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
from oslo_config import cfg
import oslo_messaging as messaging
from cinder import rpc
from cinder import test
class IsolationTestCase(test.TestCase):
    """Ensure that things are cleaned up after failed tests.

    These tests don't really do much here, but if isolation fails a bunch
    of other tests should fail.
    """

    def test_service_isolation(self):
        # Starting a service must not leak state into subsequent tests.
        self.start_service('volume')

    def test_rpc_consumer_isolation(self):
        class NeverCalled(object):
            # Any attribute access on this endpoint means a message leaked
            # in from another test, which is a failure.
            def __getattribute__(self, name):
                assert False, "I should never get called."

        rpc_server = rpc.get_server(
            messaging.Target(topic='volume', server=cfg.CONF.host),
            endpoints=[NeverCalled()])
        rpc_server.start()
| Akrog/cinder | cinder/tests/test_test.py | Python | apache-2.0 | 1,578 |
import math
ceil = lambda f: int(math.ceil(f))
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
import cairo
from gi.repository import Pango
from gi.repository import PangoCairo
# Drawing metrics (pixels unless noted).
line = 10       # width of the axis line
curve = 60      # radius of the rounded axis corner
dotSmall = 14   # diameter of an idle spot
dotLarge = 24   # diameter of a hovered spot
lineprc = 1/7.  # spot outline width, as a fraction of the dot diameter
hpadding = 5    # tooltip horizontal padding
vpadding = 3    # tooltip vertical padding
class SpotGraph (Gtk.EventBox):
    """A 2-D scatter ("spot") graph widget drawn with cairo.

    Spots are added at fractional (0-1) coordinates, automatically nudged
    to a free position so they never overlap, and emit 'spotClicked' with
    the spot's name when clicked. Hovering a spot enlarges it and shows a
    tooltip with its text.
    """
    __gsignals__ = {
        'spotClicked' : (GObject.SignalFlags.RUN_FIRST, None, (str,))
    }
    def __init__ (self):
        GObject.GObject.__init__(self)
        self.connect("draw", self.expose)
        # Per spot-type colors: [fill, outline] as 0-255 RGB, normalized
        # to 0-1 floats below for cairo.
        self.typeColors = [[[85, 152, 215], [59, 106, 151]],
                           [[115, 210, 22], [78, 154, 6]]]
        for type in self.typeColors:
            for color in type:
                color[0] = color[0]/255.
                color[1] = color[1]/255.
                color[2] = color[2]/255.
        self.add_events( Gdk.EventMask.LEAVE_NOTIFY_MASK |
                         Gdk.EventMask.POINTER_MOTION_MASK |
                         Gdk.EventMask.BUTTON_PRESS_MASK |
                         Gdk.EventMask.BUTTON_RELEASE_MASK )
        self.state = 0
        self.connect("button_press_event", self.button_press)
        self.connect("button_release_event", self.button_release)
        self.connect("motion_notify_event", self.motion_notify)
        self.connect("leave_notify_event", self.motion_notify)
        self.connect("size-allocate", self.size_allocate)
        self.cords = []          # last known pointer position (pixels)
        self.hovered = None      # spot tuple currently under the pointer
        self.pressed = False     # button currently held on a spot
        self.spots = {}          # name -> (x, y, type, name, text)
        self.spotQueue = [] # For spots added prior to widget allocation
        self.xmarks = []         # (x-fraction, title) axis labels
        self.ymarks = []         # (y-fraction, title) axis labels
        self.set_visible_window(False)

    ############################################################################
    # Drawing                                                                  #
    ############################################################################

    def redraw_canvas(self, prect=None):
        """Invalidate (and synchronously repaint) prect, or the whole
        widget if prect is None."""
        if self.get_window():
            if not prect:
                alloc = self.get_allocation()
                prect = (0, 0, alloc.width, alloc.height)
            rect = Gdk.Rectangle()
            rect.x, rect.y, rect.width, rect.height = prect
            self.get_window().invalidate_rect(rect, True)
            self.get_window().process_updates(True)

    def expose(self, widget, ctx):
        """'draw' signal handler: repaint via a fresh cairo context."""
        context = widget.get_window().cairo_create()
        self.draw(context)
        return False

    def draw (self, context):
        """Paint the axes, axis marks, all spots, and the hovered spot's
        tooltip onto the given cairo context."""
        alloc = self.get_allocation()
        width = alloc.width
        height = alloc.height
        #------------------------------------------------------ Paint side ruler
        context.move_to(alloc.x+line, alloc.y+line)
        context.rel_line_to(0, height-line*2-curve)
        context.rel_curve_to(0, curve, 0, curve, curve, curve)
        context.rel_line_to(width-line*2-curve, 0)
        sc = self.get_style_context()
        bool1, dark_prelight = sc.lookup_color("p_dark_prelight")
        bool1, fg_prelight = sc.lookup_color("p_fg_prelight")
        bool1, bg_prelight = sc.lookup_color("p_bg_prelight")
        context.set_line_width(line)
        context.set_line_cap(cairo.LINE_CAP_ROUND)
        # NOTE(review): 'state' is computed but never used below — looks
        # like a leftover from a GTK2 port; confirm before removing.
        state = self.state == Gtk.StateType.NORMAL and Gtk.StateType.PRELIGHT or self.state
        context.set_source_rgba(dark_prelight.red, dark_prelight.green, dark_prelight.blue, dark_prelight.alpha)
        context.stroke()
        #------------------------------------------------ Paint horizontal marks
        for x, title in self.xmarks:
            context.set_source_rgba(fg_prelight.red, fg_prelight.green, fg_prelight.blue, fg_prelight.alpha)
            context.set_font_size(12)
            x, y = self.prcToPix (x, 1)
            context.move_to (x+line/2., y-line/2.)
            # Rotate so the label runs upward along the bottom axis.
            context.rotate(-math.pi/2)
            context.show_text(title)
            context.rotate(math.pi/2)
            # Small "pinch" decoration on the axis at the mark position.
            context.set_source_rgba(bg_prelight.red, bg_prelight.green, bg_prelight.blue, bg_prelight.alpha)
            context.move_to (x-line/2., y)
            context.rel_curve_to (6, 0, 6, line, 6, line)
            context.rel_curve_to (0, -line, 6, -line, 6, -line)
            context.close_path()
            context.fill()
        #-------------------------------------------------- Paint vertical marks
        for y, title in self.ymarks:
            context.set_source_rgba(fg_prelight.red, fg_prelight.green, fg_prelight.blue, fg_prelight.alpha)
            context.set_font_size(12)
            x, y = self.prcToPix (0, y)
            context.move_to (x+line/2., y+line/2.)
            context.show_text(title)
            context.set_source_rgba(bg_prelight.red, bg_prelight.green, bg_prelight.blue, bg_prelight.alpha)
            context.move_to (x, y-line/2.)
            context.rel_curve_to (0, 6, -line, 6, -line, 6)
            context.rel_curve_to (line, 0, line, 6, line, 6)
            context.close_path()
            context.fill()
        #----------------------------------------------------------- Paint spots
        context.set_line_width(dotSmall*lineprc)
        for x, y, type, name, text in self.spots.values():
            context.set_source_rgb(*self.typeColors[type][0])
            if self.hovered and name == self.hovered[3]:
                # The hovered spot is painted enlarged further below.
                continue
            x, y = self.prcToPix (x, y)
            context.arc(x, y, dotSmall/(1+lineprc)/2., 0, 2 * math.pi)
            context.fill_preserve()
            context.set_source_rgb(*self.typeColors[type][1])
            context.stroke()
        #--------------------------------------------------- Paint hovered spots
        context.set_line_width(dotLarge*lineprc)
        if self.hovered:
            x, y, type, name, text = self.hovered
            x, y = self.prcToPix (x, y)
            # Use the darker outline color as fill while the button is held.
            if not self.pressed:
                context.set_source_rgb(*self.typeColors[type][0])
            else:
                context.set_source_rgb(*self.typeColors[type][1])
            context.arc(x, y, dotLarge/(1+lineprc)/2., 0, 2 * math.pi)
            context.fill_preserve()
            context.set_source_rgb(*self.typeColors[type][1])
            context.stroke()
            # Tooltip: themed notebook-style frame plus the spot's text.
            x, y, width, height = self.getTextBounds(self.hovered)
            sc = self.get_style_context()
            sc.save()
            sc.add_class(Gtk.STYLE_CLASS_NOTEBOOK)
            Gtk.render_background(sc, context, int(x-hpadding), int(y-vpadding), ceil(width+hpadding*2), ceil(height+vpadding*2))
            Gtk.render_frame(sc, context, int(x-hpadding), int(y-vpadding), ceil(width+hpadding*2), ceil(height+vpadding*2))
            sc.restore()
            context.move_to(x, y)
            context.set_source_rgba(fg_prelight.red, fg_prelight.green, fg_prelight.blue, fg_prelight.alpha)
            PangoCairo.show_layout(context, self.create_pango_layout(text))

    ############################################################################
    # Events                                                                   #
    ############################################################################

    def button_press (self, widget, event):
        alloc = self.get_allocation()
        self.cords = [event.x+alloc.x, event.y+alloc.y]
        self.pressed = True
        if self.hovered:
            self.redraw_canvas(self.getBounds(self.hovered))

    def button_release (self, widget, event):
        alloc = self.get_allocation()
        self.cords = [event.x+alloc.x, event.y+alloc.y]
        self.pressed = False
        if self.hovered:
            self.redraw_canvas(self.getBounds(self.hovered))
            # Only treat it as a click when the release is still on the spot.
            if self.pointIsOnSpot (event.x+alloc.x, event.y+alloc.y, self.hovered):
                self.emit("spotClicked", self.hovered[3])

    def motion_notify (self, widget, event):
        """Track pointer motion (and leave events) to update the hovered
        spot, repainting only the affected areas."""
        alloc = self.get_allocation()
        self.cords = [event.x+alloc.x, event.y+alloc.y]
        spot = self.getSpotAtPoint (*self.cords)
        if self.hovered and spot == self.hovered:
            return
        if self.hovered:
            bounds = self.getBounds(self.hovered)
            self.hovered = None
            self.redraw_canvas(bounds)
        if spot:
            self.hovered = spot
            self.redraw_canvas(self.getBounds(self.hovered))

    def size_allocate (self, widget, allocation):
        """Flush spots queued before the widget had a real allocation."""
        assert self.get_allocation().width > 1
        for spot in self.spotQueue:
            self.addSpot(*spot)
        del self.spotQueue[:]

    ############################################################################
    # Interaction                                                              #
    ############################################################################

    def addSpot (self, name, text, x0, y0, type=0):
        """ x and y are in % from 0 to 1 """
        assert type in range(len(self.typeColors))
        if self.get_allocation().width <= 1:
            # Not allocated yet: defer until size_allocate fires.
            self.spotQueue.append((name, text, x0, y0, type))
            return
        # Note: y is flipped so that y0=0 is the bottom of the graph.
        x1, y1 = self.getNearestFreeNeighbourHexigon(x0, 1-y0)
        spot = (x1, y1, type, name, text)
        self.spots[name] = spot
        if not self.hovered and self.cords and \
                self.pointIsOnSpot (self.cords[0], self.cords[1], spot):
            self.hovered = spot
        self.redraw_canvas(self.getBounds(spot))

    def removeSpot (self, name):
        if not name in self.spots:
            return
        spot = self.spots.pop(name)
        bounds = self.getBounds(spot)
        if spot == self.hovered:
            self.hovered = None
        self.redraw_canvas(bounds)

    def clearSpots (self):
        self.hovered = None
        self.spots.clear()
        self.redraw_canvas()

    def addXMark (self, x, title):
        self.xmarks.append( (x, title) )

    def addYMark (self, y, title):
        # Flipped like spot coordinates: y=0 labels the bottom.
        self.ymarks.append( (1-y, title) )

    ############################################################################
    # Internal stuff                                                           #
    ############################################################################

    def getTextBounds (self, spot):
        """Return (x, y, width, height) in pixels for the hovered spot's
        tooltip text, flipped to stay inside the widget if needed."""
        x, y, type, name, text = spot
        x, y = self.prcToPix (x, y)
        alloc = self.get_allocation()
        width = alloc.width
        height = alloc.height
        extends = self.create_pango_layout(text).get_extents()
        scale = float(Pango.SCALE)
        x_bearing, y_bearing, twidth, theight = [extends[1].x/scale, extends[1].y/scale, extends[1].width/scale, extends[1].height/scale]
        tx = x - x_bearing + dotLarge/2.
        ty = y - y_bearing - theight - dotLarge/2.
        # Prefer above-right of the spot; fall back to the other side when
        # the tooltip would leave the widget.
        if tx + twidth > width and x - x_bearing - twidth - dotLarge/2. > alloc.x:
            tx = x - x_bearing - twidth - dotLarge/2.
        if ty < alloc.y:
            ty = y - y_bearing + dotLarge/2.
        return (tx, ty, twidth, theight)

    def join (self, r0, r1):
        """Return the smallest rectangle containing both r0 and r1
        (rectangles are (x, y, width, height))."""
        x1 = min(r0[0], r1[0])
        x2 = max(r0[0]+r0[2], r1[0]+r1[2])
        y1 = min(r0[1], r1[1])
        y2 = max(r0[1]+r0[3], r1[1]+r1[3])
        return (x1, y1, x2 - x1, y2 - y1)

    def getBounds (self, spot):
        """Return the pixel rectangle a spot occupies, including its
        tooltip when it is the hovered spot (for invalidation)."""
        x, y, type, name, text = spot
        x, y = self.prcToPix (x, y)
        if spot == self.hovered:
            size = dotLarge
        else: size = dotSmall
        bounds = (x-size/2.-1, y-size/2.-1, size+2, size+2)
        if spot == self.hovered:
            x, y, w, h = self.getTextBounds(spot)
            tbounds = (x-hpadding, y-vpadding, w+hpadding*2+1, h+vpadding*2+1)
            return self.join(bounds, tbounds)
        return bounds

    def getNearestFreeNeighbourHexigon (self, xorg, yorg):
        """ This method performs an hexigon search for an empty place to put a
            new dot. """
        x, y = self.prcToPix (xorg, yorg)
        # Start by testing current spot
        if self.isEmpty (x, y):
            return xorg, yorg
        # Unit vectors of the six hexagonal ring directions.
        directions = [(math.cos((i+2)*math.pi/3),
                       math.sin((i+2)*math.pi/3)) for i in range(6)]
        level = 1
        while True:
            # Walk ring 'level': step right onto the ring, then 'level'
            # steps along each of the six directions.
            x += dotSmall
            for dx, dy in directions:
                for i in range(level):
                    if self.isEmpty (x, y):
                        return self.pixToPrc (x, y)
                    x += dx*dotSmall
                    y += dy*dotSmall
            level += 1

    def getNearestFreeNeighbourArchi (self, xorg, yorg):
        """ This method performs an archimedes-spircal search for an empty
            place to put a new dot.
            http://en.wikipedia.org/wiki/Archimedean_spiral """
        xorg, yorg = self.prcToPix (xorg, yorg)
        # Start by testing current spot
        if self.isEmpty (xorg, yorg):
            return self.pixToPrc (xorg, yorg)
        r = 0
        while True:
            # This is an approx to the equation
            # cos((r-s)/(2pi)) = (r^2+s^2-1)/(2*r*s)
            # which gives the next point on the spiral 1 away.
            r = (4*math.pi**3*r + r**2 + math.sqrt(16*math.pi**6 +
                 8*math.pi**3*r + r**4)) / (4*math.pi**3 + 2*r)
            x = r*math.cos(r)/(4*math.pi)*dotSmall + xorg
            y = r*math.sin(r)/(4*math.pi)*dotSmall + yorg
            if self.isEmpty (x, y):
                return self.pixToPrc (x, y)

    def getNearestFreeNeighbourSquare (self, xorg, yorg):
        """ This method performs a spircal search for an empty square to put a
            new dot. """
        up = 2
        right = 1
        down = 1
        left = 2
        x, y = self.prcToPix (xorg, yorg)
        # Start by testing current spot
        if self.isEmpty (x, y):
            return self.pixToPrc (x, y)
        while True:
            for i in range(right):
                x += dotSmall
                if self.isEmpty (x, y):
                    return self.pixToPrc (x, y)
            for i in range(down):
                y += dotSmall
                if self.isEmpty (x, y):
                    return self.pixToPrc (x, y)
            for i in range(left):
                x -= dotSmall
                if self.isEmpty (x, y):
                    return self.pixToPrc (x, y)
            for i in range(up):
                y -= dotSmall
                if self.isEmpty (x, y):
                    return self.pixToPrc (x, y)
            # Grow spiral bounds
            right += 2
            down += 2
            left += 2
            up += 2

    def isEmpty (self, x0, y0):
        """ Returns true if a spot placed on (x, y) is inside the graph and not
            intersecting with other spots.
            x and y should be in pixels, not percent """
        # Make sure spiral search don't put dots outside the graph
        x, y = self.prcToPix(0,0)
        w, h = self.prcToPix(1,1)
        if not x <= x0 <= w or not y <= y0 <= h:
            return False
        # Tests if the spot intersects any other spots
        for x1, y1, type, name, text in self.spots.values():
            x1, y1 = self.prcToPix(x1, y1)
            # Small epsilon so hexagon-grid neighbours exactly dotSmall
            # apart still count as free.
            if (x1-x0)**2 + (y1-y0)**2 < dotSmall**2 - 0.1:
                return False
        return True

    def pointIsOnSpot (self, x0, y0, spot):
        """ Returns true if (x, y) is inside the spot 'spot'. The size of the
            spot is determined based on its hoverness.
            x and y should be in pixels, not percent """
        if spot == self.hovered:
            size = dotLarge
        else: size = dotSmall
        x1, y1, type, name, text = spot
        x1, y1 = self.prcToPix(x1, y1)
        if (x1-x0)**2 + (y1-y0)**2 <= (size/2.)**2:
            return True
        return False

    def getSpotAtPoint (self, x, y):
        """ Returns the spot embrace (x, y) if any. Otherwise it returns None.
            x and y should be in pixels, not percent """
        # Check the hovered spot first, since it is drawn larger.
        if self.hovered and self.pointIsOnSpot(x, y, self.hovered):
            return self.hovered
        for spot in self.spots.values():
            if spot == self.hovered:
                continue
            if self.pointIsOnSpot(x, y, spot):
                return spot
        return None

    def prcToPix (self, x, y):
        """ Translates from 0-1 cords to real world cords """
        alloc = self.get_allocation()
        return x*(alloc.width - line*1.5-dotLarge*0.5) + line*1.5 + alloc.x, \
               y*(alloc.height - line*1.5-dotLarge*0.5) + dotLarge*0.5 + alloc.y

    def pixToPrc (self, x, y):
        """ Translates from real world cords to 0-1 cords """
        alloc = self.get_allocation()
        return (x - line*1.5 - alloc.x)/(alloc.width - line*1.5-dotLarge*0.5), \
               (y - dotLarge*0.5 - alloc.y)/(alloc.height - line*1.5-dotLarge*0.5)
if __name__ == "__main__":
w = Gtk.Window()
sc = w.get_style_context()
data = "@define-color p_bg_color #ededed; \
@define-color p_light_color #ffffff; \
@define-color p_dark_color #a6a6a6; \
@define-color p_dark_prelight #a9a9a9; \
@define-color p_fg_prelight #313739; \
@define-color p_bg_prelight #ededed; \
@define-color p_bg_active #d6d6d6;"
provider = Gtk.CssProvider.new()
provider.load_from_data(data)
sc.add_provider_for_screen(Gdk.Screen.get_default(), provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
nb = Gtk.Notebook()
w.add(nb)
vb = Gtk.VBox()
nb.append_page(vb, None)
sg = SpotGraph()
sg.addXMark(.5, "Center")
sg.addYMark(.5, "Center")
vb.pack_start(sg, True, True, 0)
button = Gtk.Button("New Spot")
def callback (button):
if not hasattr(button, "nextnum"):
button.nextnum = 0
else: button.nextnum += 1
sg.addSpot(str(button.nextnum), "Blablabla", 1, 1, 0)
button.connect("clicked", callback)
vb.pack_start(button, False, True, 0)
w.connect("delete-event", Gtk.main_quit)
w.show_all()
w.resize(400,400)
Gtk.main()
| Aleks31/pychess | lib/pychess/widgets/SpotGraph.py | Python | gpl-3.0 | 18,644 |
# $Id$
#
from inc_cfg import *
ADD_PARAM = ""
if (HAS_SND_DEV == 0):
ADD_PARAM += "--null-audio"
# Call with Speex/8000 codec
test_param = TestParam(
"PESQ codec Speex NB",
[
InstanceParam("UA1", ADD_PARAM + " --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --play-file wavs/input.8.wav"),
InstanceParam("UA2", "--null-audio --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --rec-file wavs/tmp.8.wav --auto-answer 200")
]
)
pesq_threshold = 3.0
| Jopie64/pjsip | tests/pjsua/scripts-pesq/200_codec_speex_8000.py | Python | gpl-2.0 | 478 |
""" History related magics and functionality """
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
import datetime
import os
import re
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
import threading
from traitlets.config.configurable import LoggingConfigurable
from decorator import decorator
from IPython.utils.decorators import undoc
from IPython.utils.path import locate_profile
from traitlets import (
Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
default, observe,
)
from warnings import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@undoc
class DummyDB(object):
    """Black-hole stand-in for the history database.

    Only used in the absence of sqlite: every query yields an empty
    result and commit / context management are no-ops.
    """
    def execute(self, *args, **kwargs):
        return []

    def commit(self, *args, **kwargs):
        pass

    def __enter__(self, *args, **kwargs):
        pass

    def __exit__(self, *args, **kwargs):
        pass
@decorator
def needs_sqlite(f, self, *a, **kw):
    """Decorator: call through only when sqlite-backed history is usable;
    otherwise return an empty list."""
    if sqlite3 is not None and self.enabled:
        return f(self, *a, **kw)
    return []
# Re-export sqlite's exception types so the rest of this module can catch
# them uniformly; substitute inert placeholders when sqlite is unavailable
# (they are never raised in that case, since DummyDB is used instead).
if sqlite3 is not None:
    DatabaseError = sqlite3.DatabaseError
    OperationalError = sqlite3.OperationalError
else:
    @undoc
    class DatabaseError(Exception):
        "Dummy exception when sqlite could not be imported. Should never occur."
    @undoc
    class OperationalError(Exception):
        "Dummy exception when sqlite could not be imported. Should never occur."
# use 16kB as threshold for whether a corrupt history db should be saved
# that should be at least 100 entries or so
_SAVE_DB_SIZE = 16384

@decorator
def catch_corrupt_db(f, self, *a, **kw):
    """A decorator which wraps HistoryAccessor method calls to catch errors from
    a corrupt SQLite database, move the old database out of the way, and create
    a new one.

    We avoid clobbering larger databases because this may be triggered due to filesystem issues,
    not just a corrupt file.
    """
    try:
        return f(self, *a, **kw)
    except (DatabaseError, OperationalError) as e:
        self._corrupt_db_counter += 1
        self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
        if self.hist_file != ':memory:':
            if self._corrupt_db_counter > self._corrupt_db_limit:
                # Repeated failures on a real file: give up on persistence.
                self.hist_file = ':memory:'
                self.log.error("Failed to load history too many times, history will not be saved.")
            elif os.path.isfile(self.hist_file):
                # move the file out of the way
                base, ext = os.path.splitext(self.hist_file)
                size = os.stat(self.hist_file).st_size
                if size >= _SAVE_DB_SIZE:
                    # if there's significant content, avoid clobbering
                    now = datetime.datetime.now().isoformat().replace(':', '.')
                    newpath = base + '-corrupt-' + now + ext
                    # don't clobber previous corrupt backups
                    for i in range(100):
                        if not os.path.isfile(newpath):
                            break
                        else:
                            newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
                else:
                    # not much content, possibly empty; don't worry about clobbering
                    # maybe we should just delete it?
                    newpath = base + '-corrupt' + ext
                os.rename(self.hist_file, newpath)
                self.log.error("History file was moved to %s and a new file created.", newpath)
            self.init_db()
            return []
        else:
            # Failed with :memory:, something serious is wrong
            raise
class HistoryAccessorBase(LoggingConfigurable):
    """An abstract class for History Accessors.

    Defines the read-only query interface (`get_tail`, `search`, `get_range`,
    `get_range_by_str`); concrete subclasses such as `HistoryAccessor`
    implement the storage backend.
    """
    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        raise NotImplementedError
    def search(self, pattern="*", raw=True, search_raw=True,
               output=False, n=None, unique=False):
        raise NotImplementedError
    def get_range(self, session, start=1, stop=None, raw=True,output=False):
        raise NotImplementedError
    def get_range_by_str(self, rangestr, raw=True, output=False):
        raise NotImplementedError
class HistoryAccessor(HistoryAccessorBase):
    """Access the history database without adding to it.

    This is intended for use by standalone history tools. IPython shells use
    HistoryManager, below, which is a subclass of this."""
    # counter for init_db retries, so we don't keep trying over and over
    _corrupt_db_counter = 0
    # after two failures, fallback on :memory:
    _corrupt_db_limit = 2
    # String holding the path to the history file
    hist_file = Unicode(
        help="""Path to file to use for SQLite history database.
        By default, IPython will put the history database in the IPython
        profile directory. If you would rather share one history among
        profiles, you can set this value in each, so that they are consistent.
        Due to an issue with fcntl, SQLite is known to misbehave on some NFS
        mounts. If you see IPython hanging, try setting this to something on a
        local disk, e.g::
            ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
        you can also use the specific value `:memory:` (including the colon
        at both end but not the back ticks), to avoid creating an history file.
        """).tag(config=True)
    enabled = Bool(True,
        help="""enable the SQLite history
        set enabled=False to disable the SQLite history,
        in which case there will be no stored history, no SQLite connection,
        and no background saving thread. This may be necessary in some
        threaded environments where IPython is embedded.
        """
    ).tag(config=True)
    connection_options = Dict(
        help="""Options for configuring the SQLite connection
        These options are passed as keyword args to sqlite3.connect
        when establishing database conenctions.
        """
    ).tag(config=True)
    # The SQLite database
    db = Any()
    @observe('db')
    def _db_changed(self, change):
        """validate the db, since it can be an Instance of two different types"""
        new = change['new']
        connection_types = (DummyDB,)
        if sqlite3 is not None:
            connection_types = (DummyDB, sqlite3.Connection)
        if not isinstance(new, connection_types):
            msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
                    (self.__class__.__name__, new)
            raise TraitError(msg)
    def __init__(self, profile='default', hist_file=u'', **traits):
        """Create a new history accessor.

        Parameters
        ----------
        profile : str
          The name of the profile from which to open history.
        hist_file : str
          Path to an SQLite history database stored by IPython. If specified,
          hist_file overrides profile.
        config : :class:`~traitlets.config.loader.Config`
          Config object. hist_file can also be set through this.
        """
        # We need a pointer back to the shell for various tasks.
        super(HistoryAccessor, self).__init__(**traits)
        # defer setting hist_file from kwarg until after init,
        # otherwise the default kwarg value would clobber any value
        # set by config
        if hist_file:
            self.hist_file = hist_file
        if self.hist_file == u'':
            # No one has set the hist_file, yet.
            self.hist_file = self._get_hist_file_name(profile)
        if sqlite3 is None and self.enabled:
            warn("IPython History requires SQLite, your history will not be saved")
            self.enabled = False
        self.init_db()
    def _get_hist_file_name(self, profile='default'):
        """Find the history file for the given profile name.

        This is overridden by the HistoryManager subclass, to use the shell's
        active profile.

        Parameters
        ----------
        profile : str
          The name of a profile which has a history file.
        """
        return os.path.join(locate_profile(profile), 'history.sqlite')
    @catch_corrupt_db
    def init_db(self):
        """Connect to the database, and create tables if necessary."""
        if not self.enabled:
            # History disabled: swap in the no-op backend.
            self.db = DummyDB()
            return
        # use detect_types so that timestamps return datetime objects
        kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
        kwargs.update(self.connection_options)
        self.db = sqlite3.connect(self.hist_file, **kwargs)
        self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
                        primary key autoincrement, start timestamp,
                        end timestamp, num_cmds integer, remark text)""")
        self.db.execute("""CREATE TABLE IF NOT EXISTS history
                (session integer, line integer, source text, source_raw text,
                PRIMARY KEY (session, line))""")
        # Output history is optional, but ensure the table's there so it can be
        # enabled later.
        self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
                        (session integer, line integer, output text,
                        PRIMARY KEY (session, line))""")
        self.db.commit()
        # success! reset corrupt db count
        self._corrupt_db_counter = 0
    def writeout_cache(self):
        """Overridden by HistoryManager to dump the cache before certain
        database lookups."""
        pass
    ## -------------------------------
    ## Methods for retrieving history:
    ## -------------------------------
    def _run_sql(self, sql, params, raw=True, output=False):
        """Prepares and runs an SQL query for the history database.

        Parameters
        ----------
        sql : str
          Any filtering expressions to go after SELECT ... FROM ...
        params : tuple
          Parameters passed to the SQL query (to replace "?")
        raw, output : bool
          See :meth:`get_range`

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        toget = 'source_raw' if raw else 'source'
        sqlfrom = "history"
        if output:
            # Join against the output table so each row also carries output.
            sqlfrom = "history LEFT JOIN output_history USING (session, line)"
            toget = "history.%s, output_history.output" % toget
        cur = self.db.execute("SELECT session, line, %s FROM %s " %\
                                (toget, sqlfrom) + sql, params)
        if output:  # Regroup into 3-tuples, and parse JSON
            return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
        return cur
    @needs_sqlite
    @catch_corrupt_db
    def get_session_info(self, session):
        """Get info about a session.

        Parameters
        ----------
        session : int
            Session number to retrieve.

        Returns
        -------
        session_id : int
            Session ID number
        start : datetime
            Timestamp for the start of the session.
        end : datetime
            Timestamp for the end of the session, or None if IPython crashed.
        num_cmds : int
            Number of commands run, or None if IPython crashed.
        remark : unicode
            A manually set description.
        """
        query = "SELECT * from sessions where session == ?"
        return self.db.execute(query, (session,)).fetchone()
    @catch_corrupt_db
    def get_last_session_id(self):
        """Get the last session ID currently in the database.

        Within IPython, this should be the same as the value stored in
        :attr:`HistoryManager.session_number`.
        """
        for record in self.get_tail(n=1, include_latest=True):
            return record[0]
    @catch_corrupt_db
    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        """Get the last n lines from the history database.

        Parameters
        ----------
        n : int
          The number of lines to get
        raw, output : bool
          See :meth:`get_range`
        include_latest : bool
          If False (default), n+1 lines are fetched, and the latest one
          is discarded. This is intended to be used where the function
          is called by a user command, which it should not return.

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        self.writeout_cache()
        if not include_latest:
            # fetch one extra row so the caller's own command can be dropped
            n += 1
        cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
                            (n,), raw=raw, output=output)
        if not include_latest:
            return reversed(list(cur)[1:])
        return reversed(list(cur))
    @catch_corrupt_db
    def search(self, pattern="*", raw=True, search_raw=True,
               output=False, n=None, unique=False):
        """Search the database using unix glob-style matching (wildcards
        * and ?).

        Parameters
        ----------
        pattern : str
          The wildcarded pattern to match when searching
        search_raw : bool
          If True, search the raw input, otherwise, the parsed input
        raw, output : bool
          See :meth:`get_range`
        n : None or int
          If an integer is given, it defines the limit of
          returned entries.
        unique : bool
          When it is true, return only unique entries.

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        tosearch = "source_raw" if search_raw else "source"
        if output:
            tosearch = "history." + tosearch
        self.writeout_cache()
        sqlform = "WHERE %s GLOB ?" % tosearch
        params = (pattern,)
        if unique:
            # GROUP BY collapses duplicate inputs to a single row.
            sqlform += ' GROUP BY {0}'.format(tosearch)
        if n is not None:
            # Limit from the newest end, then reverse below so results are
            # returned oldest-first.
            sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
            params += (n,)
        elif unique:
            sqlform += " ORDER BY session, line"
        cur = self._run_sql(sqlform, params, raw=raw, output=output)
        if n is not None:
            return reversed(list(cur))
        return cur
    @catch_corrupt_db
    def get_range(self, session, start=1, stop=None, raw=True,output=False):
        """Retrieve input by session.

        Parameters
        ----------
        session : int
            Session number to retrieve.
        start : int
            First line to retrieve.
        stop : int
            End of line range (excluded from output itself). If None, retrieve
            to the end of the session.
        raw : bool
            If True, return untranslated input
        output : bool
            If True, attempt to include output. This will be 'real' Python
            objects for the current session, or text reprs from previous
            sessions if db_log_output was enabled at the time. Where no output
            is found, None is used.

        Returns
        -------
        entries
          An iterator over the desired lines. Each line is a 3-tuple, either
          (session, line, input) if output is False, or
          (session, line, (input, output)) if output is True.
        """
        if stop:
            lineclause = "line >= ? AND line < ?"
            params = (session, start, stop)
        else:
            lineclause = "line>=?"
            params = (session, start)
        return self._run_sql("WHERE session==? AND %s" % lineclause,
                             params, raw=raw, output=output)
    def get_range_by_str(self, rangestr, raw=True, output=False):
        """Get lines of history from a string of ranges, as used by magic
        commands %hist, %save, %macro, etc.

        Parameters
        ----------
        rangestr : str
          A string specifying ranges, e.g. "5 ~2/1-4". See
          :func:`magic_history` for full details.
        raw, output : bool
          As :meth:`get_range`

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        for sess, s, e in extract_hist_ranges(rangestr):
            for line in self.get_range(sess, s, e, raw=raw, output=output):
                yield line
class HistoryManager(HistoryAccessor):
    """A class to organize all history-related functionality in one place.

    In addition to the read-only access provided by `HistoryAccessor`, this
    records the running shell's input and output, caches writes, and hands
    the cache to a background `HistorySavingThread` for persistence.
    """
    # Public interface

    # An instance of the IPython shell we are attached to
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
    # Lists to hold processed and raw history. These start with a blank entry
    # so that we can index them starting from 1
    input_hist_parsed = List([""])
    input_hist_raw = List([""])
    # A list of directories visited during session
    dir_hist = List()
    @default('dir_hist')
    def _dir_hist_default(self):
        try:
            return [os.getcwd()]
        except OSError:
            # cwd may have been deleted; start with an empty directory history
            return []
    # A dict of output history, keyed with ints from the shell's
    # execution count.
    output_hist = Dict()
    # The text/plain repr of outputs.
    output_hist_reprs = Dict()
    # The number of the current session in the history database
    session_number = Integer()
    db_log_output = Bool(False,
        help="Should the history database include output? (default: no)"
    ).tag(config=True)
    db_cache_size = Integer(0,
        help="Write to database every x commands (higher values save disk access & power).\n"
        "Values of 1 or less effectively disable caching."
    ).tag(config=True)
    # The input and output caches
    db_input_cache = List()
    db_output_cache = List()
    # History saving in separate thread
    save_thread = Instance('IPython.core.history.HistorySavingThread',
                           allow_none=True)
    save_flag = Instance(threading.Event, allow_none=True)
    # Private interface
    # Variables used to store the three last inputs from the user. On each new
    # history update, we populate the user's namespace with these, shifted as
    # necessary.
    _i00 = Unicode(u'')
    _i = Unicode(u'')
    _ii = Unicode(u'')
    _iii = Unicode(u'')
    # A regex matching all forms of the exit command, so that we don't store
    # them in the history (it's annoying to rewind the first entry and land on
    # an exit call).
    _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
    def __init__(self, shell=None, config=None, **traits):
        """Create a new history manager associated with a shell instance.
        """
        # We need a pointer back to the shell for various tasks.
        super(HistoryManager, self).__init__(shell=shell, config=config,
            **traits)
        self.save_flag = threading.Event()
        self.db_input_cache_lock = threading.Lock()
        self.db_output_cache_lock = threading.Lock()
        try:
            self.new_session()
        except OperationalError:
            self.log.error("Failed to create history session in %s. History will not be saved.",
                           self.hist_file, exc_info=True)
            self.hist_file = ':memory:'
        if self.enabled and self.hist_file != ':memory:':
            # Only start the background writer when there is a real file.
            self.save_thread = HistorySavingThread(self)
            self.save_thread.start()
    def _get_hist_file_name(self, profile=None):
        """Get default history file name based on the Shell's profile.

        The profile parameter is ignored, but must exist for compatibility with
        the parent class."""
        profile_dir = self.shell.profile_dir.location
        return os.path.join(profile_dir, 'history.sqlite')
    @needs_sqlite
    def new_session(self, conn=None):
        """Get a new session number."""
        if conn is None:
            conn = self.db
        with conn:
            cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
                            NULL, "") """, (datetime.datetime.now(),))
            # The autoincremented rowid becomes our session number.
            self.session_number = cur.lastrowid
    def end_session(self):
        """Close the database session, filling in the end time and line count."""
        self.writeout_cache()
        with self.db:
            self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
                            session==?""", (datetime.datetime.now(),
                            len(self.input_hist_parsed)-1, self.session_number))
        self.session_number = 0
    def name_session(self, name):
        """Give the current session a name in the history database."""
        with self.db:
            self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
                            (name, self.session_number))
    def reset(self, new_session=True):
        """Clear the session history, releasing all object references, and
        optionally open a new session."""
        self.output_hist.clear()
        # The directory history can't be completely empty
        self.dir_hist[:] = [os.getcwd()]
        if new_session:
            if self.session_number:
                self.end_session()
            self.input_hist_parsed[:] = [""]
            self.input_hist_raw[:] = [""]
            self.new_session()
    # ------------------------------
    # Methods for retrieving history
    # ------------------------------
    def get_session_info(self, session=0):
        """Get info about a session.

        Parameters
        ----------
        session : int
            Session number to retrieve. The current session is 0, and negative
            numbers count back from current session, so -1 is the previous session.

        Returns
        -------
        session_id : int
            Session ID number
        start : datetime
            Timestamp for the start of the session.
        end : datetime
            Timestamp for the end of the session, or None if IPython crashed.
        num_cmds : int
            Number of commands run, or None if IPython crashed.
        remark : unicode
            A manually set description.
        """
        if session <= 0:
            session += self.session_number
        return super(HistoryManager, self).get_session_info(session=session)
    def _get_range_session(self, start=1, stop=None, raw=True, output=False):
        """Get input and output history from the current session. Called by
        get_range, and takes similar parameters."""
        input_hist = self.input_hist_raw if raw else self.input_hist_parsed
        n = len(input_hist)
        if start < 0:
            start += n
        if not stop or (stop > n):
            stop = n
        elif stop < 0:
            stop += n
        for i in range(start, stop):
            if output:
                line = (input_hist[i], self.output_hist_reprs.get(i))
            else:
                line = input_hist[i]
            # Session 0 denotes the in-memory current session.
            yield (0, i, line)
    def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
        """Retrieve input by session.

        Parameters
        ----------
        session : int
            Session number to retrieve. The current session is 0, and negative
            numbers count back from current session, so -1 is previous session.
        start : int
            First line to retrieve.
        stop : int
            End of line range (excluded from output itself). If None, retrieve
            to the end of the session.
        raw : bool
            If True, return untranslated input
        output : bool
            If True, attempt to include output. This will be 'real' Python
            objects for the current session, or text reprs from previous
            sessions if db_log_output was enabled at the time. Where no output
            is found, None is used.

        Returns
        -------
        entries
          An iterator over the desired lines. Each line is a 3-tuple, either
          (session, line, input) if output is False, or
          (session, line, (input, output)) if output is True.
        """
        if session <= 0:
            session += self.session_number
        if session==self.session_number:  # Current session
            return self._get_range_session(start, stop, raw, output)
        return super(HistoryManager, self).get_range(session, start, stop, raw,
                                                     output)
    ## ----------------------------
    ## Methods for storing history:
    ## ----------------------------
    def store_inputs(self, line_num, source, source_raw=None):
        """Store source and raw input in history and create input cache
        variables ``_i*``.

        Parameters
        ----------
        line_num : int
          The prompt number of this input.
        source : str
          Python input.
        source_raw : str, optional
          If given, this is the raw input without any IPython transformations
          applied to it. If not given, ``source`` is used.
        """
        if source_raw is None:
            source_raw = source
        source = source.rstrip('\n')
        source_raw = source_raw.rstrip('\n')
        # do not store exit/quit commands
        if self._exit_re.match(source_raw.strip()):
            return
        self.input_hist_parsed.append(source)
        self.input_hist_raw.append(source_raw)
        with self.db_input_cache_lock:
            self.db_input_cache.append((line_num, source, source_raw))
            # Trigger to flush cache and write to DB.
            if len(self.db_input_cache) >= self.db_cache_size:
                self.save_flag.set()
        # update the auto _i variables
        self._iii = self._ii
        self._ii = self._i
        self._i = self._i00
        self._i00 = source_raw
        # hackish access to user namespace to create _i1,_i2... dynamically
        new_i = '_i%s' % line_num
        to_main = {'_i': self._i,
                   '_ii': self._ii,
                   '_iii': self._iii,
                   new_i : self._i00 }
        if self.shell is not None:
            self.shell.push(to_main, interactive=False)
    def store_output(self, line_num):
        """If database output logging is enabled, this saves all the
        outputs from the indicated prompt number to the database. It's
        called by run_cell after code has been executed.

        Parameters
        ----------
        line_num : int
          The line number from which to save outputs
        """
        if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
            return
        output = self.output_hist_reprs[line_num]
        with self.db_output_cache_lock:
            self.db_output_cache.append((line_num, output))
        if self.db_cache_size <= 1:
            # Caching disabled: flush immediately.
            self.save_flag.set()
    def _writeout_input_cache(self, conn):
        # Flush cached inputs in a single transaction on ``conn``.
        with conn:
            for line in self.db_input_cache:
                conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
                             (self.session_number,)+line)
    def _writeout_output_cache(self, conn):
        # Flush cached outputs in a single transaction on ``conn``.
        with conn:
            for line in self.db_output_cache:
                conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
                             (self.session_number,)+line)
    @needs_sqlite
    def writeout_cache(self, conn=None):
        """Write any entries in the cache to the database."""
        if conn is None:
            conn = self.db
        with self.db_input_cache_lock:
            try:
                self._writeout_input_cache(conn)
            except sqlite3.IntegrityError:
                # Duplicate (session, line) key: start a fresh session and
                # retry once so history isn't silently lost.
                self.new_session(conn)
                print("ERROR! Session/line number was not unique in",
                      "database. History logging moved to new session",
                      self.session_number)
                try:
                    # Try writing to the new session. If this fails, don't
                    # recurse
                    self._writeout_input_cache(conn)
                except sqlite3.IntegrityError:
                    pass
            finally:
                self.db_input_cache = []
        with self.db_output_cache_lock:
            try:
                self._writeout_output_cache(conn)
            except sqlite3.IntegrityError:
                print("!! Session/line number for output was not unique",
                      "in database. Output will not be stored.")
            finally:
                self.db_output_cache = []
class HistorySavingThread(threading.Thread):
    """This thread takes care of writing history to the database, so that
    the UI isn't held up while that happens.

    It waits for the HistoryManager's save_flag to be set, then writes out
    the history cache. The main thread is responsible for setting the flag when
    the cache size reaches a defined threshold."""
    # daemon=True so this thread never blocks interpreter shutdown.
    daemon = True
    # Set by stop() to make run() exit after the next wakeup.
    stop_now = False
    enabled = True
    def __init__(self, history_manager):
        super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
        self.history_manager = history_manager
        self.enabled = history_manager.enabled
        # Make sure the thread is shut down cleanly at interpreter exit.
        atexit.register(self.stop)
    @needs_sqlite
    def run(self):
        # We need a separate db connection per thread:
        try:
            self.db = sqlite3.connect(self.history_manager.hist_file,
                            **self.history_manager.connection_options
            )
            while True:
                # Block until the manager signals that the cache should be
                # flushed (or that we should stop).
                self.history_manager.save_flag.wait()
                if self.stop_now:
                    self.db.close()
                    return
                self.history_manager.save_flag.clear()
                self.history_manager.writeout_cache(self.db)
        except Exception as e:
            print(("The history saving thread hit an unexpected error (%s)."
                   "History will not be written to the database.") % repr(e))
    def stop(self):
        """This can be called from the main thread to safely stop this thread.

        Note that it does not attempt to write out remaining history before
        exiting. That should be done by calling the HistoryManager's
        end_session method."""
        self.stop_now = True
        # Wake the thread up so it notices stop_now, then wait for it to die.
        self.history_manager.save_flag.set()
        self.join()
# To match, e.g. ~5/8-~2/3
range_re = re.compile(r"""
((?P<startsess>~?\d+)/)?
(?P<start>\d+)?
((?P<sep>[\-:])
 ((?P<endsess>~?\d+)/)?
 (?P<end>\d+))?
$""", re.VERBOSE)
def extract_hist_ranges(ranges_str):
    """Turn a string of history ranges into 3-tuples of (session, start, stop).

    Sessions are given relative to the current one with a ``~`` prefix
    (``~2`` is two sessions ago); a missing session means the current one
    (0).  A ``-`` separator is inclusive, ``:`` exclusive.  Unparseable
    tokens are silently skipped.

    Examples
    --------
    >>> list(extract_hist_ranges("~8/5-~7/4 2"))
    [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
    """
    for token in ranges_str.split():
        match = range_re.match(token)
        if not match:
            # Not a recognizable range: ignore the token.
            continue
        start_text = match.group("start")
        if start_text:
            first = int(start_text)
            end_text = match.group("end")
            # A bare number N means the single line N, i.e. the range (N, N+1).
            last = int(end_text) if end_text else first + 1
        else:
            # No starting line: only meaningful if a session was named, in
            # which case the whole session is requested.
            if not match.group('startsess'):
                continue
            first, last = 1, None
        if match.group("sep") == "-":  # 1-3 == 1:4 --> [1, 2, 3]
            last += 1
        first_sess_text = match.group("startsess") or "0"
        last_sess_text = match.group("endsess") or first_sess_text
        # ``~N`` counts backwards, which callers encode as a negative session.
        first_sess = int(first_sess_text.replace("~", "-"))
        last_sess = int(last_sess_text.replace("~", "-"))
        assert last_sess >= first_sess, "start session must be earlier than end session"
        if first_sess == last_sess:
            yield (first_sess, first, last)
        else:
            # Multiple sessions in one range: tail of the first session,
            # every full session in between, then the head of the last one.
            yield (first_sess, first, None)
            for sess in range(first_sess + 1, last_sess):
                yield (sess, 1, None)
            yield (last_sess, 1, last)
def _format_lineno(session, line):
"""Helper function to format line numbers properly."""
if session == 0:
return str(line)
return "%s#%s" % (session, line)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/history.py | Python | bsd-2-clause | 32,904 |
import rclpy
from rcl_interfaces.msg import ParameterDescriptor
from rclpy.node import Node
from rclpy.qos import QoSProfile
from std_msgs.msg import String
class Listener(Node):
    """ROS 2 node that subscribes to the ``chatter`` topic and logs each
    ``std_msgs/String`` message it receives."""
    def __init__(self):
        super().__init__("listener")
        # Optional boolean parameter: when true, request shutdown after the
        # first message arrives (useful for one-shot integration tests).
        parameter = self.declare_parameter(
            "exit_after_receive", False, ParameterDescriptor()
        )
        self._exit_after_receive = parameter.value
        # Polled by main() to decide when to stop spinning.
        self.should_exit = False
        # depth=1: keep only the most recent undelivered message.
        qos_profile = QoSProfile(depth=1)
        self._subscription = self.create_subscription(
            String, "chatter", self._callback, qos_profile
        )
    def _callback(self, message):
        # Log the payload and, if configured, flag the node for shutdown.
        self.get_logger().info("I heard {!r}".format(message.data))
        if self._exit_after_receive:
            self.get_logger().info(
                "Requested to exit after message received. Exiting now."
            )
            self.should_exit = True
def main(args=None):
    """Initialize rclpy, spin a Listener until it requests exit, then clean up.

    Parameters
    ----------
    args : list, optional
        Command-line arguments forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    listener = Listener()
    # Spin one callback at a time so the exit flag is honoured promptly.
    while rclpy.ok():
        rclpy.spin_once(listener)
        if listener.should_exit:
            break
    listener.destroy_node()
    rclpy.shutdown()
if __name__ == "__main__":
    main()
| chipaca/snapcraft | tests/spread/plugins/v1/colcon/snaps/colcon-talker-listener/src/listener_py/listener.py | Python | gpl-3.0 | 1,184 |
"""
"""
from abc import ABCMeta, abstractproperty, abstractmethod
from fractions import Fraction
from itertools import groupby
from operator import add, attrgetter, mul
from grendel import type_checking_enabled, sanity_checking_enabled
import string
import math
from grendel.gmath.tensor import Tensor
from grendel.util.decorators import with_flexible_arguments, typechecked, IterableOf
from grendel.util.metaprogramming import ReadOnlyAttribute
from grendel.util.overloading import listify_args
from grendel.util.strings import indented
#class FiniteDifferenceDerivativeCollection(object):
# """
# Note: Not a subclass of `Tensor`
# """
#
# ######################
# # Private Attributes #
# ######################
#
# _value_tens = None
#
# ##################
# # Initialization #
# ##################
#
# @typechecked(
# function="FiniteDifferenceFunction",
# variable_list=IterableOf('FiniteDifferenceVariable'),
# max_order=int
# )
# def __init__(self, function, variable_list, max_order):
# pass
#noinspection PyTypeChecker
class FiniteDifferenceDerivative(object):
    """ An arbitrary finite difference derivative with respect to k `FiniteDifferenceVariable` instances (not necessarily distinct)
    of a `FiniteDifferenceFunction` of an arbitrary number of `FiniteDifferenceVariable` instances whose output is a `Differentiable` instance.
    Ideally, the derivative can be calculated to an arbitrary order of robustness in the displacement, though in
    practice not all orders may be implemented yet.
    """
    ####################
    # Class Attributes #
    ####################
    # Cache of hand-generated formulas, filled lazily by load_formulas().
    formulas = []
    # Cache of machine-generated one-variable formulas, keyed by
    # (derivative order, robustness, forward?) -- see __init__ below.
    generated_single_variable_formulas = {}
    ##############
    # Attributes #
    ##############
    function = ReadOnlyAttribute('function',
        doc="""The `FiniteDifferenceFunction` instance to differentiate."""
    )
    variables = ReadOnlyAttribute('variables',
        doc="""The list of `FiniteDifferenceVariable` instances to be displaced for computation of the finite difference derivative."""
    )
    target_robustness = ReadOnlyAttribute('target_robustness',
        doc="""The minimum order of the error in the displacement. Defaults to 2."""
    )
    formula = ReadOnlyAttribute('formula',
        doc="""The FiniteDifferenceFormula object we need to use, based on the input parameters."""
    )
    orders = ReadOnlyAttribute('orders')
    ######################
    # Private Attributes #
    ######################
    # Cached derivative value (computed on demand by compute()).
    _value = None
    # Optional override for function.value_for_displacements.
    _value_function = None
    # Optional override for function.deltas_for_variables.
    _delta_function = None
    # Optional fixed displacement size applied to every variable.
    _delta = None
    # If true, use forward- rather than central-difference formulas.
    _forward = None
    ##################
    # Initialization #
    ##################
    @with_flexible_arguments(
        optional = [
            ('target_robustness', 'robustness', 'accuracy', 'order', 'correct_to_order')
        ]
    )
    @typechecked(function='FiniteDifferenceFunction')
    def __init__(self, function, *variables, **kwargs):
        """Set up a finite difference derivative of `function` with respect
        to `variables` (repeat a variable for higher-order derivatives) and
        select an appropriate displacement formula.
        """
        if len(FiniteDifferenceDerivative.formulas) == 0:
            # Load the formulas generated "by hand", which (for now, anyway) require fewer
            # displacements than the automatically generated formulas if we also need to
            # compute the lower order derivatives as well, as is the case with the computation
            # of quartic forcefields. (But not, for instance, the B tensor. So the
            # FiniteDifferenceDerivative constructor could be optimized to take a parameter
            # which specifies whether we should choose the formula with the fewest overall
            # displacements or the fewest "new" displacements not needed for smaller derivatives)
            load_formulas()
        #--------------------------------------------------------------------------------#
        # miscellanea
        self._target_robustness = kwargs.pop('target_robustness', 2)
        self._value_function = kwargs.pop('value_function', None)
        self._delta_function = kwargs.pop('delta_function', None)
        self._delta = kwargs.pop('delta', None)
        self._forward = kwargs.pop('forward', False)
        self._function = function
        #--------------------------------------------------------------------------------#
        # type checking
        if type_checking_enabled:
            if not all(isinstance(v, FiniteDifferenceVariable) for v in variables):
                raise TypeError
            if not isinstance(self.target_robustness, int):
                raise TypeError
        #--------------------------------------------------------------------------------#
        # Get the variables and the orders....
        # NOTE: ``vars`` shadows the builtin; kept as-is to avoid touching code.
        vars = listify_args(*variables)
        # Determine which formula we need
        # Sort by id so that repeats of the same variable are adjacent for groupby.
        vars = sorted(vars, key=id)
        # This is nasty, but it works...The zip(*list_of_lists) effectively "unzips"
        self._orders, self._variables = zip(
            *sorted(
                [(len(list(g)), k) for k, g in groupby(vars)],
                reverse=True)
        )
        #--------------------------------------------------------------------------------#
        # Determine which formula to use
        # This gets reused, so define a quicky function...
        def get_possibilities(formula_list):
            return [f for f in formula_list
                    if f.orders == list(self.orders)
                        and f.robustness >= self.target_robustness
                        and (f.is_forward() if self._forward else f.is_central())
            ]
        #----------------------------------------#
        # First, try and get a "hand-generated" formula
        possibilities = get_possibilities(FiniteDifferenceDerivative.formulas)
        if len(possibilities) == 0:
            # We know how to generate single variable formulas to arbitrary order, so let's do it
            n_derivative_vars = len(self.orders)
            derivative_order = sum(self.orders)
            if n_derivative_vars == 1:
                # This long name is unweildy...
                gen_dict = FiniteDifferenceDerivative.generated_single_variable_formulas
                # See if we've already generated it...
                # (central formulas bump an odd target robustness up to even)
                formula = gen_dict.get(
                    (
                        derivative_order,
                        self.target_robustness
                            + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                        self._forward
                    ),
                    None)
                if formula is None:
                    # okay, we can generate it.
                    generate_single_variable_formulas(
                        derivative_order,
                        self.target_robustness
                            + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                        self._forward)
                    formula = gen_dict[(
                        derivative_order,
                        self.target_robustness
                            + (1 if not self._forward and self.target_robustness % 2 == 1 else 0),
                        self._forward)]
                possibilities.append(formula)
                if sanity_checking_enabled:
                    possibilities = get_possibilities(possibilities)
            else:
                # we don't know how to generate these...yet...but I'm working on it!
                raise RuntimeError("Can't find formula for orders {0} and"
                                   " robustness {1}".format(
                    self.orders, self.target_robustness))
        # Use the minimum robustness for now. Later we can make it use
        # the best possible without additional calculations.
        self._formula = sorted(possibilities, key=attrgetter('robustness'))[0]
    ##############
    # Properties #
    ##############
    @property
    def value(self):
        # The derivative value; computed lazily and cached.
        if self._value is None:
            self.compute()
        return self._value
    @property
    def needed_increments(self):
        # The displacement tuples required by the chosen formula.
        return self.formula.coefficients.keys()
    #################
    # Class Methods #
    #################
    @classmethod
    def precompute_single_variable(cls, max_derivative, max_order, forward=False):
        """ Save a little bit of time by prepopulating the single variable displacement
        formulas dictionary up to `max_derivative` and `max_order`. If `forward` is True,
        the forward formulas are precomputed instead of the central ones.
        """
        generate_single_variable_formulas(max_derivative, max_order, forward)
    ###########
    # Methods #
    ###########
    def compute(self):
        """Evaluate the finite difference sum and cache it in ``self._value``."""
        #TODO handle units (efficiently!!!)
        if self._value is not None:
            return self._value
        total = None
        # Weighted sum of function values at each displacement in the formula.
        for increments, coeff in self.formula.coefficients.items():
            if self._value_function:
                tmp = self._value_function(zip(self.variables, increments))
            else:
                tmp = self.function.value_for_displacements(zip(self.variables, increments))
            if tmp is None:
                raise ValueError("the value_for_displacements method of FiniteDifferenceFunction"
                                 " '{}' returned `None` for increments {}".format(
                    self.function, increments
                ))
            if hasattr(tmp, 'value'):
                val = tmp.value * coeff
            else:
                val = tmp * coeff
            if total is not None:
                total += val
            else:
                total = val
        # Determine the step size(s) used in the denominator.
        if self._delta is not None:
            deltas = (self._delta,) * len(set(self.variables))
        elif self._delta_function:
            deltas = self._delta_function(self.variables)
        else:
            deltas = self.function.deltas_for_variables(self.variables)
        # Divide by delta_i ** order_i for each distinct variable.
        # NOTE(review): bare ``reduce`` assumes the Python 2 builtin (it is not
        # imported from functools here) -- confirm before porting to Python 3.
        if isinstance(total, Fraction):
            # Try and keep it that way
            denom = reduce(mul, [d**exp for d, exp in zip(deltas, self.orders)])
            total /= denom
        else:
            total /= reduce(mul, Tensor(deltas)**Tensor(self.orders))
        self._value = total
class FiniteDifferenceFunction(object):
    """Abstract interface for functions that can be numerically differentiated.

    Concrete subclasses (Python 2 style, via ``__metaclass__``) report which
    variables they depend on and evaluate themselves at displaced geometries.
    """
    __metaclass__ = ABCMeta

    @property
    def variables(self):
        """The `FiniteDifferenceVariable` instances this function depends on."""
        raise NotImplementedError

    @abstractmethod
    def value_for_displacements(self, pairs):
        """Value at the displacement given by (variable, n_deltas) `pairs`."""
        return NotImplemented

    def deltas_for_variables(self, vars):
        """Displacement step size for each distinct variable in `vars`."""
        if not hasattr(self, 'delta'):
            raise NotImplementedError
        return (self.delta,) * len(set(vars))
class FiniteDifferenceVariable(object):
    """Marker base class for coordinates a function can be displaced along.

    Purely a tag type (Python 2 abstract base): concrete variables inherit
    from it so the finite difference machinery can recognize them.
    """
    __metaclass__ = ABCMeta
class Differentiable(object):
    """Abstract base for quantities that derivatives may be taken of.

    The defaults assume the instance itself behaves numerically (supports
    addition, subtraction, and scaling by a float), so ``value`` is simply
    the object and the ``shape`` is scalar.
    """
    __metaclass__ = ABCMeta

    @property
    def value(self):
        """Numeric value of the differentiable property (the object itself)."""
        return self

    @property
    def shape(self):
        """Tensor shape of the property; scalar (empty tuple) by default."""
        return ()
# Minimal concrete Differentiable: a float that carries the Differentiable
# interface (scalar shape, value == itself).
class FloatShell(float, Differentiable):
    pass
class FiniteDifferenceFormula(object):
    """ A formula for a given finite difference derivative.
    """
    #############
    # Constants #
    #############
    # Stencil direction flags; see the `direction` attribute below.
    CENTRAL = 0
    FORWARD = 1
    ##############
    # Attributes #
    ##############
    # Tuple of derivative orders, one entry per differentiated variable.
    orders = ReadOnlyAttribute('orders')
    # Order (in the displacement) to which the formula is correct.
    robustness = ReadOnlyAttribute('robustness')
    coefficients = ReadOnlyAttribute('coefficients',
        """ displacement => coefficient dictionary, where
        the elements of the displacement tuple correspond
        to the orders tuple
        """
    )
    # One of CENTRAL or FORWARD; assigned per-instance in __init__.
    direction = None
    ##################
    # Initialization #
    ##################
def __init__(self, orders, robustness, pairs, forward=False):
self._orders = orders
self._robustness = robustness
# pairs is a list of (coefficient, displacement tuple) tuples
self._coefficients = dict((p[1], p[0]) for p in pairs)
if forward:
self.direction = FiniteDifferenceFormula.FORWARD
else:
self.direction = FiniteDifferenceFormula.CENTRAL
    ###################
    # Special Methods #
    ###################
    #------------------------#
    # Output Representations #
    #------------------------#
    #def __eq__(self, other):
    #    return self._robustness
    def __str__(self):
        """Render the formula as wrapped pseudo-math, e.g.
        ``[F(x + h_x) - F(x - h_x)] / (h_x)``.
        """
        max_width=80
        indent_size = 4
        function_name = 'F'
        disp_name = 'h'
        #----------------------------------------#
        def var(idx):
            # Variable letters: x, y, z, then the rest of the alphabet
            # backwards; h and F are excluded since they are already used.
            vars = 'xyz' + str(reversed(string.ascii_lowercase[:-3]))
            vars = vars.replace(disp_name, '')
            vars = vars.replace(function_name, '')
            try:
                return vars[idx]
            except IndexError:
                # NOTE(review): idx is an int here, so 'x_' + idx would raise
                # TypeError if this fallback is ever reached -- confirm.
                return 'x_' + idx
        #----------------------------------------#
        flines = []
        curr_line = '['
        for dispnum, (disp, coeff) in enumerate(sorted(self.coefficients.items())):
            # Zero-coefficient terms are omitted entirely.
            if coeff == 0:
                continue
            if coeff.denominator == 1:
                # Only print non-unit coefficients
                if abs(coeff) != 1:
                    disp_f = str(abs(coeff))
                else:
                    disp_f = ''
            else:
                disp_f = '(' + str(abs(coeff)) + ')'
            disp_f += function_name + '('
            # Spell out each variable with its signed displacement count.
            for idx, d in enumerate(disp):
                disp_f += var(idx)
                if d == -1:
                    disp_f += ' - ' + disp_name + '_' + var(idx)
                elif d == 1:
                    disp_f += ' + ' + disp_name + '_' + var(idx)
                elif d < 0:
                    disp_f += ' - ' + str(abs(d)) + disp_name + '_' + var(idx)
                elif d > 0:
                    disp_f += ' + ' + str(d) + disp_name + '_' + var(idx)
                if d is not disp[-1]:
                    disp_f += ', '
            disp_f += ')'
            # Sign handling: the leading term gets a bare '-', later terms get
            # ' + ' / ' - ' separators.
            if dispnum != 0:
                if coeff < 0:
                    disp_f = ' - ' + disp_f
                else:
                    disp_f = ' + ' + disp_f
            elif coeff < 0: # and dispnum == 0
                disp_f = '-' + disp_f
            # Wrap to max_width; continuation lines reserve the indent.
            if len(curr_line + disp_f) > (max_width if len(flines) == 0 else max_width - indent_size):
                if curr_line != '':
                    flines.append(curr_line)
                    curr_line = disp_f
                else:
                    # one term per line is the best we can do, and we still overflow...yikes...
                    flines.append(disp_f)
            else:
                curr_line += disp_f
        # Build the denominator: the product of the step sizes raised to the
        # per-variable derivative orders.
        denom = '] / '
        if len(self.orders) > 1:
            denom += '('
        for idx, exp in enumerate(self.orders):
            denom += disp_name + '_' + var(idx)
            if exp > 1:
                denom += '^' + str(exp)
        if len(self.orders) > 1:
            denom += ')'
        if len(curr_line + denom) > (max_width if len(flines) == 0 else max_width - indent_size):
            if curr_line != '':
                flines.append(curr_line)
                curr_line = denom
            else:
                curr_line = denom
        else:
            curr_line += denom
        flines.append(curr_line)
        return '{cent} finite difference formula for df/{dvars},' \
               ' correct to order {order} in displacement ({terms} terms):\n{formula}'.format(
            cent='Central' if self.direction == FiniteDifferenceFormula.CENTRAL else 'Forward',
            order = self.robustness,
            dvars=''.join('d' + var(idx) + ('^'+str(exp) if exp > 1 else '')
                          for idx, exp in enumerate(self.orders)),
            formula=indented(('\n' + ' ' * indent_size).join(flines), indent_size),
            terms=len([c for c in self.coefficients.values() if c != 0])
        )
    def __repr__(self):
        """Eval-able-looking representation listing the nonzero terms.

        Note: terms are ordered by the space-joined string form of the
        displacement tuple, i.e. lexicographically, not numerically.
        """
        return "FiniteDifferenceFormula({dord}, {rob}, [\n{terms}\n])".format(
            dord=repr(self.orders),
            rob=self.robustness,
            terms=indented(
                ',\n'.join(repr((coeff, deltas)) for deltas, coeff in
                    sorted(self.coefficients.items(), key=lambda x: ' '.join(str(i) for i in x[0]))
                    if coeff != 0)
            )
        )
##############
# Properties #
##############
@property
def order(self):
return reduce(add, self.orders)
@property
def needed_displacements(self):
return self.coefficients.items()
    ###########
    # Methods #
    ###########
    #-----------------#
    # Inquiry methods #
    #-----------------#
    def is_forward(self):
        # True when this stencil uses only forward (one-sided) displacements.
        return self.direction == FiniteDifferenceFormula.FORWARD
    def is_central(self):
        # True when this stencil is symmetric about the expansion point.
        return self.direction == FiniteDifferenceFormula.CENTRAL
def generate_single_variable_formulas(max_derivative, max_order, forward=False):
    """Generate and cache all single-variable stencils up to the given limits.

    Runs Fornberg's algorithm once for the largest needed grid, then slices
    out a `FiniteDifferenceFormula` for every (derivative, robustness) pair,
    storing each under (derivative, robustness, forward) in
    ``FiniteDifferenceDerivative.generated_single_variable_formulas``.
    """
    # generate the 'number of deltas' list
    fornberg_N = max_derivative + max_order - 1
    if forward:
        disps = range(fornberg_N + 1)
    else:
        # Central grids alternate outward from 0: [0, 1, -1, 2, -2, ...]
        disps = sum(([i, -i] for i in range(1, int(math.ceil(float(fornberg_N)/2.0)) + 1)), [0])
    # Generate the formulas!
    deltas = fornberg_coefficient_generator(max_derivative, fornberg_N, 0, *disps)
    # Long name is too unweildy...
    gen_formulas = FiniteDifferenceDerivative.generated_single_variable_formulas
    # Parse out the coefficients from the three-dimensional array returned by Fornberg's algorithm
    # into FiniteDifferenceFormula instances
    for derivative in xrange(1, max_derivative+1):
        for n in xrange(derivative, max_derivative + max_order):
            robustness = n - derivative + 1
            # Central formulas only exist at even robustness; skip odd ones.
            if not forward and robustness % 2 == 1:
                continue
            terms = []
            coeffs = deltas[derivative][n]
            for nu, coeff in enumerate(coeffs):
                terms.append((coeff, (disps[nu],)))
            # Just overwrite what was there (even though it is exactly the same,
            # it takes longer and is more complicated to check for it's existence
            # than to just overwrite what's already there, since we have it anyway)
            gen_formulas[(derivative, robustness, forward)] = \
                FiniteDifferenceFormula([derivative], robustness, terms, forward=forward)
def fornberg_coefficient_generator(M, N, x0, *alpha):
    """
    From Fornberg, Bengt. Mathematics of Computation 51 (1988), p. 699-706
    ("Generation of Finite Difference Formulas on Arbitrarily Spaced Grids").

    M: "the order of the highest derivative we wish to approximate"
    N: given "a set of N + 1 grid points"
    x0: the point at which we wish to take the derivative
    alpha: alpha_i is the ith grid point

    Returns delta, where delta[m][n][nu] is the weight of f(alpha[nu]) in the
    approximation of the m-th derivative at x0 using grid points
    alpha[0..n].  All arithmetic is carried out with exact `Fraction`s.
    """
    # delta[m][n] has n+1 slots, one per grid point in use so far.
    delta = [[[0 for nu in range(n + 1)] for n in range(N + 1)] for m in range(M + 1)]
    delta[0][0][0] = Fraction(1)
    # Convert the grid points to exact rationals once, up front (the original
    # re-converted the whole tuple on every pass of the n loop).
    alpha = tuple(Fraction(a) for a in alpha)
    c1 = Fraction(1)
    for n in range(1, N + 1):
        c2 = Fraction(1)
        for nu in range(n):
            c3 = alpha[n] - alpha[nu]
            c2 = c2 * c3
            for m in range(min(n, M) + 1):
                # Guarding with m > 0 (the m == 0 term vanishes anyway) also
                # avoids a harmless delta[-1] wraparound index at m == 0.
                delta[m][n][nu] = ((alpha[n] - x0) * delta[m][n-1][nu]
                                   - (m * delta[m-1][n-1][nu] if m > 0 else 0)) / c3
        for m in range(min(n, M) + 1):
            delta[m][n][n] = c1 / c2 * ((m * delta[m-1][n-1][n-1] if m > 0 else 0)
                                        - (alpha[n-1] - x0) * delta[m][n-1][n-1])
        c1 = c2
    return delta
#####################
# Dependent Imports #
#####################
from grendel.differentiation.fdiff_formulas import load_formulas
| spring01/libPSI | lib/python/grendel/differentiation/finite_difference.py | Python | gpl-2.0 | 20,260 |
#(C) Copyright Syd Logan 2016-2019
#(C) Copyright Thousand Smiles Foundation 2016-2019
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from patient.models import *
from clinic.models import *
from routingslip.models import *
from datetime import *
from django.core import serializers
from django.db.models import Q
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
from common.decorators import *
import json
import sys
class PatientView(APIView):
    """CRUD endpoint for patient records; token authentication required."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
def toState(self, statecode):
ret = None
for x in Patient.STATE_CHOICES:
if x[0] == statecode:
ret = x[1]
return ret
def serialize(self, entry):
m = {}
m["id"] = entry.id
m["paternal_last"] = entry.paternal_last
m["maternal_last"] = entry.maternal_last
m["first"] = entry.first
m["middle"] = entry.middle
m["suffix"] = entry.suffix
m["prefix"] = entry.prefix
m["dob"] = entry.dob.strftime("%m/%d/%Y")
if entry.gender == "f":
m["gender"] = "Female"
else:
m["gender"] = "Male"
m["street1"] = entry.street1
m["street2"] = entry.street2
m["city"] = entry.city
m["colonia"] = entry.colonia
m["state"] = self.toState(entry.state)
m["phone1"] = entry.phone1
m["phone2"] = entry.phone2
m["email"] = entry.email
m["emergencyfullname"] = entry.emergencyfullname
m["emergencyphone"] = entry.emergencyphone
m["emergencyemail"] = entry.emergencyemail
m["curp"] = entry.curp
m["oldid"] = entry.oldid
return m
    @log_request
    def get(self, request, patient_id=None, format=None):
        """GET a single patient by id, or search for patient ids.

        With `patient_id`: returns the serialized patient or 404.
        Without it: filters on optional query params (name, paternal_last,
        maternal_last, first, dob, curp, oldid, gender) and returns a list of
        matching ids, optionally restricted to patients that have a routing
        slip in the clinic given by the `clinic` param.  Returns 400 for
        malformed dob/oldid/gender values.
        """
        badRequest = False
        notFound = False
        patient = None
        byClinicId = None
        aClinic = None
        if patient_id:
            try:
                patient = Patient.objects.get(id = patient_id)
            except:
                patient = None
        else:
            # look for optional arguments for searching
            byClinicId = request.GET.get("clinic", '')
            if byClinicId != '':
                aClinic = Clinic.objects.get(id=byClinicId)
                if not aClinic:
                    notFound = True
            if not notFound:
                kwargs = {}
                name = request.GET.get('name', '')
                if not name == '':
                    # Free-text name search across all four name fields.
                    try:
                        patient = Patient.objects.filter(Q(paternal_last__icontains=name) | Q(maternal_last__icontains=name) | Q(first__icontains=name) | Q(middle__icontains=name))
                    except:
                        patient = None
                else:
                    # Build an AND filter from whichever params were supplied.
                    paternal_last = request.GET.get('paternal_last', '')
                    if not paternal_last == '':
                        kwargs["paternal_last__icontains"] = paternal_last
                    maternal_last = request.GET.get('maternal_last', '')
                    if not maternal_last == '':
                        kwargs["maternal_last__icontains"] = maternal_last
                    first = request.GET.get('first', '')
                    if not first == '':
                        kwargs["first__icontains"] = first
                    dob = request.GET.get('dob', '')
                    if not dob == '':
                        # Must be exactly m/d/Y.
                        x = dob.split("/")
                        if len(x) == 3:
                            try:
                                kwargs["dob"] = datetime.strptime(dob, "%m/%d/%Y")
                            except:
                                badRequest = True
                        else:
                            badRequest = True
                    curp = request.GET.get('curp', '')
                    if not curp == '':
                        kwargs["curp__icontains"] = curp
                    oldid = request.GET.get('oldid', '')
                    if not oldid == '':
                        try:
                            kwargs["oldid"] = int(oldid)
                        except:
                            badRequest = True
                    gender = request.GET.get('gender', '')
                    if not gender == '':
                        # Accept only the display names; stored as "m"/"f".
                        if gender == "Male":
                            kwargs["gender"] = "m"
                        elif gender == "Female":
                            kwargs["gender"] = "f"
                        else:
                            badRequest = True
                    if not badRequest:
                        try:
                            patient = Patient.objects.filter(**kwargs)
                        except:
                            patient = None
        if not patient and not badRequest:
            notFound = True
        elif patient:
            if patient_id:
                # Single-record path: full serialization.
                ret = self.serialize(patient)
            else:
                # Search path: return ids only, optionally clinic-filtered.
                ret = []
                for x in patient:
                    if aClinic != None:
                        routingSlip = RoutingSlip.objects.filter(patient=x.id, clinic=aClinic)
                        if routingSlip != None and len(routingSlip) > 0:
                            ret.append(x.id)
                    else:
                        ret.append(x.id)
        if badRequest:
            return HttpResponseBadRequest()
        elif notFound:
            return HttpResponseNotFound()
        else:
            return Response(ret)
def validateState(self, state):
valid = False
ret = None
for val in Patient.STATE_CHOICES:
if val[1] == state:
valid = True
ret = val[0]
break
return valid, ret
def validatePutArgs(self, data, patient):
valid = True
if "curp" in data:
patient.curp = data["curp"]
if "oldid" in data:
patient.oldid = data["oldid"]
if "paternal_last" in data:
patient.paternal_last = data["paternal_last"]
if "maternal_last" in data:
patient.maternal_last = data["maternal_last"]
if "first" in data:
patient.first = data["first"]
if "middle" in data:
patient.middle = data["middle"]
if "suffix" in data:
patient.suffix = data["suffix"]
if "prefix" in data:
patient.prefix = data["prefix"]
if "dob" in data:
dob = data["dob"]
try:
dob = datetime.strptime(dob, "%m/%d/%Y")
patient.dob = dob
except:
try:
dob = datetime.strptime(dob, "%m-%d-%Y")
patient.dob = dob
except:
valid = False
if "gender" in data:
gender = data["gender"]
if gender != 'Female' and gender != 'Male':
valid = False
else:
if gender == "Female":
gender = "f"
else:
gender = "m"
patient.gender = gender
if "street1" in data:
patient.street1 = data["street1"]
if "street2" in data:
patient.street2 = data["street2"]
if "city" in data:
patient.city = data["city"]
if "colonia" in data:
patient.colonia = data["colonia"]
if "state" in data:
validtmp, state = self.validateState(data["state"])
if validtmp == True:
patient.state = state
else:
valid = False
if "phone1" in data:
patient.phone1 = data["phone1"]
if "phone2" in data:
patient.phone2 = data["phone2"]
if "email" in data:
patient.email = data["email"]
if "emergencyfullname" in data:
patient.emergencyfullname = data["emergencyfullname"]
if "emergencyphone" in data:
patient.emergencyphone = data["emergencyphone"]
if "emergencyemail" in data:
patient.emergencyemail = data["emergencyemail"]
return valid, patient
    @log_request
    def put(self, request, patient_id, format=None):
        """Update an existing patient from a JSON body.

        Returns {} on success, 400 on a missing id or invalid field values,
        404 if the patient does not exist, 500 on any other failure.
        """
        badRequest = False
        implError = False
        notFound = False
        if not patient_id:
            badRequest = True
        if not badRequest:
            patient = None
            try:
                patient = Patient.objects.get(id=patient_id)
            except:
                pass
            if not patient:
                notFound = True
            else:
                try:
                    data = json.loads(request.body)
                    valid, patient = self.validatePutArgs(data, patient)
                    if valid:
                        patient.save()
                    else:
                        badRequest = True
                except:
                    implError = True
                    # implMsg is only bound on this error path; the
                    # HttpResponseServerError below is guarded by implError.
                    implMsg = sys.exc_info()[0]
        if badRequest:
            return HttpResponseBadRequest()
        if notFound:
            return HttpResponseNotFound()
        if implError:
            return HttpResponseServerError(implMsg)
        else:
            return Response({})
def validatePostArgs(self, data):
valid = True
kwargs = data
required = ["paternal_last",
"maternal_last",
"first",
"middle",
"suffix",
"prefix",
"dob",
"gender",
"street1",
"street2",
"city",
"colonia",
"state",
"phone1",
"phone2",
"email",
"emergencyfullname",
"emergencyphone",
"emergencyemail",
"curp"]
optional = ["oldid"]
for key, val in data.iteritems():
if not key in required and not key in optional:
valid = False
break
for k in required:
if not k in data:
valid = False
break
if valid:
try:
validtmp, state = self.validateState(data["state"])
if validtmp == True:
kwargs["state"] = state
else:
valid = False
try:
kwargs["dob"] = datetime.strptime(data["dob"], '%m/%d/%Y')
except ValueError:
try:
kwargs["dob"] = datetime.strptime(data["dob"], '%m-%d-%Y')
except ValueError:
valid = False
if data["gender"] in ["Male", "Female"]:
kwargs["gender"] = data["gender"][0].lower()
else:
valid = False;
except:
valid = False
return valid, kwargs
@log_request
def post(self, request, format=None):
badRequest = False
implError = False
duplicatePatient = False
data = json.loads(request.body)
valid, kwargs = self.validatePostArgs(data)
if not valid:
badRequest = True
if not badRequest:
patient = None
# see if the patient already exists, using core subset of data
try:
patient = Patient.objects.filter(paternal_last=kwargs["paternal_last"],
first=kwargs["first"],
dob=kwargs["dob"],
gender=kwargs["gender"])
if patient and len(patient) > 0:
badRequest = True
duplicatePatient = True
except:
implMsg = "Patient.objects.filter {} {}".format(sys.exc_info()[0], data)
implError = True
if not badRequest and not implError:
try:
patient = Patient(**kwargs)
if patient:
patient.save()
else:
implMsg = "Unable to create patient"
implError = True
except:
implMsg = "Patient create {} {}".format(sys.exc_info()[0], data)
implError = True
if badRequest:
if duplicatePatient:
r = HttpResponse(status=status.HTTP_409_CONFLICT, reason="Patient (%d) already exists".format(patient[0].id))
return r
else:
return HttpResponseBadRequest()
if implError:
return HttpResponseServerError(implMsg)
else:
return Response({'id': patient.id})
@log_request
def delete(self, request, patient_id=None, format=None):
patient = None
# see if the patient exists
try:
patient = Patient.objects.get(id=patient_id)
except:
patient = None
if not patient:
raise NotFound
else:
patient.delete()
return Response({})
| slogan621/tscharts | patient/views.py | Python | apache-2.0 | 14,344 |
from priorityqueueset import PriorityQueueSet
class PathFinder(object):
    """ Computes a path in a graph using the A* algorithm.

    Initialize the object and then repeatedly compute_path to
    get the path between a start point and an end point.

    The points on a graph are required to be hashable and
    comparable with __eq__. Other than that, they may be
    represented as you wish, as long as the functions
    supplied to the constructor know how to handle them.
    """
def __init__(self, successors, move_cost, heuristic_to_goal):
""" Create a new PathFinder. Provided with several
functions that represent your graph and the costs of
moving through it.
successors:
A function that receives a point as a single
argument and returns a list of "successor" points,
the points on the graph that can be reached from
the given point.
move_cost:
A function that receives two points as arguments
and returns the numeric cost of moving from the
first to the second.
heuristic_to_goal:
A function that receives a point and a goal point,
and returns the numeric heuristic estimation of
the cost of reaching the goal from the point.
"""
self.successors = successors
self.move_cost = move_cost
self.heuristic_to_goal = heuristic_to_goal
    def compute_path(self, start, goal):
        """ Compute the path between the 'start' point and the
        'goal' point.

        The path is returned as an iterator to the points,
        including the start and goal points themselves.

        If no path was found, an empty list is returned.
        """
        #
        # Implementation of the A* algorithm.
        #
        closed_set = {}
        start_node = self._Node(start)
        start_node.g_cost = 0
        start_node.f_cost = self._compute_f_cost(start_node, goal)
        open_set = PriorityQueueSet()
        open_set.add(start_node)
        while len(open_set) > 0:
            # Remove and get the node with the lowest f_score from
            # the open set
            #
            curr_node = open_set.pop_smallest()
            if curr_node.coord == goal:
                return self._reconstruct_path(curr_node)
            closed_set[curr_node] = curr_node
            for succ_coord in self.successors(curr_node.coord):
                succ_node = self._Node(succ_coord)
                succ_node.g_cost = self._compute_g_cost(curr_node, succ_node)
                succ_node.f_cost = self._compute_f_cost(succ_node, goal)
                # Nodes hash/compare by coordinate, so this skips any
                # coordinate already expanded.
                if succ_node in closed_set:
                    continue
                # NOTE(review): correctness of re-prioritizing a node already
                # in the open set depends on PriorityQueueSet.add's semantics
                # (it must report whether the node was newly added) -- confirm.
                if open_set.add(succ_node):
                    succ_node.pred = curr_node
        return []
########################## PRIVATE ##########################
def _compute_g_cost(self, from_node, to_node):
return (from_node.g_cost +
self.move_cost(from_node.coord, to_node.coord))
def _compute_f_cost(self, node, goal):
return node.g_cost + self._cost_to_goal(node, goal)
def _cost_to_goal(self, node, goal):
return self.heuristic_to_goal(node.coord, goal)
def _reconstruct_path(self, node):
""" Reconstructs the path to the node from the start node
(for which .pred is None)
"""
pth = [node.coord]
n = node
while n.pred:
n = n.pred
pth.append(n.coord)
return reversed(pth)
class _Node(object):
""" Used to represent a node on the searched graph during
the A* search.
Each Node has its coordinate (the point it represents),
a g_cost (the cumulative cost of reaching the point
from the start point), a f_cost (the estimated cost
from the start to the goal through this point) and
a predecessor Node (for path construction).
The Node is meant to be used inside PriorityQueueSet,
so it implements equality and hashinig (based on the
coordinate, which is assumed to be unique) and
comparison (based on f_cost) for sorting by cost.
"""
def __init__(self, coord, g_cost=None, f_cost=None, pred=None):
self.coord = coord
self.g_cost = g_cost
self.f_cost = f_cost
self.pred = pred
def __eq__(self, other):
return self.coord == other.coord
def __cmp__(self, other):
return cmp(self.f_cost, other.f_cost)
def __hash__(self):
return hash(self.coord)
def __str__(self):
return 'N(%s) -> g: %s, f: %s' % (self.coord, self.g_cost, self.f_cost)
def __repr__(self):
return self.__str__()
| xzmagic/code-for-blog | 2009/pygame_creeps_game/pathfinder.py | Python | unlicense | 5,242 |
'''
A condition
'''
from compares.const import register_compare, Const as const
from compares.simple import Exact
from collections import OrderedDict
from inspect import isclass, ismethod
# Module-level singleton: id(owner) -> {id(callback): [callback, args]}.
# Tracks callbacks that have been scheduled but not yet confirmed as fired.
calldict = {}
class Stack(const):
    def stack_add(self, cb, cbargs):
        '''
        Pass the values of which would be provided to the callback.
        The callback and its arguments are added to a singleton stack
        of callbacks of which should fire.
        After the natural fire-events, one or many callbacks may have been
        skipped. These members methods and functions are chacked against the
        stack. If they exist the standard runner did not call the method.
        This may occur if the callback has an error of which was lost through
        the event chain. In good operation this stack will always be empty.
        '''
        # check callback assignments. Providing flag values from
        # attr
        # We should provide this as arguments as it's flagged as special.
        # There seems to be a special little bug where:
        # if more than one event is occuring at the same time, the event
        # callback will not fire.
        # The first print line will occur and then nothing.
        #
        # I've been debugging this for a while so intead, a dictionary stack
        _id = id(self)
        # NOTE(review): when an entry for this owner already exists, the new
        # callback is NOT added to it -- looks unintended; confirm.
        if calldict.get(_id) is None:
            calldict[_id] = {
                id(cb): [ cb, cbargs ],
            }
        return _id
    def stack_call(self):
        '''
        Call every stack method within the calldict singleton.
        '''
        for missed_call in calldict:
            func_set = calldict[missed_call]
            # cb(args)
            for func_id in func_set:
                caller = func_set[func_id]
                print 'calling from stack', caller[0]
                caller[0](*caller[1])
        # NOTE(review): for/else runs whenever the loop ends without break,
        # i.e. always here; entries are also never cleared, so repeated
        # stack_call() invocations re-fire the same callbacks -- confirm.
        else:
            print 'No skipped results'
    def stack_remove(self, _id):
        '''
        Remove the stack entity by reference of the provided _id from the
        calldict singleton.
        This _id was previously provided by self.stack_add
        '''
        # Delete ourselves as the successor.
        del calldict[_id]
        # Rerun undone
class Condition(Stack):
    ''' A condition perpetuates changes of an object base upon
    rules applied at configuration.

        Condition('foo', Condition.CHANGED, 'foo_changed')
        Condition('bar', 2, foo_callback, node='ANode')
    '''
    # Class-level default; appears unused by the methods below (the instance
    # result is cached in self._last instead).
    state = None
    def __init__(self, attr=None, value=None, valid=None, node=None, name=None, **kw):
        ''' A condition requires
        a node (Node|String|iterable),
        the attribute to monitor (String),
        a value to validate condition.
        Optionally `valid` callback when the condition is met
        You can add keyword args to provide additional validations to the condition.
        '''
        # Ordered mapping of attr name -> comparison value/statement.
        self._keys = OrderedDict()
        # Per-attr cache of the last boolean result of each statement.
        self.valid_cache = {}
        # the node to watch for changes.
        self.node = node
        # Attribute to monitor on node.
        self._attr = attr
        # Assign the constant type to match - or the target value to validate to.
        self._value = value
        # A def callback - if any
        self._valid_cb = valid
        # Result of the most recent evaluation (False until first run).
        self._last = False
        if valid is not None:
            print 'Valid on condition', valid
        self.name = name
        self.read_args(**kw)
def read_args(self, **kw):
'''provide keyword arguments for the condition to match.
The value of a keyword may be a primitive python object or a Compare.
'''
attr = self._attr
value = self._value
if attr is not None:
self.store_statement(attr, value)
for key in kw:
self.store_statement(key, kw[key])
def store_statement(self, key, value):
''' Store a statement into the condition to be met when the condition
is run '''
self._keys.update({key: value})
return self._keys
    def match(self, current, incoming, node, key, expand=False, parent_node=None, **kw):
        ''' This method is to be used outside the reference scope. Called by
        a node alteration or a machine call, the match() method will
        return a validation based upon provided values and the internal
        self.value statements.

        current is the value existing within the node[key].
            This value could be collected again, but to protect against any
            future complex implementation of a Node, we have an early
            definition to check.
        incoming is the value node[key] will become after match()
            A condition is called prior to the value being written to a Node.
            We're defining ahead of time if this statement will be true.
        node is the context object to match the condition
        key is the attr within the node object this condition is matching.
            The Node blindly runs this method. We check if the key is
            something we wish to use before performing validity.
        expand returns the valids object if True else a boolean if False
            passing true, you can see which statements failed within the
            condition.
        parent_node is the node this condition exists within.
            If a condition exists within a node, when the parent node is ran
            for changes, it passes a reference to the matcher.
            This allows string references within the condition to be dynamic

        returned is a boolean value of validity. '''
        # Keys we are not watching return the last known result unchanged.
        if key not in self._keys.keys():
            return self._last
        valids = self.run_statements(node, key, current, incoming, **kw)
        # flatten the dict in to a set of True/False
        vlist = list(set(valids.values()))
        # Both True/False exist
        if len(vlist) > 1:
            return False
        # All statements passed: fire the configured callback (if any).
        if vlist[0] is True:
            self._call_handler(node, valids, key, incoming, current, parent_node, **kw)
        if expand:
            return valids
        # Return the one statement
        self._last = vlist[0]
        return self._last
    def _call_handler(self, node, valids, key, incoming, current, parent_node=None, **kw):
        ''' Call the handler with the node, value and field passed.
        If the self._valid_cb is a string the method is received from the node
        and called. '''
        cbn = self._valid_cb
        cb = cbn
        parent = parent_node or node
        # String callbacks are resolved on the parent (or context) node.
        # NOTE(review): getattr has no default here, so a missing method
        # raises AttributeError rather than hitting the None check below.
        if isinstance(cbn, (str, unicode,)):
            cb = getattr(parent, cbn)
        if cb is None:
            print 'Could not find callback method', cbn
            return
        # NOTE(review): the args recorded on the stack differ from the actual
        # call below (which also passes self) -- confirm which is intended.
        _id = self.stack_add(cb, [node, key, incoming, current, valids])
        # Will fire
        cbv = cb(node, key, incoming, current, self, valids, **kw)
        # Successful will fire
        self.stack_remove(_id)
        # self.stack_call()
    def run_statements(self, node, key, current, incoming, **kw):
        ''' Iterate the statements collecting boolean values.
        Returned is a a bool of validity.
        Pass expand=True to return an object of key values. Each key is
        an attr of the node with its boolean return.

        node is the object of context.
            This is syntax sugar and probably not required.
        key is the attr within the node
        current is the existing value within the node[key]
        incoming is the future value the node[key] will become after validity'''
        valids = {}
        for _key in self._keys:
            if _key == key:
                # Only the statement matching the changed attr is re-checked;
                # its result is cached for future passes.
                value = self._keys[_key]
                res = self.check_statement(node, _key, value, current, incoming, **kw)
                self.valid_cache[_key] = res
                valids[_key] = res
            else:
                # populate the validity object with previously checked conditions.
                # # If None the attr has never been set on the condition therefore
                # the statement is False.
                valids[_key] = self.valid_cache.get(_key, False)
        return valids
    def check_statement(self, node, key, value, current, incoming, **kw):
        ''' check_statement returns boolean of the key, value passed.
        The node is the element to check the condition statement against.
        The key is the attr within the node of which will change to the value.
        The value is the stored comparison value to check against.
        current is the existing value within the node[key]
        incoming is the future value the node[key] will become after validity

        The node[key] contains the existing value of the key attr. This may
        change after this statement has returned its validity.
        it's most likely to check the incoming value rather than the current.
        The condition is pre checked ensuring any later nodes within a chain
        denotes this conditions validity. '''
        # print 'Checking condition against node', self.node, ' input:', node
        if self.node is not None:
            # Is this our context node to match
            is_node = node.get_name() == self.node or self.node == node
            if is_node is False:
                # print '\nX Node does not match', self.node, node
                return False
            # else:
            #     print'\nY Good match Continue validation'
        # Default comparison is exact equality; a tuple value names a compare
        # class in its second element, in which case the node's current value
        # becomes the comparison operand.
        Klass = Exact
        matching_val = value
        if isinstance(value, tuple):
            _K = self.get_comparison_class(value[1])
            # print 'Using', _K
            # NOTE(review): ismethod() on an unbound method is Python 2
            # behavior; under Python 3 _K.match is a plain function and this
            # branch would never be taken -- confirm.
            if isclass(_K) and ismethod(_K.match):
                Klass = _K
                matching_val = current
            else:
                print 'Not a matching class', _K, value
        comp = Klass(self)
        s = '++ statement {0} val:{1} - Using: {2}({3}, {4})'
        ps = s.format(key,
                      value,
                      comp.__class__.__name__,
                      incoming,
                      matching_val
                      )
        # print ps
        valid = comp.match(incoming, matching_val)
        # print 'valid', valid
        return valid
def get_comparison_class(self, compare):
'''
Return the compare class by string
'''
m = __import__('scatter.compares.simple', fromlist=[compare])
k = getattr(m, compare)
return k
def valid(self, value=None):
return self.match(value, self.value, self.node, self.attr)
    def __str__(self):
        # Render as "attr:value"; for tuple values, show the compare name.
        t = self._value
        if isinstance(t, (list, tuple)):
            t = self._value[1]
        # NOTE(review): the format string skips argument 0, so self.node is
        # passed but never rendered -- possibly intended as '{0}.{1}:{2}'.
        s = '{1}:{2}'.format(self.node, self._attr, t)
        return s
    def __unicode__(self):
        # Python 2 unicode protocol; mirrors __str__.
        return u'%s' % self.__str__()
    def __repr__(self):
        # Prefer the explicit name when one was supplied at construction.
        s = self.name if self.name is not None else self.__str__()
        return '<Condition: %s>' % (s,)
| Strangemother/python-state-machine | scatter/conditions.py | Python | mit | 10,942 |
import copy
import tempfile
import xmlsec
from tests import base
consts = xmlsec.constants
class TestKeys(base.TestMemoryLeaks):
    """Tests for xmlsec.Key loading from memory, files, and file objects."""
    def test_key_from_memory(self):
        key = xmlsec.Key.from_memory(self.load("rsakey.pem"), format=consts.KeyDataFormatPem)
        self.assertIsNotNone(key)

    def test_key_from_memory_with_bad_args(self):
        # Non-bytes data must be rejected with TypeError before loading.
        with self.assertRaises(TypeError):
            xmlsec.Key.from_memory(1, format="")

    def test_key_from_memory_invalid_data(self):
        with self.assertRaisesRegex(xmlsec.Error, '.*cannot load key.*'):
            xmlsec.Key.from_memory(b'foo', format=consts.KeyDataFormatPem)

    def test_key_from_file(self):
        key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
        self.assertIsNotNone(key)

    def test_key_from_file_with_bad_args(self):
        # A non-path, non-file first argument must raise TypeError.
        with self.assertRaises(TypeError):
            xmlsec.Key.from_file(1, format="")

    def test_key_from_invalid_file(self):
        with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
            with tempfile.NamedTemporaryFile() as tmpfile:
                tmpfile.write(b'foo')
                # NOTE(review): no flush() before the read below, so the
                # loader may see an empty file; either way it cannot read a
                # key, which is what this test asserts.
                xmlsec.Key.from_file(tmpfile.name, format=consts.KeyDataFormatPem)

    def test_key_from_fileobj(self):
        with open(self.path("rsakey.pem"), "rb") as fobj:
            key = xmlsec.Key.from_file(fobj, format=consts.KeyDataFormatPem)
        self.assertIsNotNone(key)

    def test_key_from_invalid_fileobj(self):
        # NOTE(review): delete=False means this temp file is never removed.
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            tmpfile.write(b'foo')
        with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'), open(tmpfile.name) as fp:
            xmlsec.Key.from_file(fp, format=consts.KeyDataFormatPem)
def test_generate(self):
key = xmlsec.Key.generate(klass=consts.KeyDataAes, size=256, type=consts.KeyDataTypeSession)
self.assertIsNotNone(key)
def test_generate_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.generate(klass="", size="", type="")
def test_generate_invalid_size(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot generate key.*'):
xmlsec.Key.generate(klass=consts.KeyDataAes, size=0, type=consts.KeyDataTypeSession)
def test_from_binary_file(self):
key = xmlsec.Key.from_binary_file(klass=consts.KeyDataDes, filename=self.path("deskey.bin"))
self.assertIsNotNone(key)
def test_from_binary_file_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_binary_file(klass="", filename=1)
def test_from_invalid_binary_file(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
xmlsec.Key.from_binary_file(klass=consts.KeyDataDes, filename=tmpfile.name)
def test_from_binary_data(self):
key = xmlsec.Key.from_binary_data(klass=consts.KeyDataDes, data=self.load("deskey.bin"))
self.assertIsNotNone(key)
def test_from_binary_data_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_binary_data(klass="", data=1)
def test_from_invalid_binary_data(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
xmlsec.Key.from_binary_data(klass=consts.KeyDataDes, data=b'')
def test_load_cert_from_file(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
key.load_cert_from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatPem)
def test_load_cert_from_file_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError):
key.load_cert_from_file(1, format="")
def test_load_cert_from_invalid_file(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
key.load_cert_from_file(tmpfile.name, format=consts.KeyDataFormatPem)
def test_load_cert_from_fileobj(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with open(self.path("rsacert.pem"), "rb") as fobj:
key.load_cert_from_file(fobj, format=consts.KeyDataFormatPem)
def test_load_cert_from_fileobj_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError), open(self.path("rsacert.pem"), "rb") as fobj:
key.load_cert_from_file(fobj, format='')
def test_load_cert_from_invalid_fileobj(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'foo')
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'), open(tmpfile.name) as fp:
key.load_cert_from_file(fp, format=consts.KeyDataFormatPem)
def test_load_cert_from_memory(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
key.load_cert_from_memory(self.load("rsacert.pem"), format=consts.KeyDataFormatPem)
def test_load_cert_from_memory_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError):
key.load_cert_from_memory(1, format="")
def test_load_cert_from_memory_invalid_data(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
key.load_cert_from_memory(b'', format=consts.KeyDataFormatPem)
def test_get_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNone(key.name)
def test_get_name_invalid_key(self):
key = xmlsec.Key()
with self.assertRaisesRegex(ValueError, 'key is not ready'):
key.name
def test_del_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key.name = "rsakey"
del key.name
self.assertIsNone(key.name)
def test_set_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key.name = "rsakey"
self.assertEqual("rsakey", key.name)
def test_set_name_invalid_key(self):
key = xmlsec.Key()
with self.assertRaisesRegex(ValueError, 'key is not ready'):
key.name = 'foo'
def test_copy(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key2 = copy.copy(key)
del key
key2.load_cert_from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatPem)
class TestKeysManager(base.TestMemoryLeaks):
    """Tests for ``xmlsec.KeysManager``: adding keys and loading certificates."""
    def test_add_key(self):
        key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
        mngr = xmlsec.KeysManager()
        mngr.add_key(key)
    def test_add_key_with_bad_args(self):
        mngr = xmlsec.KeysManager()
        with self.assertRaises(TypeError):
            mngr.add_key("")
    def test_load_cert(self):
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        mngr.load_cert(self.path("rsacert.pem"), format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
    def test_load_cert_with_bad_args(self):
        # NOTE(review): temp file written without flush(); the load fails
        # either way, which is the asserted behavior.
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
            with tempfile.NamedTemporaryFile() as tmpfile:
                tmpfile.write(b'foo')
                mngr.load_cert(tmpfile.name, format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
    def test_load_invalid_cert(self):
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        with self.assertRaises(TypeError):
            mngr.load_cert(1, format="", type="")
    def test_load_cert_from_memory(self):
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        mngr.load_cert_from_memory(self.load("rsacert.pem"), format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
    def test_load_cert_from_memory_with_bad_args(self):
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        with self.assertRaises(TypeError):
            mngr.load_cert_from_memory(1, format="", type="")
    def test_load_cert_from_memory_invalid_data(self):
        mngr = xmlsec.KeysManager()
        mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
            mngr.load_cert_from_memory(b'', format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
    def test_load_invalid_key(self):
        # A bare Key() holds no key material and must be rejected.
        mngr = xmlsec.KeysManager()
        with self.assertRaises(ValueError):
            mngr.add_key(xmlsec.Key())
| mehcode/python-xmlsec | tests/test_keys.py | Python | mit | 10,026 |
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2014 Max-Planck-Society
##
## Author: Maksim Greiner, Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#from nifty import *
import numpy as np
from nifty import pi, \
about, \
field, \
sqrt,exp,log
def power_backward_conversion_lm(k_space,p,mean=None):
    """
    Convert a theoretical/statistical power spectrum of a log-normal
    field into the power spectrum of the underlying Gaussian field.
    The function only works for power spectra defined for lm_spaces.

    Parameters
    ----------
    k_space : nifty.rg_space,
        a regular grid space with the attribute `Fourier = True`
    p : np.array,
        the power spectrum of the log-normal field.
        Needs to have the same number of entries as
        `k_space.get_power_indices()[0]`
    mean : float, *optional*
        specifies the mean of the log-normal field. If `mean` is not
        specified the function will use the monopole of the power spectrum.
        If it is specified the function will NOT use the monopole of the
        spectrum. (default: None)
        WARNING: a mean that is too low can violate positive definiteness
        of the log-normal field. In this case the function raises a
        ValueError.

    Returns
    -------
    mean : float,
        the recovered mean of the underlying Gaussian distribution.
    p1 : np.array,
        the power spectrum of the underlying Gaussian field, where the
        monopole has been set to zero. Eventual monopole power has been
        shifted to the mean.

    Raises
    ------
    ValueError
        if the spectrum (or the given mean) is incompatible with a
        positive definite log-normal field.

    References
    ----------
    .. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
        `arXiv:1312.1354 <http://arxiv.org/abs/1312.1354>`_
    """
    # Work on a copy so the caller's array is never mutated.
    p = np.copy(p)
    if mean is not None:
        # Override the monopole with the power implied by the given mean.
        p[0] = 4*pi*mean**2
    klen = k_space.get_power_indices()[0]
    C_0_Omega = field(k_space,val=0)
    C_0_Omega.val[:len(klen)] = p*sqrt(2*klen+1)/sqrt(4*pi)
    C_0_Omega = C_0_Omega.transform()
    if np.any(C_0_Omega.val < 0.):
        # BUGFIX: an unreachable ``return None`` used to follow this raise;
        # it has been removed.
        raise ValueError(about._errors.cstring("ERROR: spectrum or mean incompatible with positive definiteness.\n Try increasing the mean."))
    lC = log(C_0_Omega)
    Z = lC.transform()
    spec = Z.val[:len(klen)]
    # Recover the Gaussian mean; all monopole power is shifted into it.
    mean = (spec[0]-0.5*sqrt(4*pi)*log((p*(2*klen+1)/(4*pi)).sum()))/sqrt(4*pi)
    spec[0] = 0.
    spec = spec*sqrt(4*pi)/sqrt(2*klen+1)
    spec = np.real(spec)
    if np.any(spec < 0.):
        # Clip numerically negative modes and warn, as before.
        spec = spec*(spec>0.)
        about.warnings.cprint("WARNING: negative modes set to zero.")
    return mean.real,spec
def power_forward_conversion_lm(k_space,p,mean=0):
    """
    Convert a theoretical/statistical power spectrum of a Gaussian field
    into the power spectrum of the exponentiated (log-normal) field.
    The function only works for power spectra defined for lm_spaces.

    Parameters
    ----------
    k_space : nifty.rg_space,
        a regular grid space with the attribute `Fourier = True`
    p : np.array,
        the power spectrum of the Gaussian field.
        Needs to have the same number of entries as
        `k_space.get_power_indices()[0]`
    mean : float, *optional*
        the mean of the Gaussian field (default: 0).

    Returns
    -------
    p1 : np.array,
        the power spectrum of the exponentiated Gaussian field.

    References
    ----------
    .. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
        `arXiv:1312.1354 <http://arxiv.org/abs/1312.1354>`_
    """
    multipoles = k_space.get_power_indices()[0]
    # Build the Gaussian covariance C_0(Omega) in harmonic space, then
    # transform it to position space.
    gauss_cov = field(k_space, val=0)
    gauss_cov.val[:len(multipoles)] = p * sqrt(2*multipoles + 1) / sqrt(4*pi)
    gauss_cov = gauss_cov.transform()
    # Variance at zero separation, C_0(0).
    var_at_zero = (p * (2*multipoles + 1) / (4*pi)).sum()
    # Exponentiate (including the mean contribution) and go back to
    # harmonic space to read off the log-normal spectrum.
    exp_cov = exp(gauss_cov + var_at_zero + 2*mean)
    harmonic = exp_cov.transform()
    spec = harmonic.val[:len(multipoles)]
    spec = spec * sqrt(4*pi) / sqrt(2*multipoles + 1)
    spec = np.real(spec)
    if np.any(spec < 0.):
        # Clip numerically negative modes and warn.
        spec = spec * (spec > 0.)
        about.warnings.cprint("WARNING: negative modes set to zero.")
    return spec
#!/usr/bin/env/python
# _*_ coding:utf-8_*_
from handlers.ExceptionHandler import ExceptionHandler
def process_message(twitter_api, telegram_message):
    """Build the farewell reply for a stop command.

    ``twitter_api`` is unused here; ``telegram_message`` carries the
    sender under ``message.message_from``.  Returns the farewell text,
    or None (implicitly) when the message structure is unexpected —
    the error is passed to the exception handler.
    """
    try:
        # TODO: Remove the user from somewhere
        # user_id = telegram_message.message.message_from.id
        sender = telegram_message.message.message_from
        return '¡Deica logo, {0}!'.format(sender.first_name)
    except Exception as ex:
        # Best-effort: log the failure and fall through (returns None).
        ExceptionHandler.handle_exception(ex, False)
| CorunaDevelopers/teleTweetBot | teleTweetBot/commands/StopCommand.py | Python | gpl-3.0 | 487 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``structure`` field to ``catalogue.Product``."""
    def forwards(self, orm):
        # Adding field 'Product.structure'
        db.add_column(u'catalogue_product', 'structure',
                      self.gf('django.db.models.fields.CharField')(default='standalone', max_length=10),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Product.structure'
        db.delete_column(u'catalogue_product', 'structure')
    # Frozen ORM snapshot generated by South; regenerate with
    # ``schemamigration`` instead of editing by hand.
    models = {
        u'catalogue.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'catalogue.attributeoptiongroup': {
            'Meta': {'object_name': 'AttributeOptionGroup'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'catalogue.category': {
            'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
        },
        u'catalogue.option': {
            'Meta': {'object_name': 'Option'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
        },
        u'catalogue.product': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
            'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['catalogue.ProductClass']"}),
            'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'structure': ('django.db.models.fields.CharField', [], {'default': "'standalone'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productattribute': {
            'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        u'catalogue.productattributevalue': {
            'Meta': {'unique_together': "(('attribute', 'product'),)", 'object_name': 'ProductAttributeValue'},
            'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
            'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
            'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
            'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productcategory': {
            'Meta': {'ordering': "['product', 'category']", 'unique_together': "(('product', 'category'),)", 'object_name': 'ProductCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'catalogue.productclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'catalogue.productimage': {
            'Meta': {'ordering': "['display_order']", 'unique_together': "(('product', 'display_order'),)", 'object_name': 'ProductImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['catalogue.Product']"})
        },
        u'catalogue.productrecommendation': {
            'Meta': {'ordering': "['primary', '-ranking']", 'unique_together': "(('primary', 'recommendation'),)", 'object_name': 'ProductRecommendation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
            'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['catalogue']
from time import sleep
import RPi.GPIO as GPIO
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
ControlPin = [7,8,9,10]  # BCM pins wired to the stepper driver inputs
# Half-step coil activation sequence: each row is the on/off state of
# the four control pins for one phase of the cycle.
seq = [ [1,0,0,0],
        [1,1,0,0],
        [0,1,0,0],
        [0,1,1,0],
        [0,0,1,0],
        [0,0,1,1],
        [0,0,0,1],
        [1,0,0,1]]
def init():
    """Configure every control pin as an output and drive it low."""
    GPIO.setwarnings(False)
    for control_pin in ControlPin:
        GPIO.setup(control_pin, GPIO.OUT)
        GPIO.output(control_pin, 0)
def setStep(values):
    """Write one row of coil states (four 0/1 levels) to the control pins."""
    for pin, level in zip(ControlPin, values):
        GPIO.output(pin, level)
def step(forwards, stepSize):
    """
    Drive the motor through 512 cycles of the coil sequence.

    ``forwards`` selects the direction (the sequence is reversed when
    False).  ``stepSize`` strides through the 8-phase sequence (1 =
    half-stepping, 2 = full-stepping) and also scales the per-phase
    delay so the rotation speed stays the same for both modes.
    """
    sequence = seq if forwards else list(reversed(seq))
    for _cycle in range(0, 512):
        for phase in range(0, 8, stepSize):
            for idx in range(0, 4):
                GPIO.output(ControlPin[idx], sequence[phase][idx])
            sleep(0.001 * stepSize)
init()
# Demo run: one revolution each way at half-step, then at full-step.
step(True, 1)
step(False, 1)
step(True, 2)
step(False, 2)
# De-energise the coils so the motor does not draw holding current.
for pin in ControlPin:
    GPIO.output(pin, 0)
| gilesp/quickdrawmcgraw | obsolete/stepper_old.py | Python | mit | 1,024 |
#!/usr/bin/python
# -*- coding:UTF-8 -*-
################################################################################
#
# Copyright 2010-2014 Carlos Ramisch, Vitor De Araujo, Silvio Ricardo Cordeiro,
# Sandra Castellanos
#
# xml2arff.py is part of mwetoolkit
#
# mwetoolkit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mwetoolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mwetoolkit. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""
This script converts a candidates file in XML (mwetoolkit-candidates.dtd)
into a corresponding representation in the arff file format, used by the
WEKA machine learning toolkit. Only features and TP base are considered,
information about the candidate's ngrams or occurrences are ignored. Please
notice that if you don't have a feature that uniquely identifies your
candidate, you will not be able to trace back the classifier results to the
original candidates.
For more information, call the script with no parameter and read the
usage instructions.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from libs.util import read_options, treat_options_simplest
from libs import filetype
################################################################################
# GLOBALS
usage_string = """Usage:
python {program} OPTIONS <candidates>
The <candidates> input file must be in one of the filetype
formats accepted by the `--from` switch.
OPTIONS may be:
--from <input-filetype-ext>
Force conversion from given filetype extension.
(By default, file type is automatically detected):
{descriptions.input[candidates]}
{common_options}
"""
all_feats = []
input_filetype_ext = None
from libs import filetype
################################################################################
def treat_options(opts, arg, n_arg, usage_string):
    """Callback function that handles the command line options of this script.

    @param opts The options parsed by getopts.
    @param arg The argument list parsed by getopts. Ignored.
    @param n_arg The number of arguments expected for this script.
    @param usage_string Usage instructions shown on invalid invocation.
    """
    global input_filetype_ext
    treat_options_simplest(opts, arg, n_arg, usage_string)
    for (o, a) in opts:
        # BUGFIX: the original tested ``o in ("--from")`` -- the parentheses
        # do not create a tuple, so this was a substring test against the
        # string "--from". Use a direct equality test instead.
        if o == "--from":
            input_filetype_ext = a
        else:
            raise Exception("Bad arg: " + o)
################################################################################
# MAIN SCRIPT
longopts = ["from="]
# Parse command-line options; treat_options (above) sets input_filetype_ext.
args = read_options("", longopts, treat_options, -1, usage_string)
# Relation name: "stdin" when piped, otherwise the input filename sans ".xml".
relation_name = "stdin" if len(args) == 0 else args[0].replace(".xml", "")
# Stream the candidates through the ARFF printer.
filetype.parse(args, filetype.printer_class("ARFF")("corpus",
        relation_name=relation_name), input_filetype_ext)
| KWARC/mwetoolkit | bin/old/xml2arff.py | Python | gpl-3.0 | 3,443 |
#!/usr/bin/env python3
from knc import device
# Open all attached KNC boards and print the status of the first one.
boards = device.open_boards()
print(boards[0].get_status())
import json
import random
import uuid
import codecs
PRIOR = 0.4  # starting score for every candidate record (prior belief of a match)
# Record attributes compared when scoring candidate matches.
MATCH_ITEMS = ['personAge', 'hairColor', 'eyeColor', 'name']
# The three tables below are filled by EntityClusterer.__init__ from JSON files.
MATCHPROBS = {}  # per-attribute match probabilities (MATCHPROBS.json)
TOTALS = {}  # per-attribute observation totals (TOTALS.json)
RELATIVE_FREQS = {}  # per-attribute value -> frequency maps (*_frequencies.json)
class EntityClusterer(object):
    """Greedy agglomerative clusterer for person records within one canopy.

    ``__init__`` loads the probability/frequency tables from JSON files in
    the working directory into the module-level globals.
    """
    def __init__(self):
        global PRIOR
        ## PROBABILITY TABLES (loaded from JSON side files)
        global MATCH_ITEMS
        global MATCHPROBS
        MATCHPROBS = json.loads(open('MATCHPROBS.json').read())
        global TOTALS
        TOTALS = json.loads(open('TOTALS.json').read())
        global RELATIVE_FREQS
        # for match_item in MATCH_ITEMS:
        #    try:
        #        RELATIVE_FREQS[match_item] = json.loads(open(data_folder+match_item+'_frequencies.json').read())
        #    except:
        #        print '!!!!ERROR LOADING RELATIVE_FREQS for ' + match_item
        RELATIVE_FREQS['personAge'] = json.loads(open('personAge_frequencies.json').read())
        RELATIVE_FREQS['hairColor'] = json.loads(open('hairColor_frequencies.json').read())
        RELATIVE_FREQS['eyeColor'] = json.loads(open('eyeColor_frequencies.json').read())
        RELATIVE_FREQS['name'] = json.loads(open('name_frequencies.json').read())
        ## Not USED YET (local only; discarded when __init__ returns)
        MAX_VALUES = {
            'personAge': 2,
            'hairColor': 2,
            'eyeColor': 2,
            'name': 2
        }
    def do_clustering(self, doc, canopy_id ):
        """Cluster the records in doc['cluster']; returns flattened records
        annotated with canopy/entity information."""
        if not canopy_id:
            canopy_id = str(uuid.uuid4())
        # canopy_id = str(uuid.uuid4())
        records = []
        for record in doc['cluster']:
            records.append(record)
        # with codecs.open(inputfile, "r", "utf-8") as myfile:
        #    for line in myfile:
        #        the_json = line.encode("utf-8")
        #        json_object = json.loads(the_json)
        #        records.append(json_object)
        # Take an item from the canopy and make it a cluster, then greedily
        # absorb the best-scoring remaining records while the score > 0.5.
        clusters = []
        random.shuffle(records)
        while len(records) > 0:
            processing_item = records.pop()
            new_cluster = Cluster(canopy_id)
            new_cluster.addItem(processing_item)
            score = 1
            while score > 0.5:
                (record, score) = get_best_record_and_score(new_cluster, records)
                if score > 0.5:
                    new_cluster.addItem(record)
                    records.remove(record)
            clusters.append(new_cluster)
        print '--> %d entities' % len(clusters)
        json_result = []
        for cluster in clusters:
            json_result.extend(cluster.getFlattenedItems())
        return json_result
def get_best_record_and_score(cluster, records):
    """Score every record in ``records`` against ``cluster`` and return the
    ``(record, score)`` pair with the highest score.

    Scoring multiplies a PRIOR by per-attribute likelihood ratios built
    from MATCHPROBS / TOTALS / RELATIVE_FREQS.  Returns ``({}, 0)`` when
    ``records`` is empty.
    """
    best_record = {}
    best_score = 0
    for record in records:
        score = PRIOR
        for key in cluster.get_keys():
            record_key = key.replace("entity_", "")
            if record_key in record:
                # We do this because some of the values are strings and some are arrays
                record_values = []
                entity_values = cluster.entity[key]
                if isinstance(record[record_key], basestring):
                    record_values.append(record[record_key])
                else:
                    record_values = record[record_key]
                freq_count_total = 0
                match = False
                for record_value in record_values:
                    record_value = record_value.lower()
                    for entity_value in entity_values:
                        entity_value = entity_value.lower()
                        # Unseen values get a pseudo-count of 1.
                        freq_count = 1
                        if record_value in RELATIVE_FREQS[record_key]:
                            freq_count = RELATIVE_FREQS[record_key][record_value]
                        # HACK TO DO +/- 1 for age: ages one year apart also
                        # count as matching and contribute their frequencies.
                        if record_key == 'personAge':
                            age = int(record_value)
                            age_plus_one = str(age+1)
                            if age_plus_one in RELATIVE_FREQS[record_key]:
                                freq_count += RELATIVE_FREQS[record_key][age_plus_one]
                            if age_plus_one == entity_value:
                                match = True
                            age_minus_one = str(age-1)
                            if age_minus_one in RELATIVE_FREQS[record_key]:
                                freq_count += RELATIVE_FREQS[record_key][age_minus_one]
                            if age_minus_one == entity_value:
                                match = True
                        freq_count_total += freq_count
                        if record_value == entity_value:
                            match = True
                if freq_count_total > 0:
                    # PVr: relative frequency of the observed value(s).
                    PVr = (1.0*freq_count_total) / (1.0*TOTALS[record_key])
                    if match:
                        numerator = MATCHPROBS[record_key]
                    else:
                        numerator = (1 - MATCHPROBS[record_key]) * PVr
                    unit_score = numerator / PVr
                    score = score * unit_score
                # best_unit_score = -1
                # for record_value in record_values:
                #    record_value = record_value.lower()
                #    for entity_value in entity_values:
                #        entity_value = entity_value.lower()
                #        freq_count = 1
                #        if record_value in RELATIVE_FREQS[key]:
                #            freq_count = RELATIVE_FREQS[key][record_value]
                #        PVr = (1.0*freq_count) / (1.0*TOTALS[key])
                #        if record_value == entity_value:
                #            numerator = MATCHPROBS[key]
                #        else:
                #            numerator = (1 - MATCHPROBS[key]) * PVr
                #        denom = PVr
                #        unit_score = numerator / denom
                #
                #        if unit_score > best_unit_score:
                #            best_unit_score = unit_score
                # if best_unit_score > -1:
                #    score = score * best_unit_score
        if score > best_score:
            best_record = record
            best_score = score
    return (best_record, best_score)
class Cluster(object):
    """One entity cluster: the member records plus an aggregated
    ``entity`` dict of de-duplicated attribute values keyed ``entity_<attr>``."""
    def addItem(self, item):
        # Merge the record's MATCH_ITEMS attributes into the aggregate
        # entity, de-duplicating values.  The two branches differ only in
        # that the non-empty case guards against a missing entity key.
        # self.entity['uri'].append(item['uri'])
        if len(self.items) == 0:
            for match_item in MATCH_ITEMS:
                if match_item in item:
                    entity_match_item = "entity_" + match_item
                    self.entity[entity_match_item] = []
                    if isinstance(item[match_item], basestring):
                        self.entity[entity_match_item].append(item[match_item])
                    else:
                        self.entity[entity_match_item].extend(item[match_item])
                    self.entity[entity_match_item] = list(set(self.entity[entity_match_item]))
        else:
            for match_item in MATCH_ITEMS:
                if match_item in item:
                    entity_match_item = "entity_" + match_item
                    if entity_match_item not in self.entity:
                        self.entity[entity_match_item] = []
                    if isinstance(item[match_item], basestring):
                        self.entity[entity_match_item].append(item[match_item])
                    else:
                        self.entity[entity_match_item].extend(item[match_item])
                    self.entity[entity_match_item] = list(set(self.entity[entity_match_item]))
        self.items.append(item)
    def get_keys(self):
        # Keys of the aggregated entity (entity_id included).
        keys = self.entity.keys()
        # keys.remove('uri')
        return keys
    def __init__(self, canopy_id):
        self.items = []  # member records in insertion order
        self.CANOPY_ID = canopy_id  # canopy this cluster belongs to
        self.entity = {}  # aggregated attribute values, keyed entity_<attr>
        # self.entity['entity_uris'] = []
        self.entity['entity_id'] = str(uuid.uuid4())
    def getFlattenedItems(self):
        """Return the member records annotated in place with canopy_id and
        the aggregated entity attributes."""
        output_objects = []
        for item in self.items:
            output_object = item
            output_object['canopy_id'] = self.CANOPY_ID
            output_object.update(self.entity)
            # NOTE(review): this inner loop variable reuses the name
            # ``item`` and so clobbers the outer loop variable.
            for item in self.entity:
                if item != 'entity_id':
                    output_object[item] = list(set(self.entity[item]))
            output_objects.append(output_object)
        return output_objects
    # def __str__(self, *args, **kwargs):
    #    output_object = {}
    #    output_object['entity'] = self.entity
    #    for item in output_object['entity']:
    #        output_object['entity'][item] = list(set(output_object['entity'][item]))
    #    output_object['items'] = self.items
    #    return json.dumps(output_object)
if __name__ == "__main__":
    # Batch driver: read each canopy file (one JSON record per line), run the
    # clusterer over canopies with 2+ records, and append the flattened
    # results to one JSON-lines output file.
    from elasticsearch.client import Elasticsearch
    import traceback
    import urllib3
    import os
    urllib3.disable_warnings()
    ec = EntityClusterer()
    #JSON LINES TO JSON LINES
    with codecs.open('../../canopy_entity.jl', "w", "utf-8") as myfile:
        # NOTE(review): "canopy_frist_try" looks like a typo for "first_try",
        # but presumably matches the actual directory name on disk -- confirm.
        directory = '../../data/canopy_frist_try/'
        for subdir, dirs, files in os.walk(directory):
            for the_file in files:
                # Skip hidden files such as .DS_Store.
                if the_file.startswith('.'):
                    continue
                print "processing " + the_file
                cluster = {}
                cluster['cluster'] = []
                with codecs.open(directory+the_file, "r", "utf-8") as json_file:
                    for line in json_file:
                        record = json.loads(line)
                        cluster['cluster'].append(record)
                # Singleton canopies cannot be clustered any further.
                if len(cluster['cluster']) < 2:
                    print "skipping because record size is " + str(len(cluster['cluster']))
                    continue
                print 'processing ' + the_file + ' with size ' + str(len(cluster['cluster']))
                records = ec.do_clustering(cluster, the_file)
                for record in records:
                    myfile.write(json.dumps(record))
                    myfile.write('\n')
## ELASTIC SEARCH TO JSON LINES
# SECURITY NOTE(review): the commented-out line below embeds a username and
# password in the URL; rotate those credentials and load them from
# configuration before ever re-enabling this code path.
# elasticsearch_loc = 'https://darpamemex:[email protected]/dig-clusters-qpr-01/'
# es = Elasticsearch([elasticsearch_loc], show_ssl_warnings=False)
#
# import operator
# clusters = {}
#
# with codecs.open('../../canopy_entity.jl', "w", "utf-8") as myfile:
# res = es.search( body={"size" : 12834, "query": {"match_all": {}}, "_source":["cluster.a"] })
# hits_array = res['hits']['hits']
# for hit in hits_array:
# _id = hit['_id']
# cluster_size = len(hit['_source']['cluster'])
# clusters[_id] = cluster_size
# sorted_clusters = sorted(clusters.items(), key=operator.itemgetter(1), reverse=True)
#
# for index in range(0,5):
# print sorted_clusters[index]
# _id = sorted_clusters[index][0]
# res = es.search( body={"query": {"match": {"_id": _id}},"_source": { "excludes": ["cluster.image.isSimilarTo"]}})
# hits_array = res['hits']['hits']
# for hit in hits_array:
# cluster = hit['_source']
# print 'processing canopy with size ' + str(len(hit['_source']['cluster']))
# records = ec.do_clustering(cluster, None)
# for record in records:
# myfile.write(json.dumps(record))
# myfile.write('\n')
| usc-isi-i2/dig-entity-clustering | src/clustering/EntityClusterer.py | Python | gpl-2.0 | 11,885 |
#David Sanchez Pinsach
#Alex Pardo Fernandez
#Script to download demonyms from Wikipedia and to show the histogram of the
#demonym-formation rules found in the downloaded data
import rules as r
import wpDownload as wp

# CSV written by the Wikipedia download step and read back by the histogram step.
FILENAME = 'demonyms.csv'
# When True, demonyms that do not follow a regular formation rule are skipped
# during the download -- TODO confirm against wpDownload.download.
skipIrregulars = True
quiet = True  # presumably suppresses per-item output in showHistogram -- confirm
th = 1        # NOTE(review): looks like a minimum-count threshold for the histogram -- confirm

wp.download(FILENAME, skipIrregulars)
r.showHistogram(FILENAME, th, quiet)
import os, sys; sys.path.insert(0, os.path.join("..", ".."))
import unittest
import time
import re
import random
from pattern import search
from pattern.en import Sentence, parse
#---------------------------------------------------------------------------------------------------
class TestUtilityFunctions(unittest.TestCase):
    """Tests for the small module-level helpers in pattern.search
    (_match, unique, find, combinations, variations, odict)."""

    def setUp(self):
        pass

    def test_match(self):
        # Assert search._match() wildcard matching.
        # "*" may appear at the start, end, middle or both ends of the
        # pattern; precompiled regular expressions are accepted as-is.
        for s, p, b in (
          (  "rabbit", "rabbit", True),
          ( "rabbits", "rabbit*", True),
          ( "rabbits", "*abbits", True),
          ( "rabbits", "*abbit*", True),
          ( "rabbits", "rab*its", True),
          ( "rabbits", re.compile(r"ra.*?"), True)):
            self.assertEqual(search._match(s, p), b)
        print "pattern.search._match()"

    def test_unique(self):
        # unique() removes duplicates while preserving order.
        self.assertEqual(search.unique([1,1,2,2]), [1,2])

    def test_unique2(self):
        self.assertEqual(search.unique2([1,1,2,2]), [1,2])

    def test_find(self):
        # find() returns the first item for which the predicate holds.
        self.assertEqual(search.find(lambda v: v>2, [1,2,3,4,5]), 3)

    def test_combinations(self):
        # Assert combinations of list items.
        # combinations(v, n) yields all length-n sequences drawn from v with
        # repetition, hence len(v) ** n results.
        self.assertEqual(list(search.combinations([ ], 2)), [])   # No possibilities.
        self.assertEqual(list(search.combinations([1], 0)), [[]]) # One possibility: the empty list.
        self.assertEqual(list(search.combinations([1,2,3], 2)),
            [[1,1], [1,2], [1,3], [2,1], [2,2], [2,3], [3,1], [3,2], [3,3]])
        for n, m in ((1,9), (2,81), (3,729), (4,6561)):
            v = search.combinations([1,2,3,4,5,6,7,8,9], n)
            self.assertEqual(len(list(v)), m)
        print "pattern.search.combinations()"

    def test_variations(self):
        # Assert variations include the original input (the empty list has one variation = itself).
        v = search.variations([])
        self.assertEqual(v, [[]])
        # Assert variations = [1] and [].
        v = search.variations([1], optional=lambda item: item == 1)
        self.assertEqual(v, [[1], []])
        # Assert variations = the original input, [1], [2] and [].
        v = search.variations([1,2], optional=lambda item: item in (1,2))
        self.assertEqual(v, [[1,2], [2], [1], []])
        # Assert variations are sorted longest-first.
        v = search.variations([1,2,3,4], optional=lambda item: item in (1,2))
        self.assertEqual(v, [[1,2,3,4], [2,3,4], [1,3,4], [3,4]])
        self.assertTrue(len(v[0]) >= len(v[1]) >= len(v[2]), len(v[3]))
        print "pattern.search.variations()"

    def test_odict(self):
        # Assert odict.append() which must be order-preserving.
        # append() inserts at the front; re-appending an existing key updates
        # it and moves it back to the front (hence keys == a, c, b below).
        v = search.odict()
        v.append(("a", 1))
        v.append(("b", 2))
        v.append(("c", 3))
        v.append(("a", 0))
        v = v.copy()
        self.assertTrue(isinstance(v, dict))
        self.assertEqual(v.keys(), ["a", "c","b"])
        print "pattern.search.odict()"
#---------------------------------------------------------------------------------------------------
class TestTaxonomy(unittest.TestCase):
    """Tests for search.Taxonomy hierarchies and pluggable Classifiers."""

    def setUp(self):
        pass

    def test_taxonomy(self):
        # Assert Taxonomy search.
        t = search.Taxonomy()
        t.append("King Arthur", type="knight", value=1)
        t.append("Sir Bedevere", type="knight", value=2)
        t.append("Sir Lancelot", type="knight", value=3)
        t.append("Sir Gallahad", type="knight", value=4)
        t.append("Sir Robin", type="knight", value=5)
        t.append("John Cleese", type="Sir Lancelot")
        t.append("John Cleese", type="Basil Fawlty")
        # Matching is case-insensitive, results are lowercase.
        self.assertTrue("John Cleese" in t)
        self.assertTrue("john cleese" in t)
        self.assertEqual(t.classify("King Arthur"), "knight")
        self.assertEqual(t.value("King Arthur"), 1)
        self.assertEqual(t.parents("John Cleese"), ["basil fawlty", "sir lancelot"])
        # recursive=True also walks grandparents ("knight" via "sir lancelot").
        self.assertEqual(t.parents("John Cleese", recursive=True), [
            "basil fawlty",
            "sir lancelot",
            "knight"])
        self.assertEqual(t.children("knight"), [
            "sir robin",
            "sir gallahad",
            "sir lancelot",
            "sir bedevere",
            "king arthur"])
        self.assertEqual(t.children("knight", recursive=True), [
            "sir robin",
            "sir gallahad",
            "sir lancelot",
            "sir bedevere",
            "king arthur",
            "john cleese"])
        print "pattern.search.Taxonomy"

    def test_classifier(self):
        # Assert taxonomy classifier + keyword arguments.
        # A Classifier derives parents on the fly instead of storing them.
        c1 = search.Classifier(parents=lambda word, chunk=None: word.endswith("ness") and ["quality"] or [])
        c2 = search.Classifier(parents=lambda word, chunk=None: chunk=="VP" and ["action"] or [])
        t = search.Taxonomy()
        t.classifiers.append(c1)
        t.classifiers.append(c2)
        self.assertEqual(t.classify("fuzziness"), "quality")
        self.assertEqual(t.classify("run", chunk="VP"), "action")
        print "pattern.search.Classifier"

    def test_wordnet_classifier(self):
        # Assert WordNet classifier parents & children.
        # NOTE(review): requires the WordNet data bundled with pattern.
        c = search.WordNetClassifier()
        t = search.Taxonomy()
        t.classifiers.append(c)
        self.assertEqual(t.classify("cat"), "feline")
        self.assertEqual(t.classify("dog"), "canine")
        self.assertTrue("domestic cat" in t.children("cat"))
        self.assertTrue("puppy" in t.children("dog"))
        print "pattern.search.WordNetClassifier"
#---------------------------------------------------------------------------------------------------
class TestConstraint(unittest.TestCase):
def setUp(self):
pass
def _test_constraint(self, constraint, **kwargs):
# Assert Constraint property values with given optional parameters.
self.assertEqual(constraint.words, kwargs.get("words", []))
self.assertEqual(constraint.tags, kwargs.get("tags", []))
self.assertEqual(constraint.chunks, kwargs.get("chunks", []))
self.assertEqual(constraint.roles, kwargs.get("roles", []))
self.assertEqual(constraint.taxa, kwargs.get("taxa", []))
self.assertEqual(constraint.optional, kwargs.get("optional", False))
self.assertEqual(constraint.multiple, kwargs.get("multiple", False))
self.assertEqual(constraint.first, kwargs.get("first", False))
self.assertEqual(constraint.exclude, kwargs.get("exclude", None))
self.assertEqual(constraint.taxonomy, kwargs.get("taxonomy", search.taxonomy))
def test_fromstring(self):
# Assert Constraint string syntax.
for s, kwargs in (
( "cats", dict( words = ["cats"])),
( "Cat*", dict( words = ["cat*"])),
( "\\[cat\\]", dict( words = ["[cat]"])),
("[black cats]", dict( words = ["black cats"])),
( "black_cats", dict( words = ["black cats"])),
("black\\_cats", dict( words = ["black_cats"])),
( "NNS", dict( tags = ["NNS"])),
( "NN*|VB*", dict( tags = ["NN*", "VB*"])),
( "NP", dict(chunks = ["NP"])),
( "SBJ", dict( roles = ["SBJ"])),
( "CATS", dict( taxa = ["cats"])),
( "(cats)", dict( words = ["cats"], optional=True)),
( "\\(cats\\)", dict( words = ["(cats)"])),
( "cats+", dict( words = ["cats"], multiple=True)),
( "cats\\+", dict( words = ["cats+"])),
( "cats+dogs", dict( words = ["cats+dogs"])),
( "(cats+)", dict( words = ["cats+"], optional=True)),
( "cats\\|dogs", dict( words = ["cats|dogs"])),
( "cats|dogs", dict( words = ["cats", "dogs"])),
( "^cat", dict( words = ["cat"], first=True)),
( "\\^cat", dict( words = ["^cat"])),
( "(cat*)+", dict( words = ["cat*"], optional=True, multiple=True)),
( "^black_cat+", dict( words = ["black cat"], multiple=True, first=True)),
( "cats|NN*", dict( words = ["cats"], tags=["NN*"]))):
self._test_constraint(search.Constraint.fromstring(s), **kwargs)
# Assert non-alpha taxonomy items.
t = search.Taxonomy()
t.append("0.5", type="0.5")
t.append("half", type="0.5")
v = search.Constraint.fromstring("0.5", taxonomy=t)
# Assert non-alpha words without taxonomy.
self.assertTrue(v.taxa == ["0.5"])
v = search.Constraint.fromstring("0.5")
# Assert exclude Constraint.
self.assertTrue(v.words == ["0.5"])
v = search.Constraint.fromstring("\\!cats|!dogs|!fish")
self.assertTrue(v.words == ["!cats"])
self.assertTrue(v.exclude.words == ["dogs", "fish"])
print "pattern.search.Constraint.fromstring"
print "pattern.search.Constraint.fromstring"
def test_match(self):
# Assert Constraint-Word matching.
R = search.Constraint.fromstring
S = lambda s: Sentence(parse(s, relations=True, lemmata=True))
W = lambda s, tag=None, index=0: search.Word(None, s, tag, index)
for constraint, tests in (
(R("cat|dog"), [(W("cat"), 1), (W("dog"), 1), (W("fish"), 0)]),
(R("cat*"), [(W("cats"), 1)]),
(R("*cat"), [(W("tomcat"), 1)]),
(R("c*t|d*g"), [(W("cat"), 1), (W("cut"), 1), (W("dog"), 1), (W("dig"), 1)]),
(R("cats|NN*"), [(W("cats", "NNS"), 1), (W("cats"), 0)]),
(R("^cat"), [(W("cat", "NN", index=0), 1),(W("cat", "NN", index=1), 0)]),
(R("*|!cat"), [(W("cat"), 0), (W("dog"), 1), (W("fish"), 1)]),
(R("my cat"), [(W("cat"), 0)]),
(R("my cat"), [(S("my cat").words[1], 1)]), # "my cat" is an overspecification of "cat"
(R("my_cat"), [(S("my cat").words[1], 1)]),
(R("cat|NP"), [(S("my cat").words[1], 1)]),
(R("dog|VP"), [(S("my dog").words[1], 0)]),
(R("cat|SBJ"), [(S("the cat is sleeping").words[1], 1)]),
(R("dog"), [(S("MY DOGS").words[1], 1)]), # lemma matches
(R("dog"), [(S("MY DOG").words[1], 1)])): # case-insensitive
for test, b in tests:
self.assertEqual(constraint.match(test), bool(b))
# Assert Constraint-Taxa matching.
t = search.Taxonomy()
t.append("Tweety", type="bird")
t.append("Steven", type="bird")
v = search.Constraint.fromstring("BIRD", taxonomy=t)
self.assertTrue(v.match(W("bird")))
self.assertTrue(v.match(S("tweeties")[0]))
self.assertTrue(v.match(W("Steven")))
print "pattern.search.Constraint.match()"
def test_string(self):
# Assert Constraint.string.
v = search.Constraint()
v.words = ["Steven\\*"]
v.tags = ["NN*"]
v.roles = ["SBJ"]
v.taxa = ["(associate) professor"]
v.exclude = search.Constraint(["bird"])
v.multiple = True
v.first = True
self.assertEqual(v.string, "^[Steven\\*|NN*|SBJ|\(ASSOCIATE\)_PROFESSOR|!bird]+")
print "pattern.search.Constraint.string"
#---------------------------------------------------------------------------------------------------
class TestPattern(unittest.TestCase):
    """Tests for search.Pattern: parsing multi-constraint pattern strings,
    matching them against strings/Sentences, and the module-level
    compile()/match()/search() helpers."""

    def setUp(self):
        pass

    def test_pattern(self):
        # Assert Pattern properties.
        v = search.Pattern([
            search.Constraint("a|an|the"),
            search.Constraint("JJ*"),
            search.Constraint("cat")], search.STRICT)
        self.assertEqual(len(v), 3)
        self.assertEqual(v.strict, True)
        print "pattern.search.Pattern"

    def test_fromstring(self):
        # Assert Pattern string syntax.
        v = search.Pattern.fromstring("a|an|the (JJ*) cat*")
        self.assertEqual(v[0].words, ["a", "an", "the"])
        self.assertEqual(v[1].tags, ["JJ*"])
        self.assertEqual(v[1].optional, True)
        self.assertEqual(v[2].words, ["cat*"])
        # Assert escaped control characters.
        v = search.Pattern.fromstring("[\\[Figure 1\\]] VP")
        self.assertEqual(v[0].words, ["[figure 1]"])
        self.assertEqual(v[1].chunks, ["VP"])
        # Assert messy syntax (fix brackets and whitespace, don't fix empty options).
        v = search.Pattern.fromstring("[avoid][|!|messy |syntax |]")
        self.assertEqual(v[0].words, ["avoid"])
        self.assertEqual(v[1].words, ["", "messy", "syntax", ""])
        self.assertEqual(v[1].exclude.words, [""]) # "!" = exclude everything
        print "pattern.search.Pattern.fromstring()"

    def test_match(self):
        # Assert Pattern.match()
        # Table columns: pattern, input (str or parsed Sentence), expected
        # Match.string (None = no match).
        P = search.Pattern.fromstring
        X = search.STRICT
        S = lambda s: Sentence(parse(s, relations=True, lemmata=True))
        for i, (pattern, test, match) in enumerate((
          (P("^rabbit"),          "white rabbit",       None),                    #  0
          (P("^rabbit"),          "rabbit",             "rabbit"),                #  1
          (P("rabbit"),           "big white rabbit",   "rabbit"),                #  2
          (P("rabbit*"),          "big white rabbits",  "rabbits"),               #  3
          (P("JJ|NN"),          S("big white rabbits"), "big"),                   #  4
          (P("JJ+"),            S("big white rabbits"), "big white"),             #  5
          (P("JJ+ NN*"),        S("big white rabbits"), "big white rabbits"),     #  6
          (P("JJ black|white NN*"), S("big white rabbits"), "big white rabbits"), #  7
          (P("NP"),             S("big white rabbit"),  "big white rabbit"),      #  8
          (P("(big) rabbit", X), S("big white rabbit"), "rabbit"),                #  9 strict
          (P("(big) rabbit|NN"), S("big white rabbit"), "rabbit"),                # 10 explicit
          (P("(big) rabbit"),   S("big white rabbit"),  "big white rabbit"),      # 11 greedy
          (P("rabbit VP JJ"),   S("the rabbit was huge"), "the rabbit was huge"), # 12
          (P("rabbit be JJ"),   S("the rabbit was huge"), "the rabbit was huge"), # 13 lemma
          (P("rabbit be JJ", X), S("the rabbit was huge"), "rabbit was huge"),    # 14
          (P("rabbit is JJ"),   S("the rabbit was huge"), None),                  # 15
          (P("the NP"),         S("the rabid rodents"), "the rabid rodents"),     # 16 overlap
          (P("t*|r*+"),         S("the rabid rodents"), "the rabid rodents"),     # 17
          (P("(DT) (JJ) NN*"),  S("the rabid rodents"), "the rabid rodents"),     # 18
          (P("(DT) (JJ) NN*"),  S("the rabbit"),        "the rabbit"),            # 19
          (P("rabbit"),         S("the big rabbit"),    "the big rabbit"),        # 20 greedy
          (P("eat carrot"),     S("is eating a carrot"), "is eating a carrot"),   # 21
          (P("eat carrot|NP"),  S("is eating a carrot"), "is eating a carrot"),   # 22
          (P("eat NP"),         S("is eating a carrot"), "is eating a carrot"),   # 23
          (P("eat a"),          S("is eating a carrot"), "is eating a"),          # 24
          (P("!NP carrot"),     S("is eating a carrot"), "is eating a carrot"),   # 25
          (P("eat !pizza"),     S("is eating a carrot"), "is eating a carrot"),   # 26
          (P("eating a"),       S("is eating a carrot"), "is eating a"),          # 27
          (P("eating !carrot", X), S("is eating a carrot"), "eating a"),          # 28
          (P("eat !carrot"),    S("is eating a carrot"), None),                   # 29 NP chunk is a carrot
          (P("eat !DT"),        S("is eating a carrot"), None),                   # 30 eat followed by DT
          (P("eat !NN"),        S("is eating a carrot"), "is eating a"),          # 31 a/DT is not NN
          (P("!be carrot"),     S("is eating a carrot"), "is eating a carrot"),   # 32 is eating == eat != is
          (P("!eat|VP carrot"), S("is eating a carrot"), None),                   # 33 VP chunk == eat
          (P("white_rabbit"),   S("big white rabbit"),  None),                    # 34
          (P("[white rabbit]"), S("big white rabbit"),  None),                    # 35
          (P("[* white rabbit]"), S("big white rabbit"), "big white rabbit"),     # 36
          (P("[big * rabbit]"), S("big white rabbit"),  "big white rabbit"),      # 37
          (P("big [big * rabbit]"), S("big white rabbit"), "big white rabbit"),   # 38
          (P("[*+ rabbit]"),    S("big white rabbit"),  None),                    # 39 bad pattern: "+" is literal
          )):
            m = pattern.match(test)
            #print i, match, "<=>", m and m.string or None
            self.assertTrue(getattr(m, "string", None) == match)
        # Assert chunk with head at the front.
        s = S("Felix the cat")
        s.chunks[0].head = s.chunks[0][0] # head = "Felix"
        self.assertEqual(P("felix").match(s).string, "Felix the cat")
        # Assert negation + custom greedy() function.
        s = S("the big white rabbit")
        g = lambda chunk, constraint: len([w for w in chunk if not constraint.match(w)]) == 0
        self.assertEqual(P("!white").match(s).string, "the big white rabbit") # a rabbit != white
        self.assertEqual(P("!white", greedy=g).match(s), None)                # a white rabbit == white
        # Assert taxonomy items with spaces.
        s = S("Bugs Bunny is a giant talking rabbit.")
        t = search.Taxonomy()
        t.append("rabbit", type="rodent")
        t.append("Bugs Bunny", type="rabbit")
        self.assertEqual(P("RABBIT", taxonomy=t).match(s).string, "Bugs Bunny")
        # Assert None, the syntax cannot handle taxonomy items that span multiple chunks.
        s = S("Elmer Fudd fires a cannon")
        t = search.Taxonomy()
        t.append("fire cannon", type="violence")
        self.assertEqual(P("VIOLENCE").match(s), None)
        # Assert regular expressions.
        s = S("a sack with 3.5 rabbits")
        p = search.Pattern.fromstring("[] NNS")
        p[0].words.append(re.compile(r"[0-9|\.]+"))
        self.assertEqual(p.match(s).string, "3.5 rabbits")
        print "pattern.search.Pattern.match()"

    def test_search(self):
        # Assert one match containing all words.
        v = search.Pattern.fromstring("*+")
        v = v.search("one two three")
        self.assertEqual(v[0].string, "one two three")
        # Assert one match for each word.
        v = search.Pattern.fromstring("*")
        v = v.search("one two three")
        self.assertEqual(v[0].string, "one")
        self.assertEqual(v[1].string, "two")
        self.assertEqual(v[2].string, "three")
        # Assert all variations are matched (sentence starts with a NN* which must be caught).
        v = search.Pattern.fromstring("(DT) (JJ)+ NN*")
        v = v.search(Sentence(parse("dogs, black cats and a big white rabbit")))
        self.assertEqual(v[0].string, "dogs")
        self.assertEqual(v[1].string, "black cats")
        self.assertEqual(v[2].string, "a big white rabbit")
        # NOTE(review): dead assignment -- v is never used after this line.
        v = search.Pattern.fromstring("NN*")
        print "pattern.search.Pattern.search()"

    def test_convergence(self):
        # Test with random sentences and random patterns to see if it crashes.
        # (A smoke test: no assertions, just "must not raise".)
        w = ("big", "white", "rabbit", "black", "cats", "is", "was", "going", "to", "sleep", "sleepy", "very", "or")
        x = ("(DT)", "(JJ)+", "NN*", "(VP)", "cat", "[*]")
        for i in range(100):
            s = " ".join(random.choice(w) for i in range(20))
            s = Sentence(parse(s, lemmata=True))
            p = " ".join(random.choice(x) for i in range(5))
            p = search.Pattern.fromstring(p)
            p.search(s)

    def test_compile_function(self):
        # Assert creating and caching Pattern with compile().
        t = search.Taxonomy()
        p = search.compile("(JJ)+ NN*", search.STRICT, taxonomy=t)
        self.assertEqual(p.strict, True)
        self.assertEqual(p[0].optional, True)
        self.assertEqual(p[0].tags, ["JJ"])
        self.assertEqual(p[1].tags, ["NN*"])
        self.assertEqual(p[1].taxonomy, t)
        # Assert regular expression input.
        p = search.compile(re.compile(r"[0-9|\.]+"))
        self.assertTrue(isinstance(p[0].words[0], search.regexp))
        # Assert TypeError for other input.
        self.assertRaises(TypeError, search.compile, 1)
        print "pattern.search.compile()"

    def test_match_function(self):
        # Assert match() function.
        s = Sentence(parse("Go on Bors, chop his head off!"))
        m1 = search.match("chop NP off", s, strict=False)
        m2 = search.match("chop NP+ off", s, strict=True)
        self.assertEqual(m1.constituents()[1].string, "his head")
        self.assertEqual(m2.constituents()[1].string, "his head")
        print "pattern.search.match()"

    def test_search_function(self):
        # Assert search() function.
        s = Sentence(parse("Go on Bors, chop his head off!"))
        m = search.search("(PRP*) NN*", s)
        self.assertEqual(m[0].string, "Bors")
        self.assertEqual(m[1].string, "his head")
        print "pattern.search.search()"

    def test_escape(self):
        # Assert escape() function (backslash-escapes all control characters).
        self.assertEqual(search.escape("[]()_|!*+^."), "\\[\\]\\(\\)\\_\\|\\!\\*\\+\\^.")
        print "pattern.search.escape()"
#---------------------------------------------------------------------------------------------------
class TestMatch(unittest.TestCase):
    """Tests for search.Match: the words it covers and the mapping between
    matched words/chunks and the constraints that matched them."""

    def setUp(self):
        pass

    def test_match(self):
        # Assert Match properties.
        s = Sentence(parse("Death awaits you all with nasty, big, pointy teeth."))
        p = search.Pattern(sequence=[
            search.Constraint(tags=["JJ"], optional=True),
            search.Constraint(tags=["NN*"])])
        m = p.search(s)
        self.assertTrue(isinstance(m, list))
        self.assertEqual(m[0].pattern, p)
        self.assertEqual(m[1].pattern, p)
        self.assertEqual(m[0].words, [s.words[0]])
        self.assertEqual(m[1].words, [s.words[-3], s.words[-2]])
        # Assert contraint "NN*" links to "Death" and "teeth", and "JJ" to "pointy".
        self.assertEqual(m[0].constraint(s.words[ 0]), p[1])
        self.assertEqual(m[1].constraint(s.words[-3]), p[0])
        self.assertEqual(m[1].constraint(s.words[-2]), p[1])
        # Assert constraints "JJ NN*" links to chunk "pointy teeth".
        self.assertEqual(m[1].constraints(s.chunks[6]), [p[0], p[1]])
        # Assert Match.constituents() by constraint, constraint index and list of indices.
        self.assertEqual(m[1].constituents(), [s.chunks[6]])
        self.assertEqual(m[1].constituents(constraint=p[0]), [s.words[-3]])
        self.assertEqual(m[1].constituents(constraint=1), [s.words[-2]])
        self.assertEqual(m[1].constituents(constraint=(0,1)), [s.chunks[6]])
        # Assert Match.string.
        self.assertEqual(m[1].string, "pointy teeth")
        print "pattern.search.Match"
#---------------------------------------------------------------------------------------------------
def suite():
    """Assemble every TestCase class in this module into one TestSuite."""
    loader = unittest.TestLoader()
    all_cases = (
        TestUtilityFunctions,
        TestTaxonomy,
        TestConstraint,
        TestPattern,
        TestMatch)
    result = unittest.TestSuite()
    for case in all_cases:
        result.addTest(loader.loadTestsFromTestCase(case))
    return result
if __name__ == "__main__":
    # Run the whole suite when this file is executed directly.
    unittest.TextTestRunner(verbosity=1).run(suite())
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Summary API v2.
The operations in this package are safe to use with eager execution turned on or
off. It has a more flexible API that allows summaries to be written directly
from ops to places other than event log files, rather than propagating protos
from `tf.summary.merge_all` to `tf.summary.FileWriter`.
To use with eager execution enabled, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate a record
# ...
```
To use it with graph execution, write your code as follows:
```python
global_step = tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
# model definition code goes here
# and in it call
tf.contrib.summary.scalar("loss", my_loss)
# In this case every call to tf.contrib.summary.scalar will generate an op,
# note the need to run tf.contrib.summary.all_summary_ops() to make sure these
# ops get executed.
# ...
train_op = ....
with tf.Session(...) as sess:
tf.global_variables_initializer().run()
tf.contrib.summary.initialize(graph=tf.get_default_graph())
# ...
while not_done_training:
sess.run([train_op, tf.contrib.summary.all_summary_ops()])
# ...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.summary_ops_v2 import all_v2_summary_ops as all_summary_ops
from tensorflow.python.ops.summary_ops_v2 import always_record_summaries
from tensorflow.python.ops.summary_ops_v2 import audio
from tensorflow.python.ops.summary_ops_v2 import create_db_writer
from tensorflow.python.ops.summary_ops_v2 import create_file_writer
from tensorflow.python.ops.summary_ops_v2 import create_summary_file_writer
from tensorflow.python.ops.summary_ops_v2 import eval_dir
from tensorflow.python.ops.summary_ops_v2 import flush
from tensorflow.python.ops.summary_ops_v2 import generic
from tensorflow.python.ops.summary_ops_v2 import graph
from tensorflow.python.ops.summary_ops_v2 import histogram
from tensorflow.python.ops.summary_ops_v2 import image
from tensorflow.python.ops.summary_ops_v2 import import_event
from tensorflow.python.ops.summary_ops_v2 import initialize
from tensorflow.python.ops.summary_ops_v2 import never_record_summaries
from tensorflow.python.ops.summary_ops_v2 import record_summaries_every_n_global_steps
from tensorflow.python.ops.summary_ops_v2 import scalar
from tensorflow.python.ops.summary_ops_v2 import should_record_summaries
from tensorflow.python.ops.summary_ops_v2 import summary_writer_initializer_op
from tensorflow.python.ops.summary_ops_v2 import SummaryWriter
| chemelnucfin/tensorflow | tensorflow/contrib/summary/summary.py | Python | apache-2.0 | 3,803 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import ExplainDetail
from pyflink.table.table_result import TableResult
from pyflink.util.java_utils import to_j_explain_detail_arr
__all__ = ['StatementSet']
class StatementSet(object):
    """
    A StatementSet accepts DML statements or Tables,
    the planner can optimize all added statements and Tables together
    and then submit as one job.

    All methods are thin wrappers that delegate to the underlying Java
    StatementSet via Py4J.

    .. note::

        The added statements and Tables will be cleared
        when calling the `execute` method.

    .. versionadded:: 1.11.0
    """

    def __init__(self, _j_statement_set, t_env):
        # _j_statement_set: the Py4J handle to the Java StatementSet.
        # t_env: the owning TableEnvironment (needed for pre-execute hooks).
        self._j_statement_set = _j_statement_set
        self._t_env = t_env

    def add_insert_sql(self, stmt: str) -> 'StatementSet':
        """
        add insert statement to the set.

        :param stmt: The statement to be added.
        :return: current StatementSet instance.

        .. versionadded:: 1.11.0
        """
        self._j_statement_set.addInsertSql(stmt)
        return self

    def add_insert(self, target_path: str, table, overwrite: bool = False) -> 'StatementSet':
        """
        add Table with the given sink table name to the set.

        :param target_path: The path of the registered :class:`~pyflink.table.TableSink` to which
                            the :class:`~pyflink.table.Table` is written.
        :param table: The Table to add.
        :type table: pyflink.table.Table
        :param overwrite: The flag that indicates whether the insert
                          should overwrite existing data or not.
        :return: current StatementSet instance.

        .. versionadded:: 1.11.0
        """
        self._j_statement_set.addInsert(target_path, table._j_table, overwrite)
        return self

    def explain(self, *extra_details: ExplainDetail) -> str:
        """
        returns the AST and the execution plan of all statements and Tables.

        :param extra_details: The extra explain details which the explain result should include,
                              e.g. estimated cost, changelog mode for streaming
        :return: All statements and Tables for which the AST and execution plan will be returned.

        .. versionadded:: 1.11.0
        """
        # Convert the Python ExplainDetail varargs into a Java array first.
        j_extra_details = to_j_explain_detail_arr(extra_details)
        return self._j_statement_set.explain(j_extra_details)

    def execute(self) -> TableResult:
        """
        execute all statements and Tables as a batch.

        .. note::
            The added statements and Tables will be cleared when executing this method.

        :return: execution result.

        .. versionadded:: 1.11.0
        """
        # Give the TableEnvironment a chance to do pre-job bookkeeping
        # before submission -- see TableEnvironment._before_execute.
        self._t_env._before_execute()
        return TableResult(self._j_statement_set.execute())
| rmetzger/flink | flink-python/pyflink/table/statement_set.py | Python | apache-2.0 | 3,677 |
from math import *
import random
"""
Representation: Permutation of {0..n-1}
Parenthood Selection: Ternary tournament (3 individuals)
Mutation: Random swap of two indices
Crossover: Order crossover
Survival Selection: Replace single worst entity with new one
Fitness: Cost of the cycle I_0 I_1 .. I_n I_0 for an individual I
Some test results:
POPSIZE = 10
GENERATIONS = 10000
MUTPROB = 0.1
Shortest distance = 174081.470967
POPSIZE = 20
GENERATIONS = 5000
MUTPROB = 0.1
Shortest distance = 288226.47026
"""
# want to minimize this
def follow_path(points, indices):
    """Length of the closed tour that visits `points` in the order given by
    `indices` and returns to the start -- the GA's cost function."""
    total = 0
    closed_tour = indices[1:] + [indices[0]]
    for here, there in zip(indices, closed_tour):
        total += dist(points[here], points[there])
    return total
# order crossover
def crossover(i1, i2):
    """Order crossover (OX1) of two parent permutations.

    A random slice [r1:r2) is copied from each parent into the matching
    child; the remaining positions are filled -- starting at r2 and wrapping
    around -- with the other parent's genes in their original order.  Both
    parents must be permutations of the same set, so each child is again a
    complete permutation.

    NOTE: len(i1) must be >= 2, otherwise random.randrange(0) raises
    ValueError.
    """
    n = len(i1)
    r1 = random.randrange(n-1)
    r2 = random.randrange(r1, n)
    child1 = [None] * n
    child2 = [None] * n
    # Hoist the slices instead of re-slicing on every membership test.
    seg1 = i1[r1:r2]
    seg2 = i2[r1:r2]
    child1[r1:r2] = seg1
    child2[r1:r2] = seg2
    # List comprehensions instead of filter(): identical result on Python 2
    # and, unlike filter(), still a list (with a len()) on Python 3.
    rest1 = [e for e in i2 if e not in seg1]
    rest2 = [e for e in i1 if e not in seg2]
    for offset, gene in enumerate(rest1):
        child1[(r2 + offset) % n] = gene
    for offset, gene in enumerate(rest2):
        child2[(r2 + offset) % n] = gene
    return child1, child2
# mutation: swap two random positions
def mutate(ind):
    """Swap two randomly chosen indices of *ind* in place and return it.

    The two indices may coincide, in which case the tour is unchanged.
    (A stray ``return ind`` used to sit before the swap, silently turning
    mutation into a no-op; it has been removed.)
    """
    i1 = random.randrange(len(ind))
    i2 = random.randrange(len(ind))
    ind[i1], ind[i2] = ind[i2], ind[i1]
    return ind
def dist(p1, p2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return sqrt(dx * dx + dy * dy)
# tournament selection
def select(xs, n=3):
    """Tournament selection: draw *n* individuals uniformly at random
    (repetition possible) and return the fittest, i.e. lowest-cost, one.

    :return: tuple ``(cost, individual)`` of the tournament winner.
    """
    selection = [xs[random.randrange(len(xs))] for _ in range(n)]
    mini = None
    minind = None
    for ind in selection:
        fit = unfitness(ind)
        # Test "mini is None" FIRST: the original compared a number against
        # None, which only worked thanks to Python 2's arbitrary cross-type
        # ordering and raises TypeError on Python 3.
        if mini is None or fit < mini:
            mini = fit
            minind = ind
    return (mini, minind)
def is_valid(child):
    """Return True when *child* has no unfilled (None) slot left by crossover."""
    return None not in child
def generate_child(pop, mutate_prob=0.1):
    """Breed one offspring: tournament-select two parents, order-cross them,
    keep the child with the LOWER tour cost, and mutate it with probability
    *mutate_prob*.

    Crossover is retried with fresh parents until both children are complete
    permutations.  The original returned the *worse* child (higher cost),
    contradicting its own "greater fitness" comment; that is fixed here.
    """
    _, p1 = select(pop)
    _, p2 = select(pop)
    c1, c2 = crossover(p1, p2)
    while not is_valid(c1) or not is_valid(c2):
        _, p1 = select(pop)
        _, p2 = select(pop)
        c1, c2 = crossover(p1, p2)
    # lower unfitness == better individual; ties keep c1 like the original
    best = c1 if unfitness(c1) <= unfitness(c2) else c2
    if random.random() < mutate_prob:
        return mutate(best)
    return best
def unfitness(ind):
    # Cost of the closed tour encoded by *ind*; lower is better.
    # Reads the module-level ``points`` list loaded from in.txt.
    return follow_path(points, ind)
def individual(n):
    """Return one candidate tour: a uniformly random permutation of 0..n-1."""
    cities = range(n)
    return random.sample(cities, n)
def population(n, ind_size):
    """Build an initial population of *n* random tours of length *ind_size*."""
    # range() instead of xrange(): identical behaviour on Python 2 and also
    # runs on Python 3.
    return [individual(ind_size) for _ in range(n)]
def unfitnesses(pop):
    """Tour cost of every individual in *pop*, as a list.

    (Equivalent to the original ``map(unfitness, pop)``, which returns a
    list on Python 2.)
    """
    return [unfitness(ind) for ind in pop]
def sort_pop(pop):
    """Return (cost, individual) pairs sorted best-first (lowest cost)."""
    scored = zip(unfitnesses(pop), pop)
    return sorted(scored, key=lambda pair: pair[0])
def replace_worst(pop, n=1, mut_prob=0.1):
    """Steady-state survival step: drop the *n* worst tours and breed
    replacements from the full (pre-cull) population.

    :return: tuple ``(best_cost, new_population)``.
    """
    keepers = sort_pop(pop)[:-n]
    survivors = [pair[1] for pair in keepers]
    for _ in xrange(n):
        survivors.append(generate_child(pop, mut_prob))
    return keepers[0][0], survivors
def read_points():
    """Parse in.txt -- one whitespace-separated integer pair per line --
    into a list of (x, y) tuples.

    The with-statement guarantees the file handle is closed even when a
    line fails to parse (the original leaked the handle).
    """
    with open('in.txt') as f:
        return [tuple(int(tok) for tok in line.split()) for line in f]
# --- GA driver (Python 2 script body) --------------------------------------
# Load city coordinates and run a steady-state GA: each generation replaces
# the single worst tour with a freshly bred child.
points = read_points()
ind_size = len(points)
N = len(points)
POPSIZE = 10  # individuals kept alive at any time
GENERATIONS = 50000  # steady-state iterations (one replacement each)
MUTPROB = 0.1  # probability of mutating the generated child
pop = population(POPSIZE, N)
for g in xrange(GENERATIONS):
    mini, pop = replace_worst(pop, mut_prob=MUTPROB)
    if(g % 100 == 0):
        print ("Evaluating generation " + str(g))
# best (lowest) tour cost found over the whole run
print mini
class StartAt:
    """ Represents the Start At CLI parameter """

    def check(self, context):
        """ Return if this CLI should be used """
        configured = context.config.startAt
        return configured is not None

    def build(self, context):
        """ Return the string parameters to add to the command string """
        value = context.config.startAt
        return ["--start-at", value]
# -*- coding: utf-8 -*-
import pytest
import sys
import random
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
class TestListPopRange(object):
    """Integration tests for the client's list_pop_range() operation.

    ``list_pop_range(key, bin, index, count[, meta[, policy]])`` removes
    *count* elements of a list bin starting at *index* and returns them.
    """

    @pytest.fixture(autouse=True)
    def setup(self, request, as_connection):
        # Seed five records whose 'contact_no' bin is [i, i+1, ..., i+5];
        # record 1 additionally gets a nested list appended.
        # NOTE(review): self.as_connection appears to be attached to the test
        # instance by the as_connection fixture -- confirm in conftest.
        keys = []
        for i in range(5):
            key = ('test', 'demo', i)
            rec = {'name': 'name%s' % (str(i)),
                   'contact_no': [i, i + 1, i + 2, i + 3, i + 4, i + 5],
                   'city': ['Pune', 'Dehli']}
            self.as_connection.put(key, rec)
            keys.append(key)
        key = ('test', 'demo', 1)
        self.as_connection.list_append(key, "contact_no", [45, 50, 80])
        keys.append(key)

        def teardown():
            """
            Teardown method: remove every record created above, ignoring
            ones that a test may already have deleted.
            """
            for key in keys:
                try:
                    as_connection.remove(key)
                except e.RecordNotFound:
                    pass

        request.addfinalizer(teardown)

    def test_pos_list_pop_range_with_correct_paramters(self):
        """
        Invoke list_pop_range() get back elements from the list with
        correct parameters
        """
        key = ('test', 'demo', 1)
        # Record 1's list is [1, 2, 3, 4, 5, 6, [45, 50, 80]]; popping 3
        # elements from index 4 returns the tail including the nested list.
        bins = self.as_connection.list_pop_range(key, "contact_no", 4, 3)
        assert bins == [5, 6, [45, 50, 80]]

    def test_pos_list_pop_range_with_correct_policy(self):
        """
        Invoke list_pop_range() get back elements from the list with
        correct policy
        """
        key = ('test', 'demo', 1)
        policy = {
            'timeout': 1000,
            'retry': aerospike.POLICY_RETRY_ONCE,
            'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
        }
        bins = self.as_connection.list_pop_range(
            key, 'city', 0, 2, {}, policy)
        assert bins == ['Pune', 'Dehli']

    # Negative Tests

    def test_neg_list_pop_range_with_no_parameters(self):
        """
        Invoke list_pop_range() without any mandatory parameters.
        """
        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_pop_range()
        assert "argument 'key' (pos 1)" in str(
            typeError.value)

    def test_neg_list_pop_range_with_incorrect_policy(self):
        """
        Invoke list_pop_range() with incorrect policy
        """
        key = ('test', 'demo', 1)
        policy = {
            'timeout': 0.5
        }
        # timeout must be an integer; -2 is the client-side PARAM_ERROR code
        try:
            self.as_connection.list_pop_range(
                key, "contact_no", 0, 2, {}, policy)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "timeout is invalid"

    def test_neg_list_pop_range_with_nonexistent_key(self):
        """
        Invoke list_pop_range() with non-existent key
        """
        # NOTE(review): self.server_version is presumably populated by the
        # connection fixture -- confirm; it is not set in this class.
        if self.server_version < [3, 15, 2]:
            pytest.skip("Change of error beginning in 3.15")
        charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
        minLength = 5
        maxLength = 30
        length = random.randint(minLength, maxLength)
        key = ('test', 'demo', ''.join(map(lambda unused:
                                           random.choice(charSet),
                                           range(length))) + ".com")
        with pytest.raises(e.RecordNotFound):
            self.as_connection.list_pop_range(key, "abc", 0, 1)

    def test_neg_list_pop_range_with_nonexistent_bin(self):
        """
        Invoke list_pop_range() with non-existent bin
        """
        key = ('test', 'demo', 1)
        charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
        minLength = 5
        maxLength = 10
        length = random.randint(minLength, maxLength)
        bin = ''.join(map(lambda unused:
                          random.choice(charSet), range(length))) + ".com"
        try:
            self.as_connection.list_pop_range(key, bin, 0, 1)

        except e.BinIncompatibleType as exception:
            assert exception.code == 12

    def test_neg_list_pop_range_with_extra_parameter(self):
        """
        Invoke list_pop_range() with extra parameter.
        """
        key = ('test', 'demo', 1)
        policy = {'timeout': 1000}
        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_pop_range(
                key, "contact_no", 1, 1, {}, policy, "")

        assert "list_pop_range() takes at most 6 arguments (7 given)" in str(
            typeError.value)

    def test_neg_list_pop_range_policy_is_string(self):
        """
        Invoke list_pop_range() with policy is string
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_pop_range(
                key, "contact_no", 0, 1, {}, "")

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "policy must be a dict"

    def test_neg_list_pop_range_key_is_none(self):
        """
        Invoke list_pop_range() with key is none
        """
        try:
            self.as_connection.list_pop_range(None, "contact_no", 0, 2)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "key is invalid"

    def test_neg_list_pop_range_bin_is_none(self):
        """
        Invoke list_pop_range() with bin is none
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_pop_range(key, None, 1, 3)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Bin name should be of type string"

    def test_neg_list_pop_range_with_negative_index(self):
        """
        Invoke list_pop_range() with negative index
        """
        # Error code 4 is the server's AEROSPIKE_ERR_REQUEST_INVALID.
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_pop_range(key, "contact_no", -56, 5)
        except e.InvalidRequest as exception:
            assert exception.code == 4

    def test_neg_list_pop_range_with_negative_length(self):
        """
        Invoke list_pop_range() with negative count
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_pop_range(key, "contact_no", 0, -59)
        except e.InvalidRequest as exception:
            assert exception.code == 4

    def test_neg_list_pop_range_meta_type_integer(self):
        """
        Invoke list_pop_range() with metadata input is of type integer
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_pop_range(
                key, "contact_no", 0, 2, 888)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Metadata should be of type dictionary"

    def test_neg_list_pop_range_index_type_string(self):
        """
        Invoke list_pop_range() with index is of type string
        """
        key = ('test', 'demo', 1)

        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_pop_range(
                key, "contact_no", "Fifth", 2)
        assert "an integer is required" in str(typeError.value)
| aerospike/aerospike-client-python | test/new_tests/test_list_pop_range.py | Python | apache-2.0 | 7,305 |
# Copyright (c) 2018 China Telecom Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import call
from osc_lib import exceptions
from openstackclient.network.v2 import floating_ip_port_forwarding
from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes_v2
from openstackclient.tests.unit.network.v2 import fakes as network_fakes
from openstackclient.tests.unit import utils as tests_utils
class TestFloatingIPPortForwarding(network_fakes.TestNetworkV2):
    """Shared fixtures for the floating-IP port-forwarding command tests."""

    def setUp(self):
        super(TestFloatingIPPortForwarding, self).setUp()
        # Short-hands for the fake network client and the fake resources
        # every subclass works against.
        self.network = self.app.client_manager.network
        self.floating_ip = (network_fakes.FakeFloatingIP.
                            create_one_floating_ip())
        self.port = network_fakes.FakePort.create_one_port()
        self.project = identity_fakes_v2.FakeProject.create_one_project()
        # All commands resolve ports via find_port; stub it out once here.
        self.network.find_port = mock.Mock(return_value=self.port)
class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding):
    """Tests for ``floating ip port forwarding create``."""

    def setUp(self):
        super(TestCreateFloatingIPPortForwarding, self).setUp()
        # The fake port forwarding the create call is expected to return.
        self.new_port_forwarding = (
            network_fakes.FakeFloatingIPPortForwarding.
            create_one_port_forwarding(
                attrs={
                    'internal_port_id': self.port.id,
                    'floatingip_id': self.floating_ip.id,
                }
            )
        )
        self.network.create_floating_ip_port_forwarding = mock.Mock(
            return_value=self.new_port_forwarding)
        self.network.find_ip = mock.Mock(
            return_value=self.floating_ip
        )

        # Get the command object to test
        self.cmd = floating_ip_port_forwarding.CreateFloatingIPPortForwarding(
            self.app, self.namespace)

        # Expected display columns/data, in the command's output order.
        self.columns = (
            'description',
            'external_port',
            'floatingip_id',
            'id',
            'internal_ip_address',
            'internal_port',
            'internal_port_id',
            'protocol'
        )
        self.data = (
            self.new_port_forwarding.description,
            self.new_port_forwarding.external_port,
            self.new_port_forwarding.floatingip_id,
            self.new_port_forwarding.id,
            self.new_port_forwarding.internal_ip_address,
            self.new_port_forwarding.internal_port,
            self.new_port_forwarding.internal_port_id,
            self.new_port_forwarding.protocol,
        )

    def test_create_no_options(self):
        # Required positional/option arguments missing -> parser error.
        arglist = []
        verifylist = []

        # Missing required args should bail here
        self.assertRaises(tests_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_create_all_options(self):
        # Exercise every supported option and verify the SDK call payload.
        arglist = [
            '--port', self.new_port_forwarding.internal_port_id,
            '--internal-protocol-port',
            str(self.new_port_forwarding.internal_port),
            '--external-protocol-port',
            str(self.new_port_forwarding.external_port),
            '--protocol', self.new_port_forwarding.protocol,
            self.new_port_forwarding.floatingip_id,
            '--internal-ip-address',
            self.new_port_forwarding.internal_ip_address,
            '--description',
            self.new_port_forwarding.description,
        ]
        verifylist = [
            ('port', self.new_port_forwarding.internal_port_id),
            ('internal_protocol_port', self.new_port_forwarding.internal_port),
            ('external_protocol_port', self.new_port_forwarding.external_port),
            ('protocol', self.new_port_forwarding.protocol),
            ('floating_ip', self.new_port_forwarding.floatingip_id),
            ('internal_ip_address', self.new_port_forwarding.
             internal_ip_address),
            ('description', self.new_port_forwarding.description),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.network.create_floating_ip_port_forwarding.\
            assert_called_once_with(
                self.new_port_forwarding.floatingip_id,
                **{
                    'external_port': self.new_port_forwarding.external_port,
                    'internal_ip_address': self.new_port_forwarding.
                    internal_ip_address,
                    'internal_port': self.new_port_forwarding.internal_port,
                    'internal_port_id': self.new_port_forwarding.
                    internal_port_id,
                    'protocol': self.new_port_forwarding.protocol,
                    'description': self.new_port_forwarding.description,
                })
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)
class TestDeleteFloatingIPPortForwarding(TestFloatingIPPortForwarding):
    """Tests for ``floating ip port forwarding delete``."""

    def setUp(self):
        super(TestDeleteFloatingIPPortForwarding, self).setUp()
        # Two fake port forwardings on the same floating IP, so both single
        # and multi-delete paths can be exercised.
        self._port_forwarding = (
            network_fakes.FakeFloatingIPPortForwarding.create_port_forwardings(
                count=2, attrs={
                    'floatingip_id': self.floating_ip.id,
                }
            )
        )
        self.network.delete_floating_ip_port_forwarding = mock.Mock(
            return_value=None
        )
        self.network.find_ip = mock.Mock(
            return_value=self.floating_ip
        )

        # Get the command object to test
        self.cmd = floating_ip_port_forwarding.DeleteFloatingIPPortForwarding(
            self.app, self.namespace)

    def test_port_forwarding_delete(self):
        # Deleting a single port forwarding issues exactly one SDK call.
        arglist = [
            self.floating_ip.id,
            self._port_forwarding[0].id,
        ]
        verifylist = [
            ('floating_ip', self.floating_ip.id),
            ('port_forwarding_id', [self._port_forwarding[0].id]),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.network.delete_floating_ip_port_forwarding.\
            assert_called_once_with(
                self.floating_ip.id,
                self._port_forwarding[0].id,
                ignore_missing=False
            )
        self.assertIsNone(result)

    def test_multi_port_forwardings_delete(self):
        # Multiple IDs on the command line -> one SDK call per forwarding.
        arglist = []
        pf_id = []
        arglist.append(str(self.floating_ip))
        for a in self._port_forwarding:
            arglist.append(a.id)
            pf_id.append(a.id)
        verifylist = [
            ('floating_ip', str(self.floating_ip)),
            ('port_forwarding_id', pf_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

        calls = []
        for a in self._port_forwarding:
            calls.append(call(a.floatingip_id, a.id, ignore_missing=False))
        self.network.delete_floating_ip_port_forwarding.assert_has_calls(calls)
        self.assertIsNone(result)

    def test_multi_port_forwarding_delete_with_exception(self):
        # One good ID plus one unknown ID: the command must attempt both and
        # report a partial-failure CommandError.
        arglist = [
            self.floating_ip.id,
            self._port_forwarding[0].id,
            'unexist_port_forwarding_id',
        ]
        verifylist = [
            ('floating_ip', self.floating_ip.id),
            ('port_forwarding_id',
             [self._port_forwarding[0].id, 'unexist_port_forwarding_id']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        delete_mock_result = [None, exceptions.CommandError]
        self.network.delete_floating_ip_port_forwarding = (
            mock.MagicMock(side_effect=delete_mock_result)
        )

        try:
            self.cmd.take_action(parsed_args)
            self.fail('CommandError should be raised.')
        except exceptions.CommandError as e:
            self.assertEqual(
                '1 of 2 Port forwarding failed to delete.',
                str(e)
            )

        self.network.delete_floating_ip_port_forwarding.\
            assert_any_call(
                self.floating_ip.id,
                'unexist_port_forwarding_id',
                ignore_missing=False
            )
        self.network.delete_floating_ip_port_forwarding.\
            assert_any_call(
                self.floating_ip.id,
                self._port_forwarding[0].id,
                ignore_missing=False
            )
class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding):
    """Tests for ``floating ip port forwarding list``."""

    # Display columns emitted by the list command, in order.
    columns = (
        'ID',
        'Internal Port ID',
        'Internal IP Address',
        'Internal Port',
        'External Port',
        'Protocol',
        'Description',
    )

    def setUp(self):
        super(TestListFloatingIPPortForwarding, self).setUp()

        # Three fake forwardings on the same floating IP / internal port.
        self.port_forwardings = (
            network_fakes.FakeFloatingIPPortForwarding.create_port_forwardings(
                count=3, attrs={
                    'internal_port_id': self.port.id,
                    'floatingip_id': self.floating_ip.id,
                }
            )
        )

        # Expected row data mirroring self.columns.
        self.data = []
        for port_forwarding in self.port_forwardings:
            self.data.append((
                port_forwarding.id,
                port_forwarding.internal_port_id,
                port_forwarding.internal_ip_address,
                port_forwarding.internal_port,
                port_forwarding.external_port,
                port_forwarding.protocol,
                port_forwarding.description,
            ))

        self.network.floating_ip_port_forwardings = mock.Mock(
            return_value=self.port_forwardings
        )
        self.network.find_ip = mock.Mock(
            return_value=self.floating_ip
        )

        # Get the command object to test
        self.cmd = floating_ip_port_forwarding.ListFloatingIPPortForwarding(
            self.app,
            self.namespace
        )

    def test_port_forwarding_list(self):
        # Bare list: no filters are passed through to the SDK.
        arglist = [
            self.floating_ip.id
        ]
        verifylist = [
            ('floating_ip', self.floating_ip.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        self.network.floating_ip_port_forwardings.assert_called_once_with(
            self.floating_ip,
            **{}
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, list(data))

    def test_port_forwarding_list_all_options(self):
        # Each filter option maps onto the corresponding SDK query kwarg.
        arglist = [
            '--port', self.port_forwardings[0].internal_port_id,
            '--external-protocol-port',
            str(self.port_forwardings[0].external_port),
            '--protocol', self.port_forwardings[0].protocol,
            self.port_forwardings[0].floatingip_id,
        ]

        verifylist = [
            ('port', self.port_forwardings[0].internal_port_id),
            ('external_protocol_port',
             str(self.port_forwardings[0].external_port)),
            ('protocol', self.port_forwardings[0].protocol),
            ('floating_ip', self.port_forwardings[0].floatingip_id),
        ]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        query = {
            'internal_port_id': self.port_forwardings[0].internal_port_id,
            'external_port': str(self.port_forwardings[0].external_port),
            'protocol': self.port_forwardings[0].protocol,
        }

        self.network.floating_ip_port_forwardings.assert_called_once_with(
            self.floating_ip,
            **query
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, list(data))
class TestSetFloatingIPPortForwarding(TestFloatingIPPortForwarding):
    """Tests for ``floating ip port forwarding set``."""

    # The Port Forwarding to set.
    def setUp(self):
        super(TestSetFloatingIPPortForwarding, self).setUp()
        self._port_forwarding = (
            network_fakes.FakeFloatingIPPortForwarding.
            create_one_port_forwarding(
                attrs={
                    'floatingip_id': self.floating_ip.id,
                }
            )
        )
        self.network.update_floating_ip_port_forwarding = mock.Mock(
            return_value=None
        )
        self.network.find_floating_ip_port_forwarding = mock.Mock(
            return_value=self._port_forwarding)
        self.network.find_ip = mock.Mock(
            return_value=self.floating_ip
        )

        # Get the command object to test
        self.cmd = floating_ip_port_forwarding.SetFloatingIPPortForwarding(
            self.app,
            self.namespace
        )

    def test_set_nothing(self):
        # No options -> the update is still issued, but with an empty body.
        arglist = [
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
        ]
        verifylist = [
            ('floating_ip', self._port_forwarding.floatingip_id),
            ('port_forwarding_id', self._port_forwarding.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

        attrs = {}
        self.network.update_floating_ip_port_forwarding.assert_called_with(
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
            **attrs
        )
        self.assertIsNone(result)

    def test_set_all_thing(self):
        # Every mutable attribute maps onto the matching update kwarg.
        arglist = [
            '--port', self.port.id,
            '--internal-ip-address', 'new_internal_ip_address',
            '--internal-protocol-port', '100',
            '--external-protocol-port', '200',
            '--protocol', 'tcp',
            '--description', 'some description',
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
        ]
        verifylist = [
            ('port', self.port.id),
            ('internal_ip_address', 'new_internal_ip_address'),
            ('internal_protocol_port', 100),
            ('external_protocol_port', 200),
            ('protocol', 'tcp'),
            ('description', 'some description'),
            ('floating_ip', self._port_forwarding.floatingip_id),
            ('port_forwarding_id', self._port_forwarding.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

        attrs = {
            'internal_port_id': self.port.id,
            'internal_ip_address': 'new_internal_ip_address',
            'internal_port': 100,
            'external_port': 200,
            'protocol': 'tcp',
            'description': 'some description',
        }

        self.network.update_floating_ip_port_forwarding.assert_called_with(
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
            **attrs
        )
        self.assertIsNone(result)
class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding):
    """Tests for ``floating ip port forwarding show``."""

    # The port forwarding to show.
    columns = (
        'description',
        'external_port',
        'floatingip_id',
        'id',
        'internal_ip_address',
        'internal_port',
        'internal_port_id',
        'protocol',
    )

    def setUp(self):
        super(TestShowFloatingIPPortForwarding, self).setUp()
        self._port_forwarding = (
            network_fakes.FakeFloatingIPPortForwarding.
            create_one_port_forwarding(
                attrs={
                    'floatingip_id': self.floating_ip.id,
                }
            )
        )
        # Expected data row mirroring self.columns.
        self.data = (
            self._port_forwarding.description,
            self._port_forwarding.external_port,
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
            self._port_forwarding.internal_ip_address,
            self._port_forwarding.internal_port,
            self._port_forwarding.internal_port_id,
            self._port_forwarding.protocol,
        )
        self.network.find_floating_ip_port_forwarding = mock.Mock(
            return_value=self._port_forwarding
        )
        self.network.find_ip = mock.Mock(
            return_value=self.floating_ip
        )

        # Get the command object to test
        self.cmd = floating_ip_port_forwarding.ShowFloatingIPPortForwarding(
            self.app,
            self.namespace
        )

    def test_show_no_options(self):
        arglist = []
        verifylist = []

        # Missing required args should bail here
        self.assertRaises(tests_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_show_default_options(self):
        # Lookup must go through find_floating_ip_port_forwarding with
        # ignore_missing=False so a missing forwarding raises.
        arglist = [
            self._port_forwarding.floatingip_id,
            self._port_forwarding.id,
        ]
        verifylist = [
            ('floating_ip', self._port_forwarding.floatingip_id),
            ('port_forwarding_id', self._port_forwarding.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        self.network.find_floating_ip_port_forwarding.assert_called_once_with(
            self.floating_ip,
            self._port_forwarding.id,
            ignore_missing=False
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(list(self.data), list(data))
| openstack/python-openstackclient | openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py | Python | apache-2.0 | 17,923 |
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tracing.mre import threaded_work_queue
class ThreadedWorkQueueTests(unittest.TestCase):
  """Exercises ThreadedWorkQueue in single- and multi-threaded modes."""

  def testSingleThreaded(self):
    wq = threaded_work_queue.ThreadedWorkQueue(num_threads=1)
    self._RunSimpleDecrementingTest(wq)

  def testMultiThreaded(self):
    wq = threaded_work_queue.ThreadedWorkQueue(num_threads=4)
    self._RunSimpleDecrementingTest(wq)

  def _RunSimpleDecrementingTest(self, wq):
    # One-element list so the nested closures can mutate the counter
    # (this codebase predates ``nonlocal``).
    remaining = [10]

    def Decrement():
      remaining[0] -= 1
      if remaining[0]:
        wq.PostMainThreadTask(Done)

    def Done():
      wq.Stop(314)

    wq.PostAnyThreadTask(Decrement)
    res = wq.Run()
    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual(res, 314)
| benschmaus/catapult | tracing/tracing/mre/threaded_work_queue_unittest.py | Python | bsd-3-clause | 843 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from cinder.api.contrib import types_manage
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
    """Build a fake volume-type dict with five canned extra specs."""
    specs = {}
    for i in range(1, 6):
        specs["key%d" % i] = "value%d" % i
    return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
def return_volume_types_get_volume_type(context, id):
    # Stub for volume_types.get_volume_type: the magic id "777" simulates a
    # missing volume type; any other id echoes a canned type.
    if id == "777":
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return stub_volume_type(int(id))
def return_volume_types_destroy(context, name):
    """Stub for volume_types.destroy: name "777" simulates a missing type."""
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
def return_volume_types_create(context, name, specs):
    # Stub for volume_types.create: accept anything and do nothing.
    pass
def return_volume_types_get_by_name(context, name):
    # Stub for volume_types.get_volume_type_by_name: "777" simulates a
    # missing name; otherwise the trailing number of "vol_type_<n>" selects
    # the canned type.
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return stub_volume_type(int(name.split("_")[2]))
class VolumeTypesManageApiTest(test.TestCase):
    """Tests for the types-manage extension: create/delete of volume types."""

    def setUp(self):
        super(VolumeTypesManageApiTest, self).setUp()
        self.controller = types_manage.VolumeTypesManageController()

    def test_volume_types_delete(self):
        # Route the controller at the canned stubs so no DB is touched.
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        req = fakes.HTTPRequest.blank('/v2/fake/types/1')
        self.controller._delete(req, 1)

    def test_volume_types_delete_not_found(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        # "777" is the stubs' sentinel for a missing type -> HTTP 404.
        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete,
                          req, '777')

    def test_create(self):
        self.stubs.Set(volume_types, 'create',
                       return_volume_types_create)
        self.stubs.Set(volume_types, 'get_volume_type_by_name',
                       return_volume_types_get_by_name)

        body = {"volume_type": {"name": "vol_type_1",
                                "extra_specs": {"key1": "value1"}}}
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        res_dict = self.controller._create(req, body)

        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
class VolumeTypesUnprocessableEntityTestCase(test.TestCase):
    """Tests of places we throw 422 Unprocessable Entity from."""

    def setUp(self):
        super(VolumeTypesUnprocessableEntityTestCase, self).setUp()
        self.controller = types_manage.VolumeTypesManageController()

    def _unprocessable_volume_type_create(self, body):
        # Helper: POSTing *body* must raise HTTP 422.
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller._create, req, body)

    def test_create_no_body(self):
        self._unprocessable_volume_type_create(body=None)

    def test_create_missing_volume(self):
        # Top-level key must be "volume_type".
        body = {'foo': {'a': 'b'}}
        self._unprocessable_volume_type_create(body=body)

    def test_create_malformed_entity(self):
        # "volume_type" must map to a dict, not a bare string.
        body = {'volume_type': 'string'}
        self._unprocessable_volume_type_create(body=body)
| citrix-openstack-build/cinder | cinder/tests/api/contrib/test_types_manage.py | Python | apache-2.0 | 4,194 |
import shlex
from typing import Callable
def cmp_to_key(comparator: Callable):
class KeyCmpWrapper:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return comparator(self.obj, other.obj) < 0
def __gt__(self, other):
return comparator(self.obj, other.obj) > 0
def __eq__(self, other):
return comparator(self.obj, other.obj) == 0
def __le__(self, other):
return comparator(self.obj, other.obj) <= 0
def __ge__(self, other):
return comparator(self.obj, other.obj) >= 0
def __ne__(self, other):
return comparator(self.obj, other.obj) != 0
return KeyCmpWrapper
def get_or_default(map: dict, key, default):
if key is None:
return default
value = map.get(key)
if value is None:
return default
return value
def split_join(delimiter: str, delimit_start=True, delimit_end=True, *tokens):
"""
Splits individual tokens by a delimiter and rejoins all of them separated by delimiter.
:param delimiter: delimiter to split at and join with
:param delimit_start: add a separator to the beginning of the result
:param delimit_end: add a separator to the end of the result
:rtype: str
"""
result = ''
delim_at_end = False
if delimit_start:
result += delimiter
delim_at_end = True
for token in tokens:
for subtoken in token.split(delimiter):
if subtoken is not None and len(subtoken):
if len(result) and not delim_at_end:
result += delimiter
delim_at_end = True
result += subtoken
delim_at_end = False
if delimit_end and not delim_at_end:
result += delimiter
return result
def quote(string, quote_char) -> str:
return quote_char + string.replace(quote_char, '\\' + quote_char) + quote_char
def command_to_str(command):
return ' '.join(shlex.quote(token) for token in command)
| abacusresearch/gitflow | gitflow/utils.py | Python | mit | 2,073 |
# -*- coding: utf-8 -*-
from xmlrpclib import Fault
from django.contrib.auth.models import User
from django.test import TestCase
from tcms.xmlrpc.api import build
from tcms.xmlrpc.tests.utils import make_http_request
class AssertMessage(object):
NOT_VALIDATE_ARGS = "Missing validations for args."
NOT_VALIDATE_REQUIRED_ARGS = "Missing validations for required args."
NOT_VALIDATE_ILLEGAL_ARGS = "Missing validations for illegal args."
NOT_VALIDATE_FOREIGN_KEY = "Missing validations for foreign key."
NOT_VALIDATE_LENGTH = "Missing validations for length of value."
NOT_VALIDATE_URL_FORMAT = "Missing validations for URL format."
SHOULD_BE_400 = "Error code should be 400."
SHOULD_BE_409 = "Error code should be 409."
SHOULD_BE_500 = "Error code should be 500."
SHOULD_BE_403 = "Error code should be 403."
SHOULD_BE_401 = "Error code should be 401."
SHOULD_BE_404 = "Error code should be 404."
SHOULD_BE_501 = "Error code should be 501."
SHOULD_BE_1 = "Error code should be 1."
UNEXCEPT_ERROR = "Unexcept error occurs."
NEED_ENCODE_UTF8 = "Need to encode with utf8."
NOT_IMPLEMENT_FUNC = "Not implement yet."
XMLRPC_INTERNAL_ERROR = "xmlrpc library error."
NOT_VALIDATE_PERMS = "Missing validations for user perms."
class TestBuildCreate(TestCase):
def setUp(self):
super(TestBuildCreate, self).setUp()
self.admin = User(username='create_admin',
email='[email protected]')
self.admin.save()
self.admin_request = make_http_request(
user=self.admin,
user_perm='management.add_testbuild'
)
self.staff = User(username='create_staff',
email='[email protected]')
self.staff.save()
self.staff_request = make_http_request(
user=self.staff,
)
def tearDown(self):
super(TestBuildCreate, self).tearDown()
self.admin.delete()
self.staff.delete()
def test_build_create_with_no_args(self):
bad_args = (self.admin_request, [], (), {})
for arg in bad_args:
try:
build.create(self.admin_request, arg)
except Fault as f:
self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
def test_build_create_with_no_perms(self):
try:
build.create(self.staff_request, {})
except Fault as f:
self.assertEqual(f.faultCode, 403, AssertMessage.SHOULD_BE_403)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
def test_build_create_with_no_required_fields(self):
def _create(data):
try:
build.create(self.admin_request, data)
except Fault as f:
self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
values = {
"description": "Test Build",
"is_active": False
}
_create(values)
values["name"] = "TB"
_create(values)
del values["name"]
values["product"] = 4
_create(values)
def test_build_create_with_illegal_fields(self):
values = {
"product": 89,
"name": "B7",
"milestone": "aaaaaaaa"
}
try:
build.create(self.admin_request, values)
except Fault as f:
self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
def test_build_create_with_non_exist_product(self):
values = {
"product": 89,
"name": "B7",
"description": "Test Build",
"is_active": False
}
try:
build.create(self.admin_request, values)
except Fault as f:
self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
values['product'] = "AAAAAAAAAA"
try:
build.create(self.admin_request, values)
except Fault as f:
self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
else:
self.fail(AssertMessage.NOT_VALIDATE_ARGS)
def test_build_create_with_chinese(self):
values = {
"product": 4,
"name": "B99",
"description": "开源中国",
"is_active": False
}
try:
b = build.create(self.admin_request, values)
except Fault as f:
print f.faultString
self.fail(AssertMessage.UNEXCEPT_ERROR)
else:
self.assertIsNotNone(b)
self.assertEqual(b['product_id'], 4)
self.assertEqual(b['name'], "B99")
self.assertEqual(b['description'],
'\xe5\xbc\x80\xe6\xba\x90\xe4\xb8\xad\xe5\x9b\xbd')
self.assertEqual(b['is_active'], False)
def test_build_create(self):
values = {
"product": 4,
"name": "B7",
"description": "Test Build",
"is_active": False
}
try:
b = build.create(self.admin_request, values)
except Fault as f:
print f.faultString
self.fail(AssertMessage.UNEXCEPT_ERROR)
else:
self.assertIsNotNone(b)
self.assertEqual(b['product_id'], 4)
self.assertEqual(b['name'], "B7")
self.assertEqual(b['description'], "Test Build")
self.assertEqual(b['is_active'], False)
class TestBuildUpdate(TestCase):
    """Tests for the XML-RPC Build.update method (argument validation,
    permissions, and the happy path)."""

    def setUp(self):
        super(TestBuildUpdate, self).setUp()
        # Request bound to a user holding the change permission.
        self.admin = User(username='create_admin',
                          email='[email protected]')
        self.admin.save()
        self.admin_request = make_http_request(
            user=self.admin,
            user_perm='management.change_testbuild'
        )
        # Request bound to a user without any extra permission.
        self.staff = User(username='create_staff',
                          email='[email protected]')
        self.staff.save()
        self.staff_request = make_http_request(
            user=self.staff,
        )

    def tearDown(self):
        super(TestBuildUpdate, self).tearDown()
        self.admin.delete()
        self.staff.delete()

    def test_build_update_with_no_args(self):
        """Empty build ids and empty value dicts must be rejected with 400."""
        bad_args = (None, [], (), {})
        for arg in bad_args:
            try:
                build.update(self.admin_request, arg, {})
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)
        try:
            build.update(self.admin_request, 1, {})
        except Fault as f:
            self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_no_perms(self):
        """Updating without the change permission must yield 403."""
        try:
            build.update(self.staff_request, 1, {})
        except Fault as f:
            self.assertEqual(f.faultCode, 403, AssertMessage.SHOULD_BE_403)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_multi_id(self):
        """Passing several build ids at once must yield 400."""
        try:
            build.update(self.admin_request, (1, 2, 3), {})
        except Fault as f:
            self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_non_integer(self):
        """Non-integer build ids must yield 400."""
        bad_args = (True, False, (1,), dict(a=1), -1, 0.7, "", "AA")
        for arg in bad_args:
            try:
                build.update(self.admin_request, arg, {})
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_non_exist_build(self):
        """An unknown build id must yield 404."""
        try:
            build.update(self.admin_request, 999, {})
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_non_exist_product_id(self):
        """Moving a build to an unknown product id must yield 404."""
        try:
            build.update(self.admin_request, 1, {
                "product": 9999
            })
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update_with_non_exist_product_name(self):
        """Moving a build to an unknown product name must yield 404."""
        try:
            build.update(self.admin_request, 1, {
                "product": "AAAAAAAAAAAAAA"
            })
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_update(self):
        """Happy path: update product, name and description of build 3."""
        try:
            b = build.update(self.admin_request, 3, {
                "product": 1,
                "name": "Update",
                "description": "Update from unittest."
            })
        except Fault as f:
            # print() form is valid on both Python 2 and Python 3; the
            # original bare print statement broke Python 3 parsing.
            print(f.faultString)
            self.fail(AssertMessage.UNEXCEPT_ERROR)
        else:
            self.assertIsNotNone(b)
            self.assertEqual(b['product_id'], 1)
            self.assertEqual(b['name'], 'Update')
            self.assertEqual(b['description'], 'Update from unittest.')
class TestBuildGet(TestCase):
    """Tests for the XML-RPC Build.get method."""

    def test_build_get_with_no_args(self):
        """Empty/None build ids must be rejected with 400."""
        bad_args = (None, [], (), {})
        for arg in bad_args:
            try:
                build.get(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_integer(self):
        """Non-integer build ids must be rejected with 400."""
        bad_args = (True, False, (1,), dict(a=1), -1, 0.7, "", "AA")
        for arg in bad_args:
            try:
                build.get(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_id(self):
        """An unknown build id must yield 404."""
        try:
            build.get(None, 9999)
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_id(self):
        """Happy path: fetch build 10 and verify every returned field."""
        try:
            b = build.get(None, 10)
        except Fault as f:
            # print() works on both Python 2 and 3 (bare print did not).
            print(f.faultString)
            self.fail(AssertMessage.UNEXCEPT_ERROR)
        else:
            self.assertIsNotNone(b)
            self.assertEqual(b['build_id'], 10)
            self.assertEqual(b['name'], "B1")
            self.assertEqual(b['product_id'], 4)
            self.assertEqual(b['description'], "B1")
            self.assertEqual(b['is_active'], True)
class TestBuildGetCaseRuns(TestCase):
    """Tests for the XML-RPC Build.get_caseruns method."""

    def test_build_get_with_no_args(self):
        """Empty/None build ids must be rejected with 400."""
        bad_args = (None, [], (), {})
        for arg in bad_args:
            try:
                build.get_caseruns(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_integer(self):
        """Non-integer build ids must be rejected with 400."""
        bad_args = (True, False, (1,), dict(a=1), -1, 0.7, "", "AA")
        for arg in bad_args:
            try:
                build.get_caseruns(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_id(self):
        """An unknown build id must yield 404."""
        try:
            build.get_caseruns(None, 9999)
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_id(self):
        """Happy path: build 5 has five case runs; spot-check the first."""
        try:
            b = build.get_caseruns(None, 5)
        except Fault as f:
            # print() works on both Python 2 and 3 (bare print did not).
            print(f.faultString)
            self.fail(AssertMessage.UNEXCEPT_ERROR)
        else:
            self.assertIsNotNone(b)
            self.assertEqual(len(b), 5)
            self.assertEqual(b[0]['case'], "PVZ")
class TestBuildGetRuns(TestCase):
    """Tests for the XML-RPC Build.get_runs method."""

    def test_build_get_with_no_args(self):
        """Empty/None build ids must be rejected with 400."""
        bad_args = (None, [], (), {})
        for arg in bad_args:
            try:
                build.get_runs(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_integer(self):
        """Non-integer build ids must be rejected with 400."""
        bad_args = (True, False, (1,), dict(a=1), -1, 0.7, "", "AA")
        for arg in bad_args:
            try:
                build.get_runs(None, arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_id(self):
        """An unknown build id must yield 404."""
        try:
            build.get_runs(None, 9999)
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_id(self):
        """Happy path: build 5 has exactly one run with a known summary."""
        try:
            b = build.get_runs(None, 5)
        except Fault as f:
            # print() works on both Python 2 and 3 (bare print did not).
            print(f.faultString)
            self.fail(AssertMessage.UNEXCEPT_ERROR)
        else:
            self.assertIsNotNone(b)
            self.assertEqual(len(b), 1)
            self.assertEqual(b[0]['summary'], "Test run for StarCraft: Init "
                                              "on Unknown environment")
class TestBuildLookupID(TestCase):
    """Deliberately empty: the corresponding lookup API is deprecated."""
    pass
class TestBuildLookupName(TestCase):
    """Deliberately empty: the corresponding lookup API is deprecated."""
    pass
class TestBuildCheck(TestCase):
    """Tests for the XML-RPC Build.check_build method (name + product)."""

    def test_build_get_with_no_args(self):
        """Empty/None name or product arguments must be rejected with 400."""
        bad_args = (None, [], (), {})
        for arg in bad_args:
            try:
                build.check_build(None, arg, 4)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)
            try:
                build.check_build(None, "B5", arg)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_build_name(self):
        """An unknown build name must yield 404."""
        try:
            build.check_build(None, "AAAAAAAAAAAAAA", 4)
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_product_id(self):
        """An unknown product id must yield 404."""
        try:
            build.check_build(None, "B5", 99)
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_non_exist_product_name(self):
        """An unknown product name must yield 404."""
        try:
            build.check_build(None, "B5", "AAAAAAAAAAAAAAAA")
        except Fault as f:
            self.assertEqual(f.faultCode, 404, AssertMessage.SHOULD_BE_404)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_empty(self):
        """Empty and whitespace-only build names must yield 400."""
        try:
            build.check_build(None, "", 4)
        except Fault as f:
            self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)
        try:
            build.check_build(None, "         ", 4)
        except Fault as f:
            self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
        else:
            self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get_with_illegal_args(self):
        """Non-string build names must yield 400."""
        bad_args = (self, 0.7, False, True, 1, -1, 0, (1,), dict(a=1))
        for arg in bad_args:
            try:
                build.check_build(None, arg, 4)
            except Fault as f:
                self.assertEqual(f.faultCode, 400, AssertMessage.SHOULD_BE_400)
            else:
                self.fail(AssertMessage.NOT_VALIDATE_ARGS)

    def test_build_get(self):
        """Happy path: look up build "B5" of product 4 and verify fields."""
        try:
            b = build.check_build(None, "B5", 4)
        except Fault as f:
            # print() works on both Python 2 and 3 (bare print did not).
            print(f.faultString)
            self.fail(AssertMessage.UNEXCEPT_ERROR)
        else:
            self.assertIsNotNone(b)
            self.assertEqual(b['build_id'], 14)
            self.assertEqual(b['name'], "B5")
            self.assertEqual(b['product_id'], 4)
            self.assertEqual(b['description'], "B5")
            self.assertEqual(b['is_active'], True)
| ShaolongHu/Nitrate | tcms/xmlrpc/tests/test_testbuild.py | Python | gpl-2.0 | 17,281 |
import pprint
import sys
import os
import unittest
from DictDiffer import DictDiffer;
if os.path.exists("./jiraclient/"):
sys.path.insert(0,"./jiraclient/")
import jiraclient
pp = pprint.PrettyPrinter(depth=4,stream=sys.stdout)
class TestUnit(unittest.TestCase):
    """Checks jiraclient template expansion against known-good issue dicts.

    The client runs in dry-run mode (``nopost``), so create_issues_from_template
    only records the would-be issues in ``issues_created``.
    """

    def setUp(self):
        # Build a client driven entirely by the checked-in rc file and
        # session fixtures; nopost prevents any network traffic.
        self.c = jiraclient.Jiraclient()
        self.c.parse_args()
        self.c.options.nopost = True
        self.c.options.norcfile = False
        self.c.options.config = "./test/data/jiraclientrc-001"
        self.c.options.sessionfile = "./test/data/jira-session"
        self.c.options.loglevel = "DEBUG"
        self.c.prepare_logger()
        self.c.read_config()
        self.c.options.user = 'jirauser'
        self.c.options.password = 'jirauser'

    def testTemplate000(self):
        """A single-epic template expands to exactly one expected dict."""
        self.c.options.template = "./test/data/project-000.yaml"
        self.c.create_issues_from_template()
        desired = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'description': 'Epic description',
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '6'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 'This is an Epic'
        }
        for got in self.c.issues_created:
            diff = DictDiffer(got, desired)
            # Use a unittest assertion rather than a bare assert: bare
            # asserts are stripped when Python runs with -O and report
            # failures less clearly.
            self.assertTrue(diff.areEqual())

    def testTemplate001(self):
        """An epic plus one subtask expand in order with correct fields."""
        self.c.options.template = "./test/data/project-001.yaml"
        self.c.create_issues_from_template()
        desired_epic = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'description': 'Test Epic description',
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '6'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 'This is a test Epic'
        }
        desired_subtask = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '5'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'parent': {'key': 'NOOP'},
            'summary': 'This is test epic subtask 1',
            'description': 'This is test epic subtask 1 description'
        }
        epic = self.c.issues_created[0]
        diff = DictDiffer(epic, desired_epic)
        self.assertTrue(diff.areEqual())
        subtask = self.c.issues_created[1]
        diff = DictDiffer(subtask, desired_subtask)
        self.assertTrue(diff.areEqual())

    def testTemplate002(self):
        """Epic with named field, subtask, story and story subtask."""
        self.c.options.template = "./test/data/project-002.yaml"
        self.c.create_issues_from_template()
        desired_epic = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'customfield_10441': 'The Epic Name',
            'description': 'Epic description',
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '6'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 'This is an Epic'
        }
        desired_subtask = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '5'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 'est1 summary',
            'parent': {'key': 'NOOP'},
        }
        desired_story = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '7'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 's1 summary',
            'description': 'story s1 description',
            'timetracking': {'originalEstimate': '1h'},
        }
        desired_story_subtask = {
            'assignee': {'name': 'jirauser'},
            'components': [{'id': '10111'}],
            'customfield_10010': ['NOOP'],
            'fixVersions': [{'id': '10020'}],
            'issuetype': {'id': '5'},
            'priority': {'id': '6'},
            'project': {'id': '10001'},
            'summary': 's1 st1 summary',
            'description': 's1 st1 description',
            'timetracking': {'originalEstimate': '30m'},
            'parent': {'key': 'NOOP'},
        }
        # issues_created order: epic(0), its subtask(1), ..., story(3),
        # story subtask(4) — indices match the original test's expectations.
        epic = self.c.issues_created[0]
        diff = DictDiffer(epic, desired_epic)
        self.assertTrue(diff.areEqual())
        subtask = self.c.issues_created[1]
        diff = DictDiffer(subtask, desired_subtask)
        self.assertTrue(diff.areEqual())
        story = self.c.issues_created[3]
        diff = DictDiffer(story, desired_story)
        self.assertTrue(diff.areEqual())
        subtask = self.c.issues_created[4]
        diff = DictDiffer(subtask, desired_story_subtask)
        self.assertTrue(diff.areEqual())
def suite():
    """Return the test suite run by ``unittest.main(defaultTest="suite")``.

    ``unittest.makeSuite`` is deprecated (and removed in Python 3.13);
    ``TestLoader.loadTestsFromTestCase`` is the supported equivalent and
    uses the same default ``test`` method-name prefix.
    """
    return unittest.TestLoader().loadTestsFromTestCase(TestUnit)
# Entry point: run this module's suite() when executed directly (not on import).
if __name__ == "__main__":
    unittest.main(defaultTest="suite")
| mcallaway/jiraclient | test/testTemplate.py | Python | gpl-3.0 | 4,858 |
#!/usr/bin/python
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool inspired by mvn (http://maven.apache.org/)
and hg (http://mercurial.selenic.com/). It includes a mechanism for
managing the dependencies between a set of projects (like Maven)
as well as making it simple to run commands
(like hg is the interface to the Mercurial commands).
The organizing principle of mx is a project suite. A suite is a directory
containing one or more projects. It's not coincidental that this closely
matches the layout of one or more projects in a Mercurial repository.
The configuration information for a suite lives in an 'mx' sub-directory
at the top level of the suite. A suite is given a name by a 'suite=name'
property in the 'mx/projects' file (if omitted the name is suite directory).
An 'mx' subdirectory can be named as plain 'mx' or 'mxbasename', where
'basename' is the os.path.basename of the suite directory.
The latter is useful to avoid clashes in IDE project names.
When launched, mx treats the current working directory as a suite.
This is the primary suite. All other suites are called included suites.
The configuration files (i.e. in the 'mx' sub-directory) of a suite are:
projects
Defines the projects and libraries in the suite and the
dependencies between them.
commands.py
Suite specific extensions to the commands available to mx.
includes
Other suites to be loaded. This is recursive. Each
line in an includes file is a path to a suite directory.
env
A set of environment variable definitions. These override any
existing environment variables. Common properties set here
include JAVA_HOME and IGNORED_PROJECTS.
The includes and env files are typically not put under version control
as they usually contain local file-system paths.
The projects file is like the pom.xml file from Maven except that
it is a properties file (not XML). Each non-comment line
in the file specifies an attribute of a project or library. The main
difference between a project and a library is that the former contains
source code built by the mx tool where as the latter is an external
dependency. The format of the projects file is
Library specification format:
library@<name>@<prop>=<value>
Built-in library properties (* = required):
*path
The file system path for the library to appear on a class path.
urls
A comma separated list of URLs from which the library (jar) can
be downloaded and saved in the location specified by 'path'.
optional
If "true" then this library will be omitted from a class path
if it doesn't exist on the file system and no URLs are specified.
sourcePath
The file system path for a jar file containing the library sources.
sourceUrls
A comma separated list of URLs from which the library source jar can
be downloaded and saved in the location specified by 'sourcePath'.
Project specification format:
project@<name>@<prop>=<value>
The name of a project also denotes the directory it is in.
Built-in project properties (* = required):
subDir
The sub-directory of the suite in which the project directory is
contained. If not specified, the project directory is directly
under the suite directory.
*sourceDirs
A comma separated list of source directory names (relative to
the project directory).
dependencies
        A comma separated list of the libraries and projects that this project
depends upon (transitive dependencies should be omitted).
checkstyle
The project whose Checkstyle configuration
(i.e. <project>/.checkstyle_checks.xml) is used.
native
"true" if the project is native.
javaCompliance
The minimum JDK version (format: x.y) to which the project's
sources comply (required for non-native projects).
workingSets
A comma separated list of working set names. The project belongs
to the given working sets, for which the eclipseinit command
will generate Eclipse configurations.
Other properties can be specified for projects and libraries for use
by extension commands.
Property values can use environment variables with Bash syntax (e.g. ${HOME}).
"""
import sys, os, errno, time, subprocess, shlex, types, urllib2, contextlib, StringIO, zipfile, signal, xml.sax.saxutils, tempfile, fnmatch
import textwrap
import xml.parsers.expat
import shutil, re, xml.dom.minidom
from collections import Callable
from threading import Thread
from argparse import ArgumentParser, REMAINDER
from os.path import join, basename, dirname, exists, getmtime, isabs, expandvars, isdir, isfile
# Default JVM arguments: assertions enabled, 2MB thread stack, 1GB max heap.
DEFAULT_JAVA_ARGS = '-ea -Xss2m -Xmx1g'
# Global registries populated while suites are loaded (name -> object).
_projects = dict()
_libs = dict()
_dists = dict()
_suites = dict()
# Lazily computed list of annotation processor distributions/projects.
_annotationProcessors = None
# The primary suite (the one in the current working directory).
_mainSuite = None
# Parsed command line options; set during argument parsing.
_opts = None
# Java configuration in use; initialized outside this excerpt.
_java = None
"""
A distribution is a jar or zip file containing the output from one or more Java projects.
"""
class Distribution:
    """A jar or zip file containing the output from one or more Java projects."""

    def __init__(self, suite, name, path, deps):
        self.suite = suite
        self.name = name
        # Normalize the configured path to OS separators and anchor
        # relative paths at the suite directory.
        normalized = path.replace('/', os.sep)
        if not isabs(normalized):
            normalized = join(suite.dir, normalized)
        self.path = normalized
        self.deps = deps
        self.update_listeners = set()

    def __str__(self):
        return self.name

    def add_update_listener(self, listener):
        """Register a callable invoked with this distribution on updates."""
        self.update_listeners.add(listener)

    def notify_updated(self):
        """Invoke every registered update listener."""
        for listener in self.update_listeners:
            listener(self)
"""
A dependency is a library or project specified in a suite.
"""
class Dependency:
    """Common base for Library and Project entries declared in a suite.

    Identity is defined purely by ``name``, so dependencies can serve as
    set members and dict keys.
    """

    def __init__(self, suite, name):
        self.name = name
        self.suite = suite

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return self.name != other.name

    def __hash__(self):
        return hash(self.name)

    def isLibrary(self):
        """True when this dependency is an external Library."""
        return isinstance(self, Library)

    def isProject(self):
        """True when this dependency is a source Project."""
        return isinstance(self, Project)
class Project(Dependency):
    """A suite project: a directory of Java (or native) sources that mx builds.

    Construction eagerly creates the project directory and its source
    directories if they do not yet exist.
    """
    def __init__(self, suite, name, srcDirs, deps, javaCompliance, workingSets, d):
        Dependency.__init__(self, suite, name)
        self.srcDirs = srcDirs
        self.deps = deps
        # Defaults to this project; may be overridden via the 'checkstyle' attribute.
        self.checkstyleProj = name
        self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance is not None else None
        self.native = False
        self.workingSets = workingSets
        self.dir = d
        # Create directories for projects that don't yet exist
        if not exists(d):
            os.mkdir(d)
        for s in self.source_dirs():
            if not exists(s):
                os.mkdir(s)
    def all_deps(self, deps, includeLibs, includeSelf=True, includeAnnotationProcessors=False):
        """
        Add the transitive set of dependencies for this project, including
        libraries if 'includeLibs' is true, to the 'deps' list.
        """
        childDeps = list(self.deps)
        if includeAnnotationProcessors and len(self.annotation_processors()) > 0:
            childDeps = self.annotation_processors() + childDeps
        if self in deps:
            return deps
        for name in childDeps:
            assert name != self.name
            dep = dependency(name)
            if not dep in deps and (includeLibs or not dep.isLibrary()):
                dep.all_deps(deps, includeLibs=includeLibs, includeAnnotationProcessors=includeAnnotationProcessors)
        if not self in deps and includeSelf:
            deps.append(self)
        return deps
    def _compute_max_dep_distances(self, name, distances, dist):
        # Record the longest dependency-chain distance seen so far for 'name',
        # recursing into project dependencies (libraries have no 'deps').
        currentDist = distances.get(name);
        if currentDist is None or currentDist < dist:
            distances[name] = dist
            p = project(name, False)
            if p is not None:
                for dep in p.deps:
                    self._compute_max_dep_distances(dep, distances, dist + 1)
    def canonical_deps(self):
        """
        Get the dependencies of this project that are not recursive (i.e. cannot be reached
        via other dependencies).
        """
        distances = dict()
        result = set()
        self._compute_max_dep_distances(self.name, distances, 0)
        for n,d in distances.iteritems():
            assert d > 0 or n == self.name
            if d == 1:
                result.add(n)
        # Preserve declared order when the canonical set equals the declared deps.
        if len(result) == len(self.deps) and frozenset(self.deps) == result:
            return self.deps
        return result;
    def max_depth(self):
        """
        Get the maximum canonical distance between this project and its most distant dependency.
        """
        distances = dict()
        self._compute_max_dep_distances(self.name, distances, 0)
        return max(distances.values())
    def source_dirs(self):
        """
        Get the directories in which the sources of this project are found.
        """
        return [join(self.dir, s) for s in self.srcDirs]
    def source_gen_dir(self):
        """
        Get the directory in which source files generated by the annotation processor are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'src_gen')
    def output_dir(self):
        """
        Get the directory in which the class files of this project are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'bin')
    def jasmin_output_dir(self):
        """
        Get the directory in which the Jasmin assembled class files of this project are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'jasmin_classes')
    def append_to_classpath(self, cp, resolve):
        # Native projects contribute nothing to a Java class path.
        if not self.native:
            cp.append(self.output_dir())
    def find_classes_with_matching_source_line(self, pkgRoot, function, includeInnerClasses=False):
        """
        Scan the sources of this project for Java source files containing a line for which
        'function' returns true. A map from class name to source file path for each existing class
        corresponding to a matched source file is returned.
        """
        result = dict()
        pkgDecl = re.compile(r"^package\s+([a-zA-Z_][\w\.]*)\s*;$")
        for srcDir in self.source_dirs():
            outputDir = self.output_dir()
            for root, _, files in os.walk(srcDir):
                for name in files:
                    if name.endswith('.java') and name != 'package-info.java':
                        matchFound = False
                        source = join(root, name)
                        with open(source) as f:
                            pkg = None
                            for line in f:
                                if line.startswith("package "):
                                    match = pkgDecl.match(line)
                                    if match:
                                        pkg = match.group(1)
                                if function(line.strip()):
                                    matchFound = True
                                # Stop once both the package and a match are known.
                                if pkg and matchFound:
                                    break
                        if matchFound:
                            basename = name[:-len('.java')]
                            assert pkg is not None
                            if pkgRoot is None or pkg.startswith(pkgRoot):
                                pkgOutputDir = join(outputDir, pkg.replace('.', os.path.sep))
                                if exists(pkgOutputDir):
                                    for e in os.listdir(pkgOutputDir):
                                        if includeInnerClasses:
                                            # Inner classes share the prefix 'Outer$'.
                                            if e.endswith('.class') and (e.startswith(basename) or e.startswith(basename + '$')):
                                                className = pkg + '.' + e[:-len('.class')]
                                                result[className] = source
                                        elif e == basename + '.class':
                                            className = pkg + '.' + basename
                                            result[className] = source
        return result
    def _init_packages_and_imports(self):
        # Lazily computes the defined/extended/imported package sets; the
        # presence of '_defined_java_packages' marks the cache as filled.
        if not hasattr(self, '_defined_java_packages'):
            packages = set()
            extendedPackages = set()
            depPackages = set()
            for d in self.all_deps([], includeLibs=False, includeSelf=False):
                depPackages.update(d.defined_java_packages())
            imports = set()
            importRe = re.compile(r'import\s+(?:static\s+)?([^;]+);')
            for sourceDir in self.source_dirs():
                for root, _, files in os.walk(sourceDir):
                    javaSources = [name for name in files if name.endswith('.java')]
                    if len(javaSources) != 0:
                        # Directory path relative to the source dir == package name.
                        pkg = root[len(sourceDir) + 1:].replace(os.sep,'.')
                        if not pkg in depPackages:
                            packages.add(pkg)
                        else:
                            # A project extends a package already defined by one of it dependencies
                            extendedPackages.add(pkg)
                        imports.add(pkg)
                        for n in javaSources:
                            with open(join(root, n)) as fp:
                                content = fp.read()
                                imports.update(importRe.findall(content))
            self._defined_java_packages = frozenset(packages)
            self._extended_java_packages = frozenset(extendedPackages)
            importedPackages = set()
            for imp in imports:
                # Strip trailing components until the name matches a package
                # defined by a dependency (imports name types, not packages).
                name = imp
                while not name in depPackages and len(name) > 0:
                    lastDot = name.rfind('.')
                    if lastDot == -1:
                        name = None
                        break
                    name = name[0:lastDot]
                if name is not None:
                    importedPackages.add(name)
            self._imported_java_packages = frozenset(importedPackages)
    def defined_java_packages(self):
        """Get the immutable set of Java packages defined by the Java sources of this project"""
        self._init_packages_and_imports()
        return self._defined_java_packages
    def extended_java_packages(self):
        """Get the immutable set of Java packages extended by the Java sources of this project"""
        self._init_packages_and_imports()
        return self._extended_java_packages
    def imported_java_packages(self):
        """Get the immutable set of Java packages defined by other Java projects that are
        imported by the Java sources of this project."""
        self._init_packages_and_imports()
        return self._imported_java_packages
    def annotation_processors(self):
        """Get the list of annotation processor project names for this project (cached)."""
        if not hasattr(self, '_annotationProcessors'):
            ap = set()
            if hasattr(self, '_declaredAnnotationProcessors'):
                ap = set(self._declaredAnnotationProcessors)
            # find dependencies that auto-inject themselves as annotation processors to all dependents
            allDeps = self.all_deps([], includeLibs=False, includeSelf=False, includeAnnotationProcessors=False)
            for p in allDeps:
                if hasattr(p, 'annotationProcessorForDependents') and p.annotationProcessorForDependents.lower() == 'true':
                    ap.add(p.name)
            self._annotationProcessors = list(ap)
        return self._annotationProcessors
class Library(Dependency):
    """An external (pre-built) dependency, optionally downloadable from URLs."""
    def __init__(self, suite, name, path, mustExist, urls, sourcePath, sourceUrls):
        Dependency.__init__(self, suite, name)
        self.path = path.replace('/', os.sep)
        self.urls = urls
        self.mustExist = mustExist
        self.sourcePath = sourcePath
        self.sourceUrls = sourceUrls
        # A directory library (path ending in a separator) must pair with
        # directory URLs (ending in '/') and vice versa.
        for url in urls:
            if url.endswith('/') != self.path.endswith(os.sep):
                abort('Path for dependency directory must have a URL ending with "/": path=' + self.path + ' url=' + url)
    def __eq__(self, other):
        # Libraries with URLs compare by URL list, otherwise by path; this
        # lets multiple suites declare the same library consistently.
        if isinstance(other, Library):
            if len(self.urls) == 0:
                return self.path == other.path
            else:
                return self.urls == other.urls
        else:
            return NotImplemented
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def get_path(self, resolve):
        """Return the absolute path of the library, downloading it first when
        'resolve' is true, the library is required and not yet present."""
        path = self.path
        if not isabs(path):
            path = join(self.suite.dir, path)
        if resolve and self.mustExist and not exists(path):
            # NOTE(review): this assert is stripped under 'python -O';
            # consider an explicit abort() — confirm intended behavior.
            assert not len(self.urls) == 0, 'cannot find required library ' + self.name + ' ' + path;
            print('Downloading ' + self.name + ' from ' + str(self.urls))
            download(path, self.urls)
        return path
    def get_source_path(self, resolve):
        """Return the absolute path of the library's source jar (or None),
        downloading it when 'resolve' is true and source URLs exist."""
        path = self.sourcePath
        if path is None:
            return None
        if not isabs(path):
            path = join(self.suite.dir, path)
        if resolve and len(self.sourceUrls) != 0 and not exists(path):
            print('Downloading sources for ' + self.name + ' from ' + str(self.sourceUrls))
            download(path, self.sourceUrls)
        return path
    def append_to_classpath(self, cp, resolve):
        # Only append paths that exist (unless resolution was not requested).
        path = self.get_path(resolve)
        if exists(path) or not resolve:
            cp.append(path)
    def all_deps(self, deps, includeLibs, includeSelf=True, includeAnnotationProcessors=False):
        # A library has no transitive dependencies; it contributes only itself.
        if not includeLibs or not includeSelf:
            return deps
        deps.append(self)
        return deps
class Suite:
def __init__(self, d, mxDir, primary):
self.dir = d
self.mxDir = mxDir
self.projects = []
self.libs = []
self.dists = []
self.includes = []
self.commands = None
self.primary = primary
self._load_env(mxDir)
self._load_commands(mxDir)
self._load_includes(mxDir)
self.name = d # re-initialized in _load_projects
def __str__(self):
return self.name
def _load_projects(self, mxDir):
libsMap = dict()
projsMap = dict()
distsMap = dict()
projectsFile = join(mxDir, 'projects')
if not exists(projectsFile):
return
with open(projectsFile) as f:
for line in f:
line = line.strip()
if len(line) != 0 and line[0] != '#':
key, value = line.split('=', 1)
parts = key.split('@')
if len(parts) == 1:
if parts[0] != 'suite':
abort('Single part property must be "suite": ' + key)
self.name = value
continue
if len(parts) != 3:
abort('Property name does not have 3 parts separated by "@": ' + key)
kind, name, attr = parts
if kind == 'project':
m = projsMap
elif kind == 'library':
m = libsMap
elif kind == 'distribution':
m = distsMap
else:
abort('Property name does not start with "project@", "library@" or "distribution@": ' + key)
attrs = m.get(name)
if attrs is None:
attrs = dict()
m[name] = attrs
value = expandvars_in_property(value)
attrs[attr] = value
def pop_list(attrs, name):
v = attrs.pop(name, None)
if v is None or len(v.strip()) == 0:
return []
return [n.strip() for n in v.split(',')]
for name, attrs in projsMap.iteritems():
srcDirs = pop_list(attrs, 'sourceDirs')
deps = pop_list(attrs, 'dependencies')
ap = pop_list(attrs, 'annotationProcessors')
#deps += ap
javaCompliance = attrs.pop('javaCompliance', None)
subDir = attrs.pop('subDir', None)
if subDir is None:
d = join(self.dir, name)
else:
d = join(self.dir, subDir, name)
workingSets = attrs.pop('workingSets', None)
p = Project(self, name, srcDirs, deps, javaCompliance, workingSets, d)
p.checkstyleProj = attrs.pop('checkstyle', name)
p.native = attrs.pop('native', '') == 'true'
if not p.native and p.javaCompliance is None:
abort('javaCompliance property required for non-native project ' + name)
if len(ap) > 0:
p._declaredAnnotationProcessors = ap
p.__dict__.update(attrs)
self.projects.append(p)
for name, attrs in libsMap.iteritems():
path = attrs.pop('path')
mustExist = attrs.pop('optional', 'false') != 'true'
urls = pop_list(attrs, 'urls')
sourcePath = attrs.pop('sourcePath', None)
sourceUrls = pop_list(attrs, 'sourceUrls')
l = Library(self, name, path, mustExist, urls, sourcePath, sourceUrls)
l.__dict__.update(attrs)
self.libs.append(l)
for name, attrs in distsMap.iteritems():
path = attrs.pop('path')
deps = pop_list(attrs, 'dependencies')
d = Distribution(self, name, path, deps)
d.__dict__.update(attrs)
self.dists.append(d)
if self.name is None:
abort('Missing "suite=<name>" in ' + projectsFile)
def _load_commands(self, mxDir):
commands = join(mxDir, 'commands.py')
if exists(commands):
# temporarily extend the Python path
sys.path.insert(0, mxDir)
mod = __import__('commands')
self.commands = sys.modules.pop('commands')
sys.modules[join(mxDir, 'commands')] = self.commands
# revert the Python path
del sys.path[0]
if not hasattr(mod, 'mx_init'):
abort(commands + ' must define an mx_init(env) function')
if hasattr(mod, 'mx_post_parse_cmd_line'):
self.mx_post_parse_cmd_line = mod.mx_post_parse_cmd_line
mod.mx_init(self)
self.commands = mod
def _load_includes(self, mxDir):
    """Read the optional 'includes' file in mxDir and load each listed suite."""
    includesFile = join(mxDir, 'includes')
    if not exists(includesFile):
        return
    with open(includesFile) as fp:
        for rawLine in fp:
            suiteDir = expandvars_in_property(rawLine.strip())
            self.includes.append(suiteDir)
            _loadSuite(os.path.abspath(suiteDir), False)
def _load_env(self, mxDir):
    """Parse the optional 'env' file in mxDir into os.environ.

    Blank lines and '#' comment lines are skipped; every other line must
    have the form 'key=value' (values may reference environment variables).
    """
    e = join(mxDir, 'env')
    if not exists(e):
        return
    with open(e) as fp:
        for lineNum, rawLine in enumerate(fp, 1):
            stripped = rawLine.strip()
            if not stripped or stripped.startswith('#'):
                continue
            if '=' not in stripped:
                abort(e + ':' + str(lineNum) + ': line does not match pattern "key=value"')
            key, value = stripped.split('=', 1)
            os.environ[key.strip()] = expandvars_in_property(value.strip())
def _post_init(self, opts):
    """Register this suite's projects, libraries and distributions in the
    global registries and run the suite's command-line hook if present.

    Aborts on duplicate project/distribution names and on inconsistent
    redefinition of a library by multiple suites.
    """
    self._load_projects(self.mxDir)
    for p in self.projects:
        existing = _projects.get(p.name)
        if existing is not None:
            abort('cannot override project ' + p.name + ' in ' + p.dir + " with project of the same name in " + existing.dir)
        if not p.name in _opts.ignored_projects:
            _projects[p.name] = p
    for l in self.libs:
        existing = _libs.get(l.name)
        # Check that suites that define same library are consistent
        if existing is not None and existing != l:
            abort('inconsistent library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir)
        _libs[l.name] = l
    for d in self.dists:
        # BUG FIX: this previously read _dists.get(l.name), reusing the loop
        # variable of the library loop above — a NameError if the suite had
        # no libraries, and a wrong lookup otherwise.
        existing = _dists.get(d.name)
        if existing is not None:
            abort('cannot redefine distribution ' + d.name)
        _dists[d.name] = d
    if hasattr(self, 'mx_post_parse_cmd_line'):
        self.mx_post_parse_cmd_line(opts)
class XMLElement(xml.dom.minidom.Element):
    """A minidom Element whose serialization emits attributes in sorted order
    and (optionally) avoids padding text-only elements with whitespace."""

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write(indent+"<" + self.tagName)

        attrs = self._get_attributes()
        # sort attribute names for a deterministic serialization
        a_names = attrs.keys()
        a_names.sort()

        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            # NOTE: relies on minidom's private _write_data helper for escaping
            xml.dom.minidom._write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            if not self.ownerDocument.padTextNodeWithoutSiblings and len(self.childNodes) == 1 and isinstance(self.childNodes[0], xml.dom.minidom.Text):
                # if the only child of an Element node is a Text node, then the
                # text is printed without any indentation or new line padding
                writer.write(">")
                self.childNodes[0].writexml(writer)
                writer.write("</%s>%s" % (self.tagName,newl))
            else:
                writer.write(">%s"%(newl))
                for node in self.childNodes:
                    node.writexml(writer,indent+addindent,addindent,newl)
                writer.write("%s</%s>%s" % (indent,self.tagName,newl))
        else:
            # no children: self-closing form
            writer.write("/>%s"%(newl))
class XMLDoc(xml.dom.minidom.Document):
    """A minidom Document with a small builder-style API.

    open()/close() push and pop the element under construction
    (self.current), element() emits a complete element in one call and
    xml() serializes the whole document as UTF-8.
    """

    def __init__(self):
        xml.dom.minidom.Document.__init__(self)
        # element currently being built; the document itself at top level
        self.current = self
        # consulted by XMLElement.writexml for text-only elements
        self.padTextNodeWithoutSiblings = False

    def createElement(self, tagName):
        # overwritten to create XMLElement instances instead of plain Elements
        e = XMLElement(tagName)
        e.ownerDocument = self
        return e

    def comment(self, txt):
        """Append an XML comment to the current element."""
        self.current.appendChild(self.createComment(txt))

    def open(self, tag, attributes=None, data=None):
        """Open a new child element of the current element and make it current.

        FIX: 'attributes' previously defaulted to a shared mutable dict ({});
        None is now used as the 'no attributes' default (backward compatible —
        the dict was only ever read, never mutated).
        """
        element = self.createElement(tag)
        if attributes:
            for key, value in attributes.items():
                element.setAttribute(key, value)
        self.current.appendChild(element)
        self.current = element
        if data is not None:
            element.appendChild(self.createTextNode(data))
        return self

    def close(self, tag):
        """Close the current element; 'tag' must match its tag name."""
        assert self.current != self
        assert tag == self.current.tagName, str(tag) + ' != ' + self.current.tagName
        self.current = self.current.parentNode
        return self

    def element(self, tag, attributes=None, data=None):
        """Emit a complete element (open immediately followed by close)."""
        return self.open(tag, attributes, data).close(tag)

    def xml(self, indent='', newl='', escape=False, standalone=None):
        """Serialize the document; optionally XML-escape the result for
        embedding and/or inject a 'standalone' declaration attribute."""
        assert self.current == self
        result = self.toprettyxml(indent, newl, encoding="UTF-8")
        if escape:
            entities = { '"': "&quot;", "'": "&apos;", '\n': '&#10;' }
            result = xml.sax.saxutils.escape(result, entities)
        if standalone is not None:
            result = result.replace('encoding="UTF-8"?>', 'encoding="UTF-8" standalone="' + str(standalone) + '"?>')
        return result
def get_os():
    """
    Get a canonical form of sys.platform.
    """
    platform = sys.platform
    if platform.startswith('darwin'):
        return 'darwin'
    if platform.startswith('linux'):
        return 'linux'
    if platform.startswith('sunos'):
        return 'solaris'
    if platform.startswith(('win32', 'cygwin')):
        return 'windows'
    abort('Unknown operating system ' + sys.platform)
def _loadSuite(d, primary=False):
    """
    Load a suite from the 'mx' or 'mxbbb' subdirectory of d, where 'bbb' is basename of d.
    Returns the newly created Suite, or None if no mx directory exists or a
    suite for this directory was already loaded.
    """
    name = os.path.basename(d)
    defaultDir = join(d, 'mx')
    taggedDir = defaultDir + name
    # a name-tagged mx directory takes precedence over the plain 'mx' one
    if exists(taggedDir) and isdir(taggedDir):
        mxDir = taggedDir
    elif exists(defaultDir) and isdir(defaultDir):
        mxDir = defaultDir
    else:
        return None
    alreadyLoaded = any(s.dir == d for s in _suites.itervalues())
    if not alreadyLoaded:
        loaded = Suite(d, mxDir, primary)
        _suites[name] = loaded
        return loaded
def suites():
    """
    Get the list of all loaded suites.
    """
    return list(_suites.values())
def suite(name, fatalIfMissing=True):
    """
    Get the suite for a given name, aborting if it does not exist and
    'fatalIfMissing' is true.
    """
    result = _suites.get(name)
    if result is None and fatalIfMissing:
        abort('suite named ' + name + ' not found')
    return result
def projects():
    """
    Get the list of all loaded projects.
    """
    return list(_projects.values())
def annotation_processors():
    """
    Get the list of all loaded projects that define an annotation processor.
    The result is computed once and cached in the _annotationProcessors global.
    """
    global _annotationProcessors
    if _annotationProcessors is None:
        found = set()
        for proj in projects():
            # only keep processors that resolve to a loaded project
            found.update(ap for ap in proj.annotation_processors() if project(ap, False))
        _annotationProcessors = list(found)
    return _annotationProcessors
def distribution(name, fatalIfMissing=True):
    """
    Get the distribution for a given name. This will abort if the named distribution does
    not exist and 'fatalIfMissing' is true.
    """
    result = _dists.get(name)
    if result is None and fatalIfMissing:
        abort('distribution named ' + name + ' not found')
    return result
def dependency(name, fatalIfMissing=True):
    """
    Get the project or library for a given name. This will abort if a project or library does
    not exist for 'name' and 'fatalIfMissing' is true.
    """
    # projects take precedence over libraries of the same name
    result = _projects.get(name)
    if result is None:
        result = _libs.get(name)
    if result is None and fatalIfMissing:
        if name in _opts.ignored_projects:
            abort('project named ' + name + ' is ignored')
        abort('project or library named ' + name + ' not found')
    return result
def project(name, fatalIfMissing=True):
    """
    Get the project for a given name. This will abort if the named project does
    not exist and 'fatalIfMissing' is true.
    """
    result = _projects.get(name)
    if result is None and fatalIfMissing:
        if name in _opts.ignored_projects:
            abort('project named ' + name + ' is ignored')
        abort('project named ' + name + ' not found')
    return result
def library(name, fatalIfMissing=True):
    """
    Gets the library for a given name. This will abort if the named library does
    not exist and 'fatalIfMissing' is true.
    """
    result = _libs.get(name)
    if result is None and fatalIfMissing:
        abort('library named ' + name + ' not found')
    return result
def _as_classpath(deps, resolve):
    """Build an os.pathsep-joined class path string from 'deps', honoring the
    --cp-pfx and --cp-sfx command line options."""
    entries = []
    if _opts.cp_prefix is not None:
        entries.append(_opts.cp_prefix)
    for dep in deps:
        dep.append_to_classpath(entries, resolve)
    if _opts.cp_suffix is not None:
        entries.append(_opts.cp_suffix)
    return os.pathsep.join(entries)
def classpath(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
    """
    Get the class path for a list of given dependencies, resolving each entry in the
    path (e.g. downloading a missing library) if 'resolve' is true.
    """
    if names is None:
        cp = _as_classpath(sorted_deps(includeLibs=True), resolve)
    else:
        if isinstance(names, types.StringTypes):
            names = [names]
        deps = []
        for depName in names:
            dependency(depName).all_deps(deps, True, includeSelf)
        cp = _as_classpath(deps, resolve)
    if includeBootClasspath:
        cp = os.pathsep.join([java().bootclasspath(), cp])
    return cp
def classpath_walk(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
    """
    Walks the resources available in a given classpath, yielding a tuple for each resource
    where the first member of the tuple is a directory path or ZipFile object for a
    classpath entry and the second member is the qualified path of the resource relative
    to the classpath entry.
    """
    cp = classpath(names, resolve, includeSelf, includeBootClasspath)
    for entry in cp.split(os.pathsep):
        if not exists(entry):
            continue
        if isdir(entry):
            for root, dirnames, filenames in os.walk(entry):
                relRoot = root[len(entry) + 1:]
                # directories first, then files — matching os.walk's listing
                for name in dirnames:
                    yield entry, join(relRoot, name)
                for name in filenames:
                    yield entry, join(relRoot, name)
        elif entry.endswith('.jar') or entry.endswith('.zip'):
            with zipfile.ZipFile(entry, 'r') as zf:
                for info in zf.infolist():
                    yield zf, info.filename
def sorted_deps(projectNames=None, includeLibs=False, includeAnnotationProcessors=False):
    """
    Gets projects and libraries sorted such that dependencies
    are before the projects that depend on them. Unless 'includeLibs' is
    true, libraries are omitted from the result.
    """
    if projectNames is None:
        selected = opt_limit_to_suite(_projects.values())
    else:
        selected = [project(name) for name in projectNames]

    ordered = []
    for proj in selected:
        proj.all_deps(ordered, includeLibs=includeLibs, includeAnnotationProcessors=includeAnnotationProcessors)
    return ordered
def opt_limit_to_suite(projects):
    """Filter 'projects' down to those belonging to the suite selected with
    --suite; returns the input unchanged when no suite was selected."""
    if _opts.specific_suite is None:
        return projects
    return [p for p in projects if p.suite.name == _opts.specific_suite]
def _handle_missing_java_home():
    """Interactively determine a bootstrap JDK when JAVA_HOME is not set.

    Scans well-known per-OS install locations for candidate JDKs, lets the
    user pick one (or type a path), optionally persists the choice to the
    primary suite's mx/env file and returns the chosen path.  Aborts when
    stdout is not a terminal (no interaction possible).
    """
    if not sys.stdout.isatty():
        abort('Could not find bootstrap JDK. Use --java-home option or ensure JAVA_HOME environment variable is set.')

    candidateJdks = []
    if get_os() == 'darwin':
        base = '/Library/Java/JavaVirtualMachines'
        candidateJdks = [join(base, n, 'Contents/Home') for n in os.listdir(base) if exists(join(base, n, 'Contents/Home'))]
    elif get_os() == 'linux':
        base = '/usr/lib/jvm'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
    elif get_os() == 'solaris':
        base = '/usr/jdk/instances'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
    elif get_os() == 'windows':
        base = r'C:\Program Files\Java'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, r'jre\lib\rt.jar'))]

    javaHome = None
    if len(candidateJdks) != 0:
        # let the user pick from the discovered JDKs, or opt out
        javaHome = select_items(candidateJdks + ['<other>'], allowMultiple=False)
        if javaHome == '<other>':
            javaHome = None

    while javaHome is None:
        javaHome = raw_input('Enter path of bootstrap JDK: ')
        # validity check: a JDK must contain jre/lib/rt.jar
        rtJarPath = join(javaHome, 'jre', 'lib', 'rt.jar')
        if not exists(rtJarPath):
            log('Does not appear to be a valid JDK as ' + rtJarPath + ' does not exist')
            javaHome = None
        else:
            break

    envPath = join(_mainSuite.dir, 'mx', 'env')
    answer = raw_input('Persist this setting by adding "JAVA_HOME=' + javaHome + '" to ' + envPath + '? [Yn]: ')
    if not answer.lower().startswith('n'):
        # append (not overwrite) so other env settings are preserved
        with open(envPath, 'a') as fp:
            print >> fp, 'JAVA_HOME=' + javaHome

    return javaHome
class ArgParser(ArgumentParser):
    """Command line parser for mx's global options.

    The trailing 'commandAndArgs' remainder captures the mx command and its
    arguments, which are dispatched separately from the global options.
    """

    # Override parent to append the list of available commands
    def format_help(self):
        return ArgumentParser.format_help(self) + _format_commands()

    def __init__(self):
        self.java_initialized = False
        # this doesn't resolve the right way, but can't figure out how to override _handle_conflict_resolve in _ActionsContainer
        ArgumentParser.__init__(self, prog='mx', conflict_handler='resolve')

        self.add_argument('-v', action='store_true', dest='verbose', help='enable verbose output')
        self.add_argument('-V', action='store_true', dest='very_verbose', help='enable very verbose output')
        self.add_argument('--dbg', type=int, dest='java_dbg_port', help='make Java processes wait on <port> for a debugger', metavar='<port>')
        self.add_argument('-d', action='store_const', const=8000, dest='java_dbg_port', help='alias for "-dbg 8000"')
        self.add_argument('--cp-pfx', dest='cp_prefix', help='class path prefix', metavar='<arg>')
        self.add_argument('--cp-sfx', dest='cp_suffix', help='class path suffix', metavar='<arg>')
        self.add_argument('--J', dest='java_args', help='Java VM arguments (e.g. --J @-dsa)', metavar='@<args>', default=DEFAULT_JAVA_ARGS)
        self.add_argument('--Jp', action='append', dest='java_args_pfx', help='prefix Java VM arguments (e.g. --Jp @-dsa)', metavar='@<args>', default=[])
        self.add_argument('--Ja', action='append', dest='java_args_sfx', help='suffix Java VM arguments (e.g. --Ja @-dsa)', metavar='@<args>', default=[])
        self.add_argument('--user-home', help='users home directory', metavar='<path>', default=os.path.expanduser('~'))
        self.add_argument('--java-home', help='bootstrap JDK installation directory (must be JDK 6 or later)', metavar='<path>')
        self.add_argument('--ignore-project', action='append', dest='ignored_projects', help='name of project to ignore', metavar='<name>', default=[])
        self.add_argument('--suite', dest='specific_suite', help='limit command to given suite', default=None)
        if get_os() != 'windows':
            # Time outs are (currently) implemented with Unix specific functionality
            self.add_argument('--timeout', help='timeout (in seconds) for command', type=int, default=0, metavar='<secs>')
            self.add_argument('--ptimeout', help='timeout (in seconds) for subprocesses', type=int, default=0, metavar='<secs>')

    def _parse_cmd_line(self, args=None):
        if args is None:
            args = sys.argv[1:]

        self.add_argument('commandAndArgs', nargs=REMAINDER, metavar='command args...')

        opts = self.parse_args()

        # Give the timeout options a default value to avoid the need for hasattr() tests
        opts.__dict__.setdefault('timeout', 0)
        opts.__dict__.setdefault('ptimeout', 0)

        if opts.very_verbose:
            opts.verbose = True

        if opts.java_home is None:
            opts.java_home = os.environ.get('JAVA_HOME')

        if opts.java_home is None or opts.java_home == '':
            opts.java_home = _handle_missing_java_home()
        if opts.user_home is None or opts.user_home == '':
            abort('Could not find user home. Use --user-home option or ensure HOME environment variable is set.')

        # export the resolved locations for the benefit of subprocesses
        os.environ['JAVA_HOME'] = opts.java_home
        os.environ['HOME'] = opts.user_home

        opts.ignored_projects = opts.ignored_projects + os.environ.get('IGNORED_PROJECTS', '').split(',')

        commandAndArgs = opts.__dict__.pop('commandAndArgs')
        return opts, commandAndArgs

    def _handle_conflict_resolve(self, action, conflicting_actions):
        # force an error on conflicting option strings despite the 'resolve'
        # conflict_handler passed to the constructor
        self._handle_conflict_error(action, conflicting_actions)
def _format_commands():
    """Build the 'available commands' help text from the global commands registry."""
    lines = ['\navailable commands:\n\n']
    for cmdName in sorted(commands.iterkeys()):
        cmdFunc = commands[cmdName][0]
        # the first line of the command function's docstring is its summary
        doc = cmdFunc.__doc__
        if doc is None:
            doc = ''
        lines.append(' {0:<20} {1}\n'.format(cmdName, doc.split('\n', 1)[0]))
    return ''.join(lines) + '\n'
def java():
    """
    Get a JavaConfig object containing Java commands launch details.
    """
    # _java is initialized during command line processing; callers must not
    # invoke this before that has happened
    assert _java is not None
    return _java
def run_java(args, nonZeroIsFatal=True, out=None, err=None, cwd=None):
    # Convenience wrapper: prefix 'args' with the configured java launcher
    # and VM arguments, then delegate to run()
    return run(java().format_cmd(args), nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
def _kill_process_group(pid):
    """Send SIGKILL to the whole process group of 'pid'.

    Returns True on success, False on any failure (the failure is logged
    rather than raised since this runs on the abort/cleanup path).
    """
    try:
        # FIX: os.getpgid can itself raise (e.g. the process already exited),
        # so it must be inside the try block too; previously such a failure
        # escaped and crashed the cleanup path.
        pgid = os.getpgid(pid)
        os.killpg(pgid, signal.SIGKILL)
        return True
    except:
        log('Error killing subprocess ' + str(pid) + ': ' + str(sys.exc_info()[1]))
        return False
def _waitWithTimeout(process, args, timeout):
    """Wait for 'process' to exit, aborting mx if it runs longer than
    'timeout' seconds.  Returns a Popen-style exit code on normal exit.
    Unix-only: relies on os.waitpid/WIF* macros.
    """
    def _waitpid(pid):
        # retry waitpid when interrupted by a signal (EINTR)
        while True:
            try:
                return os.waitpid(pid, os.WNOHANG)
            except OSError, e:
                if e.errno == errno.EINTR:
                    continue
                raise

    def _returncode(status):
        # translate the raw wait status into a Popen-style return code:
        # negative signal number if killed, exit status otherwise
        if os.WIFSIGNALED(status):
            return -os.WTERMSIG(status)
        elif os.WIFEXITED(status):
            return os.WEXITSTATUS(status)
        else:
            # Should never happen
            raise RuntimeError("Unknown child exit status!")

    end = time.time() + timeout
    delay = 0.0005
    while True:
        (pid, status) = _waitpid(process.pid)
        if pid == process.pid:
            return _returncode(status)
        remaining = end - time.time()
        if remaining <= 0:
            abort('Process timed out after {0} seconds: {1}'.format(timeout, ' '.join(args)))
        # exponential poll backoff, capped at 50ms and at the remaining time
        delay = min(delay * 2, remaining, .05)
        time.sleep(delay)
# Makes the current subprocess accessible to the abort() function
# This is a tuple of the Popen object and args.
# It is set by run() while a child process is alive and reset to None in
# run()'s finally block.
_currentSubprocess = None
def waitOn(p):
    """Wait for subprocess 'p' to terminate and return its exit code.

    On Windows a poll loop is used instead of wait() so that signals
    (e.g. Ctrl-C) still get handled.
    """
    if get_os() == 'windows':
        # on windows use a poll loop, otherwise signal does not get handled
        retcode = None
        # idiom fix: compare against None with 'is', not '=='
        while retcode is None:
            retcode = p.poll()
            time.sleep(0.05)
    else:
        retcode = p.wait()
    return retcode
def run(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None):
    """
    Run a command in a subprocess, wait for it to complete and return the exit status of the process.
    If the exit status is non-zero and `nonZeroIsFatal` is true, then mx is exited with
    the same exit status.
    Each line of the standard output and error streams of the subprocess are redirected to
    out and err if they are callable objects.
    """

    assert isinstance(args, types.ListType), "'args' must be a list: " + str(args)
    for arg in args:
        assert isinstance(arg, types.StringTypes), 'argument is not a string: ' + str(arg)

    if env is None:
        env = os.environ

    if _opts.verbose:
        if _opts.very_verbose:
            log('Environment variables:')
            for key in sorted(env.keys()):
                log(' ' + key + '=' + env[key])
        log(' '.join(args))

    # fall back to the global subprocess timeout (--ptimeout) if none given
    if timeout is None and _opts.ptimeout != 0:
        timeout = _opts.ptimeout

    global _currentSubprocess

    try:
        # On Unix, the new subprocess should be in a separate group so that a timeout alarm
        # can use os.killpg() to kill the whole subprocess group
        preexec_fn = None
        creationflags = 0
        if get_os() == 'windows':
            creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
        else:
            preexec_fn = os.setsid

        if not callable(out) and not callable(err) and timeout is None:
            # simple case: inherit the parent's stdout/stderr
            # The preexec_fn=os.setsid
            p = subprocess.Popen(args, cwd=cwd, preexec_fn=preexec_fn, creationflags=creationflags, env=env)
            _currentSubprocess = (p, args)
            retcode = waitOn(p)
        else:
            # pipe the child's streams to the callable sink(s) via daemon threads
            def redirect(stream, f):
                for line in iter(stream.readline, ''):
                    f(line)
                stream.close()
            stdout=out if not callable(out) else subprocess.PIPE
            stderr=err if not callable(err) else subprocess.PIPE
            p = subprocess.Popen(args, cwd=cwd, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, creationflags=creationflags, env=env)
            _currentSubprocess = (p, args)
            if callable(out):
                t = Thread(target=redirect, args=(p.stdout, out))
                t.daemon = True # thread dies with the program
                t.start()
            if callable(err):
                t = Thread(target=redirect, args=(p.stderr, err))
                t.daemon = True # thread dies with the program
                t.start()
            if timeout is None or timeout == 0:
                retcode = waitOn(p)
            else:
                if get_os() == 'windows':
                    abort('Use of timeout not (yet) supported on Windows')
                retcode = _waitWithTimeout(p, args, timeout)
    except OSError as e:
        # typically: executable not found
        log('Error executing \'' + ' '.join(args) + '\': ' + str(e))
        if _opts.verbose:
            raise e
        abort(e.errno)
    except KeyboardInterrupt:
        abort(1)
    finally:
        _currentSubprocess = None

    if retcode and nonZeroIsFatal:
        if _opts.verbose:
            if _opts.very_verbose:
                raise subprocess.CalledProcessError(retcode, ' '.join(args))
            else:
                log('[exit code: ' + str(retcode)+ ']')
        abort(retcode)

    return retcode
def exe_suffix(name):
    """
    Gets the platform specific suffix for an executable
    """
    return name + '.exe' if get_os() == 'windows' else name
def add_lib_prefix(name):
    """
    Adds the platform specific library prefix to a name
    """
    # FIX: the local variable was previously named 'os', shadowing the
    # imported os module within this function
    osName = get_os()
    if osName in ('linux', 'solaris', 'darwin'):
        return 'lib' + name
    return name
def add_lib_suffix(name):
    """
    Adds the platform specific library suffix to a name
    """
    # FIX: the local variable was previously named 'os', shadowing the
    # imported os module within this function
    osName = get_os()
    if osName == 'windows':
        return name + '.dll'
    if osName in ('linux', 'solaris'):
        return name + '.so'
    if osName == 'darwin':
        return name + '.dylib'
    return name
"""
A JavaCompliance simplifies comparing Java compliance values extracted from a JDK version string.
"""
class JavaCompliance:
    """Represents a Java compliance level (e.g. '1.7') extracted from a JDK
    version string; instances compare by their minor version number and may
    be compared directly against version strings."""

    def __init__(self, ver):
        matched = re.match(r'1\.(\d+).*', ver)
        assert matched is not None, 'not a recognized version string: ' + ver
        self.value = int(matched.group(1))

    def __str__(self):
        return '1.' + str(self.value)

    def __cmp__(self, other):
        # allow comparison against a plain version string
        if isinstance(other, types.StringType):
            other = JavaCompliance(other)
        return cmp(self.value, other.value)
"""
A Java version as defined in JSR-56
"""
class JavaVersion:
    """A Java version as defined in JSR-56; versions compare part-by-part
    (numeric parts as integers, others as strings)."""

    def __init__(self, versionString):
        validChar = '[\x21-\x25\x27-\x29\x2c\x2f-\x5e\x60-\x7f]'
        separator = '[.\-_]'
        matched = re.match(validChar + '+(' + separator + validChar + '+)*', versionString)
        assert matched is not None, 'not a recognized version string: ' + versionString
        self.versionString = versionString
        # split on '.', '-' and '_'; numeric components become ints
        self.parts = [int(part) if part.isdigit() else part for part in re.split(separator, versionString)]

    def __str__(self):
        return self.versionString

    def __cmp__(self, other):
        return cmp(self.parts, other.parts)
"""
A JavaConfig object encapsulates info on how Java commands are run.
"""
class JavaConfig:
    """Encapsulates the JDK tool paths and VM arguments used to run Java
    commands, derived from the parsed mx options."""

    def __init__(self, opts):
        self.jdk = opts.java_home
        self.debug_port = opts.java_dbg_port
        # paths of the standard JDK tools
        self.jar = exe_suffix(join(self.jdk, 'bin', 'jar'))
        self.java = exe_suffix(join(self.jdk, 'bin', 'java'))
        self.javac = exe_suffix(join(self.jdk, 'bin', 'javac'))
        self.javap = exe_suffix(join(self.jdk, 'bin', 'javap'))
        self.javadoc = exe_suffix(join(self.jdk, 'bin', 'javadoc'))
        # lazily computed by bootclasspath()
        self._bootclasspath = None

        if not exists(self.java):
            abort('Java launcher derived from JAVA_HOME does not exist: ' + self.java)

        def delAtAndSplit(s):
            # strip the leading '@' used by --J/--Jp/--Ja and tokenize
            return shlex.split(s.lstrip('@'))

        self.java_args = delAtAndSplit(_opts.java_args)
        self.java_args_pfx = sum(map(delAtAndSplit, _opts.java_args_pfx), [])
        self.java_args_sfx = sum(map(delAtAndSplit, _opts.java_args_sfx), [])

        # Prepend the -d64 VM option only if the java command supports it
        try:
            output = subprocess.check_output([self.java, '-d64', '-version'], stderr=subprocess.STDOUT)
            self.java_args = ['-d64'] + self.java_args
        except subprocess.CalledProcessError as e:
            # -d64 unsupported: retry without it to get the version banner
            try:
                output = subprocess.check_output([self.java, '-version'], stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                print e.output
                abort(e.returncode)

        # parse 'java version "..."' from the banner
        output = output.split()
        assert output[1] == 'version'
        self.version = JavaVersion(output[2].strip('"'))
        self.javaCompliance = JavaCompliance(self.version.versionString)

        if self.debug_port is not None:
            self.java_args += ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(self.debug_port)]

    def format_cmd(self, args):
        # full command line: launcher + prefix args + VM args + suffix args + user args
        return [self.java] + self.java_args_pfx + self.java_args + self.java_args_sfx + args

    def bootclasspath(self):
        """Get the JDK boot class path by compiling and running a tiny Java
        program that prints the 'sun.boot.class.path' system property.
        The result is cached after the first call."""
        if self._bootclasspath is None:
            tmpDir = tempfile.mkdtemp()
            try:
                src = join(tmpDir, 'bootclasspath.java')
                with open(src, 'w') as fp:
                    print >> fp, """
public class bootclasspath {
    public static void main(String[] args) {
        String s = System.getProperty("sun.boot.class.path");
        if (s != null) {
            System.out.println(s);
        }
    }
}"""
                subprocess.check_call([self.javac, '-d', tmpDir, src])
                self._bootclasspath = subprocess.check_output([self.java, '-cp', tmpDir, 'bootclasspath'])
            finally:
                shutil.rmtree(tmpDir)
        return self._bootclasspath
def check_get_env(key):
    """
    Gets an environment variable, aborting with a useful message if it is not set.
    """
    result = get_env(key)
    if result is not None:
        return result
    abort('Required environment variable ' + key + ' must be set')
def get_env(key, default=None):
    """
    Gets an environment variable, returning 'default' when it is not set.
    """
    return os.environ.get(key, default)
def logv(msg=None):
    # Verbose-only logging: forwards to log() only when -v was given.
    if _opts.verbose:
        log(msg)
def log(msg=None):
    """
    Write a message to the console.
    All script output goes through this method thus allowing a subclass
    to redirect it.
    """
    if msg is None:
        # bare log() emits just a newline
        print
    else:
        print msg
def expand_project_in_class_path_arg(cpArg):
    """Expand each '@project' entry in an os.pathsep separated class path
    argument into that project's full class path."""
    expanded = []
    for entry in cpArg.split(os.pathsep):
        if entry.startswith('@'):
            expanded.extend(classpath(entry[1:]).split(os.pathsep))
        else:
            expanded.append(entry)
    return os.pathsep.join(expanded)
def expand_project_in_args(args):
    """Expand '@project' entries in the argument following the first
    -cp/-classpath option in 'args' (modified in place); only the first
    such option is processed."""
    for idx, arg in enumerate(args):
        if arg in ('-cp', '-classpath'):
            if idx + 1 < len(args):
                args[idx + 1] = expand_project_in_class_path_arg(args[idx + 1])
            return
def gmake_cmd():
    # Probe candidate make executables and return the first that reports
    # being GNU make; aborts if none is found.
    for a in ['make', 'gmake', 'gnumake']:
        try:
            output = subprocess.check_output([a, '--version'])
            if 'GNU' in output:
                return a;
        except:
            # executable missing or exited non-zero — try the next candidate
            pass
    abort('Could not find a GNU make executable on the current path.')
def expandvars_in_property(value):
    """Expand environment variable references in 'value', aborting if any
    reference remains unresolved afterwards."""
    expanded = expandvars(value)
    if '$' in expanded or '%' in expanded:
        abort('Property contains an undefined environment variable: ' + value)
    return expanded
def abort(codeOrMessage):
    """
    Aborts the program with a SystemExit exception.
    If 'codeOrMessage' is a plain integer, it specifies the system exit status;
    if it is None, the exit status is zero; if it has another type (such as a string),
    the object's value is printed and the exit status is one.
    """

    #import traceback
    #traceback.print_stack()
    # Kill any currently running subprocess first so it does not outlive mx
    currentSubprocess = _currentSubprocess
    if currentSubprocess is not None:
        p, _ = currentSubprocess
        if get_os() == 'windows':
            p.kill()
        else:
            # on Unix the child runs in its own process group (see run());
            # kill the whole group
            _kill_process_group(p.pid)

    raise SystemExit(codeOrMessage)
def download(path, urls, verbose=False):
    """
    Attempts to downloads content for each URL in a list, stopping after the first successful download.
    If the content cannot be retrieved from any URL, the program is aborted. The downloaded content
    is written to the file indicated by 'path'.

    A 'zip:' or 'jar:' URL of the form 'zip:http://...!/entry' extracts a
    single entry from the downloaded archive.  If 'path' ends with the path
    separator it is treated as a directory and relative links scraped from
    the URL's listing are downloaded recursively.
    """
    d = dirname(path)
    if d != '' and not exists(d):
        os.makedirs(d)

    # Try it with the Java tool first since it can show a progress counter
    myDir = dirname(__file__)
    if not path.endswith(os.sep):
        javaSource = join(myDir, 'URLConnectionDownload.java')
        javaClass = join(myDir, 'URLConnectionDownload.class')
        if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
            subprocess.check_call([java().javac, '-d', myDir, javaSource])
        if run([java().java, '-cp', myDir, 'URLConnectionDownload', path] + urls, nonZeroIsFatal=False) == 0:
            return

    def url_open(url):
        # some servers reject requests without a User-Agent header
        userAgent = 'Mozilla/5.0 (compatible)'
        headers = { 'User-Agent' : userAgent }
        req = urllib2.Request(url, headers=headers)
        return urllib2.urlopen(req)

    for url in urls:
        try:
            if (verbose):
                log('Downloading ' + url + ' to ' + path)
            if url.startswith('zip:') or url.startswith('jar:'):
                i = url.find('!/')
                if i == -1:
                    abort('Zip or jar URL does not contain "!/": ' + url)
                url, _, entry = url[len('zip:'):].partition('!/')
                with contextlib.closing(url_open(url)) as f:
                    data = f.read()
                # BUG FIX: this previously wrapped a second f.read() which
                # returns an empty string (the stream was already exhausted
                # above), so every zip:/jar: download failed with BadZipfile.
                # Reuse the bytes already read instead.
                zipdata = StringIO.StringIO(data)
                zf = zipfile.ZipFile(zipdata, 'r')
                data = zf.read(entry)
                with open(path, 'wb') as f:
                    f.write(data)
            else:
                with contextlib.closing(url_open(url)) as f:
                    data = f.read()
                if path.endswith(os.sep):
                    # Scrape directory listing for relative URLs
                    hrefs = re.findall(r' href="([^"]*)"', data)
                    if len(hrefs) != 0:
                        for href in hrefs:
                            if not '/' in href:
                                download(join(path, href), [url + href], verbose)
                    else:
                        log('no locals hrefs scraped from ' + url)
                else:
                    with open(path, 'wb') as f:
                        f.write(data)
            return
        except IOError as e:
            log('Error reading from ' + url + ': ' + str(e))
        except zipfile.BadZipfile as e:
            log('Error in zip file downloaded from ' + url + ': ' + str(e))

    abort('Could not download to ' + path + ' from any of the following URLs:\n\n ' +
          '\n '.join(urls) + '\n\nPlease use a web browser to do the download manually')
def update_file(path, content):
    """
    Updates a file with some given content if the content differs from what's in
    the file already. The return value indicates if the file was updated.
    """
    existed = exists(path)
    try:
        if existed:
            with open(path, 'rb') as fp:
                if fp.read() == content:
                    # nothing to do — the file already has this content
                    return False
        with open(path, 'wb') as fp:
            fp.write(content)
        log(('modified ' if existed else 'created ') + path)
        return True
    except IOError as e:
        abort('Error while writing to ' + path + ': ' + str(e));
# Builtin commands
def build(args, parser=None):
    """compile the Java and C sources, linking the latter

    Compile all the Java source code using the appropriate compilers
    and linkers for the various source code types."""

    suppliedParser = parser is not None
    if not suppliedParser:
        parser = ArgumentParser(prog='mx build')

    javaCompliance = java().javaCompliance

    defaultEcjPath = join(_mainSuite.dir, 'mx', 'ecj.jar')

    # NOTE(review): redundant — parser was already defaulted above
    parser = parser if parser is not None else ArgumentParser(prog='mx build')
    parser.add_argument('-f', action='store_true', dest='force', help='force build (disables timestamp checking)')
    parser.add_argument('-c', action='store_true', dest='clean', help='removes existing build output')
    parser.add_argument('--source', dest='compliance', help='Java compliance level for projects without an explicit one', default=str(javaCompliance))
    parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
    parser.add_argument('--projects', action='store', help='comma separated projects to build (omit to build all projects)')
    parser.add_argument('--only', action='store', help='comma separated projects to build, without checking their dependencies (omit to build all projects)')
    parser.add_argument('--no-java', action='store_false', dest='java', help='do not build Java projects')
    parser.add_argument('--no-native', action='store_false', dest='native', help='do not build native projects')
    parser.add_argument('--jdt', help='path to ecj.jar, the Eclipse batch compiler (default: ' + defaultEcjPath + ')', default=defaultEcjPath, metavar='<path>')
    parser.add_argument('--jdt-warning-as-error', action='store_true', help='convert all Eclipse batch compiler warnings to errors')

    if suppliedParser:
        parser.add_argument('remainder', nargs=REMAINDER, metavar='...')

    args = parser.parse_args(args)

    jdtJar = None
    if args.jdt is not None:
        if args.jdt.endswith('.jar'):
            jdtJar=args.jdt
            if not exists(jdtJar) and os.path.abspath(jdtJar) == os.path.abspath(defaultEcjPath):
                # Silently ignore JDT if default location is used but not ecj.jar exists there
                jdtJar = None

    # names of projects that were (re)built in this invocation
    built = set()

    projects = None
    if args.projects is not None:
        projects = args.projects.split(',')

    if args.only is not None:
        # --only bypasses dependency-ordered selection entirely
        sortedProjects = [project(name) for name in args.only.split(',')]
    else:
        sortedProjects = sorted_deps(projects, includeAnnotationProcessors=True)

    for p in sortedProjects:
        if p.native:
            if args.native:
                # native projects delegate entirely to GNU make
                log('Calling GNU make {0}...'.format(p.dir))
                if args.clean:
                    run([gmake_cmd(), 'clean'], cwd=p.dir)
                run([gmake_cmd()], cwd=p.dir)
                built.add(p.name)
            continue
        else:
            if not args.java:
                continue
            if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
                continue

        # skip building this Java project if its Java compliance level is "higher" than the configured JDK
        if javaCompliance < p.javaCompliance:
            log('Excluding {0} from build (Java compliance level {1} required)'.format(p.name, p.javaCompliance))
            continue

        outputDir = p.output_dir()
        if exists(outputDir):
            if args.clean:
                log('Cleaning {0}...'.format(outputDir))
                shutil.rmtree(outputDir)
                os.mkdir(outputDir)
        else:
            os.mkdir(outputDir)

        cp = classpath(p.name, includeSelf=True)
        sourceDirs = p.source_dirs()
        mustBuild = args.force
        if not mustBuild:
            # rebuild if any dependency was rebuilt in this invocation
            for dep in p.all_deps([], False):
                if dep.name in built:
                    mustBuild = True

        # tri-state: None = not probed yet, then True/False once checked
        jasminAvailable = None
        javafilelist = []
        for sourceDir in sourceDirs:
            for root, _, files in os.walk(sourceDir):
                javafiles = [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
                javafilelist += javafiles

                # Copy all non Java resources or assemble Jasmin files
                nonjavafilelist = [join(root, name) for name in files if not name.endswith('.java')]
                for src in nonjavafilelist:
                    if src.endswith('.jasm'):
                        # derive the output class file name from the '.class'
                        # directive inside the Jasmin source
                        className = None
                        with open(src) as f:
                            for line in f:
                                if line.startswith('.class '):
                                    className = line.split()[-1]
                                    break

                        if className is not None:
                            jasminOutputDir = p.jasmin_output_dir()
                            classFile = join(jasminOutputDir, className.replace('/', os.sep) + '.class')
                            if exists(dirname(classFile)) and (not exists(classFile) or os.path.getmtime(classFile) < os.path.getmtime(src)):
                                if jasminAvailable is None:
                                    # probe for the jasmin executable once
                                    try:
                                        with open(os.devnull) as devnull:
                                            subprocess.call('jasmin', stdout=devnull, stderr=subprocess.STDOUT)
                                        jasminAvailable = True
                                    except OSError:
                                        jasminAvailable = False

                                if jasminAvailable:
                                    log('Assembling Jasmin file ' + src)
                                    run(['jasmin', '-d', jasminOutputDir, src])
                                else:
                                    # touch the class file so this source is
                                    # not re-attempted on every build
                                    log('The jasmin executable could not be found - skipping ' + src)
                                    with file(classFile, 'a'):
                                        os.utime(classFile, None)
                        else:
                            log('could not file .class directive in Jasmin source: ' + src)
                    else:
                        # plain resource: copy into the output dir if newer
                        dst = join(outputDir, src[len(sourceDir) + 1:])
                        if not exists(dirname(dst)):
                            os.makedirs(dirname(dst))
                        if exists(dirname(dst)) and (not exists(dst) or os.path.getmtime(dst) < os.path.getmtime(src)):
                            shutil.copyfile(src, dst)

                if not mustBuild:
                    # rebuild if any source is newer than its class file
                    for javafile in javafiles:
                        classfile = outputDir + javafile[len(sourceDir):-len('java')] + 'class'
                        if not exists(classfile) or os.path.getmtime(javafile) > os.path.getmtime(classfile):
                            mustBuild = True
                            break

        if not mustBuild:
            logv('[all class files for {0} are up to date - skipping]'.format(p.name))
            continue

        if len(javafilelist) == 0:
            logv('[no Java sources for {0} - skipping]'.format(p.name))
            continue

        built.add(p.name)

        # write the list of sources to an @-argfile for the compiler
        argfileName = join(p.dir, 'javafilelist.txt')
        argfile = open(argfileName, 'wb')
        argfile.write('\n'.join(javafilelist))
        argfile.close()

        processorArgs = []

        ap = p.annotation_processors()
        if len(ap) > 0:
            processorPath = classpath(ap, resolve=True)
            genDir = p.source_gen_dir();
            if exists(genDir):
                shutil.rmtree(genDir)
            os.mkdir(genDir)
            processorArgs += ['-processorpath', join(processorPath), '-s', genDir]
        else:
            processorArgs += ['-proc:none']

        toBeDeleted = [argfileName]
        try:
            compliance = str(p.javaCompliance) if p.javaCompliance is not None else args.compliance
            if jdtJar is None:
                # compile with the standard javac
                log('Compiling Java sources for {0} with javac...'.format(p.name))
                javacCmd = [java().javac, '-g', '-J-Xmx1g', '-source', compliance, '-classpath', cp, '-d', outputDir]
                if java().debug_port is not None:
                    javacCmd += ['-J-Xdebug', '-J-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(java().debug_port)]
                javacCmd += processorArgs
                javacCmd += ['@' + argfile.name]

                if not args.warnAPI:
                    javacCmd.append('-XDignore.symbol.file')
                run(javacCmd)
            else:
                # compile with the Eclipse batch compiler (JDT)
                log('Compiling Java sources for {0} with JDT...'.format(p.name))
                jdtArgs = [java().java, '-Xmx1g']
                if java().debug_port is not None:
                    jdtArgs += ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(java().debug_port)]
                jdtArgs += [ '-jar', jdtJar,
                             '-' + compliance,
                             '-cp', cp, '-g', '-enableJavadoc',
                             '-d', outputDir]
                jdtArgs += processorArgs

                jdtProperties = join(p.dir, '.settings', 'org.eclipse.jdt.core.prefs')
                rootJdtProperties = join(p.suite.dir, 'mx', 'eclipse-settings', 'org.eclipse.jdt.core.prefs')
                if not exists(jdtProperties) or os.path.getmtime(jdtProperties) < os.path.getmtime(rootJdtProperties):
                    # Try to fix a missing properties file by running eclipseinit
                    eclipseinit([], buildProcessorJars=False)
                if not exists(jdtProperties):
                    log('JDT properties file {0} not found'.format(jdtProperties))
                else:
                    # convert all warnings to errors
                    if args.jdt_warning_as_error:
                        jdtPropertiesTmp = jdtProperties + '.tmp'
                        with open(jdtProperties) as fp:
                            content = fp.read().replace('=warning', '=error')
                        with open(jdtPropertiesTmp, 'w') as fp:
                            fp.write(content)
                        toBeDeleted.append(jdtPropertiesTmp)
                        jdtArgs += ['-properties', jdtPropertiesTmp]
                    else:
                        jdtArgs += ['-properties', jdtProperties]
                jdtArgs.append('@' + argfile.name)

                run(jdtArgs)
        finally:
            # clean up the argfile and any temporary properties file
            for n in toBeDeleted:
                os.remove(n)

    # re-archive all distributions now that their projects are built
    for dist in _dists.values():
        archive(['@' + dist.name])

    if suppliedParser:
        return args
    return None
def eclipseformat(args):
    """run the Eclipse Code Formatter on the Java sources

    The exit code 1 denotes that at least one file was modified."""
    parser = ArgumentParser(prog='mx eclipseformat')
    parser.add_argument('-e', '--eclipse-exe', help='location of the Eclipse executable')
    parser.add_argument('-C', '--no-backup', action='store_false', dest='backup', help='do not save backup of modified files')
    parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')

    args = parser.parse_args(args)
    if args.eclipse_exe is None:
        args.eclipse_exe = os.environ.get('ECLIPSE_EXE')
    if args.eclipse_exe is None:
        abort('Could not find Eclipse executable. Use -e option or ensure ECLIPSE_EXE environment variable is set.')

    # Maybe an Eclipse installation dir was specified - look for the executable in it.
    # Fix: the previous test was 'if join(args.eclipse_exe, exe_suffix('eclipse')):'
    # which is always true (non-empty string), so a path that already named the
    # executable itself was mangled and then rejected by the isfile check below.
    if isdir(args.eclipse_exe):
        args.eclipse_exe = join(args.eclipse_exe, exe_suffix('eclipse'))

    if not os.path.isfile(args.eclipse_exe) or not os.access(args.eclipse_exe, os.X_OK):
        abort('Not an executable file: ' + args.eclipse_exe)

    eclipseinit([], buildProcessorJars=False)

    # build list of projects to be processed
    projects = sorted_deps()
    if args.projects is not None:
        projects = [project(name) for name in args.projects.split(',')]

    class Batch:
        # Groups Java files that share one Eclipse formatter settings file so
        # they can be formatted in a single Eclipse invocation.
        def __init__(self, settingsFile):
            self.path = settingsFile
            self.javafiles = list()

        def settings(self):
            # The raw settings text is the grouping key (see batches dict below).
            with open(self.path) as fp:
                return fp.read()

    class FileInfo:
        # Snapshot of a file's content and (atime, mtime), used to detect
        # whether the formatter changed it.
        def __init__(self, path):
            self.path = path
            with open(path) as fp:
                self.content = fp.read()
            self.times = (os.path.getatime(path), os.path.getmtime(path))

        def update(self):
            # Returns True if the file changed on disk since the snapshot;
            # otherwise restores the original timestamps (and returns None).
            with open(self.path) as fp:
                content = fp.read()
                if self.content != content:
                    self.content = content
                    return True
            os.utime(self.path, self.times)

    modified = list()
    batches = dict()  # all sources with the same formatting settings are formatted together
    for p in projects:
        if p.native:
            continue
        sourceDirs = p.source_dirs()

        batch = Batch(join(p.dir, '.settings', 'org.eclipse.jdt.core.prefs'))

        if not exists(batch.path):
            if _opts.verbose:
                log('[no Eclipse Code Formatter preferences at {0} - skipping]'.format(batch.path))
            continue

        for sourceDir in sourceDirs:
            for root, _, files in os.walk(sourceDir):
                for f in [join(root, name) for name in files if name.endswith('.java')]:
                    batch.javafiles.append(FileInfo(f))
        if len(batch.javafiles) == 0:
            logv('[no Java sources in {0} - skipping]'.format(p.name))
            continue

        res = batches.setdefault(batch.settings(), batch)
        if res is not batch:
            res.javafiles = res.javafiles + batch.javafiles

    for batch in batches.itervalues():
        run([args.eclipse_exe, '-nosplash', '-application', 'org.eclipse.jdt.core.JavaCodeFormatter', '-config', batch.path] + [f.path for f in batch.javafiles])
        for fi in batch.javafiles:
            if fi.update():
                modified.append(fi)

    log('{0} files were modified'.format(len(modified)))
    if len(modified) != 0:
        if args.backup:
            backup = os.path.abspath('eclipseformat.backup.zip')
            arcbase = _mainSuite.dir
            zf = zipfile.ZipFile(backup, 'w', zipfile.ZIP_DEFLATED)
            for fi in modified:
                arcname = os.path.relpath(fi.path, arcbase).replace(os.sep, '/')
                zf.writestr(arcname, fi.content)
            zf.close()
            log('Wrote backup of {0} modified files to {1}'.format(len(modified), backup))
        return 1
    return 0
def processorjars():
    """Build and archive every project that is part of an annotation processor."""
    projs = set()
    for p in sorted_deps():
        if _isAnnotationProcessorDependency(p):
            projs.add(p)

    # Nothing to do when no project is an annotation processor dependency.
    # Fix: the previous test was 'len(projs) < 0', which is never true
    # (len() is non-negative), so the early return never fired.
    if len(projs) == 0:
        return

    pnames = [p.name for p in projs]
    build(['--projects', ",".join(pnames)])
    archive(pnames)
def archive(args):
    """create jar files for projects and distributions

    A plain name argument jars up a single project's output directory.
    A '@<name>' argument denotes a distribution: all of its projects and
    libraries are merged into one jar, with META-INF/services entries from
    all inputs combined."""
    parser = ArgumentParser(prog='mx archive');
    parser.add_argument('names', nargs=REMAINDER, metavar='[<project>|@<distribution>]...')
    args = parser.parse_args(args)

    for name in args.names:
        if name.startswith('@'):
            dname = name[1:]
            d = distribution(dname)
            # Build into a temporary file/dir next to the target so the final
            # rename can be atomic (on Unix).
            fd, tmp = tempfile.mkstemp(suffix='', prefix=basename(d.path) + '.', dir=dirname(d.path))
            # 'services' accumulates META-INF/services registrations from all
            # inputs; they are written into the jar once, at the end.
            services = tempfile.mkdtemp(suffix='', prefix=basename(d.path) + '.', dir=dirname(d.path))

            def overwriteCheck(zf, arcname, source):
                # Warn when two inputs contribute the same archive entry.
                if arcname in zf.namelist():
                    log('warning: ' + d.path + ': overwriting ' + arcname + ' [source: ' + source + ']')

            try:
                zf = zipfile.ZipFile(tmp, 'w')
                for dep in sorted_deps(d.deps, includeLibs=True):
                    if dep.isLibrary():
                        l = dep
                        # merge library jar into distribution jar
                        logv('[' + d.path + ': adding library ' + l.name + ']')
                        lpath = l.get_path(resolve=True)
                        with zipfile.ZipFile(lpath, 'r') as lp:
                            for arcname in lp.namelist():
                                if arcname.startswith('META-INF/services/'):
                                    # Defer service registrations to the merge step.
                                    f = arcname[len('META-INF/services/'):].replace('/', os.sep)
                                    with open(join(services, f), 'a') as outfile:
                                        for line in lp.read(arcname).splitlines():
                                            outfile.write(line)
                                else:
                                    overwriteCheck(zf, arcname, lpath + '!' + arcname)
                                    zf.writestr(arcname, lp.read(arcname))
                    else:
                        p = dep
                        # skip a Java project if its Java compliance level is "higher" than the configured JDK
                        if java().javaCompliance < p.javaCompliance:
                            log('Excluding {0} from {2} (Java compliance level {1} required)'.format(p.name, p.javaCompliance, d.path))
                            continue

                        logv('[' + d.path + ': adding project ' + p.name + ']')
                        outputDir = p.output_dir()
                        for root, _, files in os.walk(outputDir):
                            relpath = root[len(outputDir) + 1:]
                            if relpath == join('META-INF', 'services'):
                                # Defer service registrations to the merge step.
                                for f in files:
                                    with open(join(services, f), 'a') as outfile:
                                        with open(join(root, f), 'r') as infile:
                                            for line in infile:
                                                outfile.write(line)
                            elif relpath == join('META-INF', 'providers'):
                                # A 'providers' file is named after a provider class and
                                # lists the services it provides, one per line.
                                for f in files:
                                    with open(join(root, f), 'r') as infile:
                                        for line in infile:
                                            with open(join(services, line.strip()), 'a') as outfile:
                                                outfile.write(f + '\n')
                            else:
                                for f in files:
                                    arcname = join(relpath, f).replace(os.sep, '/')
                                    overwriteCheck(zf, arcname, join(root, f))
                                    zf.write(join(root, f), arcname)
                # Write the merged service registrations.
                for f in os.listdir(services):
                    arcname = join('META-INF', 'services', f).replace(os.sep, '/')
                    zf.write(join(services, f), arcname)
                zf.close()
                os.close(fd)
                shutil.rmtree(services)
                # Atomic on Unix
                shutil.move(tmp, d.path)
                #print time.time(), 'move:', tmp, '->', d.path
                d.notify_updated()
            finally:
                # On any failure, remove whatever temporaries still exist.
                if exists(tmp):
                    os.remove(tmp)
                if exists(services):
                    shutil.rmtree(services)
        else:
            # Plain project: jar up its output directory.
            p = project(name)
            outputDir = p.output_dir()
            fd, tmp = tempfile.mkstemp(suffix='', prefix=p.name, dir=p.dir)
            try:
                zf = zipfile.ZipFile(tmp, 'w')
                for root, _, files in os.walk(outputDir):
                    for f in files:
                        relpath = root[len(outputDir) + 1:]
                        arcname = join(relpath, f).replace(os.sep, '/')
                        zf.write(join(root, f), arcname)
                zf.close()
                os.close(fd)
                # Atomic on Unix
                shutil.move(tmp, join(p.dir, p.name + '.jar'))
            finally:
                if exists(tmp):
                    os.remove(tmp)
def canonicalizeprojects(args):
    """process all project files to canonicalize the dependencies

    The exit code of this command reflects how many files were updated."""
    changedFiles = 0
    for s in suites():
        projectsFile = join(s.dir, 'mx', 'projects')
        if not exists(projectsFile):
            continue
        with open(projectsFile) as f:
            out = StringIO.StringIO()
            pattern = re.compile('project@([^@]+)@dependencies=.*')
            lineNo = 1
            for line in f:
                line = line.strip()
                m = pattern.match(line)
                if m is None:
                    # Not a dependencies line - pass it through unchanged.
                    out.write(line + '\n')
                else:
                    p = project(m.group(1))

                    # Enforce the convention that packages are prefixed by the project name.
                    for pkg in p.defined_java_packages():
                        if not pkg.startswith(p.name):
                            abort('package in {0} does not have prefix matching project name: {1}'.format(p, pkg))

                    # A declared project dependency is "ignored" if none of p's
                    # imported packages come from it (directly or transitively).
                    ignoredDeps = set([name for name in p.deps if project(name, False) is not None])
                    for pkg in p.imported_java_packages():
                        for name in p.deps:
                            dep = project(name, False)
                            if dep is None:
                                ignoredDeps.discard(name)
                            else:
                                if pkg in dep.defined_java_packages():
                                    ignoredDeps.discard(name)
                                if pkg in dep.extended_java_packages():
                                    ignoredDeps.discard(name)
                    if len(ignoredDeps) != 0:
                        candidates = set();
                        # Compute dependencies based on projects required by p
                        for d in sorted_deps():
                            if not d.defined_java_packages().isdisjoint(p.imported_java_packages()):
                                candidates.add(d)
                        # Remove non-canonical candidates
                        for c in list(candidates):
                            candidates.difference_update(c.all_deps([], False, False))
                        candidates = [d.name for d in candidates]

                        abort('{0}:{1}: {2} does not use any packages defined in these projects: {3}\nComputed project dependencies: {4}'.format(
                            projectsFile, lineNo, p, ', '.join(ignoredDeps), ','.join(candidates)))

                    # Rewrite the dependencies line in canonical form.
                    out.write('project@' + m.group(1) + '@dependencies=' + ','.join(p.canonical_deps()) + '\n')
                lineNo = lineNo + 1
            content = out.getvalue()
        if update_file(projectsFile, content):
            changedFiles += 1
    return changedFiles;
def checkstyle(args):
    """run Checkstyle on the Java sources

    Run Checkstyle over the Java sources. Any errors or warnings
    produced by Checkstyle result in a non-zero exit code."""
    parser = ArgumentParser(prog='mx checkstyle')
    parser.add_argument('-f', action='store_true', dest='force', help='force checking (disables timestamp checking)')
    args = parser.parse_args(args)

    totalErrors = 0
    for p in sorted_deps():
        if p.native:
            continue
        sourceDirs = p.source_dirs()
        dotCheckstyle = join(p.dir, '.checkstyle')

        if not exists(dotCheckstyle):
            continue

        # skip checking this Java project if its Java compliance level is "higher" than the configured JDK
        if java().javaCompliance < p.javaCompliance:
            log('Excluding {0} from checking (Java compliance level {1} required)'.format(p.name, p.javaCompliance))
            continue

        for sourceDir in sourceDirs:
            javafilelist = []
            for root, _, files in os.walk(sourceDir):
                javafilelist += [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
            if len(javafilelist) == 0:
                logv('[no Java sources in {0} - skipping]'.format(sourceDir))
                continue

            # One timestamp file per source directory records when it last checked clean.
            timestampFile = join(p.suite.mxDir, 'checkstyle-timestamps', sourceDir[len(p.suite.dir) + 1:].replace(os.sep, '_') + '.timestamp')
            if not exists(dirname(timestampFile)):
                os.makedirs(dirname(timestampFile))
            mustCheck = False
            if not args.force and exists(timestampFile):
                timestamp = os.path.getmtime(timestampFile)
                for f in javafilelist:
                    if os.path.getmtime(f) > timestamp:
                        mustCheck = True
                        break
            else:
                mustCheck = True

            if not mustCheck:
                if _opts.verbose:
                    log('[all Java sources in {0} already checked - skipping]'.format(sourceDir))
                continue

            dotCheckstyleXML = xml.dom.minidom.parse(dotCheckstyle)
            localCheckConfig = dotCheckstyleXML.getElementsByTagName('local-check-config')[0]
            configLocation = localCheckConfig.getAttribute('location')
            configType = localCheckConfig.getAttribute('type')
            if configType == 'project':
                # Eclipse plugin "Project Relative Configuration" format:
                #
                #  '/<project_name>/<suffix>'
                #
                if configLocation.startswith('/'):
                    name, _, suffix = configLocation.lstrip('/').partition('/')
                    config = join(project(name).dir, suffix)
                else:
                    config = join(p.dir, configLocation)
            else:
                logv('[unknown Checkstyle configuration type "' + configType + '" in {0} - skipping]'.format(sourceDir))
                continue

            exclude = join(p.dir, '.checkstyle.exclude')
            if exists(exclude):
                with open(exclude) as f:
                    # Convert patterns to OS separators
                    patterns = [name.rstrip().replace('/', os.sep) for name in f.readlines()]
                def match(name):
                    # Renamed loop variable (was 'p') to avoid shadowing the
                    # enclosing project variable.
                    for pat in patterns:
                        if pat in name:
                            if _opts.verbose:
                                log('excluding: ' + name)
                            return True
                    return False

                javafilelist = [name for name in javafilelist if not match(name)]

            auditfileName = join(p.dir, 'checkstyleOutput.txt')
            log('Running Checkstyle on {0} using {1}...'.format(sourceDir, config))

            try:
                # Checkstyle is unable to read the filenames to process from a file, and the
                # CreateProcess function on Windows limits the length of a command line to
                # 32,768 characters (http://msdn.microsoft.com/en-us/library/ms682425%28VS.85%29.aspx)
                # so calling Checkstyle must be done in batches.
                while len(javafilelist) != 0:
                    i = 0
                    size = 0
                    while i < len(javafilelist):
                        s = len(javafilelist[i]) + 1
                        if (size + s < 30000):
                            size += s
                            i += 1
                        else:
                            break

                    batch = javafilelist[:i]
                    javafilelist = javafilelist[i:]
                    try:
                        run_java(['-Xmx1g', '-jar', library('CHECKSTYLE').get_path(True), '-f', 'xml', '-c', config, '-o', auditfileName] + batch, nonZeroIsFatal=False)
                    finally:
                        if exists(auditfileName):
                            errors = []
                            # Fix: the expat handler previously used 'global source',
                            # which reads/writes a *module-level* name (NameError if an
                            # <error> element ever precedes the first <file> element,
                            # and it pollutes the module namespace). A one-element list
                            # gives the closure a mutable slot instead.
                            source = [None]
                            def start_element(name, attrs):
                                if name == 'file':
                                    source[0] = attrs['name']
                                elif name == 'error':
                                    errors.append('{}:{}: {}'.format(source[0], attrs['line'], attrs['message']))

                            xp = xml.parsers.expat.ParserCreate()
                            xp.StartElementHandler = start_element
                            with open(auditfileName) as fp:
                                xp.ParseFile(fp)
                            if len(errors) != 0:
                                map(log, errors)
                                totalErrors = totalErrors + len(errors)
                            else:
                                # Clean run: touch (or create) the timestamp file.
                                if exists(timestampFile):
                                    os.utime(timestampFile, None)
                                else:
                                    file(timestampFile, 'a')
            finally:
                if exists(auditfileName):
                    os.unlink(auditfileName)
    return totalErrors
def clean(args, parser=None):
    """remove all class files, images, and executables

    Removes all files created by a build, including Java class files, executables, and
    generated images.
    """
    suppliedParser = parser is not None

    # Fix: the fallback parser was mislabelled prog='mx build' (copy-paste
    # from the build command) which produced wrong usage/help text.
    parser = parser if suppliedParser else ArgumentParser(prog='mx clean')
    parser.add_argument('--no-native', action='store_false', dest='native', help='do not clean native projects')
    parser.add_argument('--no-java', action='store_false', dest='java', help='do not clean Java projects')

    args = parser.parse_args(args)

    for p in projects():
        if p.native:
            if args.native:
                # Native projects delegate cleaning to their makefile.
                run([gmake_cmd(), '-C', p.dir, 'clean'])
        else:
            if args.java:
                genDir = p.source_gen_dir()
                if genDir != '' and exists(genDir):
                    log('Clearing {0}...'.format(genDir))
                    for f in os.listdir(genDir):
                        shutil.rmtree(join(genDir, f))

                outputDir = p.output_dir()
                if outputDir != '' and exists(outputDir):
                    log('Removing {0}...'.format(outputDir))
                    shutil.rmtree(outputDir)

    # Return parsed args so a wrapping command can continue processing them.
    if suppliedParser:
        return args
def about(args):
    """show the 'man page' for mx"""
    # The module docstring serves as the man page text.
    print __doc__
def help_(args):
    """show help for a given command

    With no arguments, print a list of commands and short help for each command.

    Given a command name, print help for that command."""
    if len(args) == 0:
        _argParser.print_help()
        return

    name = args[0]
    if not commands.has_key(name):
        # Accept an unambiguous prefix of a command name.
        hits = [c for c in commands.iterkeys() if c.startswith(name)]
        if len(hits) == 1:
            name = hits[0]
        elif len(hits) == 0:
            abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(name, _format_commands()))
        else:
            abort('mx: command \'{0}\' is ambiguous\n {1}'.format(name, ' '.join(hits)))

    # commands maps a name to a tuple of at least (function, usage string);
    # any further entries are format arguments for the docstring.
    value = commands[name]
    (func, usage) = value[:2]
    doc = func.__doc__
    if len(value) > 2:
        docArgs = value[2:]
        fmtArgs = []
        for d in docArgs:
            # Callables are evaluated lazily so the help reflects current state.
            if isinstance(d, Callable):
                fmtArgs += [d()]
            else:
                fmtArgs += [str(d)]
        doc = doc.format(*fmtArgs)
    print 'mx {0} {1}\n\n{2}\n'.format(name, usage, doc)
def projectgraph(args, suite=None):
"""create dot graph for project structure ("mx projectgraph | dot -Tpdf -oprojects.pdf")"""
print 'digraph projects {'
print 'rankdir=BT;'
print 'node [shape=rect];'
for p in projects():
for dep in p.canonical_deps():
print '"' + p.name + '"->"' + dep + '"'
print '}'
def _source_locator_memento(deps):
    # Build an Eclipse source-lookup memento covering the JRE plus each dependency.
    doc = XMLDoc()
    doc.open('sourceLookupDirector')
    doc.open('sourceContainers', {'duplicates' : 'false'})

    # Every Java program depends on the JRE
    jreMemento = XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER'}).xml(standalone='no')
    doc.element('classpathContainer', {'memento' : jreMemento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})

    for dep in deps:
        if not dep.isLibrary():
            # A project dependency: source comes from the workspace project.
            projMemento = XMLDoc().element('javaProject', {'name' : dep.name}).xml(standalone='no')
            doc.element('container', {'memento' : projMemento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.javaProject'})
        elif hasattr(dep, 'eclipse.container'):
            # A library mapped to an Eclipse classpath container; other
            # libraries contribute nothing to source lookup.
            contMemento = XMLDoc().element('classpathContainer', {'path' : getattr(dep, 'eclipse.container')}).xml(standalone='no')
            doc.element('classpathContainer', {'memento' : contMemento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})

    doc.close('sourceContainers')
    doc.close('sourceLookupDirector')
    return doc
def make_eclipse_attach(hostname, port, name=None, deps=[]):
    """
    Creates an Eclipse launch configuration file for attaching to a Java process.

    'deps' supplies the dependencies used to build the source lookup path.
    Returns the result of update_file() on the generated launch file.
    """
    # NOTE(review): 'deps' is a mutable default argument; it is only read here,
    # but callers should not rely on mutating it.
    slm = _source_locator_memento(deps)
    launch = XMLDoc()
    launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.remoteJavaApplication'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
    # '%s' is a placeholder: the source locator memento is spliced in below.
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
    launch.element('booleanAttribute', {'key' : 'org.eclipse.jdt.launching.ALLOW_TERMINATE', 'value' : 'true'})
    launch.open('mapAttribute', {'key' : 'org.eclipse.jdt.launching.CONNECT_MAP'})
    launch.element('mapEntry', {'key' : 'hostname', 'value' : hostname})
    launch.element('mapEntry', {'key' : 'port', 'value' : port})
    launch.close('mapAttribute')
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_CONNECTOR_ID', 'value' : 'org.eclipse.jdt.launching.socketAttachConnector'})
    launch.close('launchConfiguration')
    launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')

    if name is None:
        # 'port' is used in string concatenation, so it is expected as a string.
        name = 'attach-' + hostname + '-' + port
    eclipseLaunches = join('mx', 'eclipse-launches')
    if not exists(eclipseLaunches):
        os.makedirs(eclipseLaunches)
    return update_file(join(eclipseLaunches, name + '.launch'), launch)
def make_eclipse_launch(javaArgs, jre, name=None, deps=None):
    """
    Creates an Eclipse launch configuration file for running/debugging a Java command.

    'javaArgs' is the full java command line (VM args, main class or -jar, app args).
    Returns False if no main class/jar could be determined, otherwise the result
    of update_file() on the generated launch file.
    """
    # Fix: the default was a mutable 'deps=[]' that was mutated below via
    # 'deps += [...]', accumulating entries across calls. Use a None sentinel
    # and copy any caller-supplied list.
    deps = [] if deps is None else list(deps)

    # Split the java command line into VM args, main class (or '-jar') and app args.
    mainClass = None
    vmArgs = []
    appArgs = []
    cp = None
    argsCopy = list(reversed(javaArgs))
    while len(argsCopy) != 0:
        a = argsCopy.pop()
        if a == '-jar':
            mainClass = '-jar'
            appArgs = list(reversed(argsCopy))
            break
        if a == '-cp' or a == '-classpath':
            assert len(argsCopy) != 0
            cp = argsCopy.pop()
            vmArgs.append(a)
            vmArgs.append(cp)
        elif a.startswith('-'):
            vmArgs.append(a)
        else:
            mainClass = a
            appArgs = list(reversed(argsCopy))
            break

    if mainClass is None:
        log('Cannot create Eclipse launch configuration without main class or jar file: java ' + ' '.join(javaArgs))
        return False

    if name is None:
        if mainClass == '-jar':
            name = basename(appArgs[0])
            if len(appArgs) > 1 and not appArgs[1].startswith('-'):
                name = name + '_' + appArgs[1]
        else:
            name = mainClass
        name = time.strftime('%Y-%m-%d-%H%M%S_' + name)

    # Map class path entries back to known projects/libraries for source lookup.
    if cp is not None:
        for e in cp.split(os.pathsep):
            for s in suites():
                deps += [p for p in s.projects if e == p.output_dir()]
                deps += [l for l in s.libs if e == l.get_path(False)]

    slm = _source_locator_memento(deps)

    launch = XMLDoc()
    launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.localJavaApplication'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
    # '%s' is a placeholder: the source locator memento is spliced in below.
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.JRE_CONTAINER', 'value' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/' + jre})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.MAIN_TYPE', 'value' : mainClass})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROGRAM_ARGUMENTS', 'value' : ' '.join(appArgs)})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_ARGUMENTS', 'value' : ' '.join(vmArgs)})
    launch.close('launchConfiguration')
    launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')

    eclipseLaunches = join('mx', 'eclipse-launches')
    if not exists(eclipseLaunches):
        os.makedirs(eclipseLaunches)
    return update_file(join(eclipseLaunches, name + '.launch'), launch)
def eclipseinit(args, suite=None, buildProcessorJars=True):
    """(re)generate Eclipse project configurations and working sets

    For every non-native project this generates .classpath, .checkstyle,
    .project, .settings and (for annotation processor users) .factorypath
    files, then a default attach launch configuration and the workspace
    working sets."""
    if suite is None:
        suite = _mainSuite

    if buildProcessorJars:
        processorjars()

    # Map each project to the distribution containing it (used for builders below).
    projToDist = dict()
    for dist in _dists.values():
        distDeps = sorted_deps(dist.deps)
        for p in distDeps:
            projToDist[p.name] = (dist, [dep.name for dep in distDeps])

    for p in projects():
        if p.native:
            continue

        if not exists(p.dir):
            os.makedirs(p.dir)

        # --- .classpath ---
        out = XMLDoc()
        out.open('classpath')

        for src in p.srcDirs:
            srcDir = join(p.dir, src)
            if not exists(srcDir):
                os.mkdir(srcDir)
            out.element('classpathentry', {'kind' : 'src', 'path' : src})

        if len(p.annotation_processors()) > 0:
            genDir = p.source_gen_dir();
            if not exists(genDir):
                os.mkdir(genDir)
            out.element('classpathentry', {'kind' : 'src', 'path' : 'src_gen'})

        # Every Java program depends on the JRE
        out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER'})

        if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
            out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.pde.core.requiredPlugins'})

        for dep in p.all_deps([], True):
            if dep == p:
                continue;

            if dep.isLibrary():
                if hasattr(dep, 'eclipse.container'):
                    out.element('classpathentry', {'exported' : 'true', 'kind' : 'con', 'path' : getattr(dep, 'eclipse.container')})
                elif hasattr(dep, 'eclipse.project'):
                    out.element('classpathentry', {'combineaccessrules' : 'false', 'exported' : 'true', 'kind' : 'src', 'path' : '/' + getattr(dep, 'eclipse.project')})
                else:
                    path = dep.path
                    # Called for its side effect of resolving (downloading) the library.
                    dep.get_path(resolve=True)
                    if not exists(path) and not dep.mustExist:
                        continue;

                    if not isabs(path):
                        # Relative paths for "lib" class path entries have various semantics depending on the Eclipse
                        # version being used (e.g. see https://bugs.eclipse.org/bugs/show_bug.cgi?id=274737) so it's
                        # safest to simply use absolute paths.
                        path = join(suite.dir, path)

                    attributes = {'exported' : 'true', 'kind' : 'lib', 'path' : path}

                    sourcePath = dep.get_source_path(resolve=True)
                    if sourcePath is not None:
                        attributes['sourcepath'] = sourcePath
                    out.element('classpathentry', attributes)
            else:
                out.element('classpathentry', {'combineaccessrules' : 'false', 'exported' : 'true', 'kind' : 'src', 'path' : '/' + dep.name})

        out.element('classpathentry', {'kind' : 'output', 'path' : getattr(p, 'eclipse.output', 'bin')})
        out.close('classpath')
        update_file(join(p.dir, '.classpath'), out.xml(indent='\t', newl='\n'))

        # --- .checkstyle (only if the checkstyle project provides a config) ---
        csConfig = join(project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
        if exists(csConfig):
            out = XMLDoc()

            dotCheckstyle = join(p.dir, ".checkstyle")
            checkstyleConfigPath = '/' + p.checkstyleProj + '/.checkstyle_checks.xml'
            out.open('fileset-config', {'file-format-version' : '1.2.0', 'simple-config' : 'true'})
            out.open('local-check-config', {'name' : 'Checks', 'location' : checkstyleConfigPath, 'type' : 'project', 'description' : ''})
            out.element('additional-data', {'name' : 'protect-config-file', 'value' : 'false'})
            out.close('local-check-config')
            out.open('fileset', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
            out.element('file-match-pattern', {'match-pattern' : '.', 'include-pattern' : 'true'})
            out.close('fileset')
            out.open('filter', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
            out.element('filter-data', {'value' : 'java'})
            out.close('filter')

            exclude = join(p.dir, '.checkstyle.exclude')
            if exists(exclude):
                out.open('filter', {'name' : 'FilesFromPackage', 'enabled' : 'true'})
                with open(exclude) as f:
                    for line in f:
                        # Lines starting with '#' are comments.
                        if not line.startswith('#'):
                            line = line.strip()
                            exclDir = join(p.dir, line)
                            assert isdir(exclDir), 'excluded source directory listed in ' + exclude + ' does not exist or is not a directory: ' + exclDir
                            out.element('filter-data', {'value' : line})
                out.close('filter')

            out.close('fileset-config')
            update_file(dotCheckstyle, out.xml(indent=' ', newl='\n'))

        # --- .project ---
        out = XMLDoc()
        out.open('projectDescription')
        out.element('name', data=p.name)
        out.element('comment', data='')
        out.element('projects', data='')
        out.open('buildSpec')
        out.open('buildCommand')
        out.element('name', data='org.eclipse.jdt.core.javabuilder')
        out.element('arguments', data='')
        out.close('buildCommand')
        if exists(csConfig):
            out.open('buildCommand')
            out.element('name', data='net.sf.eclipsecs.core.CheckstyleBuilder')
            out.element('arguments', data='')
            out.close('buildCommand')
        if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
            for buildCommand in ['org.eclipse.pde.ManifestBuilder', 'org.eclipse.pde.SchemaBuilder']:
                out.open('buildCommand')
                out.element('name', data=buildCommand)
                out.element('arguments', data='')
                out.close('buildCommand')

        # External-tool builders that keep processor jars / distributions up to date.
        if _isAnnotationProcessorDependency(p):
            _genEclipseBuilder(out, p, 'Jar.launch', 'archive ' + p.name, refresh = False, async = False, xmlIndent='', xmlStandalone='no')
            _genEclipseBuilder(out, p, 'Refresh.launch', '', refresh = True, async = True)

        if projToDist.has_key(p.name):
            dist, distDeps = projToDist[p.name]
            _genEclipseBuilder(out, p, 'Create' + dist.name + 'Dist.launch', 'archive @' + dist.name, refresh=False, async=True)

        out.close('buildSpec')
        out.open('natures')
        out.element('nature', data='org.eclipse.jdt.core.javanature')
        if exists(csConfig):
            out.element('nature', data='net.sf.eclipsecs.core.CheckstyleNature')
        if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
            out.element('nature', data='org.eclipse.pde.PluginNature')
        out.close('natures')
        out.close('projectDescription')
        update_file(join(p.dir, '.project'), out.xml(indent='\t', newl='\n'))

        # --- .settings: copy suite-wide templates, expanding placeholders ---
        settingsDir = join(p.dir, ".settings")
        if not exists(settingsDir):
            os.mkdir(settingsDir)

        eclipseSettingsDir = join(suite.mxDir, 'eclipse-settings')
        if exists(eclipseSettingsDir):
            for name in os.listdir(eclipseSettingsDir):
                # APT settings only apply to projects with annotation processors.
                if name == "org.eclipse.jdt.apt.core.prefs" and not len(p.annotation_processors()) > 0:
                    continue
                path = join(eclipseSettingsDir, name)
                if isfile(path):
                    with open(join(eclipseSettingsDir, name)) as f:
                        content = f.read()
                        content = content.replace('${javaCompliance}', str(p.javaCompliance))
                        if len(p.annotation_processors()) > 0:
                            content = content.replace('org.eclipse.jdt.core.compiler.processAnnotations=disabled', 'org.eclipse.jdt.core.compiler.processAnnotations=enabled')
                    update_file(join(settingsDir, name), content)

        # --- .factorypath for annotation processing ---
        if len(p.annotation_processors()) > 0:
            out = XMLDoc()
            out.open('factorypath')
            out.element('factorypathentry', {'kind' : 'PLUGIN', 'id' : 'org.eclipse.jst.ws.annotations.core', 'enabled' : 'true', 'runInBatchMode' : 'false'})
            for ap in p.annotation_processors():
                for dep in dependency(ap).all_deps([], True):
                    if dep.isLibrary():
                        if not hasattr(dep, 'eclipse.container') and not hasattr(dep, 'eclipse.project'):
                            if dep.mustExist:
                                path = dep.get_path(resolve=True)
                                if not isabs(path):
                                    # Relative paths for "lib" class path entries have various semantics depending on the Eclipse
                                    # version being used (e.g. see https://bugs.eclipse.org/bugs/show_bug.cgi?id=274737) so it's
                                    # safest to simply use absolute paths.
                                    path = join(suite.dir, path)
                                out.element('factorypathentry', {'kind' : 'EXTJAR', 'id' : path, 'enabled' : 'true', 'runInBatchMode' : 'false'})
                    else:
                        out.element('factorypathentry', {'kind' : 'WKSPJAR', 'id' : '/' + dep.name + '/' + dep.name + '.jar', 'enabled' : 'true', 'runInBatchMode' : 'false'})
            out.close('factorypath')
            update_file(join(p.dir, '.factorypath'), out.xml(indent='\t', newl='\n'))

    make_eclipse_attach('localhost', '8000', deps=projects())
    generate_eclipse_workingsets(suite)
def _isAnnotationProcessorDependency(p):
    """
    Determines if a given project is part of an annotation processor.
    """
    processorProjects = sorted_deps(annotation_processors())
    return p in processorProjects
def _genEclipseBuilder(dotProjectDoc, p, name, mxCommand, refresh=True, async=False, logToConsole=False, xmlIndent='\t', xmlStandalone=None):
    """
    Generates an Eclipse external-tool builder that runs an mx command for project p.

    Writes a launch configuration to <p.dir>/.externalToolBuilders/<name> and
    appends a corresponding <buildCommand> element to the given .project
    document (dotProjectDoc).
    """
    launchOut = XMLDoc();
    consoleOn = 'true' if logToConsole else 'false'
    launchOut.open('launchConfiguration', {'type' : 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType'})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.core.capture_output', 'value': consoleOn})
    launchOut.open('mapAttribute', {'key' : 'org.eclipse.debug.core.environmentVariables'})
    # mx scripts need JAVA_HOME to pick the configured JDK.
    launchOut.element('mapEntry', {'key' : 'JAVA_HOME', 'value' : java().jdk})
    launchOut.close('mapAttribute')

    if refresh:
        launchOut.element('stringAttribute', {'key' : 'org.eclipse.debug.core.ATTR_REFRESH_SCOPE', 'value': '${project}'})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON', 'value': consoleOn})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'true' if async else 'false'})

    # The tool location is the mx launcher script next to this module.
    baseDir = dirname(dirname(os.path.abspath(__file__)))

    cmd = 'mx.sh'
    if get_os() == 'windows':
        cmd = 'mx.cmd'
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value':  join(baseDir, cmd) })
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'auto,full,incremental'})
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': mxCommand})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED','value': 'true'})
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': p.suite.dir})

    launchOut.close('launchConfiguration')

    externalToolDir = join(p.dir, '.externalToolBuilders')

    if not exists(externalToolDir):
        os.makedirs(externalToolDir)
    update_file(join(externalToolDir, name), launchOut.xml(indent=xmlIndent, standalone=xmlStandalone, newl='\n'))

    # Register the builder in the .project document.
    dotProjectDoc.open('buildCommand')
    dotProjectDoc.element('name', data='org.eclipse.ui.externaltools.ExternalToolBuilder')
    dotProjectDoc.element('triggers', data='auto,full,incremental,')
    dotProjectDoc.open('arguments')
    dotProjectDoc.open('dictionary')
    dotProjectDoc.element('key', data = 'LaunchConfigHandle')
    dotProjectDoc.element('value', data = '<project>/.externalToolBuilders/' + name)
    dotProjectDoc.close('dictionary')
    dotProjectDoc.open('dictionary')
    dotProjectDoc.element('key', data = 'incclean')
    dotProjectDoc.element('value', data = 'true')
    dotProjectDoc.close('dictionary')
    dotProjectDoc.close('arguments')
    dotProjectDoc.close('buildCommand')
def generate_eclipse_workingsets(suite):
    """
    Populate the workspace's working set configuration with working sets generated from project data.

    Existing working set definitions in the workspace are retained and extended.
    The workspace root is taken from the WORKSPACE environment variable if set,
    otherwise the suite (Graal project) root directory is assumed; if no
    workspace metadata directory can be found there either, the file is written
    to that root and must be placed by hand.
    """
    # identify the location where to look for workingsets.xml
    wsfilename = 'workingsets.xml'
    wsroot = os.environ['WORKSPACE'] if 'WORKSPACE' in os.environ else suite.dir
    wsdir = join(wsroot, '.metadata/.plugins/org.eclipse.ui.workbench')
    if not exists(wsdir):
        wsdir = wsroot
    wspath = join(wsdir, wsfilename)

    # gather working set info from project data
    workingSets = dict()
    for p in projects():
        if p.workingSets is None:
            continue
        for w in p.workingSets.split(","):
            workingSets.setdefault(w, []).append(p.name)

    # merge into an existing definition file, or create a fresh one
    wsdoc = _copy_workingset_xml(wspath, workingSets) if exists(wspath) else _make_workingset_xml(workingSets)

    update_file(wspath, wsdoc.xml(newl='\n'))
def _make_workingset_xml(workingSets):
    # Create a workingsets.xml document from scratch: one <workingSet> element
    # per working set name (in sorted order), each listing its member projects.
    doc = XMLDoc()
    doc.open('workingSetManager')

    for wsName in sorted(workingSets.keys()):
        _workingset_open(doc, wsName)
        for projName in workingSets[wsName]:
            _workingset_element(doc, projName)
        doc.close('workingSet')

    doc.close('workingSetManager')
    return doc
def _copy_workingset_xml(wspath, workingSets):
    """Merge generated working sets into an existing workingsets.xml.

    The existing file at wspath is streamed through an expat parser and copied
    into a new XMLDoc; working sets known to 'workingSets' are extended with
    any missing projects, unknown ones are copied verbatim, and working sets
    not yet in the file are appended at the end."""
    target = XMLDoc()
    target.open('workingSetManager')

    parser = xml.parsers.expat.ParserCreate()

    # Mutable state shared by the expat handlers below.
    class ParserState(object):
        def __init__(self):
            self.current_ws_name = 'none yet'
            self.current_ws = None        # project list of the working set being copied, if known
            self.seen_ws = list()         # working set names already present in the file
            self.seen_projects = list()   # projects seen in the current working set

    ps = ParserState()

    # parsing logic
    def _ws_start(name, attributes):
        # Handles elements while outside any <workingSet>.
        if name == 'workingSet':
            ps.current_ws_name = attributes['name']
            if workingSets.has_key(ps.current_ws_name):
                ps.current_ws = workingSets[ps.current_ws_name]
                ps.seen_ws.append(ps.current_ws_name)
                ps.seen_projects = list()
            else:
                ps.current_ws = None
            target.open(name, attributes)
            # Switch to the inside-workingSet handler.
            parser.StartElementHandler = _ws_item

    def _ws_end(name):
        if name == 'workingSet':
            # Append any generated projects missing from this working set.
            if not ps.current_ws is None:
                for p in ps.current_ws:
                    if not p in ps.seen_projects:
                        _workingset_element(target, p)
            target.close('workingSet')
            parser.StartElementHandler = _ws_start
        elif name == 'workingSetManager':
            # process all working sets that are new to the file
            for w in sorted(workingSets.keys()):
                if not w in ps.seen_ws:
                    _workingset_open(target, w)
                    for p in workingSets[w]:
                        _workingset_element(target, p)
                    target.close('workingSet')

    def _ws_item(name, attributes):
        # Handles elements inside a <workingSet>.
        if name == 'item':
            if ps.current_ws is None:
                # Unknown working set: copy the item unchanged.
                target.element(name, attributes)
            else:
                p_name = attributes['elementID'][1:]  # strip off the leading '='
                _workingset_element(target, p_name)
                ps.seen_projects.append(p_name)

    # process document
    parser.StartElementHandler = _ws_start
    parser.EndElementHandler = _ws_end
    with open(wspath, 'r') as wsfile:
        parser.ParseFile(wsfile)

    target.close('workingSetManager')
    return target
def _workingset_open(wsdoc, ws):
    """Emit the opening <workingSet> tag for working set 'ws' into 'wsdoc'."""
    attrs = {'editPageID': 'org.eclipse.jdt.ui.JavaWorkingSetPage',
             'factoryID': 'org.eclipse.ui.internal.WorkingSetFactory',
             'id': 'wsid_' + ws,
             'label': ws,
             'name': ws}
    wsdoc.open('workingSet', attrs)
def _workingset_element(wsdoc, p):
    """Emit an <item> element referencing project 'p' into 'wsdoc'."""
    attrs = {'elementID': '=' + p,
             'factoryID': 'org.eclipse.jdt.ui.PersistableJavaElementFactory'}
    wsdoc.element('item', attrs)
def netbeansinit(args, suite=None):
    """(re)generate NetBeans project configurations"""
    if suite is None:
        suite = _mainSuite

    def println(out, obj):
        # small write helper; note: not referenced anywhere in this function body
        out.write(str(obj) + '\n')

    updated = False
    for p in projects():
        if p.native:
            continue

        if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
            continue

        if not exists(join(p.dir, 'nbproject')):
            os.makedirs(join(p.dir, 'nbproject'))

        # --- build.xml: delegate to build-impl.xml and archive via this script post-compile ---
        out = XMLDoc()
        out.open('project', {'name' : p.name, 'default' : 'default', 'basedir' : '.'})
        out.element('description', data='Builds, tests, and runs the project ' + p.name + '.')
        out.element('import', {'file' : 'nbproject/build-impl.xml'})
        out.open('target', {'name' : '-post-compile'})
        out.open('exec', { 'executable' : sys.executable})
        out.element('env', {'key' : 'JAVA_HOME', 'value' : java().jdk})
        out.element('arg', {'value' : os.path.abspath(__file__)})
        out.element('arg', {'value' : 'archive'})
        out.element('arg', {'value' : '@GRAAL'})
        out.close('exec')
        out.close('target')
        out.close('project')
        updated = update_file(join(p.dir, 'build.xml'), out.xml(indent='\t', newl='\n')) or updated

        # --- nbproject/project.xml: project metadata, source roots and inter-project references ---
        out = XMLDoc()
        out.open('project', {'xmlns' : 'http://www.netbeans.org/ns/project/1'})
        out.element('type', data='org.netbeans.modules.java.j2seproject')
        out.open('configuration')
        out.open('data', {'xmlns' : 'http://www.netbeans.org/ns/j2se-project/3'})
        out.element('name', data=p.name)
        out.element('explicit-platform', {'explicit-source-supported' : 'true'})
        out.open('source-roots')
        out.element('root', {'id' : 'src.dir'})
        if len(p.annotation_processors()) > 0:
            out.element('root', {'id' : 'src.ap-source-output.dir'})
        out.close('source-roots')
        out.open('test-roots')
        out.close('test-roots')
        out.close('data')
        firstDep = True
        for dep in p.all_deps([], True):
            if dep == p:
                continue;

            if not dep.isLibrary():
                n = dep.name.replace('.', '_')
                if firstDep:
                    # emit the enclosing <references> element lazily, only if any project dep exists
                    out.open('references', {'xmlns' : 'http://www.netbeans.org/ns/ant-project-references/1'})
                    firstDep = False

                out.open('reference')
                out.element('foreign-project', data=n)
                out.element('artifact-type', data='jar')
                out.element('script', data='build.xml')
                out.element('target', data='jar')
                out.element('clean-target', data='clean')
                out.element('id', data='jar')
                out.close('reference')

        if not firstDep:
            out.close('references')
        out.close('configuration')
        out.close('project')
        updated = update_file(join(p.dir, 'nbproject', 'project.xml'), out.xml(indent='  ', newl='\n')) or updated

        # --- nbproject/project.properties: template plus per-project source/classpath entries ---
        out = StringIO.StringIO()
        jdkPlatform = 'JDK_' + str(java().version)

        annotationProcessorEnabled = "false"
        annotationProcessorReferences = ""
        annotationProcessorSrcFolder = ""
        if len(p.annotation_processors()) > 0:
            annotationProcessorEnabled = "true"
            annotationProcessorSrcFolder = "src.ap-source-output.dir=${build.generated.sources.dir}/ap-source-output"

        # NetBeans properties template; ':' and '/' are rewritten to the platform
        # path separator / dir separator by the trailing .replace() calls
        content = """
annotation.processing.enabled=""" + annotationProcessorEnabled + """
annotation.processing.enabled.in.editor=""" + annotationProcessorEnabled + """
annotation.processing.processors.list=
annotation.processing.run.all.processors=true
application.title=""" + p.name + """
application.vendor=mx
build.classes.dir=${build.dir}
build.classes.excludes=**/*.java,**/*.form
# This directory is removed when the project is cleaned:
build.dir=bin
build.generated.dir=${build.dir}/generated
build.generated.sources.dir=${build.dir}/generated-sources
# Only compile against the classpath explicitly listed here:
build.sysclasspath=ignore
build.test.classes.dir=${build.dir}/test/classes
build.test.results.dir=${build.dir}/test/results
# Uncomment to specify the preferred debugger connection transport:
#debug.transport=dt_socket
debug.classpath=\\
    ${run.classpath}
debug.test.classpath=\\
    ${run.test.classpath}
# This directory is removed when the project is cleaned:
dist.dir=dist
dist.jar=${dist.dir}/""" + p.name + """.jar
dist.javadoc.dir=${dist.dir}/javadoc
endorsed.classpath=
excludes=
includes=**
jar.compress=false
# Space-separated list of extra javac options
javac.compilerargs=
javac.deprecation=false
javac.source=1.7
javac.target=1.7
javac.test.classpath=\\
    ${javac.classpath}:\\
    ${build.classes.dir}
javadoc.additionalparam=
javadoc.author=false
javadoc.encoding=${source.encoding}
javadoc.noindex=false
javadoc.nonavbar=false
javadoc.notree=false
javadoc.private=false
javadoc.splitindex=true
javadoc.use=true
javadoc.version=false
javadoc.windowtitle=
main.class=
manifest.file=manifest.mf
meta.inf.dir=${src.dir}/META-INF
mkdist.disabled=false
platforms.""" + jdkPlatform + """.home=""" + java().jdk + """
platform.active=""" + jdkPlatform + """
run.classpath=\\
    ${javac.classpath}:\\
    ${build.classes.dir}
# Space-separated list of JVM arguments used when running the project
# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
# or test-sys-prop.name=value to set system properties for unit tests):
run.jvmargs=
run.test.classpath=\\
    ${javac.test.classpath}:\\
    ${build.test.classes.dir}
test.src.dir=./test
""" + annotationProcessorSrcFolder + """
source.encoding=UTF-8""".replace(':', os.pathsep).replace('/', os.sep)
        print >> out, content

        # register each source dir, the first one becoming the canonical src.dir
        mainSrc = True
        for src in p.srcDirs:
            srcDir = join(p.dir, src)
            if not exists(srcDir):
                os.mkdir(srcDir)
            ref = 'file.reference.' + p.name + '-' + src
            print >> out, ref + '=' + src
            if mainSrc:
                print >> out, 'src.dir=${' + ref + '}'
                mainSrc = False
            else:
                print >> out, 'src.' + src + '.dir=${' + ref + '}'

        javacClasspath = []

        deps = p.all_deps([], True)
        annotationProcessorOnlyDeps = []
        if len(p.annotation_processors()) > 0:
            for ap in p.annotation_processors():
                apDep = dependency(ap)
                if not apDep in deps:
                    # processor-only deps go on the processor path, not the compile classpath
                    deps.append(apDep)
                    annotationProcessorOnlyDeps.append(apDep)

        annotationProcessorReferences = [];
        for dep in deps:
            if dep == p:
                continue;

            if dep.isLibrary():
                if not dep.mustExist:
                    continue
                path = dep.get_path(resolve=True)
                if os.sep == '\\':
                    # escape backslashes for the .properties file format
                    path = path.replace('\\', '\\\\')
                ref = 'file.reference.' + dep.name + '-bin'
                print >> out, ref + '=' + path
            else:
                n = dep.name.replace('.', '_')
                relDepPath = os.path.relpath(dep.dir, p.dir).replace(os.sep, '/')
                ref = 'reference.' + n + '.jar'
                print >> out, 'project.' + n + '=' + relDepPath
                print >> out, ref + '=${project.' + n + '}/dist/' + dep.name + '.jar'

            if not dep in annotationProcessorOnlyDeps:
                javacClasspath.append('${' + ref + '}')
            else:
                annotationProcessorReferences.append('${' + ref + '}')
                # NOTE(review): 'list += str' extends the list with the string's
                # individual characters -- this looks like leftover code from when
                # annotationProcessorReferences was a string; verify intent
                annotationProcessorReferences += ":\\\n    ${" + ref + "}"

        print >> out, 'javac.classpath=\\\n    ' + (os.pathsep + '\\\n    ').join(javacClasspath)
        print >> out, 'javac.test.processorpath=${javac.test.classpath}\\\n    ' + (os.pathsep + '\\\n    ').join(annotationProcessorReferences)
        print >> out, 'javac.processorpath=${javac.classpath}\\\n    ' + (os.pathsep + '\\\n    ').join(annotationProcessorReferences)

        updated = update_file(join(p.dir, 'nbproject', 'project.properties'), out.getvalue()) or updated
        out.close()

    if updated:
        log('If using NetBeans:')
        log('  1. Ensure that a platform named "JDK_' + str(java().version) + '" is defined (Tools -> Java Platforms)')
        log('  2. Open/create a Project Group for the directory containing the projects (File -> Project Group -> New Group... -> Folder of Projects)')
def ideclean(args, suite=None):
    """remove all Eclipse and NetBeans project configurations"""
    def rm(path):
        # best-effort removal of a single generated file
        if exists(path):
            os.remove(path)

    for p in projects():
        if p.native:
            continue
        shutil.rmtree(join(p.dir, '.settings'), ignore_errors=True)
        shutil.rmtree(join(p.dir, '.externalToolBuilders'), ignore_errors=True)
        shutil.rmtree(join(p.dir, 'nbproject'), ignore_errors=True)
        rm(join(p.dir, '.classpath'))
        rm(join(p.dir, '.project'))
        rm(join(p.dir, '.factorypath'))
        rm(join(p.dir, 'build.xml'))
        rm(join(p.dir, 'eclipse-build.xml'))
        try:
            rm(join(p.dir, p.name + '.jar'))
        except OSError:
            # narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; only OS-level removal
            # failures should be reported and ignored here
            log("Error removing {0}".format(p.name + '.jar'))
def ideinit(args, suite=None):
    """(re)generate Eclipse and NetBeans project configurations"""
    # regenerate both IDE configurations, then prune stale project directories
    for generator in (eclipseinit, netbeansinit):
        generator(args, suite)
    fsckprojects([])
def fsckprojects(args):
    """find directories corresponding to deleted Java projects and delete them"""
    for suite in suites():
        projectDirs = [p.dir for p in suite.projects]
        for root, dirnames, files in os.walk(suite.dir):
            currentDir = join(suite.dir, root)
            if currentDir in projectDirs:
                # don't traverse subdirs of an existing project
                dirnames[:] = []
            else:
                # a directory with IDE config files but no registered project is stale
                projectConfigFiles = frozenset(['.classpath', 'nbproject'])
                indicators = projectConfigFiles.intersection(files)
                if len(indicators) != 0:
                    # prompt only on an interactive terminal; non-interactive runs delete unconditionally
                    if not sys.stdout.isatty() or raw_input(currentDir + ' looks like a removed project -- delete it? [yn]: ') == 'y':
                        shutil.rmtree(currentDir)
                        log('Deleted ' + currentDir)
def javadoc(args, parser=None, docDir='javadoc', includeDeps=True, stdDoclet=True):
    """generate javadoc for some/all Java projects"""
    parser = ArgumentParser(prog='mx javadoc') if parser is None else parser
    parser.add_argument('-d', '--base', action='store', help='base directory for output')
    parser.add_argument('--unified', action='store_true', help='put javadoc in a single directory instead of one per project')
    parser.add_argument('--force', action='store_true', help='(re)generate javadoc even if package-list file exists')
    parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
    parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
    parser.add_argument('--argfile', action='store', help='name of file containing extra javadoc options')
    parser.add_argument('--arg', action='append', dest='extra_args', help='extra Javadoc arguments (e.g. --arg @-use)', metavar='@<arg>', default=[])
    parser.add_argument('-m', '--memory', action='store', help='-Xmx value to pass to underlying JVM')
    parser.add_argument('--packages', action='store', help='comma separated packages to process (omit to process all packages)')
    parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude')

    args = parser.parse_args(args)

    # build list of projects to be processed
    candidates = sorted_deps()
    if args.projects is not None:
        candidates = [project(name) for name in args.projects.split(',')]

    # optionally restrict packages within a project
    packages = []
    if args.packages is not None:
        packages = [name for name in args.packages.split(',')]

    exclude_packages = []
    if args.exclude_packages is not None:
        exclude_packages = [name for name in args.exclude_packages.split(',')]

    def outDir(p):
        # per-project javadoc output directory (under args.base when given)
        if args.base is None:
            return join(p.dir, docDir)
        return join(args.base, p.name, docDir)

    def check_package_list(p):
        # True when no javadoc has been generated yet for project p
        return not exists(join(outDir(p), 'package-list'))

    def assess_candidate(p, projects):
        # add p to 'projects' unless already present or already documented
        if p in projects:
            return False
        if args.force or args.unified or check_package_list(p):
            projects.append(p)
            return True
        return False

    projects = []
    for p in candidates:
        if not p.native:
            if includeDeps:
                deps = p.all_deps([], includeLibs=False, includeSelf=False)
                for d in deps:
                    assess_candidate(d, projects)
            if not assess_candidate(p, projects):
                logv('[package-list file exists - skipping {0}]'.format(p.name))

    def find_packages(sourceDirs, pkgs=set()):
        # NOTE: mutable default argument; every call site in this function
        # passes 'pkgs' explicitly so the shared default is never used
        for sourceDir in sourceDirs:
            for root, _, files in os.walk(sourceDir):
                if len([name for name in files if name.endswith('.java')]) != 0:
                    pkg = root[len(sourceDir) + 1:].replace(os.sep,'.')
                    if len(packages) == 0 or pkg in packages:
                        if len(exclude_packages) == 0 or not pkg in exclude_packages:
                            pkgs.add(pkg)
        return pkgs

    extraArgs = [a.lstrip('@') for a in args.extra_args]
    if args.argfile is not None:
        extraArgs += ['@' + args.argfile]
    memory = '2g'
    if args.memory is not None:
        memory = args.memory
    memory = '-J-Xmx' + memory

    if not args.unified:
        for p in projects:
            # The project must be built to ensure javadoc can find class files for all referenced classes
            build(['--no-native', '--projects', p.name])

            pkgs = find_packages(p.source_dirs(), set())
            deps = p.all_deps([], includeLibs=False, includeSelf=False)
            links = ['-link', 'http://docs.oracle.com/javase/' + str(p.javaCompliance.value) + '/docs/api/']
            out = outDir(p)
            for d in deps:
                depOut = outDir(d)
                links.append('-link')
                links.append(os.path.relpath(depOut, out))
            cp = classpath(p.name, includeSelf=True)
            sp = os.pathsep.join(p.source_dirs())
            overviewFile = join(p.dir, 'overview.html')
            delOverviewFile = False
            if not exists(overviewFile):
                # synthesize a minimal overview page; removed again in the finally below
                with open(overviewFile, 'w') as fp:
                    print >> fp, '<html><body>Documentation for the <code>' + p.name + '</code> project.</body></html>'
                delOverviewFile = True
            nowarnAPI = []
            if not args.warnAPI:
                nowarnAPI.append('-XDignore.symbol.file')

            # windowTitle only applies to the standard doclet processor
            windowTitle = []
            if stdDoclet:
                windowTitle = ['-windowtitle', p.name + ' javadoc']
            try:
                log('Generating {2} for {0} in {1}'.format(p.name, out, docDir))
                run([java().javadoc, memory,
                     '-XDignore.symbol.file',
                     '-classpath', cp,
                     '-quiet',
                     '-d', out,
                     '-overview', overviewFile,
                     '-sourcepath', sp] +
                     links +
                     extraArgs +
                     nowarnAPI +
                     windowTitle +
                     list(pkgs))
                log('Generated {2} for {0} in {1}'.format(p.name, out, docDir))
            finally:
                if delOverviewFile:
                    os.remove(overviewFile)

    else:
        # The projects must be built to ensure javadoc can find class files for all referenced classes
        build(['--no-native'])

        pkgs = set()
        sp = []
        names = []
        for p in projects:
            find_packages(p.source_dirs(), pkgs)
            sp += p.source_dirs()
            names.append(p.name)

        links = ['-link', 'http://docs.oracle.com/javase/' + str(_java.javaCompliance.value) + '/docs/api/']
        out = join(_mainSuite.dir, docDir)
        if args.base is not None:
            out = join(args.base, docDir)
        cp = classpath()
        sp = os.pathsep.join(sp)
        nowarnAPI = []
        if not args.warnAPI:
            nowarnAPI.append('-XDignore.symbol.file')
        log('Generating {2} for {0} in {1}'.format(', '.join(names), out, docDir))
        run([java().javadoc, memory,
             '-classpath', cp,
             '-quiet',
             '-d', out,
             '-sourcepath', sp] +
             links +
             extraArgs +
             nowarnAPI +
             list(pkgs))
        log('Generated {2} for {0} in {1}'.format(', '.join(names), out, docDir))
class Chunk:
    """A region of a text document bounded by a left delimiter and an
    optional right delimiter.

    After construction, 'text' holds the content strictly between the two
    delimiters, or None when either delimiter is absent (or mis-ordered).
    With no right delimiter, the region is the left delimiter itself and
    'text' is the empty string when the delimiter is found.
    """

    def __init__(self, content, ldelim, rdelim=None):
        start = content.find(ldelim)
        end = content.find(rdelim) if rdelim is not None else start + len(ldelim)
        self.ldelim = ldelim
        self.rdelim = rdelim
        found = start != -1 and end != -1 and end > start
        self.text = content[start + len(ldelim):end] if found else None

    def replace(self, content, repl):
        """Return 'content' with the delimited region (delimiters included)
        replaced by 'repl'. Note: uses str.replace, so every occurrence of
        the matched substring is replaced."""
        start = content.find(self.ldelim)
        if self.rdelim is None:
            end, tailLen = start + len(self.ldelim), 0
        else:
            end, tailLen = content.find(self.rdelim), len(self.rdelim)
        old = content[start:end + tailLen]
        return content.replace(old, repl)
# Post-process an overview-summary.html file to move the
# complete overview to the top of the page
def _fix_overview_summary(path, topLink):
    """
    Processes an "overview-summary.html" generated by javadoc to put the complete
    summary text above the Packages table.
    """

    # This uses scraping and so will break if the relevant content produced by javadoc changes in any way!
    with open(path) as fp:
        content = fp.read()

    # the abbreviated summary rendered in the page header
    chunk1 = Chunk(content, """<div class="header">
<div class="subTitle">
<div class="block">""", """</div>
</div>
<p>See: <a href="#overview_description">Description</a></p>
</div>""")

    # the complete description rendered in the page footer
    chunk2 = Chunk(content, """<div class="footer"><a name="overview_description">
<!-- -->
</a>
<div class="subTitle">
<div class="block">""", """</div>
</div>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->""")

    assert chunk1.text, 'Could not find header section in ' + path
    assert chunk2.text, 'Could not find footer section in ' + path
    # promote the full footer description into the header and drop the footer copy
    content = chunk1.replace(content, '<div class="header"><div class="subTitle"><div class="block">' + topLink + chunk2.text +'</div></div></div>')
    content = chunk2.replace(content, '')

    with open(path, 'w') as fp:
        fp.write(content)
# Post-process a package-summary.html file to move the
# complete package description to the top of the page
def _fix_package_summary(path):
    """
    Processes a "package-summary.html" generated by javadoc to put the complete
    summary text above the Packages table.
    """

    # This uses scraping and so will break if the relevant content produced by javadoc changes in any way!
    with open(path) as fp:
        content = fp.read()

    # the abbreviated header section
    chunk1 = Chunk(content, """<div class="header">
<h1 title="Package" class="title">Package""", """<p>See: <a href="#package_description">Description</a></p>
</div>""")

    # the complete package description near the bottom of the page
    chunk2 = Chunk(content, """<a name="package_description">
<!-- -->
</a>""", """</div>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->""")

    if chunk1.text:
        if chunk2.text:
            # re-title the description and promote it into the header
            repl = re.sub(r'<h2 title=(.*) Description</h2>', r'<h1 title=\1</h1>', chunk2.text, 1)
            content = chunk1.replace(content, '<div class="header">' + repl +'</div></div>')
            content = chunk2.replace(content, '')

            with open(path, 'w') as fp:
                fp.write(content)
        else:
            log('warning: Could not find package description detail section in ' + path)

    else:
        # no package description given
        pass
def site(args):
    """creates a website containing javadoc and the project dependency graph"""
    parser = ArgumentParser(prog='site')
    parser.add_argument('-d', '--base', action='store', help='directory for generated site', required=True, metavar='<dir>')
    parser.add_argument('--name', action='store', help='name of overall documentation', required=True, metavar='<name>')
    parser.add_argument('--overview', action='store', help='path to the overview content for overall documentation', required=True, metavar='<path>')
    parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
    parser.add_argument('--jd', action='append', help='extra Javadoc arguments (e.g. --jd @-use)', metavar='@<arg>', default=[])
    parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude', metavar='<pkgs>')
    parser.add_argument('--dot-output-base', action='store', help='base file name (relative to <dir>/all) for project dependency graph .svg and .jpg files generated by dot (omit to disable dot generation)', metavar='<path>')
    parser.add_argument('--title', action='store', help='value used for -windowtitle and -doctitle javadoc args for overall documentation (default: "<name>")', metavar='<title>')

    args = parser.parse_args(args)

    args.base = os.path.abspath(args.base)
    # generate into a temp sibling directory first; only moved into place on success
    tmpbase = tempfile.mkdtemp(prefix=basename(args.base) + '.', dir=dirname(args.base))
    unified = join(tmpbase, 'all')

    exclude_packages_arg = []
    if args.exclude_packages is not None:
        exclude_packages_arg = ['--exclude-packages', args.exclude_packages]

    projects = sorted_deps()
    projects_arg = []
    if args.projects is not None:
        projects_arg = ['--projects', args.projects]
        projects = [project(name) for name in args.projects.split(',')]

    extra_javadoc_args = []
    for a in args.jd:
        extra_javadoc_args.append('--arg')
        extra_javadoc_args.append('@' + a)

    try:
        # Create javadoc for each project
        javadoc(['--base', tmpbase] + exclude_packages_arg + projects_arg + extra_javadoc_args)

        # Create unified javadoc for all projects
        with open(args.overview) as fp:
            content = fp.read()
            idx = content.rfind('</body>')
            if idx != -1:
                # splice a table of per-project javadoc links into the overview page
                args.overview = join(tmpbase, 'overview_with_projects.html')
                with open(args.overview, 'w') as fp2:
                    print >> fp2, content[0:idx]
                    print >> fp2, """<div class="contentContainer">
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Projects table">
<caption><span>Projects</span><span class="tabEnd">&nbsp;</span></caption>
<tr><th class="colFirst" scope="col">Project</th><th class="colLast" scope="col">&nbsp;</th></tr>
<tbody>"""
                    color = 'row'
                    for p in projects:
                        print >> fp2, '<tr class="{1}Color"><td class="colFirst"><a href="../{0}/javadoc/index.html", target = "_top">{0}</a></td><td class="colLast">&nbsp;</td></tr>'.format(p.name, color)
                        # alternate row shading
                        color = 'row' if color == 'alt' else 'alt'
                    print >> fp2, '</tbody></table></div>'
                    print >> fp2, content[idx:]
        title = args.title if args.title is not None else args.name
        javadoc(['--base', tmpbase,
                 '--unified',
                 '--arg', '@-windowtitle', '--arg', '@' + title,
                 '--arg', '@-doctitle', '--arg', '@' + title,
                 '--arg', '@-overview', '--arg', '@' + args.overview] + exclude_packages_arg + projects_arg + extra_javadoc_args)
        os.rename(join(tmpbase, 'javadoc'), unified)

        # Generate dependency graph with Graphviz
        if args.dot_output_base is not None:
            # verify a usable 'dot' executable is on the PATH before generating
            dotErr = None
            try:
                if not 'version' in subprocess.check_output(['dot', '-V'], stderr=subprocess.STDOUT):
                    dotErr = 'dot -V does not print a string containing "version"'
            except subprocess.CalledProcessError as e:
                dotErr = 'error calling "dot -V": {}'.format(e)
            except OSError as e:
                dotErr = 'error calling "dot -V": {}'.format(e)

            if dotErr != None:
                abort('cannot generate dependency graph: ' + dotErr)

            dot = join(tmpbase, 'all', str(args.dot_output_base) + '.dot')
            svg = join(tmpbase, 'all', str(args.dot_output_base) + '.svg')
            jpg = join(tmpbase, 'all', str(args.dot_output_base) + '.jpg')
            html = join(tmpbase, 'all', str(args.dot_output_base) + '.html')
            with open(dot, 'w') as fp:
                dim = len(projects)
                print >> fp, 'digraph projects {'
                print >> fp, 'rankdir=BT;'
                print >> fp, 'size = "' + str(dim) + ',' + str(dim) + '";'
                print >> fp, 'node [shape=rect, fontcolor="blue"];'
                #print >> fp, 'edge [color="green"];'
                for p in projects:
                    print >> fp, '"' + p.name + '" [URL = "../' + p.name + '/javadoc/index.html", target = "_top"]'
                    for dep in p.canonical_deps():
                        if dep in [proj.name for proj in projects]:
                            print >> fp, '"' + p.name + '" -> "' + dep + '"'
                depths = dict()
                for p in projects:
                    d = p.max_depth()
                    depths.setdefault(d, list()).append(p.name)
                print >> fp, '}'
            run(['dot', '-Tsvg', '-o' + svg, '-Tjpg', '-o' + jpg, dot])

            # Post-process generated SVG to remove title elements which most browsers
            # render as redundant (and annoying) tooltips.
            with open(svg, 'r') as fp:
                content = fp.read()
            content = re.sub('<title>.*</title>', '', content)
            content = re.sub('xlink:title="[^"]*"', '', content)
            with open(svg, 'w') as fp:
                fp.write(content)

            # Create HTML that embeds the svg file in an <object> frame
            with open(html, 'w') as fp:
                print >> fp, '<html><body><object data="{}.svg" type="image/svg+xml"></object></body></html>'.format(args.dot_output_base)

        # rewrite every generated summary page to hoist descriptions and add a top link
        top = join(tmpbase, 'all', 'overview-summary.html')
        for root, _, files in os.walk(tmpbase):
            for f in files:
                if f == 'overview-summary.html':
                    path = join(root, f)
                    topLink = ''
                    if top != path:
                        link = os.path.relpath(join(tmpbase, 'all', 'index.html'), dirname(path))
                        topLink = '<p><a href="' + link + '", target="_top"><b>[return to the overall ' + args.name + ' documentation]</b></a></p>'
                    _fix_overview_summary(path, topLink)
                elif f == 'package-summary.html':
                    path = join(root, f)
                    _fix_package_summary(path)

        if exists(args.base):
            shutil.rmtree(args.base)
        shutil.move(tmpbase, args.base)

        print 'Created website - root is ' + join(args.base, 'all', 'index.html')

    finally:
        if exists(tmpbase):
            shutil.rmtree(tmpbase)
def findclass(args, logToConsole=True):
    """find all classes matching a given substring"""
    matches = []
    for entry, filename in classpath_walk(includeBootClasspath=True):
        if not filename.endswith('.class'):
            continue
        # zip entries use '/' separators, directory walks use the platform separator
        separator = '/' if isinstance(entry, zipfile.ZipFile) else os.sep
        classname = filename.replace(separator, '.')[:-len('.class')]
        for pattern in args:
            if pattern in classname:
                matches.append(classname)
                if logToConsole:
                    log(classname)
    return matches
def select_items(items, descriptions=None, allowMultiple=True):
    """
    Presents a command line interface for selecting one or more (if allowMultiple is true) items.
    """
    if len(items) <= 1:
        # nothing to choose between; return the (possibly empty) list as-is
        return items
    else:
        if allowMultiple:
            log('[0] <all>')
        for i in range(0, len(items)):
            if descriptions is None:
                log('[{0}] {1}'.format(i + 1, items[i]))
            else:
                assert len(items) == len(descriptions)
                wrapper = textwrap.TextWrapper(subsequent_indent='    ')
                log('\n'.join(wrapper.wrap('[{0}] {1} - {2}'.format(i + 1, items[i], descriptions[i]))))

        # loop until a parseable selection is entered
        while True:
            if allowMultiple:
                s = raw_input('Enter number(s) of selection (separate multiple choices with spaces): ').split()
            else:
                s = [raw_input('Enter number of selection: ')]
            try:
                s = [int(x) for x in s]
            except:
                log('Selection contains non-numeric characters: "' + ' '.join(s) + '"')
                continue

            if allowMultiple and 0 in s:
                return items

            indexes = []
            for n in s:
                if n not in range(1, len(items) + 1):
                    # NOTE(review): this 'continue' only skips the out-of-range entry
                    # within the for loop; the user is NOT re-prompted -- verify intent
                    log('Invalid selection: ' + str(n))
                    continue
                else:
                    indexes.append(n - 1)

            if allowMultiple:
                return [items[i] for i in indexes]
            if len(indexes) == 1:
                return items[indexes[0]]
            return None
def javap(args):
    """disassemble classes matching given pattern with javap"""
    javap = java().javap
    if not exists(javap):
        # message grammar fixed ("does not exists" -> "does not exist")
        abort('The javap executable does not exist: ' + javap)
    else:
        candidates = findclass(args, logToConsole=False)
        if len(candidates) == 0:
            log('no matches')
            # BUG FIX: previously fell through and ran javap with an empty
            # selection, which just produced the javap usage message
            return
        selection = select_items(candidates)
        run([javap, '-private', '-verbose', '-classpath', classpath()] + selection)
def show_projects(args):
    """show all loaded projects"""
    # list each suite's projects file followed by its project names
    for s in suites():
        projectsFile = join(s.dir, 'mx', 'projects')
        if not exists(projectsFile):
            continue
        log(projectsFile)
        for proj in s.projects:
            log('\t' + proj.name)
def add_argument(*args, **kwargs):
    """
    Defines a single command-line argument on the global argument parser.
    All positional and keyword arguments are forwarded to
    ArgumentParser.add_argument.
    """
    assert _argParser is not None
    _argParser.add_argument(*args, **kwargs)
def update_commands(suite, new_commands):
    """Merge 'new_commands' into the global 'commands' table.

    A non-primary suite is not allowed to override an existing command;
    such redefinitions are silently ignored.
    """
    for key, value in new_commands.iteritems():  # Python 2 idiom
        if commands.has_key(key) and not suite.primary:
            pass
            # print("WARNING: attempt to redefine command '" + key + "' in suite " + suite.dir)
        else:
            commands[key] = value
# Table of commands in alphabetical order.
# Keys are command names, value are lists: [<function>, <usage msg>, <format args to doc string of function>...]
# If any of the format args are instances of Callable, then they are called with an 'env' argument
# before being used in the call to str.format().
# Extensions should update this table directly
commands = {
    'about': [about, ''],
    'build': [build, '[options]'],
    'checkstyle': [checkstyle, ''],
    'canonicalizeprojects': [canonicalizeprojects, ''],
    'clean': [clean, ''],
    'eclipseinit': [eclipseinit, ''],
    'eclipseformat': [eclipseformat, ''],
    'findclass': [findclass, ''],
    'fsckprojects': [fsckprojects, ''],
    'help': [help_, '[command]'],
    'ideclean': [ideclean, ''],
    'ideinit': [ideinit, ''],
    'archive': [archive, '[options]'],
    'projectgraph': [projectgraph, ''],
    'javap': [javap, '<class name patterns>'],
    'javadoc': [javadoc, '[options]'],
    'site': [site, '[options]'],
    'netbeansinit': [netbeansinit, ''],
    'projects': [show_projects, ''],
}

# global command-line argument parser; extended further via add_argument()
_argParser = ArgParser()
def _findPrimarySuite():
    """Locate the primary suite directory: try the current working directory
    first, then every ancestor of this executable's path. Returns None when
    no suite directory is found."""
    def _suite_root(candidate):
        # a suite dir contains an 'mx*' subdirectory holding a 'projects' file
        for entry in os.listdir(candidate):
            if fnmatch.fnmatch(entry, 'mx*'):
                mxDir = join(candidate, entry)
                if exists(mxDir) and isdir(mxDir) and exists(join(mxDir, 'projects')):
                    return dirname(mxDir)

    cwd = os.getcwd()
    if _suite_root(cwd):
        return cwd
    ancestor = dirname(sys.argv[0])
    while ancestor:
        if _suite_root(ancestor):
            return ancestor
        ancestor = dirname(ancestor)
    return None
def main():
    """mx entry point: load the primary suite, parse the command line and
    dispatch to the selected command (allowing unambiguous prefixes)."""
    primarySuiteDir = _findPrimarySuite()
    if primarySuiteDir:
        global _mainSuite
        _mainSuite = _loadSuite(primarySuiteDir, True)

    opts, commandAndArgs = _argParser._parse_cmd_line()

    # publish parsed options and the Java configuration globally
    global _opts, _java
    _opts = opts
    _java = JavaConfig(opts)

    for s in suites():
        s._post_init(opts)

    if len(commandAndArgs) == 0:
        _argParser.print_help()
        return

    command = commandAndArgs[0]
    command_args = commandAndArgs[1:]

    if not commands.has_key(command):  # Python 2 idiom
        # accept an unambiguous prefix of a command name
        hits = [c for c in commands.iterkeys() if c.startswith(command)]
        if len(hits) == 1:
            command = hits[0]
        elif len(hits) == 0:
            abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(command, _format_commands()))
        else:
            abort('mx: command \'{0}\' is ambiguous\n    {1}'.format(command, ' '.join(hits)))

    c, _ = commands[command][:2]
    def term_handler(signum, frame):
        abort(1)
    signal.signal(signal.SIGTERM, term_handler)

    try:
        if opts.timeout != 0:
            # enforce the --timeout option via SIGALRM
            def alarm_handler(signum, frame):
                abort('Command timed out after ' + str(opts.timeout) + ' seconds: ' + ' '.join(commandAndArgs))
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(opts.timeout)
        retcode = c(command_args)
        if retcode is not None and retcode != 0:
            abort(retcode)
    except KeyboardInterrupt:
        # no need to show the stack trace when the user presses CTRL-C
        abort(1)
if __name__ == '__main__':
    # rename this module as 'mx' so it is not imported twice by the commands.py modules
    sys.modules['mx'] = sys.modules.pop('__main__')
    main()
| kevinmcain/graal | mxtool/mx.py | Python | gpl-2.0 | 148,173 |
# Copyright (C) 2022 Lunatixz
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from resources.lib.globals import *
class Fillers:
    """Builds "filler" content (Bumpers/Commercials/Trailers/Ratings, aka BCTs)
    and injects it around a channel's file list.

    NOTE(review): large parts of the commercial/trailer injection logic are
    commented-out work in progress (see injectBCTs); only ratings and bumpers
    are currently injected.
    """
    def __init__(self, builder=None):
        """Bind to the owning builder; a bare instance is left unconfigured."""
        if builder is None: return
        self.builder = builder
        self.writer = builder.writer
        self.cache = builder.cache
        self.pool = builder.pool
    def log(self, msg, level=xbmc.LOGDEBUG):
        """Log *msg* prefixed with the class name for easier grepping."""
        return log('%s: %s'%(self.__class__.__name__,msg),level)
    def buildLocalTrailers(self, citem, fileList, includeVFS=False):
        """Collect trailer entries (with duration meta) from *fileList*.

        VFS-hosted trailers are skipped unless *includeVFS* is set.  Returns a
        de-duplicated list of dicts (via setDictLST).
        """
        #parse filelist for trailers, collect duration meta.
        self.log("buildLocalTrailers, channel = %s, fileList = %s"%(citem.get('id'),len(fileList)))
        def getItem(item):
            # Returns a playable trailer dict, or None (filtered out below).
            file = item.get('trailer','')
            if file:
                if not includeVFS and file.lower().startswith(tuple(VFS_TYPES)): return
                return {'label':'%s - Trailer'%(item['label']),'duration':self.writer.jsonRPC.parseDuration(file),'path':'','file':file,'art':item.get('art',{})}
        return setDictLST(list(filter(None,[getItem(fileItem) for fileItem in fileList])))
    def buildBCTresource(self, type, path, media='video'):
        """List a BCT resource directory.

        Uses the resource add-on's version as cache checksum when *path* is a
        resource:// URL; PRE_ROLL types skip duration probing.
        """
        self.log('buildBCTresource, type = %s, path = %s, media = %s'%(type,path,media))
        if not path.startswith(('resource://')): checksum = ADDON_VERSION
        else: checksum = self.writer.jsonRPC.getPluginMeta(path).get('version',ADDON_VERSION)
        if type in PRE_ROLL: ignoreDuration = True
        else: ignoreDuration = False
        return self.writer.jsonRPC.getFileDirectory(cleanResourcePath(path),media,ignoreDuration,checksum)
    def buildResourceType(self, type, paths):
        """Yield one playable-paths mapping per resource path in *paths*."""
        for resource in paths:
            yield self.getPlayablePaths(type,resource)
    def getPlayablePaths(self, type, resource):
        """Group a resource's items by sub-folder.

        Folderless items land under 'root'; for 'ratings' the items are keyed
        by their label (without extension) instead, mapping label -> item.
        """
        self.log('getPlayablePaths, type = %s, resource = %s'%(type,resource))
        if not resource.startswith('resource://'): resource = 'resource://%s'%(resource)
        tmpdict = dict()
        items = list(self.buildBCTresource(type, resource))
        for item in items:
            folder = os.path.basename(os.path.normpath(item.get('path','')))
            if folder and folder != 'resources':
                tmpdict.setdefault(folder.lower(),[]).append(item)
            else:
                if type == "ratings":
                    tmpdict.setdefault(os.path.splitext(item.get('label'))[0].lower(),{}).update(item)
                else:
                    tmpdict.setdefault('root',[]).append(item)
        return tmpdict
    def injectBCTs(self, citem, fileList):
        """Prepend rating/bumper clips to each non-VFS entry in *fileList*,
        rebuilding start/stop/duration so the schedule stays contiguous.

        NOTE(review): commercials/trailers handling below is disabled (WIP).
        """
        if not fileList: return fileList
        self.log("injectBCTs, channel = %s, fileList = %s"%(citem.get('id'),len(fileList)))
        #bctTypes = {"ratings" :{"min":1,"max":1,"enabled":True ,"paths":[SETTINGS.getSetting('Resource_Ratings')]}}
        lstop = 0
        bctItems = dict()
        nfileList = list()
        chname = citem.get('name','')
        chcats = citem.get('groups',[])
        isMovie = 'movie' in citem.get('type','').lower()
        # Merge every enabled BCT type's resources into bctItems[type][folder].
        [[bctItems.setdefault(key,{}).update(d) for d in list(self.buildResourceType(key, self.builder.bctTypes[key].get('paths',[])))] for key in self.builder.bctTypes.keys() if self.builder.bctTypes[key].get('enabled',False)]
        if 'ratings' in bctItems:
            ratings = bctItems.get('ratings',{})
        else:
            ratings = {}
        if 'bumpers' in bctItems:
            # Channel-specific bumpers (folder matching the channel name) are
            # preferred in addition to the generic 'root' bumpers.
            bumpers = bctItems.get('bumpers',{}).get('root',[])
            bumpers.extend(bctItems.get('bumpers',{}).get(chname.lower(),[]))
        else:
            bumpers = []
        # min_commercials = self.builder.bctTypes.get('commercials',{}).get('min',0) #0==Disabled,1==Auto
        # max_commercials = self.builder.bctTypes.get('commercials',{}).get('max',4)
        # auto_commercials = min_commercials == 1
        # if 'commercials' in bctItems:
        # commercials = bctItems.get('commercials',{}).get(chname.lower(),[])
        # commercials.extend(bctItems.get('commercials',{}).get('root',[]))
        # if isinstance(commercials,list) and len(commercials) > 0: random.shuffle(commercials)
        # print('commercials',commercials)
        # else:
        # commercials = []
        # auto_commercials = False
        # min_trailers = self.builder.bctTypes.get('trailers',{}).get('min',0) #0==Disabled,1==Auto
        # max_trailers = self.builder.bctTypes.get('trailers',{}).get('max',4)
        # auto_trailers = min_trailers == 1
        # if 'trailers' in bctItems:
        # trailers = []
        # for chcat in chcats: trailers.extend(bctItems.get('trailers',{}).get(chcat.lower(),[]))
        # trailers.extend(bctItems.get('trailers',{}).get('root',[]))
        # trailers.extend(self.buildLocalTrailers(citem, fileList))
        # if isinstance(trailers,list) and len(trailers) > 0: random.shuffle(trailers)
        # print('trailers',trailers)
        # else:
        # trailers = []
        # auto_trailers = False
        for idx,fileItem in enumerate(fileList):
            file = fileItem.get('file','')
            fileItem['originalfile'] = file
            # Chain the schedule: after the first item, start = previous stop.
            fileItem['start'] = fileItem['start'] if lstop == 0 else lstop
            fileItem['stop'] = fileItem['start'] + fileItem['duration']
            if not file.startswith(tuple(VFS_TYPES)): #stacks not compatible with VFS sources.
                if isStack(file):
                    paths = splitStacks(file)
                else:
                    paths = [file]
                oPaths = paths.copy()
                stop = fileItem['stop']
                # Time budget left until the next rounded-up slot boundary.
                end = abs(roundTimeUp(stop) - stop) #auto mode
                # print('duration',fileItem['duration'])
                # print('start',datetime.datetime.fromtimestamp(fileItem['start']))
                # print('stop',datetime.datetime.fromtimestamp(stop))
                # print('end',end)
                #ratings (auto == 1)
                mpaa = cleanMPAA(fileItem.get('mpaa',''))
                if is3D(fileItem): mpaa += ' (3DSBS)'
                rating = ratings.get(mpaa.lower(), {})
                if rating:
                    paths.insert(0,rating.get('file'))
                    end -= rating.get('duration')
                # print('end ratings', end)
                # print('mpaa',mpaa)
                # print('rating',rating)
                #bumpers (auto == 1)
                if bumpers:
                    bumper = random.choice(bumpers)
                    paths.insert(0,bumper.get('file'))
                    end -= bumper.get('duration')
                # print('end bumper', end)
                # print('chname',chname)
                # print('bumper',bumper)
                # CTItems = set()
                # cnt_commercials = 0
                # cnt_trailers = 0
                # #commercials
                # if commercials and not auto_commercials:
                # for cnt in range(min_commercials):
                # commercial = random.choice(commercials)
                # CTItems.add(commercial.get('file'))
                # end -= commercial.get('duration')
                # print('end commercial', end)
                # print('commercial',commercial)
                #trailers
                # if trailers and not auto_trailers:
                # trailers_sel = random.sample(trailers, random.randint(min_trailers,max_trailers))
                # print('trailers_sel',trailers_sel)
                # for trailer in trailers_sel:
                # tfile = trailer.get('file')
                # # if tfile.startwith(tuple(VFS_TYPES)):
                # CTItems.add(tfile)
                # end -= trailer.get('duration')
                # print('end trailer', end)
                # print('trailer',trailer)
                # #auto fill POST_ROLL
                # if auto_commercials | auto_trailers:
                # while end > 0 and not self.writer.monitor.abortRequested():
                # if self.writer.monitor.waitForAbort(0.001): break
                # print('autofill while loop',end)
                # stpos = end
                # if commercials and auto_commercials and cnt_commercials <= max_commercials:
                # commercial = random.choice(commercials)
                # CTItems.add(commercial.get('file'))
                # end -= commercial.get('duration')
                # print('end commercial', end)
                # print('commercial',commercial)
                # if trailers and auto_trailers and cnt_trailers <= max_trailers:
                # trailer = random.choice(trailers)
                # CTItems.add(trailer.get('file'))
                # end -= trailer.get('duration')
                # print('end trailer', end)
                # print('trailer',trailer)
                # if stpos == end: break #empty list
                # CTItems = list(CTItems)
                # print('CTItems',CTItems)
                # if len(CTItems) > 0:
                # random.shuffle(CTItems)#shuffle, then random sample for increased diversity.
                # paths.extend(random.sample(CTItems, len(CTItems)))
                # #todo trailers, commercials when "Auto" loop fill till end time close to 0. else fill random min,max count.
                # #trailers, commercials do not match by chname, random.choice from list, for variation users change resource folder in adv. rules.
                # #trailers always incorporate local_trailers from the media in current fileList playlist.
                # print('oPaths',oPaths)
                # print('paths',paths)
                if oPaths != paths:
                    # Fillers were added: re-stack the file and stretch the slot.
                    fileItem['file'] = buildStack(paths)
                    fileItem['stop'] = abs(roundTimeUp(stop) - abs(end))
                    fileItem['duration'] = (datetime.datetime.fromtimestamp(fileItem['stop']) - datetime.datetime.fromtimestamp(fileItem['start'])).seconds
                # print('end',end,'lstop',datetime.datetime.fromtimestamp(fileItem['stop']),'dur',fileItem['duration'])
                # print('fileItem',fileItem)
            lstop = fileItem['stop'] #new stop time, offset next start time.
            nfileList.append(fileItem)
        return nfileList
# # todo use zip to inject bcts?
# # for r, b, f, c, t in zip(ratings, bumpers, filelist, commercials, trailers):
# def buildResourcePaths(paths):
# return list([self.writer.jsonRPC.resources.walkResource(path,VIDEO_EXTS) for path in paths])
# def buildResourceType():
# for key in self.builder.bctTypes.keys():
# if self.builder.bctTypes[key].get('enabled',False):
# resources = buildResourcePaths(self.builder.bctTypes[key].get('paths',[]))
# for resource in resources:
# bcts = {}
# for id, filenames in resource.items():
# for file in filenames:
# bcts.setdefault(splitFilename(file)[0],[]).append(os.path.join(id,file))
# yield key,bcts
# print('injectBCTs',self.builder.bctTypes)
# print('injectBCTs',dict(buildResourceType()))
# {
# 'ratings': [{
# 'special://home/addons/resource.videos.ratings.mpaa.classic/resources': ['G (3DSBS).mp4', 'G.mp4', 'NC-17 (3DSBS).mp4', 'NC-17.mp4', 'NR (3DSBS).mp4', 'NR.mp4', 'PG (3DSBS).mp4', 'PG-13 (3DSBS).mp4', 'PG-13.mp4', 'PG.mp4', 'R (3DSBS).mp4', 'R.mp4']
# }],
# 'bumpers': [{
# 'special://home/addons/resource.videos.bumpers.pseudotv/resources': ['Glass Prism 1080p.mp4', 'HBO 1080p.mp4', 'Netflix 1080p.mp4', 'Netflix Colors 1080p.mp4']
# }, {
# 'special://home/addons/resource.videos.bumpers.sample/resources': [],
# 'special://home/addons/resource.videos.bumpers.sample/resources/Cartoon Network': ['Cartoon Network.mp4'],
# 'special://home/addons/resource.videos.bumpers.sample/resources/Discovery Channel': ['1.mp4'],
# 'special://home/addons/resource.videos.bumpers.sample/resources/HBO': ['bumper.mp4'],
# 'special://home/addons/resource.videos.bumpers.sample/resources/ITV': ['ITV.mp4', 'ITV2.mp4']
# }],
# 'commercials': [{
# 'special://home/addons/resource.videos.commercials.sample/resources': ['t30s.mp4', 'teS5.mp4', 'teSG.mp4', 'teSI.mp4']
# }],
# 'trailers': [{
# 'special://home/addons/resource.videos.trailers.sample/resources': ['Coming 2 America Trailer #2 (2021) - Movieclips Trailers.mp4', 'Raya and the Last Dragon Super Bowl TV Spot (2021) - Movieclips Trailers.mp4', 'Super Bowl Movie & TV Trailers (2021) - Movieclips Trailers.mp4']
# }]
# }
# interleave
# intersperse
#ratings
| PseudoTV/PseudoTV_Live | plugin.video.pseudotv.live/resources/lib/fillers.py | Python | gpl-3.0 | 14,573 |
class ComplexClause(object):
    """Base class for boolean combinations (AND/OR) of query clauses.

    Wraps child clauses and delegates matrix/involvement/attribute queries to
    them; a unique prefix is pushed down so value aliases stay distinct.
    """
    type_string = ''

    def __init__(self, *args):
        self.clauses = args
        self.add_prefix(self.type_string)

    def is_matrix(self):
        """Return True only if every child clause is a matrix clause."""
        for c in self.clauses:
            if not c.is_matrix():
                return False
        return True

    def involves(self, annotation):
        """Return True if any child clause involves *annotation*."""
        for c in self.clauses:
            if c.involves(annotation):
                return True
        return False

    @property
    def nodes(self):
        """
        Get all annotations involved in the clause.
        """
        nodes = []
        for a in self.clauses:
            nodes.extend(a.nodes)
        return nodes

    @property
    def in_subquery(self):
        # True if any child clause lives in a subquery.
        for a in self.clauses:
            if a.in_subquery:
                return True
        return False

    @property
    def attributes(self):
        """
        Get all attributes involved in the clause.
        """
        attributes = []
        for a in self.clauses:
            attributes.extend(a.attributes)
        return attributes

    def add_prefix(self, prefix):
        """
        Adds a prefix to a clause

        Parameters
        ----------
        prefix : str
            the prefix to add
        """
        # Recurse with the child index appended so nested clauses get
        # unique alias prefixes; leaf clauses without value_alias_prefix
        # are silently skipped.
        for i, c in enumerate(self.clauses):
            if isinstance(c, ComplexClause):
                c.add_prefix(prefix + str(i))
            else:
                try:
                    c.value_alias_prefix += prefix + str(i)
                except AttributeError:
                    pass

    def generate_params(self):
        """
        Generates dictionary of parameters of ComplexClause

        Returns
        -------
        params : dict
            a dictionary of parameters
        """
        from .attributes import NodeAttribute
        params = {}
        for c in self.clauses:
            if isinstance(c, ComplexClause):
                params.update(c.generate_params())
            else:
                try:
                    # Attribute-valued clauses are compared in Cypher directly
                    # and need no bound parameter.
                    if not isinstance(c.value, NodeAttribute):
                        params[c.cypher_value_string()[1:-1].replace('`', '')] = c.value
                except AttributeError:
                    pass
        return params
class or_(ComplexClause):
    type_string = 'or_'

    def for_cypher(self):
        """
        Return a Cypher representation of the clause.
        """
        joined = ' OR '.join(clause.for_cypher() for clause in self.clauses)
        return '({})'.format(joined)
class and_(ComplexClause):
    type_string = 'and_'

    def for_cypher(self):
        """
        Return a Cypher representation of the clause.
        """
        joined = ' AND '.join(clause.for_cypher() for clause in self.clauses)
        return '({})'.format(joined)
#!/usr/bin/env python
import rospy
import math
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
# Latest odometry message from /crazyflie03/odom; None until first callback.
odom1 = None
def callbacallback_odom1(odom):
    """ROS subscriber callback: cache the most recent Odometry message."""
    global odom1
    odom1 = odom
"""
Estimate the friction force
"""
def listener():
    """Ramp up the commanded force until the robot starts moving.

    The force is increased by 1/freq grams per tick; the first value at which
    the measured x velocity exceeds 0.01 is reported as the friction force.
    Python 2 script (print statements); requires a running ROS master.
    NOTE(review): pub2/pub3 are declared global but never assigned.
    """
    global pub1, pub2, pub3
    rospy.init_node('ramping', anonymous=True)
    # Odometry
    rospy.Subscriber('/crazyflie03/odom', Odometry, callbacallback_odom1)
    # Topics toward the robots
    pub1 = rospy.Publisher('/crazyflie03/omni_force', Twist, queue_size=0)
    pwm = 10000
    freq = 5. # .5hz
    rate = rospy.Rate(freq)
    force = 1 # in grams
    while not rospy.is_shutdown():
        if odom1 is None:
            # No odometry received yet -- wait a tick before commanding force.
            print "sem odometria"
            rospy.logwarn("No Odometry")
            rate.sleep()
            continue
        twist = Twist()
        force += 1/freq
        # twist.linear.x = force
        twist.angular.z = force * .1#0.01
        print force, odom1.twist.twist.linear.x
        # velocity
        if odom1.twist.twist.linear.x > 0.01:
            # Robot moved: the previous force value is the static friction.
            print "Friction force = ", force-1/freq
            break
        pub1.publish(twist)
        rate.sleep()
    # End
    pub1.publish(Twist())
if __name__ == '__main__':
    listener()
| dsaldana/gmodrotor | gmodrotor_control/scripts/test_friction.py | Python | gpl-3.0 | 1,252 |
# Application configuration for the short-piesome URL shortener.
# The regex values are raw strings so backslash escapes reach the regex
# engine unchanged -- non-raw strings like "\-" rely on Python passing
# unknown escapes through and raise a DeprecationWarning on Python 3.6+.
config = {
    "pretty-name": "Short-Piesome",
    "server-host": "0.0.0.0",
    "server-port": 8080,
    "server": "wsgiref",
    "base-url": "http://localhost:8080/",
    "backend": {
        "name": "redis",
        "host": "localhost",
        "port": 6379
    },
    # Salt for hashids short-code generation.
    "hashids-salt": "E1pVf785RbBNftdy8xwdtTWcvlAu7R3U",
    # Allowed custom short-name pattern (1-32 chars).
    "regex-name": r"([a-zA-Z_\-+]){1,32}",
    # Accepted target URL pattern (http/https).
    "regex-url": r"^(https?):\/\/[-a-zA-Z0-9+&@#\/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#\/%=~_|]"
}
| JuhaniImberg/short-piesome | short/config.py | Python | mit | 455 |
#!/usr/bin/env python
import random
import sys
import drinks.manual_db
# MUST MAP TO ORDER OF PHYSICAL VALVES
# Tuple index == valve index; do not reorder entries without re-plumbing the
# machine (ordering requirement per the comment above -- verify against the
# actual valve wiring before changing).
INGREDIENTS_ORDERED = (
    "angostura bitters",
    "lime juice",
    "lemon juice",
    "grenadine",  # brown bottle dark liquid
    "agave syrup",  # clear bottle amber liquid
    "simple syrup",
    "kahlua",
    "pimms",
    "triple sec",
    "tequila",
    "gin",
    "rum",
    "rye",
    "bourbon",
    "vodka",
    )
| markstev/nebree8 | config/ingredients.py | Python | gpl-2.0 | 435 |
def settings(dictionary):
    """Interactively (re)build data/settings.json.

    For every key in *dictionary* the user is asked whether the value should
    be greater than, less than or equal to 0; the chosen symbol ('<', '>' or
    '=') is stored and the whole mapping is written back to
    data/settings.json.  Typing "exit" at any prompt aborts the program.

    NOTE(review): assumes every key of *dictionary* already exists in
    data/settings.json (otherwise settingsjsonold[key] raises KeyError), and
    that a module-level ``debug`` flag is defined elsewhere in this file.
    NOTE(review): the "Currently" hint shows '>' for any non-empty stored
    value because '<'/'>'/'=' strings are all truthy -- probably a latent
    display bug; behavior kept as-is.
    """
    with open('data/settings.json') as data_file:
        settingsjsonold = json.load(data_file)
    settingsjsonnew = {}
    answerwrong = True
    while answerwrong:
        settingsanswer = input('Run Settingsprogramm? (yes/no/exit)')
        if settingsanswer == "exit":
            sys.exit("aborted by user")
        elif settingsanswer == "yes":
            for key in dictionary:
                answerwrong = True
                if debug:  # debug mode: skip prompting, default to ">"
                    settingsjsonnew[key] = ">"
                else:
                    while answerwrong:
                        answer = input("Should {} be greater, less or equal 0? Currently: '{}' (<,>,=,exit)".format(key,">" if settingsjsonold[key] else "<"))
                        if answer == "exit":
                            sys.exit("aborted by user")
                        elif answer in ("<", ">", "="):
                            settingsjsonnew[key] = answer
                            answerwrong = False
            answerwrong = False
            # Context manager guarantees the file is flushed and closed even on
            # error; the original opened the file and never closed it (leak).
            with open('data/settings.json', 'w') as f:
                f.write(json.dumps(settingsjsonnew, ensure_ascii=True))
        elif settingsanswer == "no":
            answerwrong = False
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import setuptools
# Absolute directory containing this setup.py (used to locate version.py).
repo_root = os.path.dirname(os.path.abspath(__file__))
try:
    execfile
except NameError:
    # Python 3: re-create the Python 2 builtin ``execfile``.
    def execfile(fname, globs, locs=None):
        """Execute the file *fname* in the given globals/locals namespaces."""
        locs = locs or globs
        exec(compile(open(fname).read(), fname, "exec"), globs, locs)
# Interpreter version gates: the SDK supports Python 2.6+ and Python 3.
version = sys.version_info
PY2 = version[0] == 2
PY3 = version[0] == 3
PY26 = PY2 and version[1] == 6
if PY2 and version[:2] < (2, 6):
    raise Exception('Datahub Python SDK supports Python 2.6+ (including Python 3+).')
# Load __version__ from tordatahub/version.py without importing the package.
version_ns = {}
execfile(os.path.join(repo_root, 'tordatahub', 'version.py'), version_ns)
requirements = []
with open('requirements.txt') as f:
    requirements.extend(f.read().splitlines())
if PY26:
    # Python 2.6 lacks a modern json module; pull in simplejson.
    requirements.append('simplejson>=2.1.0')
long_description = None
if os.path.exists('README.rst'):
    with open('README.rst') as f:
        long_description = f.read()
setuptools.setup(
    name='tordatahub',
    version=version_ns['__version__'],
    keywords='pydatahub, python, aliyun, tordatahub, sdk',
    description='Datahub Python SDK',
    long_description=long_description,
    author='andy.xs',
    author_email='[email protected]',
    url='https://github.com/aliyun/aliyun-tordatahub-sdk-python',
    packages=setuptools.find_packages(exclude=('unittest')),
    install_requires=requirements,
    license='Apache License 2.0'
)
| jasonz93/python-tordatahub | setup.py | Python | apache-2.0 | 2,165 |
from . import TweetAdapter
from ttp import ttp
import re
def breakMessage(messageToBreak, tweetId, userId, userDataStrategy):
    """Split *messageToBreak* into a list of tweet-sized (<=140 char) parts.

    Each continuation part is prefixed with "@<username>" (resolved through
    TweetAdapter).  URLs are temporarily swapped for fixed-width
    http://short.co placeholders so splitting accounts for Twitter's t.co
    link lengths, then swapped back per part.
    """
    if len(messageToBreak) <= 140:
        return [ messageToBreak ]
    username = TweetAdapter.getUsernameForTweet(tweetId, userId, userDataStrategy)
    splitMessageList = []
    urls = getUrls(messageToBreak)
    messageToBreak = transformMessageLinksToShortUrls(messageToBreak, urls)
    while messageToBreak!="":
        if len(messageToBreak) > 140:
            # Break at the last space within the first 141 chars, unless the
            # 141st char already is a space.
            if messageToBreak[140] != " ":
                indexToSplitMessageAt = messageToBreak[0:140 + 1].rfind(" ")
            else:
                indexToSplitMessageAt = 140
            messageToAppend = messageToBreak[0:indexToSplitMessageAt].rstrip()
            messageToBreak = "@%s %s"%(username, messageToBreak[indexToSplitMessageAt:].lstrip())
            # Restore original URLs in the emitted part; keep the remaining
            # (still unconsumed) URLs for the following parts.
            messageToAppend, urls = transformShortUrlsBackToOriginalLinks(messageToAppend, urls[:])
            splitMessageList.append(messageToAppend)
        else:
            # Drop a trailing part that consists only of the @username prefix.
            if messageToBreak.rstrip() != "@%s"%(username):
                splitMessageList.append(transformShortUrlsBackToOriginalLinks(messageToBreak.rstrip(), urls[:])[0])
            break
    return splitMessageList
def transformShortUrlsBackToOriginalLinks(messageToTransform, urls):
    """Replace each http://short.co placeholder with its original URL.

    Placeholders are consumed left-to-right, one per entry popped from
    *urls* (which is mutated).  Returns (transformed message, leftover urls).

    Bug fix: replace only the FIRST occurrence per placeholder.  Two URLs of
    equal length produce identical placeholders; an unbounded str.replace
    rewrote both with the first URL and left the url queue desynchronized.
    """
    findUrlRegex = re.compile(r'http://short\.co[#]+')
    for foundUrl in findUrlRegex.findall(messageToTransform):
        # count=1: consume exactly one placeholder per queued URL.
        messageToTransform = messageToTransform.replace(foundUrl, urls[0], 1)
        urls.pop(0)
    return messageToTransform, urls
def getUrls(messageToParse):
    """Return every URL found in *messageToParse* by the ttp tweet parser."""
    parser = ttp.Parser()
    return parser.parse(messageToParse).urls
def transformMessageLinksToShortUrls(messageToBreak, urls):
    """Swap each URL in *messageToBreak* for a http://short.co placeholder.

    The placeholder is padded with '#' so its total length equals the t.co
    wrapped-link length reported by TweetAdapter (https links are longer).
    """
    urlLength, urlLengthHttps = TweetAdapter.getUrlLengths()
    for url in urls:
        placeholderLength = urlLengthHttps if url.startswith('https://') else urlLength
        # 'http://short.co' is 15 chars; pad the rest with '#'.
        messageToBreak = messageToBreak.replace(url, 'http://short.co' + '#' * (placeholderLength - 15))
    return messageToBreak
| kiriappeee/reply-later | src/core/messager/MessageBreaker.py | Python | mit | 2,071 |
# QTLAB class for remote access of an TIP temperature control server version 2,
# Author: HR @ KIT 2019-
import time
import types
from numpy import arange, size, linspace, sqrt, ones, delete, append, argmin, array, abs
import logging
import zmq
import json
import qkit
from qkit.core.instrument_base import Instrument
class Error(Exception):
    """Generic TIP client error wrapping an arbitrary payload in ``value``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return f"{self.value!r}"
# zero.1 version of a remote tip command
# Bridge range code -> full-scale value (presumably resistance in Ohm for the
# TIP measurement bridge -- confirm against the TIP server documentation).
RANGE = {0: 0.02,
         1: 0.2,
         2: 2,
         3: 20,
         4: 200,
         5: 2e3,
         6: 20e3,
         7: 200e3,
         8: 2e6,
         9: 20e6
         }
class tip2_client(Instrument):
    """
    This is the remote tip client to connect to the TIP2 temperature control program
    Usage:
    Initialize with
    <name> = instruments.create('<name>', 'tip2_client', url = "tcp://localhost:5000")
    # The controled devices are automatically provided as parameters. Currently only supported for thermometers.
    # If you want to control the temperature of a device called 'mxc', you can do:
    name.get_mxc_temperature()
    name.enable_PID('mxc')
    name.r_set_T(0.050) # temperature in Kelvin
    """
    def __init__(self, name, url = "tcp://localhost:5000"):
        Instrument.__init__(self, name, tags=['physical'])
        # Add some global constants
        #self._address = address
        #self._port = port
        # One REQ socket per client; LINGER=0 so close() never blocks.
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.setsockopt(zmq.LINGER, 0)
        self.setup_connection(url=url)
        self.default_device = "mxc"
        self.control_device = ''
        self.T = 0.0
        self.add_function('set_param')
        self.add_function('get_param')
        self.add_function('get_device')
        self.add_function('get_devices')
        self.add_function('get_config')
        self.add_function('get_controled_thermometers')
        self.add_function('define_default_thermometer')
        self.add_function("setup_devices")
        self.add_function('r_get_T')
        self.add_function('r_get_R')
        self.add_function('new_T')
        self.add_function('close')
        self.add_parameter('P',flags=Instrument.FLAG_GETSET,type=float,units='')
        self.add_parameter('I',flags=Instrument.FLAG_GETSET,type=float,units='')
        self.add_parameter('D',flags=Instrument.FLAG_GETSET,type=float,units='')
        # self.add_parameter('range', type=int,
        #    flags=Instrument.FLAG_GETSET)
        # self.add_parameter('excitation', type=int,
        #    flags=Instrument.FLAG_GETSET)
        self.setup_devices()
    def close(self):
        """Close the zmq socket."""
        print ("closing zmq socket")
        self.socket.close()
    def setup_connection(self,url="tcp://localhost:5000"):
        """Connect the REQ socket to the TIP server at *url*."""
        print("Connecting to TIP server...")
        self.socket.connect(url)
    def setup_devices(self):
        """Auto-register qkit parameters for every thermometer the server reports."""
        for d in self.get_devices():
            i = self.get_device(d)
            if i['type'] == "thermometer":
                self.add_parameter("%s_temperature"%d,type=float,flags=Instrument.FLAG_GET,
                    units=i['unit'],get_func=lambda d=d:self.get_param(d,'temperature'))
                self.add_parameter("%s_active"%d, type=bool, flags=Instrument.FLAG_GETSET,
                    get_func=lambda d=d:self._boolean(self.get_param(d,"active")),
                    set_func=lambda x,d=d: self.set_param(d,"active",str(x)))
                self.add_parameter("%s_interval" % d, type=float, flags=Instrument.FLAG_GETSET,
                    get_func=lambda d=d: self.get_param(d, "interval"),
                    set_func=lambda x, d=d: self.set_param(d, "interval", str(x)))
                self.get(["%s_temperature"%d,"%s_active"%d,"%s_interval" % d])
    def set_param(self,device, param, value):
        """Send "set/<device>/<param>/<value>" and return the server reply string."""
        self.socket.send_string("set/"+device+"/"+param+"/"+str(value))
        message = self.socket.recv_string()
        return message
    def get_param(self,device, param):
        """Send "get/<device>/<param>" and return the server reply string."""
        self.socket.send_string("get/"+device+"/"+param)
        message = self.socket.recv_string()
        return message
    def get_device(self,device):
        """Return the full parameter dict of *device* (JSON-decoded)."""
        self.socket.send_string("get/"+device+"/:")
        message = self.socket.recv_string()
        return json.loads(message)
    def get_devices(self):
        """Return the list of device names known to the server."""
        self.socket.send_string("get/:")
        message = self.socket.recv_string()
        return json.loads(message)
    def get_config(self):
        """Return the complete server configuration (JSON-decoded)."""
        self.socket.send_string("get/::")
        message = self.socket.recv_string()
        return json.loads(message)
    def get_controled_thermometers(self):
        """Return the names of active thermometers with PID control enabled."""
        devices = self.get_devices()
        controlled_devices = []
        for dev in devices:
            if self._boolean(self.get_param(dev,'active')):
                if self._boolean(self.get_param(dev,'control_active')):
                    controlled_devices.append(dev)
        return controlled_devices
    def define_default_thermometer(self,thermometer):
        """Select the thermometer used when no explicit one is passed."""
        self.default_device = thermometer
    def enable_PID(self,channel):
        """
        Enables the PID control for the given channel.
        :param channel: Channel name, i.e. "mxc"
        :return:
        """
        rv = self.set_param(channel,"control_active",True)
        if not self._boolean(rv):
            raise ValueError("enable_PID not successful, device responded '%s'"%rv)
        self.default_device = channel
    def disable_PID(self,channel=None):
        """Disable PID control for *channel* (default thermometer if None)."""
        if channel is None:
            channel = self.default_device
        self.set_param(channel, "control_active", False)
        return self.get_controled_thermometers()
    # get and set Temperature
    def r_set_T(self, T, thermometer = None, safety_checks = True):
        """Set the PID target temperature (Kelvin); refuses T > 1 K unless
        *safety_checks* is disabled."""
        if thermometer is None:
            thermometer = self.default_device
        self.T = T
        if T > 1.0 and safety_checks:
            raise ValueError(__name__+": r_set_T cancelled. Target temperature > 1K. Make sure what you are doing and disable safety checks.")
        self.set_param(thermometer,'control_temperature',T)
    def r_get_T(self,thermometer = None):
        """Return the current temperature (Kelvin) of *thermometer*."""
        if thermometer is None:
            thermometer = self.default_device
        return float(self.get_param(thermometer,'temperature'))
    def r_get_R(self, thermometer = None):
        """Return the current sensor resistance of *thermometer*."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'resistance'))
    def new_T(self, T, dT_max=0.0005, thermometer = None):
        """Set a new target temperature and block until the RMS of the last
        20 readings settles within *dT_max* of the target."""
        if thermometer is None: thermometer = self.default_device
        def rms(Ts):
            # Ts is a numpy array; root-mean-square of the readings window.
            return sqrt(sum(Ts * Ts) / len(Ts))
        Ts = ones(20)
        settling_time = time.time()
        print ("T set to "+str(T))
        self.r_set_T(T)
        T_current = self.r_get_T()
        print (T_current)
        # qkit.flow.sleep(15)
        # print T_current
        while True:
            # Sliding window of the 20 most recent readings.
            T_current = self.r_get_T()
            Ts = delete(append(Ts, T_current), 0)
            rmsTs = rms(Ts)
            if abs(rmsTs - T) > dT_max:
                print ("dT > dT_max(%.5f): %.5f at Tctl: %.5f Curr T: %.5f" % (dT_max, rmsTs - T, T, T_current))
                qkit.flow.sleep(2)
            else:
                break
        print ("settling time: %s"%(time.time() - settling_time))
    def do_set_T(self, val):
        """qkit setter for T; returns the measured T or False on failure."""
        try:
            self.r_set_T(val)
            self.T = self.r_get_T()
            return self.T
        except ValueError:
            logging.warning('TIP connection probably lost. Nothing set.')
            return False
    def do_get_T(self,thermometer = None ):
        """qkit getter for T (Kelvin)."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'temperature'))
    def do_get_P(self,thermometer = None):
        """qkit getter for the PID P coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'control_P'))
    def do_set_P(self, P,thermometer = None):
        """qkit setter for the PID P coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.set_param(thermometer,'control_P',P))
    def do_get_I(self,thermometer = None):
        """qkit getter for the PID I coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'control_I'))
    def do_set_I(self, I,thermometer = None):
        """qkit setter for the PID I coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.set_param(thermometer,'control_I',I))
    def do_get_D(self,thermometer = None):
        """qkit getter for the PID D coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'control_D'))
    def do_set_D(self, D,thermometer = None):
        """qkit setter for the PID D coefficient."""
        if thermometer is None: thermometer = self.default_device
        return float(self.set_param(thermometer,'control_D',D))
    # bridge settings for different channels
    def do_get_range(self, thermometer = None):
        """qkit getter for the bridge resistance range code."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'device_range'))
    def do_set_range(self, range, thermometer = None):
        """
        Set the resistance range of the specified channel. Check RANGE dict for help.
        """
        if thermometer is None: thermometer = self.default_device
        # FIX: previous code called nonexistent self.send()/self.recv() with an
        # undefined 'channel' variable and always raised.  Route through the
        # TIP2 set/ protocol like every other setter in this class, mirroring
        # do_get_range's 'device_range' parameter name (confirm against the
        # TIP2 server command set).
        return bool(self.set_param(thermometer, 'device_range', range))
    def do_get_excitation(self, thermometer = None):
        """qkit getter for the bridge excitation code."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'device_excitation'))
    def do_set_excitation(self, excitation, thermometer = None):
        """
        set the measurement excitation of the specified channel.
        -1: Excitation off
        0:3uV
        1:10uV
        2:30uV
        3:100uV
        4:300uV
        5:1 mV
        6:3 mV
        7:10 mV
        8:30 mV
        """
        if thermometer is None: thermometer = self.default_device
        # FIX: previous code called nonexistent self.send()/self.recv() with an
        # undefined 'channel' variable and always raised.  Use the same set/
        # protocol as the other setters, mirroring do_get_excitation's
        # 'device_excitation' parameter name (confirm against the TIP2 server
        # command set).
        return bool(self.set_param(thermometer, 'device_excitation', excitation))
    def do_get_temperature(self, thermometer = None):
        """qkit getter for the temperature (Kelvin)."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'temperature'))
    def do_get_resistance(self, thermometer = None):
        """qkit getter for the sensor resistance."""
        if thermometer is None: thermometer = self.default_device
        return float(self.get_param(thermometer,'resistance'))
    def _boolean(self,s):
        """Interpret a server reply string as a boolean."""
        return s.lower() in ("yes", "true", "t", "1")
| qkitgroup/qkit | qkit/drivers/tip2_client.py | Python | gpl-2.0 | 10,763 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import ContainerServiceClientConfiguration
from .operations import AgentPoolsOperations, ManagedClustersOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerServiceClient:
    """The Container Service Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerservice.v2019_04_01.aio.operations.Operations
    :ivar managed_clusters: ManagedClustersOperations operations
    :vartype managed_clusters:
     azure.mgmt.containerservice.v2019_04_01.aio.operations.ManagedClustersOperations
    :ivar agent_pools: AgentPoolsOperations operations
    :vartype agent_pools:
     azure.mgmt.containerservice.v2019_04_01.aio.operations.AgentPoolsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param base_url: Service URL. Default value is 'https://management.azure.com'.
    :type base_url: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # NOTE: this class is generated by AutoRest; manual edits will be
        # lost when the client is regenerated.
        self._config = ContainerServiceClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every model class exported by the
        # generated models module.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Operation-group facades share the same pipeline and (de)serializers.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_clusters = ManagedClustersOperations(self._client, self._config, self._serialize, self._deserialize)
        self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ContainerServiceClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_04_01/aio/_container_service_client.py | Python | mit | 4,623 |
"""Synchronization primitives."""
__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
import collections
import warnings
from . import events
from . import futures
from .coroutines import coroutine
class _ContextManager:
"""Context manager.
This enables the following idiom for acquiring and releasing a
lock around a block:
with (yield from lock):
<block>
while failing loudly when accidentally using:
with lock:
<block>
Deprecated, use 'async with' statement:
async with lock:
<block>
"""
def __init__(self, lock):
self._lock = lock
def __enter__(self):
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
def __exit__(self, *args):
try:
self._lock.release()
finally:
self._lock = None # Crudely prevent reuse.
class _ContextManagerMixin:
    """Mixin giving synchronization primitives 'async with' support plus the
    deprecated 'with (yield from lock)' / 'with await lock' idioms.

    Subclasses must provide acquire() and release().
    """
    def __enter__(self):
        # Plain 'with lock:' is always an error for asyncio locks.
        raise RuntimeError(
            '"yield from" should be used as context manager expression')

    def __exit__(self, *args):
        # This must exist because __enter__ exists, even though that
        # always raises; that's how the with-statement works.
        pass

    @coroutine
    def __iter__(self):
        # This is not a coroutine.  It is meant to enable the idiom:
        #
        #     with (yield from lock):
        #         <block>
        #
        # as an alternative to:
        #
        #     yield from lock.acquire()
        #     try:
        #         <block>
        #     finally:
        #         lock.release()
        # Deprecated, use 'async with' statement:
        #     async with lock:
        #         <block>
        warnings.warn("'with (yield from lock)' is deprecated "
                      "use 'async with lock' instead",
                      DeprecationWarning, stacklevel=2)
        yield from self.acquire()
        return _ContextManager(self)

    async def __acquire_ctx(self):
        # Helper for the deprecated 'with await lock' form below.
        await self.acquire()
        return _ContextManager(self)

    def __await__(self):
        warnings.warn("'with await lock' is deprecated "
                      "use 'async with lock' instead",
                      DeprecationWarning, stacklevel=2)
        # To make "with await lock" work.
        return self.__acquire_ctx().__await__()

    async def __aenter__(self):
        await self.acquire()
        # We have no use for the "as ..." clause in the with
        # statement for locks.
        return None

    async def __aexit__(self, exc_type, exc, tb):
        self.release()
class Lock(_ContextManagerMixin):
    """Primitive lock objects.

    A primitive lock is a synchronization primitive that is not owned
    by a particular coroutine when locked. A primitive lock is in one
    of two states, 'locked' or 'unlocked'.

    It is created in the unlocked state. It has two basic methods,
    acquire() and release(). When the state is unlocked, acquire()
    changes the state to locked and returns immediately. When the
    state is locked, acquire() blocks until a call to release() in
    another coroutine changes it to unlocked, then the acquire() call
    resets it to locked and returns. The release() method should only
    be called in the locked state; it changes the state to unlocked
    and returns immediately. If an attempt is made to release an
    unlocked lock, a RuntimeError will be raised.

    When more than one coroutine is blocked in acquire() waiting for
    the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; first coroutine which
    is blocked in acquire() is being processed.

    acquire() is a coroutine and should be called with 'await'.

    Locks also support the asynchronous context management protocol:
    the 'async with lock' statement should be used.
    """
    def __init__(self, *, loop=None):
        # FIFO deque of futures, one per coroutine blocked in acquire().
        self._waiters = collections.deque()
        self._locked = False
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()
    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self._locked else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'
    def locked(self):
        """Return True if lock is acquired."""
        return self._locked
    async def acquire(self):
        """Acquire a lock.

        This method blocks until the lock is unlocked, then sets it to
        locked and returns True.
        """
        # Fast path: the lock is free and every queued waiter is already
        # cancelled, so taking it immediately cannot starve anyone.
        if not self._locked and all(w.cancelled() for w in self._waiters):
            self._locked = True
            return True
        fut = self._loop.create_future()
        self._waiters.append(fut)
        # Finally block should be called before the CancelledError
        # handling as we don't want CancelledError to call
        # _wake_up_first() and attempt to wake up itself.
        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except futures.CancelledError:
            # We may have been cancelled right after release() woke us;
            # pass the wake-up along so the lock is not lost.
            if not self._locked:
                self._wake_up_first()
            raise
        self._locked = True
        return True
    def release(self):
        """Release a lock.

        When the lock is locked, reset it to unlocked, and return.
        If any other coroutines are blocked waiting for the lock to become
        unlocked, allow exactly one of them to proceed.

        When invoked on an unlocked lock, a RuntimeError is raised.

        There is no return value.
        """
        if self._locked:
            self._locked = False
            self._wake_up_first()
        else:
            raise RuntimeError('Lock is not acquired.')
    def _wake_up_first(self):
        """Wake up the first waiter if it isn't done."""
        try:
            fut = next(iter(self._waiters))
        except StopIteration:
            return
        # .done() necessarily means that a waiter will wake up later on and
        # either take the lock, or, if it was cancelled and lock wasn't
        # taken already, will hit this again and wake up a new waiter.
        if not fut.done():
            fut.set_result(True)
class Event:
    """Asynchronous equivalent to threading.Event.

    Manages an internal boolean flag: set() turns it true and wakes every
    coroutine blocked in wait(); clear() turns it false again.  The flag
    starts out false, and wait() blocks until it becomes true.
    """

    def __init__(self, *, loop=None):
        self._waiters = collections.deque()
        self._value = False
        self._loop = loop if loop is not None else events.get_event_loop()

    def __repr__(self):
        state = 'set' if self._value else 'unset'
        if self._waiters:
            state = f'{state}, waiters:{len(self._waiters)}'
        return f'<{super().__repr__()[1:-1]} [{state}]>'

    def is_set(self):
        """Return True if and only if the internal flag is true."""
        return self._value

    def set(self):
        """Turn the flag true and wake every coroutine blocked in wait().

        Coroutines that call wait() while the flag is true do not block.
        """
        if self._value:
            return
        self._value = True
        for waiter in self._waiters:
            if not waiter.done():
                waiter.set_result(True)

    def clear(self):
        """Turn the flag false; subsequent wait() calls block until set()."""
        self._value = False

    async def wait(self):
        """Block until the internal flag is true, then return True.

        Returns immediately if the flag is already true on entry.
        """
        if self._value:
            return True
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        try:
            await waiter
        finally:
            self._waiters.remove(waiter)
        return True
class Condition(_ContextManagerMixin):
    """Asynchronous equivalent to threading.Condition.

    This class implements condition variable objects. A condition variable
    allows one or more coroutines to wait until they are notified by another
    coroutine.

    If the *lock* argument is omitted, a new Lock object is created and
    used as the underlying lock.
    """
    def __init__(self, lock=None, *, loop=None):
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()
        if lock is None:
            lock = Lock(loop=self._loop)
        elif lock._loop is not self._loop:
            raise ValueError("loop argument must agree with lock")
        self._lock = lock
        # Export the lock's locked(), acquire() and release() methods.
        self.locked = lock.locked
        self.acquire = lock.acquire
        self.release = lock.release
        self._waiters = collections.deque()
    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'
    async def wait(self):
        """Wait until notified.

        If the calling coroutine has not acquired the lock when this
        method is called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks
        until it is awakened by a notify() or notify_all() call for
        the same condition variable in another coroutine. Once
        awakened, it re-acquires the lock and returns True.
        """
        if not self.locked():
            raise RuntimeError('cannot wait on un-acquired lock')
        self.release()
        try:
            fut = self._loop.create_future()
            self._waiters.append(fut)
            try:
                await fut
                return True
            finally:
                self._waiters.remove(fut)
        finally:
            # Must reacquire lock even if wait is cancelled
            cancelled = False
            while True:
                try:
                    await self.acquire()
                    break
                except futures.CancelledError:
                    # Remember the cancellation and keep retrying: the
                    # caller must own the lock again before we re-raise.
                    cancelled = True
            if cancelled:
                raise futures.CancelledError
    async def wait_for(self, predicate):
        """Wait until a predicate becomes true.

        The predicate should be a callable which result will be
        interpreted as a boolean value. The final predicate value is
        the return value.
        """
        result = predicate()
        while not result:
            await self.wait()
            result = predicate()
        return result
    def notify(self, n=1):
        """By default, wake up one coroutine waiting on this condition, if any.

        If the calling coroutine has not acquired the lock when this method
        is called, a RuntimeError is raised.

        This method wakes up at most n of the coroutines waiting for the
        condition variable; it is a no-op if no coroutines are waiting.

        Note: an awakened coroutine does not actually return from its
        wait() call until it can reacquire the lock. Since notify() does
        not release the lock, its caller should.
        """
        if not self.locked():
            raise RuntimeError('cannot notify on un-acquired lock')
        idx = 0
        for fut in self._waiters:
            if idx >= n:
                break
            # Only undone futures count toward n; done ones are already
            # on their way out of wait().
            if not fut.done():
                idx += 1
                fut.set_result(False)
    def notify_all(self):
        """Wake up all threads waiting on this condition. This method acts
        like notify(), but wakes up all waiting threads instead of one. If the
        calling thread has not acquired the lock when this method is called,
        a RuntimeError is raised.
        """
        self.notify(len(self._waiters))
class Semaphore(_ContextManagerMixin):
    """A Semaphore implementation.

    A semaphore manages an internal counter which is decremented by each
    acquire() call and incremented by each release() call. The counter
    can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other thread calls release().

    Semaphores also support the context management protocol.

    The optional argument gives the initial value for the internal
    counter; it defaults to 1. If the value given is less than 0,
    ValueError is raised.
    """
    def __init__(self, value=1, *, loop=None):
        if value < 0:
            raise ValueError("Semaphore initial value must be >= 0")
        self._value = value
        self._waiters = collections.deque()
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()
    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'
    def _wake_up_next(self):
        # Pop waiters until one that is still pending gets the wake-up;
        # cancelled/done futures are simply discarded.
        while self._waiters:
            waiter = self._waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                return
    def locked(self):
        """Returns True if semaphore can not be acquired immediately."""
        return self._value == 0
    async def acquire(self):
        """Acquire a semaphore.

        If the internal counter is larger than zero on entry,
        decrement it by one and return True immediately. If it is
        zero on entry, block, waiting until some other coroutine has
        called release() to make it larger than 0, and then return
        True.
        """
        while self._value <= 0:
            fut = self._loop.create_future()
            self._waiters.append(fut)
            try:
                await fut
            except:
                # Bare except: even on cancellation/BaseException the
                # wake-up we may have consumed must be handed on.
                # See the similar code in Queue.get.
                fut.cancel()
                if self._value > 0 and not fut.cancelled():
                    self._wake_up_next()
                raise
        self._value -= 1
        return True
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.

        When it was zero on entry and another coroutine is waiting for it to
        become larger than zero again, wake up that coroutine.
        """
        self._value += 1
        self._wake_up_next()
class BoundedSemaphore(Semaphore):
    """A Semaphore whose counter may never exceed its initial value.

    release() raises ValueError if it would push the internal counter
    above the value the semaphore was created with.
    """

    def __init__(self, value=1, *, loop=None):
        self._bound_value = value
        super().__init__(value, loop=loop)

    def release(self):
        if self._value < self._bound_value:
            super().release()
        else:
            raise ValueError('BoundedSemaphore released too many times')
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/asyncio/locks.py | Python | gpl-2.0 | 15,913 |
from gunviolence import app
import argparse
import os
import sys
def main():
    """Launch the Flask app on all interfaces.

    The port is taken from the PORT environment variable (Heroku-style),
    falling back to 33507 when unset.
    """
    port = int(os.environ.get("PORT", 33507))
    # passthrough_errors=False lets the Werkzeug debugger handle exceptions.
    app.run(debug=True, host='0.0.0.0', port=port, passthrough_errors=False)
def parse_args(argv=None):
    """Parse command-line options for the Chicago gun-violence data pipeline.

    :param argv: optional list of argument strings. Defaults to None, in
        which case argparse reads sys.argv[1:] as before; passing an
        explicit list keeps the function testable and decoupled from the
        process arguments.
    :return: argparse.Namespace with download_data, download_metadata,
        download_fbi, repull (booleans) and limit (int or None).
    """
    parser = argparse.ArgumentParser(description="Chicago_Data")
    parser.add_argument("-download_data", action="store_true",
                        help="use to download csv data file")
    parser.add_argument("-download_metadata", action="store_true",
                        help="use to download csv meta data files")
    parser.add_argument("-download_fbi", action="store_true",
                        help="pull and parse fbi code data to csv")
    parser.add_argument("-repull", action="store_true",
                        help="repull pivot data object")
    parser.add_argument("-limit", metavar='limit', type=int, default=None,
                        help="limit size of data for faster testing of code")
    args = parser.parse_args(argv)
    return args
args = parse_args()
if __name__=="__main__":
main() | afenichel/ENGI4800-CAPSTONE | runserver.py | Python | apache-2.0 | 985 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Simone F. <[email protected]>
#
# This file is part of wikipedia-tags-in-osm.
# wikipedia-tags-in-osm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# wikipedia-tags-in-osm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wikipedia-tags-in-osm. If not, see <http://www.gnu.org/licenses/>.
"""Starting from a list of Wikipedia categories written by the user in
'config.cfg' file, the script:
- downloads/updates a national OSM data file
- downloads from (from Quick Intersection) Wikipedia data regarding the selected
categories (subcategories names and articles titles)
- creates webpages for showing which articles are already tagged and
which ones are not.
"""
import argparse
import os
import time
import locale
from subprocess import call
import csv
import ConfigParser
import sys
import json
import webbrowser
from babel.support import Translations
#local imports
from osm_parser import ParseOSMData
import osm_downloader as OSM
from data_manager import Themes, Regions
from users import Users
from webpages_creator import Creator, Redirect
import wikipedia_downloader
import nuts4nuts_infer
class App:
    def __init__(self):
        """Parse the command line, load configuration, then run the requested
        pipeline steps: refresh the national OSM extract, read already tagged
        articles from it, organize Wikipedia categories (Quick Intersection
        data), merge the two datasets and optionally build web pages, stats
        and a JSON dump.
        """
        #Options
        text = """Starting from a list of Wikipedia categories written by the user in
'config.cfg' file, the script:
- downloads/updates a national OSM data file
- downloads from (from Quick Intersection) Wikipedia data regarding the selected
categories (subcategories and articles names)
- creates webpages for showing which articles are already tagged and
which ones are not.
    """
        parser = argparse.ArgumentParser(description=text)
        group = parser.add_mutually_exclusive_group()
        #Manage OSM data
        parser.add_argument("-d", "--download_osm",
                            help="Download OSM data of the country (from Geofabrik)",
                            action="store_true")
        parser.add_argument("-u", "--update_osm",
                            help="Update downloaded OSM data of the country (through osmupdate)",
                            action="store_true")
        #Analyze data from Wikipedia and OSM
        parser.add_argument("-a", "--analyze",
                            help="Analyze Wikipedia data (categories' sub-categories and articles) ed OSM data (existing Wikipedia tags)",
                            action="store_true")
        parser.add_argument("--category_info",
                            help="Analyze data and print informations regarding a specific category",
                            action="store")
        parser.add_argument("-t", "--show_missing_templates",
                            help="Mark on web pages the articles that miss geo template (Coord)",
                            action="store_true")
        parser.add_argument("-c", "--show_link_to_wikipedia_coordinates",
                            help="If a non-tagged article have the coordinates on Wikipedia, show on the web pages a link to zoom on its position with JOSM/iD",
                            action="store_true")
        parser.add_argument("-o", "--show_coordinates_from_osm",
                            help="Calculate OSM coordinates of articles (point for nodes, centroids for ways and relations)",
                            action="store_true")
        parser.add_argument("-n", "--infer_coordinates_from_wikipedia",
                            help="Use Nuts4Nuts to calculate the coordinates of a non tagged article whithout coordinates on Wikipedia",
                            action="store_true")
        group.add_argument("-p", "--print_categories_list",
                           help="Analyze data and print project's categories.",
                           action="store_true")
        #Create webpages
        group.add_argument("-w", "--create_webpages",
                           help="Analyze data and create web pages",
                           action="store_true")
        parser.add_argument("-s", "--save_stats",
                            help="If web pages have been created, store the updated number of tagged articles (default: ask to user).",
                            action="store_true")
        parser.add_argument("--browser",
                            help="Open the web pages with the system browser after creation.",
                            action="store_true")
        parser.add_argument("--copy",
                            help="Copy html folder to the directory configured on `config.cfg` (eg. dropbox dir).",
                            action="store_true")
        parser.add_argument("--locale",
                            nargs='+',
                            dest='locales',
                            metavar='LANG',
                            help="Generate pages in the specified locales. Default: use the system locale. ")
        self.args = parser.parse_args()
        # NOTE(review): `self.args.category_info` appears twice in this
        # condition; one occurrence was probably meant to be another flag
        # (e.g. show_link_to_wikipedia_coordinates) — confirm with authors.
        if self.args.category_info or self.args.category_info\
           or self.args.create_webpages or self.args.print_categories_list\
           or self.args.show_missing_templates\
           or self.args.show_coordinates_from_osm:
            self.args.analyze = True
        # Default value for locale
        # get system locale
        sys_locale_langcode, sys_locale_encoding = locale.getdefaultlocale()
        if not self.args.locales:
            self.args.locales = [sys_locale_langcode]
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)
        os.chdir(os.path.dirname(sys.argv[0]))
        #Configurations
        themesAndCatsNames = self.read_config()
        ### Manage OpenStreetMap data ##########################################
        #Analyse national OSM data file and create lists of already
        #tagged Wikipedia articles.
        #Download/update OSM data
        if self.args.download_osm or self.args.update_osm:
            if self.args.download_osm:
                OSM.download_osm_data(self)
            if self.args.update_osm:
                # `status` is only bound here; the short-circuit below keeps
                # the download-only path from reading it unbound.
                status = OSM.update_osm_data(self)
            if self.args.download_osm or (self.args.update_osm and status):
                OSM.filter_wikipedia_data_in_osm_file(self)
            if self.args.update_osm and not status:
                print "OSM data where already uptodate or osmupdate has been interrupted.\
To repeat the updating process, launch the script again with the `-u` option."
            if not self.args.analyze:
                #"There's nothing left for me to tell you"
                sys.exit(1)
        else:
            if not os.path.isfile(self.wOSMFile):
                OSM.filter_wikipedia_data_in_osm_file(self)
        #Extract Wikipedia articles tagged in OSM with preferred language.
        #If an article is tagged in a foreign language, ask to Wikpedia
        #what is the corresponding article of the preferred language, so
        #that we can flag it as tagged aswell.
        print "\n- Read from the OSM file the articles already tagged"
        parseOSMData = ParseOSMData(self)
        #list of Wikipedia tags in OSM
        self.tagsInOSM = parseOSMData.allTags
        self.tagsData = parseOSMData.tagsData
        #list of tagged Wikipedia articles
        self.taggedTitles = parseOSMData.titles
        #tags with errors
        self.wrongTags = parseOSMData.wrongTags
        #ugly tags (with url, language capitalized...), not errors
        self.badTags = parseOSMData.badTags
        #add articles manually flagged as tagged in data/workaround/tagged.csv
        #in case the parser misses them (strange tags)
        self.add_tagged_articles()
        if self.args.show_coordinates_from_osm:
            print "\n--- Add OSM coordinates to the articles"
            parseOSMData.get_centroids()
        ### Manage Wikipedia data ##############################################
        #Read from 'non-mappable' file the categories and articles that
        #aren't mappable e.g. "Paintings in the X museum",
        #self.nonMappable = {mainCategory.name : {"articles" : [], "subcategories" : []}}
        self.nonMappable = self.read_non_mappable_items()
        #Check if we have Wikipedia data from Quick Intersection of all the
        #categories in the project (config.cfg file)
        themesAndCatsNames = wikipedia_downloader.check_catscan_data(self, themesAndCatsNames)
        #Organize Wikipedia data.
        #self.themes = [Theme(), ...]
        # Theme().categories = [Category(), ...]
        # Category().subcategories = [Category(), ...]
        # Category().articles = [Article(), ...]
        #categories without Quick Intersection data
        self.categoriesWithoutData = []
        allThemes = Themes(self, themesAndCatsNames)
        self.themes = allThemes.themesList
        #Organize data in regions, for a different visualization
        #self.regions = [Region()]
        # Region().categories = [Category(), ... ]
        self.regions = []
        if self.regionsNames != []:
            self.regions = Regions(self).regionsList
        #Print names of all categories
        if self.args.print_categories_list:
            self.display_categories_names()
            if not self.args.category_info:
                #"There's nothing left for me to tell you"
                sys.exit(1)
        ### Merge OSM info into Wikipedia data #################################
        #Add to Wikipedia categories and articles istances info about
        #their status in OSM: (tagged/not tagged), osm ids and counters
        print ("\n- Check which articles are already tagged in the country's "
               "OSM file")
        for theme in self.themes:
            for category in theme.categories:
                category.check_articles_in_osm()
        self.titlesInOSM, self.titlesNotInOSM = allThemes.lists_of_titles_in_osm_or_not()
        #Ask to Wikipedia which articles have/have not Coord template.
        #Articles with article.hasTemplate == False will be marked on web pages.
        if self.args.show_missing_templates:
            print "\n- Check which articles miss geo template (Coord) in Wikipedia"
            self.templatesStatus = wikipedia_downloader.read_old_templates_status(self)
            wikipedia_downloader.update_templates_status(self)
            #Set hasTemplate = False to articles without Coord template
            for theme in self.themes:
                for category in theme.categories:
                    category.set_has_template_in_articles()
        #If an article is not already tagged in OSM but Wikipedia knows its
        #position, it is possible to add a link to zoom to that position
        #with JOSM.
        if self.args.show_link_to_wikipedia_coordinates:
            print "\n- Check the non tagged articles whose position is known by Wikipedia"
            wikipedia_downloader.add_wikipedia_coordinates(self)
            #Save GeoJSON file with titles and coordinates known by Wikipedia
            self.save_titles_with_coords_geojson()
        if self.args.infer_coordinates_from_wikipedia:
            print "\n- Use Nuts4Nuts to infer coordinates of non tagged articles, whose position is unknown by Wikipedia"
            nuts4nuts_infer.infer_coordinates_with_nuts4nuts(self)
        #For debugging
        # print info about a specific category
        if self.args.category_info:
            self.print_category_info(self.args.category_info.replace(" ", "_"))
            if self.args.create_webpages:
                raw_input("\nContinue?[Press any key]")
        # write categories trees to text files (uncomment lines)
        if self.print_categories_to_text_files == "true":
            for theme in self.themes:
                for category in theme.categories:
                    category.print_category_tree_to_file()
        #Read and update stats with the number of tagged articles
        self.dates, self.days = self.read_past_stats()
        download_other_countries = False
        self.todayDate, today = self.read_new_stats(download_other_countries)
        self.days.append(today)
        self.dates.append(self.todayDate)
        if len(self.dates) > 1 and self.todayDate == self.dates[-2]:
            #This is the second analysis of today.
            #Overwrite the previous statistics
            del self.dates[-2]
            del self.days[-2]
            print "\n This is the second time that data ara analyzed today. \
The number of tagged articles will replace that of the lust run in the tags' numbers table."
        #Count tags added by each user
        self.users = Users(self).users
        #Create a json file with the data (needed by non_mappable.html)
        tree = {"mappable": True,
                "name": "Main",
                "size": 1,
                "children": []}
        for theme in self.themes:
            for category in theme.categories:
                tree["children"].append(category.build_json_tree())
        ifile = open(os.path.join(self.HTMLDIR, "json", "main.json"), "w")
        data = json.dumps(tree)
        ifile.write(data)
        ifile.close()
        #Create webpages
        if self.args.create_webpages:
            # Restrict to the supported locales
            self.locales = frozenset(self.SUPPORTED_LOCALES).intersection(
                frozenset(self.args.locales))
            non_supported_locales = frozenset(self.args.locales) - \
                frozenset(self.SUPPORTED_LOCALES)
            for locale_langcode in non_supported_locales:
                print 'Warning: dropping unsupported locale: {0}'.format(
                    locale_langcode)
            # if no supported locale is chosen fallback to en_US
            if not self.locales:
                self.locales = frozenset(['en_US'])
            for locale_langcode in self.locales:
                self.translations = Translations.load("locale",
                                                      [locale_langcode]
                                                      )
                self._ = self.translations.ugettext
                print "\n- Create web pages with locale: ", locale_langcode
                Creator(self, locale_langcode)
                if self.args.browser:
                    url = os.path.join('html', locale_langcode, 'index.html')
                    # using .get() suppress stdout output from browser, won't
                    # suppress stderr
                    webbrowser.get().open_new(url)
            # Create the index.html in the main HTMLDIR to redirect to one
            # locales directory
            for lang in self.locales:
                if self.WIKIPEDIALANG in lang:
                    Redirect(self, lang)
                    break
        #Save stats
        if self.args.save_stats:
            self.save_stats_to_csv()
            print "\nNew stats have been saved."
        else:
            print "\nNo stats saved."
        #Copy files from html dir to outdir (for example a Dropbox directory)
        if self.args.copy:
            self.copy_html_files_to_outdir()
        print "\nDone."
    def save_titles_with_coords_geojson(self):
        """Save a GeoJSON file with the coordinates known by Wikipedia.

        Writes `html/GeoJSON/coords.js` (a `var coords = {...}` assignment,
        not bare GeoJSON) containing one Point feature per article that has
        Wikipedia coordinates but is not yet tagged in OSM.  Used by the
        "Map" tab in homepage.
        """
        print ("\n- Save a GeoJSON file with the coordinates from "
               "Wikipedia (map markers)")
        tree = {"type": "FeatureCollection", "features": []}
        i = 0
        for title, coords in self.titlesWithCoordsFromWikipedia.iteritems():
            # Only articles still missing from OSM become map markers.
            if title in self.titlesNotInOSM:
                i += 1
                lat, lon = coords
                # GeoJSON wants [lon, lat] order.
                feature = {"type": "Feature",
                           "properties": {"id": str(i),
                                          "title": title.replace("_", " ").encode("utf-8")
                                          },
                           "geometry": {"type": "Point",
                                        "coordinates": [lon, lat]
                                        }
                           }
                tree["features"].append(feature)
        print "  markers: %d" % len(tree["features"])
        coordsFile = open(os.path.join("html", "GeoJSON", "coords.js"), "w")
        data = json.dumps(tree)
        data = "var coords = %s" % data
        coordsFile.write(data)
        coordsFile.close()
### Configurations #####################################################
    def read_config(self):
        """Setup configurations.

        Reads `config.cfg` (creating it from `config.template` on first
        run), stores the resulting options as instance attributes, creates
        the data/working directories and seed files the pipeline expects,
        and returns the {theme name: [category names]} mapping parsed from
        the [themes] section.
        """
        # Program version
        self.version = "v0.4"
        #Read configuration from config files
        configFile = "config.cfg"
        if not os.path.isfile(configFile):
            call("cp %s %s" % ("config.template", configFile), shell=True)
            print "* A new config file has been created:\n %s\n\n Fill it with the necessary information (see README.md and config.template)." % configFile
            answer = raw_input("\n Continue? [Y/n]\n")
            if answer not in ("", "Y", "y"):
                sys.exit()
        configparser = ConfigParser.RawConfigParser()
        # optionxform = str keeps option names case-sensitive.
        configparser.optionxform = str
        configparser.read(configFile)
        #country
        self.WIKIPEDIALANG = configparser.get("general", "preferred language")
        self.category_translation = configparser.get("general",
                                                     "category translation")
        self.country = configparser.get("general", "country")
        self.OSMDIR = configparser.get("general", "osmdir")
        self.COUNTRYBBOX = configparser.get("general", "osmbbox")
        self.countryPoly = os.path.join("data", "OSM", "%s.poly" % self.country)
        if not os.path.isfile(self.countryPoly):
            print "\n* Poly file is missing: \n %s" % self.countryPoly
            sys.exit(1)
        if self.WIKIPEDIALANG == "" or self.country == "" or self.OSMDIR == "":
            print "\n* Fill in `config.cfg` file the following options: `osmdir`, `preferred language`, `country`"
            sys.exit(1)
        #regions names
        if not configparser.has_option("general", "regions names") or \
           configparser.get("general", "regions names") == "":
            self.regionsNames = []
        else:
            self.regionsNames = [r.decode("utf-8") \
                for r in configparser.get("general", "regions names").split("|")]
        # directory where html files must be copied after creation
        #(for example, Dropbox dir)
        self.OUTDIR = configparser.get("general", "outdir")
        #debugging
        self.print_categories_to_text_files = configparser.get("debug", "print categories to text files")
        self.clickable_cells = configparser.get("debug", "clickable cells")
        # user agent for requests to Wikipedia and Quick Intersections APIs
        self.user_agent = configparser.get("general", "user agent")
        if self.user_agent == "":
            sys.exit("* Error: please, fill 'user agent' option in the "
                     "config file.\n"
                     "The User-Agent will be used when making requests to "
                     "Wikipedia API and Quick Intersections.\n"
                     "User agent example: wikipedia-tags-in-osm ("
                     "https://openstreetmap.it/wikipedia-tags-in-osm; "
                     "[email protected])")
        #themes and categories
        themesAndCatsNames = {}
        for themeName in configparser.options("themes"):
            categoriesNames = [c.strip().replace(" ", "_").decode("utf-8") for c in configparser.get("themes", themeName).split("|")]
            themesAndCatsNames[themeName.replace(" ", "_").decode("utf-8")] = categoriesNames
        # Wikipedia categories data, downloaded from quick_intersection
        self.CATSCANDIR = os.path.join("data", "wikipedia", "catscan")
        self.make_dir(self.CATSCANDIR)
        #categories dates
        self.categoriesDates = {}
        catsDatesFile = os.path.join(self.CATSCANDIR, "update_dates.cfg")
        catsDatesConfigparser = ConfigParser.RawConfigParser()
        catsDatesConfigparser.optionxform = str
        if not os.path.isfile(catsDatesFile):
            # First run: create an empty dates file for later updates.
            catsDatesConfigparser.add_section('catscan dates')
            with open(catsDatesFile, 'wb') as configfile:
                catsDatesConfigparser.write(configfile)
        else:
            catsDatesConfigparser.read(catsDatesFile)
            for categoryName, date in catsDatesConfigparser.items("catscan dates"):
                self.categoriesDates[categoryName] = date
        # OSM data
        self.countryPBF = os.path.join(self.OSMDIR, "%s-latest.osm.pbf" % self.country)
        self.oldCountryPBF = os.path.join(self.OSMDIR, "%s.osm.pbf" % self.country)
        self.countryO5M = os.path.join(self.OSMDIR, "%s-latest.o5m" % self.country)
        self.oldCountryO5M = os.path.join(self.OSMDIR, "%s.o5m" % self.country)
        self.osmObjs = {}
        # OSM data with wikipedia tag
        self.wOSMFile = os.path.join("data", "OSM", "Wikipedia-data-in-OSM.osm")
        # OSM data SQlite database
        self.wOSMdb = os.path.join("data", "OSM", "Wikipedia-data-in-OSM.sqlite")
        # libspatialite path
        self.libspatialitePath = configparser.get("general", "libspatialite-path")
        # OSM data of foreign coountries
        self.FOREIGNOSMDIR = "/tmp/"
        # lists of categories and articles that should be ignored
        # (not geographic content)
        self.NONMAPPABLE = os.path.join("data", "wikipedia", "non_mappable")
        self.make_dir(self.NONMAPPABLE)
        for fileName in ("articles", "subcategories", "redirects"):
            fullName = os.path.join(self.NONMAPPABLE, fileName)
            if not os.path.isfile(fullName):
                open(fullName, "w").close()
        # workaround files
        workaroundDir = os.path.join("data", "workaround")
        self.make_dir(workaroundDir)
        fileName = os.path.join(workaroundDir, "tagged.csv")
        if not os.path.isfile(fileName):
            f = open(fileName, "w")
            f.write("#In case the parser doesn't discover a tagged article, \
it can be added here, so that it will anyhow appear in the webpages.\n\
#Article nosmid,wosmid,wosmid,rosmid...")
            f.close()
        fileName = os.path.join(workaroundDir, "false_positive_tags.csv")
        if not os.path.isfile(fileName):
            f = open(fileName, "w")
            f.write("#If the script flags a correct tag as an error, write \
the tag here and it will not be detected as error anymore.")
            f.close()
        # conversions foreign articles titles - preferred language articles
        self.WIKIPEDIAANSWERS = os.path.join("data", "wikipedia", "answers")
        self.WIKIPEDIAANSWER = os.path.join(self.WIKIPEDIAANSWERS, "answer")
        self.make_dir(self.WIKIPEDIAANSWERS)
        # web pages dir
        self.HTMLDIR = 'html'
        self.make_dir(os.path.join(self.HTMLDIR, "GeoJSON"))
        self.make_dir(os.path.join(self.HTMLDIR, "json"))
        self.UPDATETIME = time.strftime("%b %d, ore %H", time.localtime())
        # stats and logs dir
        statsDir = os.path.join("data", "stats")
        self.make_dir(statsDir)
        self.make_dir(os.path.join("data", "logs"))
        # templates dir
        self.MISSINGTEMPLATESDIR = os.path.join("data", "wikipedia", "missing_templates")
        self.make_dir(self.MISSINGTEMPLATESDIR)
        self.TEMPLATESSTATUSFILE = os.path.join(self.MISSINGTEMPLATESDIR, "missing_templates.csv")
        supported_locales = configparser.get("i18n", "supported_locales")
        self.SUPPORTED_LOCALES = [lcode.strip()
                                  for lcode in supported_locales.split('|')
                                  ]
        return themesAndCatsNames
def make_dir(self, path):
"""Create a directory if it does not already exist
"""
if not os.path.exists(path):
os.makedirs(path)
### Not mappable items and false positive tags #########################
def read_non_mappable_items(self):
"""Read lists of categories and articles that must be ignored,
because not mappable.
Wikipedia articles or categories like: "Paintings in the X museum",
"Opere nel Castello Sforzesco"...
"""
print "\n- Read the lists of articles and categories which must be ignored because flagged as non-mappable from the files in `./data/wikipedia/non_mappable`"
articles = []
subcategories = []
redirects = []
nonMappable = {"subcategories": subcategories,
"articles": articles,
"redirects": redirects}
for itemsType, itemsList in nonMappable.iteritems():
fileName = open(os.path.join(self.NONMAPPABLE, itemsType), "r")
nonMappableItems = fileName.read().replace(" ", "_").decode("utf-8").splitlines()
fileName.close()
nonMappableItems.sort()
nonMappable[itemsType] = nonMappableItems
return nonMappable
def add_tagged_articles(self):
"""Read from file "./data/workaround/tagged.csv" articles flagged as tagged
by hand, in case the parser did not detected them.
"""
ifile = open(os.path.join("data", "workaround", "tagged.csv"), "rb")
reader = csv.reader(ifile, delimiter='\t')
for row in reader:
if row != [] and row[0][0] != "#":
if len(row) == 2:
articleName = row[0].replace(" ", "_")
osmIds = row[1].split(",")
self.taggedTitles[articleName] = osmIds
ifile.close()
### Print info to terminal #############################################
def print_category_info(self, categoryName):
"""Print to the terminal informations about the requested category
"""
catFound = False
for theme in self.themes:
for category in theme.categories:
if catFound:
break
if category.name == categoryName:
category.print_info()
catFound = True
break
for subcategory in category.subcategories:
if subcategory.name == categoryName:
subcategory.print_info()
catFound = True
break
if not catFound:
print "\nNo categories found with the specified name."
def display_categories_names(self):
"""Print to terminal the list of main categories
"""
print "\n=CATEGORIES="
categoryNum = 0
for theme in self.themes:
print "\n%s:" % theme.name
for category in theme.categories:
print "%d - %s" % (categoryNum, category.name.replace("_", " "))
categoryNum += 1
### Statistics #########################################################
def read_past_stats(self):
"""Read stats
"""
statsFile = os.path.join("data", "stats", "stats.csv")
if not os.path.isfile(statsFile):
dates = []
days = []
else:
ifile = open(statsFile, "r")
reader = csv.reader(ifile, delimiter='\t', quotechar='"')
#list with the number of tagged articles, per day
days = []
for rowNum, row in enumerate(reader):
if rowNum == 0:
#date
dates = row[1:]
for date in dates:
days.append({})
else:
#data
status = row[0]
for dateIndex, value in enumerate(row[1:]):
if value == "":
days[dateIndex][status] = ""
else:
days[dateIndex][status] = int(value)
ifile.close()
return dates, days
def count_wkp_tags_in_file(self, country):
"""Count the number of 'wikipedia=*' in OSM file
"""
print "\n number of wikipedia tags"
if country == "italy":
path = self.OSMDIR
else:
path = "/tmp/"
call('osmfilter %s%s.o5m --out-count | grep wikipedia > data/stats/%s' % (path, country, country), shell=True)
file_in = open("data/%s" % country, "r")
lines = file_in.readlines()
file_in.close()
tagsInCountry = 0
for line in lines:
line = line.replace(" ", "")[:-1]
tagsInCountry += int(line.split("\t")[0])
return tagsInCountry
    def read_new_stats(self, download_other_countries):
        """Add latest numbers to stats

        Returns (todayDate, today) where `today` maps each status
        ("to do", "mapped", "total") to its current count.
        """
        # UPDATETIME looks like "Apr 21, ore 16"; keep only the date part
        todayDate = self.UPDATETIME.split(",")[0]
        today = {"to do": len(self.titlesNotInOSM),
                 "mapped": len(self.titlesInOSM),
                 "total": len(self.tagsInOSM)}
        #Print tags numbers of other countries
        if download_other_countries:
            print "\n- Tags numbers in countries (with duplicate articles"
            tagsNum = {"italy": self.tagsInOSM, "spain": "", "france": "", "germany": ""}
            for country in tagsNum:
                print "\n- %s:" % country
                # NOTE(review): this tests self.country, not the loop
                # variable `country`; `country != "italy"` looks intended
                # (italy's file is already available locally, cf. the path
                # choice in count_wkp_tags_in_file) -- TODO confirm
                if self.country != "italy":
                    #download other countries
                    print "\n downloading..."
                    url = "http://download.geofabrik.de/osm/europe/%s.osm.pbf" % country
                    call('wget -c %s -O %s.osm.pbf' % (url, country), shell=True)
                    print "\n converting to O5M..."
                    call('osmconvert %s.osm.pbf -o=%s.o5m' % (country, country), shell=True)
                    call('rm %s.osm.pbf' % (country), shell=True)
                #count tags "wikipedia=*"
                tagsInCountry = self.count_wkp_tags_in_file(country)
                tagsNum[country] = tagsInCountry
                print country, tagsNum[country]
        return todayDate, today
def save_stats_to_csv(self):
"""Save stats to file
"""
print "\n- Saving stats to CSV file"
statsDir = os.path.join("data", "stats")
statsFile = os.path.join(statsDir, "stats.csv")
oldStatsFile = os.path.join(statsDir, "old_stats.csv")
if os.path.isfile(oldStatsFile):
call('mv %s %s' % (statsFile, oldStatsFile), shell=True)
ofile = open(statsFile, "w")
writer = csv.writer(ofile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
#headers
header = [" "] + [date for date in self.dates]
writer.writerow(header)
#days that must be saved to CSV file
daysToSave = self.days
#data
for status in self.days[0]:
values = [status] + [day[status] for day in daysToSave]
writer.writerow(values)
ofile.close()
### Copy webpages to a second directory ################################
def copy_html_files_to_outdir(self):
"""Copy html files to another directory, for example Dropbox dir
"""
print "\n- Copy files from `html` dir to: '%s'" % self.OUTDIR
if self.OUTDIR == "":
print "\n *Write in `config.cfg` --> `outdir` teh path of the directory in which you want to copy the files."
else:
call("cp -R ./html/* %s" % self.OUTDIR, shell=True)
def main():
    # Entry point used by the guard below: constructing App runs the whole
    # update process as a side effect of its __init__.
    App()
if __name__ == '__main__':
    main()
| simone-f/wikipedia-tags-in-osm | launch_script.py | Python | gpl-3.0 | 32,261 |
# Copyright 2017 Tonis Piip, Rumma & Ko Ltd
#
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Adds a radicale caldav server to the application.
Requires radicale to be installed.
.. autosummary::
:toctree:
views
"""
import logging
from lino import ad
from django.utils.translation import gettext_lazy as _
from . import views
from django.conf.urls import url
# import radicale.config
# import radicale.log
# try:
# from configparser import RawConfigParser as ConfigParser
# except ImportError:
# from ConfigParser import RawConfigParser as ConfigParser
# try:
# from io import StringIO as StringIO
# except ImportError:
# from StringIO import StringIO as StringIO
# Django autoreload fails when some value in settings is not
# hashable. So we use a hack copied from
# https://github.com/kyokenn/djradicale/blob/master/djradicale/__init__.py
# class HashableConfigParser(ConfigParser):
# def __hash__(self):
# output = StringIO()
# self.write(output)
# hash_ = hash(output.getvalue())
# output.close()
# return hash_
# radicale.config.__class__ = HashableConfigParser
# radicale.log.LOGGER = logging.Logger("maildev")
# # fh = logging.FileHandler('/home/luc/rad.log')
# # fh.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
# # ch.setLevel(logging.DEBUG)
# # radicale.log.LOGGER.addHandler(fh)
# radicale.log.LOGGER.addHandler(ch)
class Plugin(ad.Plugin):
    """Lino plugin descriptor for the CalDAV integration.

    Declares the plugin's verbose name, its dependency on the calendar
    plugin and the URL pattern routing ``/caldav/...`` requests to
    :class:`views.CalDavView`.  The commented-out blocks below are the
    currently disabled djradicale configuration, kept for reference.
    """
    verbose_name = _("CalDav")
    needs_plugins = ['lino.xl.cal']
    # RADICALE_CONFIG = {
    #     'server': {
    #         'base_prefix': '/.rad/',
    #         'realm': 'Radicale - Password Required',
    #     },
    #     'logging':{
    #         'debug':True
    #     },
    #     'encoding': {
    #         'request': 'utf-8',
    #         'stock': 'utf-8',
    #     },
    #     'auth': {
    #         'type': 'custom',
    #         'custom_handler': 'djradicale.auth.django',
    #     },
    #     'rights': {
    #         'type': 'custom',
    #         'custom_handler': 'djradicale.rights.django',
    #     },
    #     'storage': {
    #         'type': 'filesystem',
    #         'type': 'custom',
    #         'filesystem_folder' : '~/.config/foo/radicale/collections',
    #     },
    #     'custom_handler': 'djradicale.storage.django',
    #     },
    #     'well-known': {
    #         'carddav': '/pim/%(user)s/addressbook.vcf',
    #         'caldav': '/pim/%(user)s/calendar.ics',
    #     },
    # }
    # radicale.log.LOGGER.debug("xxxxxxxxxxxxxxx")
    # def on_init(self):
    #     for section, values in self.RADICALE_CONFIG.items():
    #         for key, value in values.items():
    #             if not radicale.config.has_section(section):
    #                 radicale.config.add_section(section)
    #             radicale.config.set(section, key, value)
    def get_patterns(self):
        """Return the Django URL patterns contributed by this plugin."""
        return [
            url(r'^caldav/(?P<url>.*)$', views.CalDavView.as_view())
        ]
    def unused_get_middleware_classes(self):
        """Middleware that would be needed when the CalDAV view is active
        (currently disabled, hence the ``unused_`` prefix)."""
        yield 'django.middleware.csrf.CsrfViewMiddleware'
        yield 'django.middleware.clickjacking.XFrameOptionsMiddleware'
        # for section, values in self.RADICALE_CONFIG.items():
        #     for key, value in values.items():
        #         if not radicale.config.has_section(section):
        #             radicale.config.add_section(section)
        #         radicale.config.set(section, key, value)
| lino-framework/xl | lino_xl/lib/caldav/__init__.py | Python | bsd-2-clause | 3,354 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from keystone import config
from keystone import exception
from keystone.tests import unit as tests
CONF = cfg.CONF
class ConfigTestCase(tests.TestCase):
    """Exercise keystone's config-file discovery and option defaults."""

    def config_files(self):
        config_files = super(ConfigTestCase, self).config_files()
        # Insert the keystone sample as the first config file to be loaded
        # since it is used in one of the code paths to determine the paste-ini
        # location.
        config_files.insert(0, tests.dirs.etc('keystone.conf.sample'))
        return config_files

    def test_paste_config(self):
        """find_paste_config() honours [paste_deploy] config_file."""
        self.assertEqual(tests.dirs.etc('keystone-paste.ini'),
                         config.find_paste_config())
        self.config_fixture.config(group='paste_deploy',
                                   config_file=uuid.uuid4().hex)
        self.assertRaises(exception.ConfigFileNotFound,
                          config.find_paste_config)
        self.config_fixture.config(group='paste_deploy', config_file='')
        self.assertEqual(tests.dirs.etc('keystone.conf.sample'),
                         config.find_paste_config())

    def test_config_default(self):
        # assertIsNone gives a clearer failure message than assertIs(None, x)
        self.assertIsNone(CONF.auth.password)
        self.assertIsNone(CONF.auth.token)
class DeprecatedTestCase(tests.TestCase):
    """Check that renamed options still honour their deprecated names."""

    def config_files(self):
        files = super(DeprecatedTestCase, self).config_files()
        files.append(tests.dirs.tests_conf('deprecated.conf'))
        return files

    def test_sql(self):
        # The [sql] options were moved to [database] in Icehouse when
        # keystone switched to oslo-incubator's db.sqlalchemy.sessions;
        # values set under the old section name must still be picked up.
        self.assertEqual('sqlite://deprecated', CONF.database.connection)
        self.assertEqual(54321, CONF.database.idle_timeout)
class DeprecatedOverrideTestCase(tests.TestCase):
    """Check that the new option name wins over its deprecated alias."""

    def config_files(self):
        files = super(DeprecatedOverrideTestCase, self).config_files()
        files.append(tests.dirs.tests_conf('deprecated_override.conf'))
        return files

    def test_sql(self):
        # When both the deprecated [sql] names and the new [database]
        # names are configured, the new names must take precedence.
        self.assertEqual('sqlite://new', CONF.database.connection)
        self.assertEqual(65432, CONF.database.idle_timeout)
| jonnary/keystone | keystone/tests/unit/test_config.py | Python | apache-2.0 | 3,118 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
import pytz
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.auditlog \
import forms as admin_forms
from openstack_dashboard.dashboards.admin.auditlog \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.auditlog \
import tabs as admin_tabs
from openstack_dashboard.dashboards.project.auditlog \
import tables as project_tables
from openstack_dashboard.openstack.common import timeutils
from openstack_dashboard import settings
# ISO 8601 layout used by the API for timestamps.
# NOTE(review): _ISO8601_TIME_FORMAT is a private member of timeutils and
# may change without warning -- consider defining a local constant.
ISO_TIME_FORMAT = timeutils._ISO8601_TIME_FORMAT
# Human-friendly layout used by the search form and the tables.
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# Dashboard-local timezone; API timestamps are UTC and are converted to
# this zone for display.
timezone = getattr(settings, 'TIME_ZONE', 'UTC')
TZ = pytz.timezone(timezone)
class AdminIndexView(tables.DataTableView, forms.ModalFormView):
    """Admin auditlog index: a paginated log table plus a search form.

    GET requests carry the pagination marker together with the serialized
    search filters; POST requests come from submitting the search form.
    """
    table_class = admin_tables.AdminAuditlogTable
    form_class = admin_forms.SearchForm
    template_name = 'admin/auditlog/index.html'
    success_url = reverse_lazy("horizon:admin:auditlog:index")
    def get_context_data(self, **kwargs):
        """Add the (bound or unbound) search form to the template context."""
        context = super(AdminIndexView, self).get_context_data(
            **kwargs)
        context['form'] = self.get_form(self.form_class)
        return context
    def has_more_data(self, table):
        # `_more` is set as a side effect of get_data()
        return self._more
    def get_data(self, **kwargs):
        """Return the auditlog rows for the current filters and page.

        Filter values come from the GET querystring when paginating (a
        marker is present; dates arrive ISO-formatted and are converted to
        TIME_FORMAT) and from the POSTed search form otherwise, defaulting
        the start date to one hour ago.  Each row is then enriched with
        tenant/user/resource display names and its timestamps are
        converted from UTC to the dashboard timezone.
        """
        auditlogs = []
        marker = self.request.GET.get(
            admin_tables.AdminAuditlogTable._meta.pagination_param, None)
        last_hour = get_last_hour()
        if marker:
            # paginating: filters were serialized into the querystring
            user_id = self.request.GET.get('user_id')
            tenant_id = self.request.GET.get('tenant_id')
            start_date = self.request.GET.get('start_date')
            #(NOTE st.wang) change time format
            if start_date:
                start = timeutils.parse_strtime(start_date, ISO_TIME_FORMAT)
                start_date = timeutils.strtime(start, TIME_FORMAT)
            end_date = self.request.GET.get('end_date')
            if end_date:
                end = timeutils.parse_strtime(end_date, ISO_TIME_FORMAT)
                end_date = timeutils.strtime(end, TIME_FORMAT)
            path = self.request.GET.get('path')
            method = self.request.GET.get('method')
        else:
            # first rendering or a fresh search: read the submitted form
            user_id = self.request.POST.get('user_id')
            tenant_id = self.request.POST.get('tenant_id')
            start_date = self.request.POST.get('start_date', last_hour)
            end_date = self.request.POST.get('end_date')
            path = self.request.POST.get('path')
            method = self.request.POST.get('method')
        q = query_data(self.request,
                       user_id,
                       tenant_id,
                       start_date,
                       end_date,
                       path,
                       method,
                       marker)
        # stash the filters on the table metas so pagination links keep them
        admin_tables.AdminAuditlogTable._meta.data_fields = q
        project_tables.AuditlogTable._meta.data_fields = q
        try:
            auditlogs, self._more = api.auditlog.auditlog_list(
                self.request,
                marker=marker,
                paginate=True,
                q=q)
        except Exception:
            self._more = False
            exceptions.handle(self.request,
                              _('Unable to retrieve auditlogs list.'))
        if auditlogs:
            # resolve the ids in the rows to human-readable names; each
            # lookup degrades to an empty table on API failure
            try:
                tenants, has_more = api.keystone.tenant_list(self.request)
            except Exception:
                tenants = []
                msg = _('Unable to retrieve auditlogs information.')
                exceptions.handle(self.request, msg)
            try:
                users = api.keystone.user_list(self.request)
            except Exception:
                users = []
                msg = _('Unable to retrieve auditlogs user information.')
                exceptions.handle(self.request, msg)
            try:
                resources = api.auditlog.resource_list(self.request)
            except Exception:
                resources = []
                msg = _('Unable to retrieve auditlogs resource information.')
                exceptions.handle(self.request, msg)
            tenant_dict = SortedDict([(f.id, f) for f in tenants])
            user_dict = SortedDict([(t.id, t) for t in users])
            resource_dict = SortedDict([(r.rid, r) for r in resources])
            tz_utc = pytz.timezone('UTC')
            for auditlog in auditlogs:
                auditlog.display_id = "(" + auditlog.id.split('-')[0] + ")"
                tenant = tenant_dict.get(auditlog.tenant_id, None)
                user = user_dict.get(auditlog.user_id, None)
                resource = resource_dict.get(auditlog.rid, None)
                auditlog.tenant_name = getattr(tenant, "name", None)
                auditlog.user_name = getattr(user, "name", None)
                auditlog.path = getattr(resource, "name", None)
                auditlog.status_code = get_status_code(auditlog.status_code)
                # NOTE(xg.song) defence code to ignore microsecond
                str_begin = auditlog.begin_at.split('.')[0]
                begin_utc = timeutils.parse_strtime(str_begin,
                                                    ISO_TIME_FORMAT)
                begin_utc = begin_utc.replace(tzinfo=tz_utc)
                begin_local = TZ.fromutc(begin_utc)
                begin = timeutils.strtime(begin_local, TIME_FORMAT)
                # NOTE(xg.song) defence code to ignore microsecond
                str_end = auditlog.end_at.split('.')[0]
                end_utc = timeutils.parse_strtime(str_end,
                                                  ISO_TIME_FORMAT)
                end_utc = end_utc.replace(tzinfo=tz_utc)
                end_local = TZ.fromutc(end_utc)
                end = timeutils.strtime(end_local, TIME_FORMAT)
                auditlog.begin_at = begin
                auditlog.end_at = end
        return auditlogs
    def post(self, request, *args, **kwargs):
        """Render the page for a submitted search form.

        NOTE(review): both branches are identical, so the validity check
        currently has no effect -- the else clause could be dropped.
        """
        form = self.get_form(self.form_class)
        if form.is_valid():
            kwargs['form'] = form
            return self.get(request, *args, **kwargs)
        else:
            kwargs['form'] = form
            return self.get(request, *args, **kwargs)
    def get_form_kwargs(self):
        """Returns the keyword arguments for instantiating the form."""
        kwargs = {'initial': self.get_initial()}
        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        # NOTE(review): ('GET') is a plain string, not a 1-tuple, so this
        # is a substring test; it happens to work for the verb "GET" but
        # ('GET',) was probably intended -- TODO confirm
        elif self.request.method in ('GET'):
            #(NOTE st.wang)initial auditlog button is GET method
            #if 'GET' is empty use initial start_date
            #else change the time format
            if self.request.GET:
                request = self.request.GET.copy()
                start_date = request['start_date']
                start = timeutils.parse_strtime(start_date, ISO_TIME_FORMAT)
                start_date = timeutils.strtime(start, TIME_FORMAT)
                request.update({'start_date': start_date})
                if request.get('end_date', None):
                    end_date = request['end_date']
                    end = timeutils.parse_strtime(end_date, ISO_TIME_FORMAT)
                    end_date = timeutils.strtime(end, TIME_FORMAT)
                    request.update({'end_date': end_date})
                kwargs.update({
                    'data': request,
                    'files': self.request.FILES,
                })
        return kwargs
def get_status_code(code):
    """Translate a numeric HTTP status code into the name of its class
    (1xx Informational, 2xx Success, 3xx Redirection, 4xx Client Error,
    anything else Server Error).
    """
    if code < 200:
        return "Informational"
    if code < 300:
        return "Success"
    if code < 400:
        return "Redirection"
    if code < 500:
        return "Client Error"
    return "Server Error"
def query_data(request,
               user_id,
               tenant_id,
               start_date,
               end_date,
               path,
               method,
               marker):
    """Build the filter list ("q") passed to the auditlog API.

    Each non-empty argument becomes one {"field", "op", "type", "value"}
    dict; dates (given in local TIME_FORMAT) are converted to UTC ISO
    strings first.  `request` and `marker` are currently unused.
    """
    query = []
    if start_date:
        start = timeutils.parse_strtime(start_date, TIME_FORMAT)
        start_local = TZ.localize(start)
        utc = timeutils.normalize_time(start_local)
        start_date = timeutils.strtime(utc, ISO_TIME_FORMAT)
        # Python 2 only: ensure the value is a unicode string
        start_date = unicode(start_date, "utf-8")
        query += [{"field": "begin_at",
                   "op": "ge",
                   "type": "string",
                   "value": start_date}]
    if end_date:
        # NOTE(review): parses with datetime.strptime while the start date
        # uses timeutils.parse_strtime -- both yield naive datetimes, but
        # the inconsistency is confusing and worth unifying
        end = datetime.datetime.strptime(end_date, TIME_FORMAT)
        end_local = TZ.localize(end)
        utc = timeutils.normalize_time(end_local)
        end_date = timeutils.strtime(utc, ISO_TIME_FORMAT)
        end_date = unicode(end_date, "utf-8")
        query += [{"field": "begin_at",
                   "op": "le",
                   "type": "string",
                   "value": end_date}]
    if user_id:
        query += [{"field": "user_id",
                   "op": "eq",
                   "type": "string",
                   "value": user_id}]
    if tenant_id:
        query += [{"field": "tenant_id",
                   "op": "eq",
                   "type": "string",
                   "value": tenant_id}]
    if path:
        # the "path" filter actually matches the resource id ("rid")
        query += [{"field": "rid",
                   "op": "eq",
                   "type": "string",
                   "value": path}]
    if method:
        query += [{"field": "method",
                   "op": "eq",
                   "type": "string",
                   "value": method}]
    return query
def get_last_hour():
    """Return the local time one hour ago, formatted as TIME_FORMAT."""
    now = TZ.localize(datetime.datetime.now()).replace(microsecond=0)
    one_hour_ago = now - datetime.timedelta(hours=1)
    return timeutils.strtime(one_hour_ago, TIME_FORMAT)
class DetailView(tabs.TabView):
    """Detail page for a single auditlog entry (admin dashboard)."""
    tab_group_class = admin_tabs.AuditlogDetailTabs
    template_name = 'admin/auditlog/detail.html'
    redirect_url = 'horizon:admin:auditlog:index'
    def get_context_data(self, **kwargs):
        """Expose the fetched auditlog record to the template."""
        context = super(DetailView, self).get_context_data(**kwargs)
        context["auditlogs"] = self.get_data()
        return context
    @memoized.memoized_method
    def get_data(self):
        """Fetch the auditlog record named in the URL kwargs (memoized).

        On API failure the user is sent back to the index view;
        exceptions.handle() may redirect itself, and the explicit Http302
        raise covers the case where it returns instead.
        """
        try:
            auditlog = api.auditlog.auditlog_get(self.request,
                                                 self.kwargs['auditlog'])
        except Exception:
            redirect = reverse(self.redirect_url)
            exceptions.handle(self.request,
                              _('Unable to retrieve details'),
                              redirect=redirect)
            # only reached when exceptions.handle() did not redirect
            raise exceptions.Http302(redirect)
        return auditlog
    def get_tabs(self, request, *args, **kwargs):
        """Build the tab group around the fetched record."""
        auditlog = self.get_data()
        return self.tab_group_class(request, auditlogs=auditlog, **kwargs)
| shhui/horizon | openstack_dashboard/dashboards/admin/auditlog/views.py | Python | apache-2.0 | 12,068 |
# Numeric error codes used throughout packman.  The numbers are stable
# identifiers (they surface to users and scripts), so never renumber an
# existing entry; append new errors with fresh numbers instead.
mapping = {
    'cannot_access_config_file': 1,
    'invalid_yaml_file': 2,
    'excluded_paths_must_be_a_list': 3,
    'string_failed_to_expand': 4,
    'no_paths_configured': 5,
    'file_not_found': 6,
    'type_path_collision': 7,
    'to_file_requires_explicit_path': 8,
    'validate_before_must_be_boolean': 9,
    'prevalidation_failed': 10,
    'no_config_found_for_package': 11,
    'excluded_conflict': 12,
    'packages_file_not_found': 13,
    'no_packages_defined': 14,
    'sources_empty': 15,
    'path_already_exists_no_overwrite': 16,
    'distro_not_supported': 17,
    'sources_path_required': 18,
    'sources_and_package_paths_identical': 19,
    'must_specify_file_or_dir': 20,
    'ppa_not_supported_by_distro': 21,
    'failed_create_package': 22,
    'module_could_not_be_installed': 23,
    'failed_to_download_file': 24,
    'failed_to_execute_command': 25,
    'package_types_must_be_list': 26,
    'unsupported_package_type': 27,
    'failed_to_download_module': 28,
    'targz_exists': 29,
    'failed_to_mkdir': 30,
    # NOTE(review): near-duplicate of 'distro_not_supported' (17), spelled
    # with spaces -- confirm which spelling call sites use before removing
    # either key
    'distro not supported': 31,
    'package_must_be_of_type_dict': 32,
    'template_dir_must_be_of_type_string': 33,
    'template_dir_missing': 34,
    'template_file_must_be_of_type_string': 35,
    'template_file_missing': 36,
    'could_not_generate_template': 37,
    'could_not_write_to_file': 38,
    'failed_to_download_gem': 39,
    'overriding_modules_import_error': 40,
}
| cloudify-cosmo/packman | packman/codes.py | Python | apache-2.0 | 1,434 |
from __future__ import print_function
import websocket
if __name__ == "__main__":
    websocket.enableTrace(True)
    ws = websocket.create_connection("ws://nasutoma-here.cgfm.jp:8000/demo")
    # ws = websocket.create_connection("ws://nasutoma-here.cgfm.jp:8000", http_proxy_port=8000)
    try:
        print("Sending 'Hello, World'...")
        ws.send("Hello, World")
        print("Sent")
        print("Receiving...")
        result = ws.recv()
        print("Received '%s'" % result)
    finally:
        # close the connection even when send()/recv() raises
        # (the old code leaked the socket on any error)
        ws.close()
| Garyuten/nasutoma_switch | examples/echo_client.py | Python | mit | 472 |
"""Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
from test.test_support import run_unittest
class InstallScriptsTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             unittest.TestCase):
    """Unit tests for distutils' install_scripts command."""

    def test_default_settings(self):
        """Options are copied from the build/install commands on finalize."""
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts="/foo/bar")
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts="/splat/funk",
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)

        # before finalize_options() nothing has been copied over yet
        self.assertFalse(cmd.force)
        self.assertFalse(cmd.skip_build)
        self.assertIsNone(cmd.build_dir)
        self.assertIsNone(cmd.install_dir)

        cmd.finalize_options()

        self.assertTrue(cmd.force)
        self.assertTrue(cmd.skip_build)
        self.assertEqual(cmd.build_dir, "/foo/bar")
        self.assertEqual(cmd.install_dir, "/splat/funk")

    def test_installation(self):
        """Every script in the build dir ends up in the install dir."""
        source = self.mkdtemp()
        written = []

        def write_script(name, text):
            written.append(name)
            with open(os.path.join(source, name), "w") as script:
                script.write(text)

        write_script("script1.py", ("#! /usr/bin/env python2.3\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("script2.py", ("#!/usr/bin/python\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("shell.sh", ("#!/bin/sh\n"
                                  "# bogus shell script w/ sh-bang\n"
                                  "exit 0\n"))

        target = self.mkdtemp()
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts=target,
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)
        cmd.finalize_options()
        cmd.run()

        installed = os.listdir(target)
        for name in written:
            self.assertIn(name, installed)
def test_suite():
    """Return the suite of all tests in this module."""
    # unittest.makeSuite() is deprecated (removed in Python 3.13); the
    # TestLoader API is the supported equivalent and exists everywhere.
    return unittest.TestLoader().loadTestsFromTestCase(InstallScriptsTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| gusai-francelabs/datafari | windows/python/Lib/distutils/tests/test_install_scripts.py | Python | apache-2.0 | 2,630 |
#!/usr/bin/env python
"""import_sample_data.py - script for importing sample data"""
from collectionbatchtool import *
apply_user_settings('settings.cfg')  # change to your own config-file!
# NOTE: statement order matters below -- every dataset must be written to
# the database before the datasets that reference it can resolve their
# foreign keys with update_foreign_keys().
# Agents have no dependencies; agenttype=1 is the default agent type
# (presumably "person" -- confirm against your database schema)
agt = AgentDataset()
agt.from_csv('agent_sample.csv', quiet=False)
agt.to_database(defaults={'agenttype': 1}, quiet=False)
# Localities; srclatlongunit=3 is the default lat/long unit code
# (meaning depends on the schema -- TODO confirm)
loc = LocalityDataset()
loc.from_csv('locality_sample.csv', quiet=False)
loc.to_database(defaults={'srclatlongunit': 3}, quiet=False)
# Collecting events reference both agents and localities
cev = CollectingeventDataset()
cev.from_csv('collectingevent_sample.csv', quiet=False)
cev.update_foreign_keys([agt, loc], quiet=False)
cev.to_database(quiet=False)
# Collectors link agents to collecting events; isprimary=1 by default
col = CollectorDataset()
col.from_csv('collector_sample.csv', quiet=False)
col.update_foreign_keys([agt, cev], quiet=False)
col.to_database(defaults={'isprimary': 1}, quiet=False)
# Collection objects reference collecting events
cob = CollectionobjectDataset()
cob.from_csv('collectionobject_sample.csv', quiet=False)
cob.update_foreign_keys(cev, quiet=False)
cob.to_database(quiet=False)
# Preparation types: reuse records already present in the database
# instead of inserting duplicates
pty = PreptypeDataset()
pty.from_csv('preptype_sample.csv', quiet=False)
pty.match_database_records('name') # match existing preptypes by "name"
pty.to_database(defaults={'isloanable': 1}, quiet=False)
# Preparations reference preparation types and collection objects
pre = PreparationDataset()
pre.from_csv('preparation_sample.csv', quiet=False)
pre.update_foreign_keys([pty, cob], quiet=False)
pre.to_database(quiet=False)
| jmenglund/CollectionBatchTool | docs/source/_static/import_sample_data.py | Python | mit | 1,322 |
#!/usr/bin/env python3
# The MIT License (MIT)
# Copyright (c) 2016 Michael Sasser <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from _thread import start_new_thread, allocate_lock
import ntpath
import glob
from fflib._version import version
from fflib.meshviewer import MeshViewer
from fflib.std import operating_system, origin_name, re_name_line, re_mac_line, re_key_line, re_comment_line, re_peer_line, \
re_name, re_mac, re_key
# Package version string from fflib._version.
VERSION = str(version())
# OS details reported by fflib.std.operating_system().
os_type, os_release, os_version, os_distribution = operating_system()
# Name under which this program identifies itself (fflib.std.origin_name()).
NAME = origin_name()
class PeerObject(object):
    """(Use getter and setter to change a value manually!)"""
    def __init__(self, file):
        """Load one peer file and initialize all bookkeeping state.

        :param file: path of the peer file; its basename doubles as the
            peer's (initial) name.
        """
        self.file = file
        # Processing steps already performed on this object
        self.__read_done = False
        self.__updated = False
        self.__syntax_verified = False
        self.__written = False
        self.__committed = False
        # Data parsed from the file ("*_old" keeps the previous value after
        # an update, "*_veryfied" [sic] the verification state)
        self.__filename = ntpath.basename(file)
        self.__filename_given = self.__filename
        self.__filename_old = None
        self.__name = None
        self.__name_old = None
        self.__name_veryfied = None
        self.__name_inconsistent = False
        self.__mac = None
        self.__mac_old = None
        self.__mac_veryfied = None
        self.__comment = None
        self.__comment_old = None  # Should never be used!
        self.__key = None
        self.__key_old = None
        self.__key_veryfied = None  # For future; currently testing in 4.0.0
        # Meshviewer specific
        self.__mv_probability = None
        self.__script_Name = 'TestScript'  # Static
        self.__script_Names_old = ()  # Static
        self.__script_use_found = None  # True == Yes; False == No; None == Not checked yet
        self.__script_last_edit_date = None
        self.__script_version = None
        self.__script_verified = None
        # Errors (Checks)
        # Do not change these integers (keys)! They are needed for error handling.
        #
        # 1   - 19  reserved
        # 20  - 199 Std. errors, warnings and verbose
        # 200 - 219 reserved
        # 220 - 400 Git commit Std. errors, warnings and verbose
        #
        # Error Code | Message string
        self.__msg = {40: 'Duplicate: The name \"{}\" is already in use in file \"{}\".',
                      41: 'Duplicate: The MAC \"{}\" is already in use in file \"{}\".',
                      42: 'Duplicate: The key \"{}\" is already in use in file \"{}\".',
                      43: 'Duplicate: The Filename \"{}\" is already present as filename \"{}\".',
                      50: 'The file includes non unicode content.',
                      51: 'The gwconfig line seems to be broken',
                      52: 'Inconsistent Name. Filename \"{}\" not matching Name {} in the file on line {}.',
                      60: 'Name not match pattern on line {}.',
                      61: 'MAC not match pattern on line {}.',
                      62: 'Key not match pattern on line {}.',
                      63: 'Comments not matching pattern on line {}.',
                      64: 'Gwconfig line not match pattern on line {}.',
                      70: 'Name found on line {} is already present at line {}.',
                      71: 'MAC found on line {} is already present at line {}.',
                      72: 'Key found on line {} is already present at line {}.',
                      73: 'Comment found on line {} is already present at line {}.',
                      74: 'Gwconfig line found on line {} is already present at line {}.',
                      80: 'Name not found, the filename \"{}\" is the new name for now',
                      90: 'Name not found.',
                      91: 'MAC not found.',
                      92: 'Comment not found. The comment will be added on rewriting the file.',
                      93: 'key not found.',
                      94: 'Gwconfig line not found in the file. The gwconfig line will be added on rewriting the file.',
                      100: 'Name changed from \"{}\" to \"{}\" with a Meshviewer match probability of {:6.2f}%.',
                      101: 'MAC changed from \"{}\" to \"{}\" with a Meshviewer match probability of {:6.2f}%.',
                      102: 'Filename changed from \"{}\" to \"{}\" with a Meshviewer match probability of {:6.2f}%.',
                      103: 'Name not found in Meshviewer data.',
                      104: 'Name found in Meshviewer data (Node ID: {})',
                      105: 'Mac address found in Meshviewer data (Node ID: {})',
                      106: 'Mac not found in Meshviewer data.',
                      107: 'File checked with Meshviewer data informations. The Meshviewer match probability is {:6.2f}%.',
                      108: 'Meshviewer data are insufficient for the file.'}
        # Status registries, keyed by the error codes above
        self.__debug = {}
        self.__warn = {}
        self.__error = {}
        # Commit data
        self.__commit_head_msg = None
        self.__commit_head_msg_manuelly = False
        self.__commit_comment_msg = []
        self.__commit_comment_msg_manuelly = False
        self.__commit_dict = {}
        # Statics
        self.force_rewrite = False
        self.__force_rewrite_versiones = ([0, 0, 0])
        # Read and parse the file right away
        self.read()
    def __repr__(self):
        # the filename exactly as it was passed in at construction time
        return self.__filename_given
    def __str__(self):
        # the current filename; NOTE(review): presumably updated when the
        # peer is renamed (cf. __filename_old) -- confirm
        return self.__filename
def read(self):
file_raw = []
# Unicode error
self.__del_msgs(50)
try:
with open(self.file) as fp:
for line in fp:
file_raw.append(line)
except UnicodeError:
# Unicode error
self.__error.update({50: self.__msg[50]})
return False
# Start processing
self.__read_process(file_raw)
def __del_msgs(self, *args):
for err in args:
self.__error.pop(err, None)
self.__warn.pop(err, None)
self.__debug.pop(err, None)
return 0
def __read_process(self, file_raw):
self.__del_msgs(70, 51, 52, 60, 71, 61, 72, 62, 73, 80)
line_no = 0
self.__read_done = False
cache_name = None
cache_mac = None
cache_key = None
cache_comment = None
cache_script_name = None
cache_script_version = None
cache_script_last_edit_date = None
cache_script_used_found = False
cache_script_verified = None
for line in file_raw:
check_matches_found = 0
line_no += 1
# ToDo: Detect supernodes!!!!!
# Check: Name (perfect match)
if re_name_line.match(line):
l = line.rstrip().split(' ')[1]
if re_name.match(l):
if self.__filename == l:
if not cache_name:
cache_name = (l, line_no)
check_matches_found += 1
else:
# Already found
self.__error.update({70: self.__msg[70].format(line_no, cache_name[1])})
else:
self.__name_inconsistent = True
cache_name = (l, line_no)
# Inconsistent Name (name != filename)
self.__debug.update({52: self.__msg[52].format(self.__filename, l, line_no)})
else:
# Not match pattern
self.__error.update({60: self.__msg[60].format(line_no)})
# Check: MAC (perfect match)
if re_mac_line.match(line):
l = line.rstrip().split(' ')[1]
if re_mac.match(l):
if not cache_mac:
cache_mac = (l, line_no)
check_matches_found += 1
else:
# Already found
self.__error.update({71: self.__msg[71].format(line_no, cache_mac[1])})
else:
# Not match pattern
self.__error.update({61: self.__msg[61].format(line_no)})
# Check: key (perfect match)
if re_key_line.match(line):
l = line.rstrip().split('\"')[1]
if re_key.match(l):
if not cache_key:
cache_key = (l, line_no)
check_matches_found += 1
else:
# Already found
self.__error.update({72: self.__msg[72](line_no, cache_key[1])})
else:
# Not match pattern
self.__error.update({62: self.__msg[62].format(line_no)})
# Check: Comment (perfect match)
if re_comment_line.match(line):
l = line.rstrip().split(':', 1)[1].lstrip()
if not cache_comment:
cache_comment = (l, line_no)
check_matches_found += 1
else:
# Already found
self.__error.update({73: self.__msg[73].format(line_no, cache_comment[1])})
# Check: Peer Line (perfect match) (#gwconfig: v0.1.0, 2015-08-20 #Verification: Name, MAC)
if re_peer_line.match(line):
l = line.rstrip().split('#')
try:
l.remove('')
except ValueError:
pass
# ToDo: Use RegEx and remove this!!!! Where is this *** duck?
try:
cache_script_version = l[0].split(',')[0].split(':')[1].rstrip().lstrip().split('v')[1].split('.')
cache_script_last_edit_date = l[0].split(',')[1].rstrip().lstrip() # ToDo: Use date
cache_script_verified = [x.rstrip().lstrip() for x in l[1].split(':')[1].split(',')]
cache_script_name = (l[0].split(',')[0].split(':')[0].rstrip().lstrip())
cache_script_used_found = True
except: # ToDo: Add exceptions (PEP8)
self.__debug.update(self.__msg[51])
cache_script_version = None
cache_script_last_edit_date = None
cache_script_verified = None
cache_script_name = None
cache_script_used_found = False # True? Script was found but with None in script_* = err
if check_matches_found > 1:
pass # ToDo: Add err
# Write cache into variables
if cache_name:
self.__name = cache_name[0]
else:
# Name not found & Filename is now Name
self.__debug.update({80: self.__msg[80].format(self.__filename)})
self.__name = self.__filename
if cache_mac:
self.__mac = cache_mac[0]
if cache_key:
self.__key = cache_key[0]
if cache_comment:
self.__comment = cache_comment[0]
self.__script_version = cache_script_version
self.__script_last_edit_date = cache_script_last_edit_date
self.__script_verified = cache_script_verified
self.__script_Name = cache_script_name
self.__script_use_found = cache_script_used_found
self.__read_done = True
def update(self, node_object, probability, found):
if not self.__updated:
# print(node_object, ratio)
if found['name']:
self.__debug.update({104: self.__msg[104].format(found['name'])})
else:
self.__debug.update({103: self.__msg[103]})
if found['mac']:
self.__debug.update({105: self.__msg[105].format(found['mac'])})
else:
self.__debug.update({106: self.__msg[106]})
if node_object:
self.__mv_probability = probability
self.__debug.update({107: self.__msg[107].format(self.__mv_probability)})
if node_object.hostname:
if not self.__name == node_object.hostname:
self.__name_old = self.__name
self.__name = node_object.hostname
self.__debug.update({100: self.__msg[100].format(self.__name_old,
self.__name,
self.__mv_probability)})
if node_object.hostname: # There is no filename.
if not self.__filename == node_object.hostname:
self.__filename_old = self.__filename
self.__filename = node_object.hostname
self.__debug.update({102: self.__msg[102].format(self.__filename_old,
self.__filename,
self.__mv_probability)})
if node_object.mac:
if not self.__mac == node_object.mac:
self.__mac_old = self.__mac_old
self.__mac = node_object.mac
self.__debug.update({101: self.__msg[101].format(self.__mac_old,
self.__mac,
self.__mv_probability)})
else:
self.__mv_probability = None
self.__warn.update({107: self.__msg[108]})
else:
raise RuntimeError('It is only possible to run the update once.')
self.__updated = True
def check(self):
self.__del_msgs(90, 91, 92, 94)
if not self.__name:
# Name not found
self.__error.update({90: self.__msg[90]})
if not self.__mac:
# MAC not found
self.__error.update({91: self.__msg[91]})
if not self.__comment:
# Comment not found
self.__debug.update({92: self.__msg[92]})
# Key ??
if not self.__script_use_found:
# gwconfig line not found
self.__debug.update({94: self.__msg[94]})
    def solve_duplicates(self):
        # Placeholder: automatic duplicate resolution is not implemented yet.
        pass
    def write(self):
        """Prepare the rewritten peer-file content (obsolete draft).

        Raises RuntimeError if any error message was recorded; the message
        reporting itself has moved to show_verbose().
        """
        # Obsoleted code. New implementation, see show_verbose methode.
        if len(self.__error) > 0:
            raise RuntimeError('Error(s) has occurred! It is not possible to write changes into the File\n'
                               'Please check the error message(s) and fix them first:\n\n', self.show_verbose())
        if self.__mac_old:
            self.__comment += ' Old MAC: {}'.format(self.__mac_old)
        # ToDo: Add offline since or something.
        # NOTE(review): dataoutput is built but never written or returned —
        # dead code kept from the obsoleted draft implementation.
        dataoutput = {1: '#Name {}'.format(self.__name),
                      2: '#MAC {}'.format(self.__mac),
                      3: '#Comment {}'.format(self.__comment),
                      4: '#gwconfig: v{}, {} #Verification: {}'.format(VERSION, 'ADD DATE HERE', 'ADD VERI HERE'),
                      5: 'key \"{}\"'.format(self.__key)}
    def commit(self):
        """Commit changes to the repository (not implemented yet)."""
        raise NotImplementedError('Committing is currently not implemented!')
def show_verbose(self):
if len(self.__error) > 0 or len(self.__debug) > 0:
print('\nFILE:', self.__filename_given)
if len(self.__error) > 0:
err_line_no = 0
print('Error Messages:')
for error in self.__error:
err_line_no += 1
print(' [{}]: {}'.format(err_line_no, self.__error[error]))
if len(self.__warn) > 0:
warn_line_no = 0
print('Warning Messages:')
for warn in self.__warn:
warn_line_no += 1
print(' [{}]: {}'.format(warn_line_no, self.__warn[warn]))
if len(self.__debug) > 0:
debug_line_no = 0
print('Debug Messages:')
for debug in self.__debug:
debug_line_no += 1
print(' [{}]: {}'.format(debug_line_no, self.__debug[debug])) # For debuging just "debug"
def flag_as_duplicate(self, error_name, value, file):
if error_name == 'name':
self.__error.update({40: self.__msg[40].format(value, file)})
elif error_name == 'mac':
self.__error.update({41: self.__msg[41].format(value, file)})
elif error_name == 'key':
self.__error.update({42: self.__msg[42].format(value, file)})
elif error_name == 'filename':
self.__warn.update({43: self.__msg[43].format(value, file)})
else:
raise SyntaxError('error_code not found.')
    @property
    def name(self):
        # Parsed (possibly Meshviewer-corrected) node name.
        return self.__name
    @property
    def mac(self):
        # Parsed node MAC address (None if missing).
        return self.__mac
    @property
    def key(self):
        # fastd public key from the peer file (None if missing).
        return self.__key
    @property
    def filename(self):
        # Current target filename (may differ from the on-disk name).
        return self.__filename
class Peers(object):
    """Collection of PeerObject instances loaded from a peer directory.

    Loads every file in *directory* (minus repo housekeeping files)
    concurrently, cross-checks all peers for duplicate names, MACs, keys
    and filenames, and can update the whole set from Meshviewer data.
    """
    def __init__(self, directory):
        self.directory = directory
        self.peers = {}
        self.files = []
        # Repo housekeeping files that are never peer definitions.
        self.filter_out_files = ('README.md', 'README.rst', 'LICENSE')
        # Threads (low level implementation)
        self.__run_init_threads = 0
        self.__run_init_thread_started = False
        self.__run_init_lock = allocate_lock()
        # More operating systems will be added soon.
        if os_type == 'Windows':
            files = glob.glob(self.directory + '\\*')
        elif os_type == 'Linux':
            files = glob.glob(self.directory + '/*')
        else:
            raise OSError('Your OS is not supported')
        for file in files:
            filename = ntpath.basename(file)
            if filename not in self.filter_out_files:
                self.files.append(filename)
        self.elements = len(self.files)
        # Todo: Remove none unicode chars from filenames ## Doh... not possile! do it later in peerobj. update.
        # Read all
        self.__read_all()
    def __run_class_generator(self, tid, file):
        # Worker thread: construct one PeerObject while maintaining the
        # live-worker counter under the lock (joined by __read_all below).
        self.__run_init_lock.acquire()
        self.__run_init_threads += 1
        self.__run_init_thread_started = True
        self.__run_init_lock.release()
        self.peers.update({file: PeerObject(self.directory + '/' + file)})
        self.__run_init_lock.acquire()
        self.__run_init_threads -= 1
        self.__run_init_lock.release()
        return tid
    def __read_all(self):
        # Spawn one worker per file, then busy-wait until all are done.
        # NOTE(review): busy-waiting burns CPU; a threading.Event or
        # concurrent.futures would be cleaner — confirm before changing.
        tid = 0
        for file in self.files:
            tid += 1
            start_new_thread(self.__run_class_generator, (tid, file))  # Class generator
        while not self.__run_init_thread_started:
            pass
        while self.__run_init_threads > 0:
            pass
        # Reset Thread
        self.__run_init_threads = 0
        self.__run_init_thread_started = False
        if not len(self.peers) == self.elements:
            raise RuntimeError('Error: This should never happened. If you see this message something is deeply wrong.'
                               ' This is a precheck, so no changes has been written to the files.'
                               ' Try it again.')
        # Duplicate detection: first occurrence is registered, every later
        # occurrence flags both the later peer and the first one.
        peer_names = {}
        peer_macs = {}
        peer_keys = {}
        peer_filenames = {}
        for peer in self.peers:
            # Check duplicates (name)
            if self.peers[peer].name is not None:
                if self.peers[peer].name not in peer_names:
                    peer_names.update({self.peers[peer].name: self.peers[peer].filename})
                else:
                    self.peers[peer].flag_as_duplicate('name', self.peers[peer].name, peer_names[self.peers[peer].name])
                    self.peers[peer_names[self.peers[peer].name]].flag_as_duplicate('name', self.peers[peer].name,
                                                                                    self.peers[peer].filename)
            # Check duplicates (MAC)
            if self.peers[peer].mac is not None:
                if self.peers[peer].mac not in peer_macs:
                    peer_macs.update({self.peers[peer].mac: self.peers[peer].filename})
                else:
                    self.peers[peer].flag_as_duplicate('mac', self.peers[peer].mac, peer_macs[self.peers[peer].mac])
                    self.peers[peer_macs[self.peers[peer].mac]].flag_as_duplicate('mac', self.peers[peer].mac,
                                                                                  self.peers[peer].filename)
            # Check duplicates (key)
            if self.peers[peer].key is not None:
                if self.peers[peer].key not in peer_keys:
                    peer_keys.update({self.peers[peer].key: self.peers[peer].filename})
                else:
                    self.peers[peer].flag_as_duplicate('key', self.peers[peer].key, peer_keys[self.peers[peer].key])
                    self.peers[peer_keys[self.peers[peer].key]].flag_as_duplicate('key', self.peers[peer].key,
                                                                                  self.peers[peer].filename)
            # Check duplicates (filename) — compared case-insensitively.
            if self.peers[peer].filename is not None:
                if self.peers[peer].filename.lower() not in peer_filenames:
                    peer_filenames.update({self.peers[peer].filename.lower(): self.peers[peer].filename})
                else:
                    self.peers[peer].flag_as_duplicate('filename', self.peers[peer].filename,
                                                       peer_filenames[self.peers[peer].filename.lower()])
                    self.peers[peer_filenames[self.peers[peer].filename.lower()]].flag_as_duplicate('filename',
                                                                                                    self.peers[
                                                                                                        peer].filename,
                                                                                                    self.peers[
                                                                                                        peer].filename)
            self.peers[peer].check()
    def show_verbose(self):
        # Print the collected messages of every peer file.
        for peer in self.peers:
            self.peers[peer].show_verbose()
    def update(self, url):
        """Match every peer against Meshviewer data from *url* and update it."""
        mv = MeshViewer(url)
        found = {'name': None, 'mac': None, 'name_obj': None, 'mac_obj': None, 'name_ratio': None, 'mac_ratio': None}
        for peer in self.peers:
            # Init
            found['name'] = None
            found['name_ratio'] = None
            found['name_obj'] = None
            found['mac'] = None
            found['mac_ratio'] = None
            found['mac_obj'] = None
            node_object_name_object = None
            node_object_name_ratio = None
            node_object_mac_object = None
            node_object_mac_ratio = None
            # First step (Check Naming) — exact name match first.
            node_object_name_object = mv.get(mv.find_id_by_name(str(peer)))
            if node_object_name_object:
                node_object_name_ratio = 100.0
                found['name'] = node_object_name_object.node_id
                found['name_ratio'] = node_object_name_ratio
                found['name_obj'] = node_object_name_object
            if not node_object_name_object:
                # Fall back to fuzzy name matching above an 84% threshold.
                try_node_object_id, try_node_object_ratio, try_node_object_name = mv.find_id_by_name_like(str(peer))
                if try_node_object_ratio > 84.0:
                    found['name'] = try_node_object_id
                    found['name_ratio'] = try_node_object_ratio
                    node_object_name_object = mv.get(try_node_object_id)
                    found['name_obj'] = node_object_name_object
                    node_object_name_ratio = try_node_object_ratio
                else:  # Not needed
                    node_object_name_object = None
                    node_object_name_ratio = None
            # Second step (Check MAC)
            try:
                node_object_mac_object = mv.get(self.peers[peer].mac)
            except KeyError:
                node_object_mac_object = None
            if node_object_mac_object:
                node_object_mac_ratio = 100.0
                found['mac'] = node_object_mac_object.node_id
                found['mac_ratio'] = node_object_mac_ratio
                found['mac_obj'] = node_object_mac_object
            else:
                node_object_mac_ratio = None
            # Third step (Evaluate Data) — only trust the match when name
            # and MAC resolve to the very same node.
            if node_object_name_object == node_object_mac_object and node_object_name_object is not None:
                node_ratio = node_object_name_ratio / 2.0 + node_object_mac_ratio / 2.0
                node_object = node_object_name_object
                # Final (Update the PeerObject)
                self.peers[peer].update(node_object, node_ratio, found)
            else:
                self.peers[peer].update(None, None, found)
        for peer in self.peers:
            self.peers[peer].solve_duplicates()
if __name__ == '__main__':
    # Ad-hoc manual test driver.
    # ToDo: Unitest with GitHub "dumps" (nas1.local.real-instruments.de:/servers/g/ffp-test);put .tar into repo as well.
    # Test - delete after unitest is done!
    # NOTE(review): hard-coded local repo path and live map URL — this only
    # works on the author's machine; replace with a fixture for CI.
    test = 1
    testurl = 'http://map.freifunk-westpfalz.de/meshviewer/nodes.json'
    if test == 1:
        # Test TestPeers
        tp = Peers('/home/michael/repos/peers-ffwp')  # Ensure to use "test" branch.
        tp.update(testurl)
        tp.show_verbose()
| Real-Instruments/fflib | fflib/peer.py | Python | agpl-3.0 | 26,829 |
import pytest
from citrination_client.models.columns import *
from citrination_client.base.errors import *
class TestCategoricalColumn(object):
    """Tests for CategoricalColumn serialization and category validation."""

    @classmethod
    def setup_class(cls):
        # Shared column attributes used by every test.
        cls.name = "Property Band gap"
        cls.role = "Input"
        cls.group_by_key = False
        cls.units = "eV"

    def _build(self, categories):
        # Helper: construct a column with the shared attributes.
        return CategoricalColumn(name=self.name,
                                 role=self.role,
                                 group_by_key=self.group_by_key,
                                 categories=categories)

    def test_categorical_column_writes_categories_correctly(self):
        categories = ["Grey", "Blue"]
        c_dict = self._build(categories).to_dict()
        assert c_dict["name"] == self.name
        assert c_dict["role"] == self.role
        assert c_dict["group_by_key"] == self.group_by_key
        assert c_dict["type"] == CategoricalColumn.TYPE
        assert c_dict["units"] == None
        assert c_dict["options"]["categories"] == categories

    def test_categorical_column_validates_categories(self):
        """
        Tests that the CategoricalColumn class validates that the categories
        value is a list of strings.
        """
        # Non-list value is rejected.
        with pytest.raises(CitrinationClientError):
            self._build(1)
        # A list containing a non-string is rejected.
        with pytest.raises(CitrinationClientError):
            self._build(["Grey", 1])
        # A list of strings is accepted.
        self._build(["Grey", "Blue"])
| CitrineInformatics/python-citrination-client | citrination_client/models/columns/tests/test_categorical.py | Python | apache-2.0 | 1,707 |
import contextlib
from decimal import Decimal
import importlib
import unittest
from dependency_injector.wiring import (
wire,
Provide,
Provider,
Closing,
register_loader_containers,
unregister_loader_containers,
)
from dependency_injector import errors
# Runtime import to avoid syntax errors in samples on Python < 3.5
import os
# Make the repository root and the samples directory importable so the
# wiring sample packages (wiringsamples, asyncutils, ...) resolve below.
_TOP_DIR = os.path.abspath(
    os.path.sep.join((
        os.path.dirname(__file__),
        '../',
    )),
)
_SAMPLES_DIR = os.path.abspath(
    os.path.sep.join((
        os.path.dirname(__file__),
        '../samples/',
    )),
)
import sys
sys.path.append(_TOP_DIR)
sys.path.append(_SAMPLES_DIR)
from asyncutils import AsyncTestCase
from wiringsamples import module, package
from wiringsamples.service import Service
from wiringsamples.container import Container, SubContainer
class WiringTest(unittest.TestCase):
    """End-to-end tests for wiring a container into modules and packages."""
    container: Container
    def setUp(self) -> None:
        # Fresh container wired into the sample module and package per test.
        self.container = Container(config={'a': {'b': {'c': 10}}})
        self.container.wire(
            modules=[module],
            packages=[package],
        )
        self.addCleanup(self.container.unwire)
    def test_package_lookup(self):
        from wiringsamples.package import test_package_function
        service = test_package_function()
        self.assertIsInstance(service, Service)
    def test_package_subpackage_lookup(self):
        from wiringsamples.package.subpackage import test_package_function
        service = test_package_function()
        self.assertIsInstance(service, Service)
    def test_package_submodule_lookup(self):
        from wiringsamples.package.subpackage.submodule import test_function
        service = test_function()
        self.assertIsInstance(service, Service)
    def test_module_attributes_wiring(self):
        # Module-level Provide/Provider markers get replaced; unknown
        # providers stay as markers.
        self.assertIsInstance(module.service, Service)
        self.assertIsInstance(module.service_provider(), Service)
        self.assertIsInstance(module.undefined, Provide)
    def test_module_attribute_wiring_with_invalid_marker(self):
        from wiringsamples import module_invalid_attr_injection
        with self.assertRaises(Exception) as context:
            self.container.wire(modules=[module_invalid_attr_injection])
        self.assertEqual(
            str(context.exception),
            'Unknown type of marker {0}'.format(module_invalid_attr_injection.service),
        )
    def test_class_wiring(self):
        test_class_object = module.TestClass()
        self.assertIsInstance(test_class_object.service, Service)
    def test_class_wiring_context_arg(self):
        # Explicitly passed arguments bypass the injected defaults.
        test_service = self.container.service()
        test_class_object = module.TestClass(service=test_service)
        self.assertIs(test_class_object.service, test_service)
    def test_class_method_wiring(self):
        test_class_object = module.TestClass()
        service = test_class_object.method()
        self.assertIsInstance(service, Service)
    def test_class_classmethod_wiring(self):
        service = module.TestClass.class_method()
        self.assertIsInstance(service, Service)
    def test_instance_classmethod_wiring(self):
        instance = module.TestClass()
        service = instance.class_method()
        self.assertIsInstance(service, Service)
    def test_class_staticmethod_wiring(self):
        service = module.TestClass.static_method()
        self.assertIsInstance(service, Service)
    def test_instance_staticmethod_wiring(self):
        instance = module.TestClass()
        service = instance.static_method()
        self.assertIsInstance(service, Service)
    def test_class_attribute_wiring(self):
        self.assertIsInstance(module.TestClass.service, Service)
        self.assertIsInstance(module.TestClass.service_provider(), Service)
        self.assertIsInstance(module.TestClass.undefined, Provide)
    def test_function_wiring(self):
        service = module.test_function()
        self.assertIsInstance(service, Service)
    def test_function_wiring_context_arg(self):
        test_service = self.container.service()
        service = module.test_function(service=test_service)
        self.assertIs(service, test_service)
    def test_function_wiring_provider(self):
        service = module.test_function_provider()
        self.assertIsInstance(service, Service)
    def test_function_wiring_provider_context_arg(self):
        test_service = self.container.service()
        service = module.test_function_provider(service_provider=lambda: test_service)
        self.assertIs(service, test_service)
    def test_configuration_option(self):
        # All typed configuration-option injections resolve from the
        # config={'a': {'b': {'c': 10}}} set in setUp().
        (
            value_int,
            value_float,
            value_str,
            value_decimal,
            value_required,
            value_required_int,
            value_required_float,
            value_required_str,
            value_required_decimal,
        ) = module.test_config_value()
        self.assertEqual(value_int, 10)
        self.assertEqual(value_float, 10.0)
        self.assertEqual(value_str, '10')
        self.assertEqual(value_decimal, Decimal(10))
        self.assertEqual(value_required, 10)
        self.assertEqual(value_required_int, 10)
        self.assertEqual(value_required_float, 10.0)
        self.assertEqual(value_required_str, '10')
        self.assertEqual(value_required_decimal, Decimal(10))
    def test_configuration_option_required_undefined(self):
        self.container.config.reset_override()
        with self.assertRaisesRegex(errors.Error, 'Undefined configuration option "config.a.b.c"'):
            module.test_config_value_required_undefined()
    def test_provide_provider(self):
        service = module.test_provide_provider()
        self.assertIsInstance(service, Service)
    def test_provided_instance(self):
        class TestService:
            foo = {
                'bar': lambda: 10,
            }
        with self.container.service.override(TestService()):
            some_value = module.test_provided_instance()
        self.assertEqual(some_value, 10)
    def test_subcontainer(self):
        some_value = module.test_subcontainer_provider()
        self.assertEqual(some_value, 1)
    def test_config_invariant(self):
        # Selector-style config: the injected value follows the switch.
        config = {
            'option': {
                'a': 1,
                'b': 2,
            },
            'switch': 'a',
        }
        self.container.config.from_dict(config)
        value_default = module.test_config_invariant()
        self.assertEqual(value_default, 1)
        with self.container.config.switch.override('a'):
            value_a = module.test_config_invariant()
        self.assertEqual(value_a, 1)
        with self.container.config.switch.override('b'):
            value_b = module.test_config_invariant()
        self.assertEqual(value_b, 2)
    def test_wire_with_class_error(self):
        # wire() requires a container instance, not the container class.
        with self.assertRaises(Exception):
            wire(
                container=Container,
                modules=[module],
            )
    def test_unwire_function(self):
        self.container.unwire()
        self.assertIsInstance(module.test_function(), Provide)
    def test_unwire_class(self):
        self.container.unwire()
        test_class_object = module.TestClass()
        self.assertIsInstance(test_class_object.service, Provide)
    def test_unwire_class_method(self):
        self.container.unwire()
        test_class_object = module.TestClass()
        self.assertIsInstance(test_class_object.method(), Provide)
    def test_unwire_package_function(self):
        self.container.unwire()
        from wiringsamples.package.subpackage.submodule import test_function
        self.assertIsInstance(test_function(), Provide)
    def test_unwire_package_function_by_reference(self):
        from wiringsamples.package.subpackage import submodule
        self.container.unwire()
        self.assertIsInstance(submodule.test_function(), Provide)
    def test_unwire_module_attributes(self):
        # After unwire the original markers must be restored.
        self.container.unwire()
        self.assertIsInstance(module.service, Provide)
        self.assertIsInstance(module.service_provider, Provider)
        self.assertIsInstance(module.undefined, Provide)
    def test_unwire_class_attributes(self):
        self.container.unwire()
        self.assertIsInstance(module.TestClass.service, Provide)
        self.assertIsInstance(module.TestClass.service_provider, Provider)
        self.assertIsInstance(module.TestClass.undefined, Provide)
    def test_wire_multiple_containers(self):
        # Two containers can be wired into the same module concurrently.
        sub_container = SubContainer()
        sub_container.wire(
            modules=[module],
            packages=[package],
        )
        self.addCleanup(sub_container.unwire)
        service, some_value = module.test_provide_from_different_containers()
        self.assertIsInstance(service, Service)
        self.assertEqual(some_value, 1)
    def test_closing_resource(self):
        # Closing[...] injections shut the resource down after each call.
        from wiringsamples import resourceclosing
        resourceclosing.Service.reset_counter()
        container = resourceclosing.Container()
        container.wire(modules=[resourceclosing])
        self.addCleanup(container.unwire)
        result_1 = resourceclosing.test_function()
        self.assertIsInstance(result_1, resourceclosing.Service)
        self.assertEqual(result_1.init_counter, 1)
        self.assertEqual(result_1.shutdown_counter, 1)
        result_2 = resourceclosing.test_function()
        self.assertIsInstance(result_2, resourceclosing.Service)
        self.assertEqual(result_2.init_counter, 2)
        self.assertEqual(result_2.shutdown_counter, 2)
        self.assertIsNot(result_1, result_2)
    def test_closing_resource_context(self):
        # An explicitly passed service bypasses resource init/shutdown.
        from wiringsamples import resourceclosing
        resourceclosing.Service.reset_counter()
        service = resourceclosing.Service()
        container = resourceclosing.Container()
        container.wire(modules=[resourceclosing])
        self.addCleanup(container.unwire)
        result_1 = resourceclosing.test_function(service=service)
        self.assertIs(result_1, service)
        self.assertEqual(result_1.init_counter, 0)
        self.assertEqual(result_1.shutdown_counter, 0)
        result_2 = resourceclosing.test_function(service=service)
        self.assertIs(result_2, service)
        self.assertEqual(result_2.init_counter, 0)
        self.assertEqual(result_2.shutdown_counter, 0)
    def test_class_decorator(self):
        service = module.test_class_decorator()
        self.assertIsInstance(service, Service)
    def test_container(self):
        service = module.test_container()
        self.assertIsInstance(service, Service)
class ModuleAsPackagingTest(unittest.TestCase):
    """Regression test: a plain module may be passed via packages=[...]."""
    def setUp(self):
        self.container = Container(config={'a': {'b': {'c': 10}}})
        self.addCleanup(self.container.unwire)
    def test_module_as_package_wiring(self):
        # See: https://github.com/ets-labs/python-dependency-injector/issues/481
        self.container.wire(packages=[module])
        self.assertIsInstance(module.service, Service)
class WiringAndQueue(unittest.TestCase):
    """Regression test: wiring a module that holds a queue.Queue instance."""
    def test_wire_queue(self) -> None:
        from wiringsamples import queuemodule
        container = Container()
        self.addCleanup(container.unwire)
        # Should not raise exception
        # See: https://github.com/ets-labs/python-dependency-injector/issues/362
        # Idiom fix: the original wrapped this call in
        # "try: ... except: raise", a bare-except that only re-raised —
        # a no-op wrapper; a plain call fails the test identically.
        container.wire(modules=[queuemodule])
class WiringAndFastAPITest(unittest.TestCase):
    """Tests for calling wired functions with explicit marker arguments
    (the FastAPI-style bypass where Provide[...] is passed directly)."""
    container: Container
    def test_bypass_marker_injection(self):
        container = Container()
        container.wire(modules=[module])
        self.addCleanup(container.unwire)
        # Passing the marker explicitly must still resolve to the provider.
        service = module.test_function(service=Provide[Container.service])
        self.assertIsInstance(service, Service)
    def test_closing_resource_bypass_marker_injection(self):
        from wiringsamples import resourceclosing
        resourceclosing.Service.reset_counter()
        container = resourceclosing.Container()
        container.wire(modules=[resourceclosing])
        self.addCleanup(container.unwire)
        # Each call initializes and shuts down a fresh resource instance.
        result_1 = resourceclosing.test_function(
            service=Closing[Provide[resourceclosing.Container.service]],
        )
        self.assertIsInstance(result_1, resourceclosing.Service)
        self.assertEqual(result_1.init_counter, 1)
        self.assertEqual(result_1.shutdown_counter, 1)
        result_2 = resourceclosing.test_function(
            service=Closing[Provide[resourceclosing.Container.service]],
        )
        self.assertIsInstance(result_2, resourceclosing.Service)
        self.assertEqual(result_2.init_counter, 2)
        self.assertEqual(result_2.shutdown_counter, 2)
        self.assertIsNot(result_1, result_2)
class WiringAsyncInjectionsTest(AsyncTestCase):
    """Tests for async resource injections (with and without Closing)."""
    def test_async_injections(self):
        from wiringsamples import asyncinjections
        container = asyncinjections.Container()
        container.wire(modules=[asyncinjections])
        self.addCleanup(container.unwire)
        asyncinjections.resource1.reset_counters()
        asyncinjections.resource2.reset_counters()
        # Without Closing the resources are initialized but not shut down.
        resource1, resource2 = self._run(asyncinjections.async_injection())
        self.assertIs(resource1, asyncinjections.resource1)
        self.assertEqual(asyncinjections.resource1.init_counter, 1)
        self.assertEqual(asyncinjections.resource1.shutdown_counter, 0)
        self.assertIs(resource2, asyncinjections.resource2)
        self.assertEqual(asyncinjections.resource2.init_counter, 1)
        self.assertEqual(asyncinjections.resource2.shutdown_counter, 0)
    def test_async_injections_with_closing(self):
        from wiringsamples import asyncinjections
        container = asyncinjections.Container()
        container.wire(modules=[asyncinjections])
        self.addCleanup(container.unwire)
        asyncinjections.resource1.reset_counters()
        asyncinjections.resource2.reset_counters()
        # With Closing every call initializes and shuts down both resources.
        resource1, resource2 = self._run(asyncinjections.async_injection_with_closing())
        self.assertIs(resource1, asyncinjections.resource1)
        self.assertEqual(asyncinjections.resource1.init_counter, 1)
        self.assertEqual(asyncinjections.resource1.shutdown_counter, 1)
        self.assertIs(resource2, asyncinjections.resource2)
        self.assertEqual(asyncinjections.resource2.init_counter, 1)
        self.assertEqual(asyncinjections.resource2.shutdown_counter, 1)
        resource1, resource2 = self._run(asyncinjections.async_injection_with_closing())
        self.assertIs(resource1, asyncinjections.resource1)
        self.assertEqual(asyncinjections.resource1.init_counter, 2)
        self.assertEqual(asyncinjections.resource1.shutdown_counter, 2)
        self.assertIs(resource2, asyncinjections.resource2)
        self.assertEqual(asyncinjections.resource2.init_counter, 2)
        self.assertEqual(asyncinjections.resource2.shutdown_counter, 2)
class AutoLoaderTest(unittest.TestCase):
    """Tests for the import-hook based auto-wiring (loader registration)."""
    container: Container
    def setUp(self) -> None:
        self.container = Container(config={'a': {'b': {'c': 10}}})
        # Reload to restore the unwired markers before each test.
        importlib.reload(module)
    def tearDown(self) -> None:
        # Unregistering twice raises ValueError — suppress it on cleanup.
        with contextlib.suppress(ValueError):
            unregister_loader_containers(self.container)
        self.container.unwire()
    @classmethod
    def tearDownClass(cls) -> None:
        # Leave the shared sample module in a pristine state for other suites.
        importlib.reload(module)
    def test_register_container(self):
        register_loader_containers(self.container)
        importlib.reload(module)
        importlib.import_module('wiringsamples.imports')
        service = module.test_function()
        self.assertIsInstance(service, Service)
| rmk135/objects | tests/unit/wiring/test_wiring_py36.py | Python | bsd-3-clause | 15,706 |
"""
Cartoon+texture IPOL demo web app
"""
from .app import app
| juan-cardelino/matlab_demos | ipol_demo-light-1025b85/app_available/103/__init__.py | Python | gpl-2.0 | 64 |
import ast
from taichi.lang.exception import TaichiSyntaxError
from taichi.lang.shell import oinspect
class KernelSimplicityASTChecker(ast.NodeVisitor):
    """AST visitor that enforces Taichi's "simple kernel" structure rules
    (e.g. restrictions on statements/for-loops in nested scopes)."""
    class ScopeGuard:
        """Per-scope flags tracking what is still allowed inside the scope.
        Pushed onto / popped off the checker's scope stack via ``with``."""
        def __init__(self, checker):
            self.c = checker
            self._allows_for_loop = True
            self._allows_more_stmt = True
        @property
        def allows_for_loop(self):
            return self._allows_for_loop
        @property
        def allows_more_stmt(self):
            return self._allows_more_stmt
        def mark_no_more_for_loop(self):
            self._allows_for_loop = False
        def mark_no_more_stmt(self):
            # Disallowing statements implies disallowing for-loops too.
            self._allows_for_loop = False
            self._allows_more_stmt = False
        def __enter__(self):
            self.c._scope_guards.append(self)
        def __exit__(self, exc_type, exc_val, exc_tb):
            self.c._scope_guards.pop()
    def __init__(self, func):
        super().__init__()
        # Source location of the kernel, used for error messages only.
        self._func_file = oinspect.getsourcefile(func)
        self._func_lineno = oinspect.getsourcelines(func)[1]
        self._func_name = func.__name__
        self._scope_guards = []
    def new_scope(self):
        # Factory for a guard bound to this checker (used with ``with``).
        return KernelSimplicityASTChecker.ScopeGuard(self)
    @property
    def current_scope(self):
        return self._scope_guards[-1]
    @property
    def top_level(self):
        # True while no scope guard is active (i.e. kernel top level).
        return len(self._scope_guards) == 0
    def get_error_location(self, node):
        # -1 because ast's lineno is 1-based.
        lineno = self._func_lineno + node.lineno - 1
        return f'file={self._func_file} kernel={self._func_name} line={lineno}'
    @staticmethod
    def should_check(node):
        # Only plain statements are checked; nested function/class
        # definitions are validated by the frontend pass instead.
        if not isinstance(node, ast.stmt):
            return False
        # TODO(#536): Frontend pass should help make sure |func| is a valid AST for
        # Taichi.
        ignored = [ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]
        return not any(map(lambda t: isinstance(node, t), ignored))
    def generic_visit(self, node):
        if not self.should_check(node):
            super().generic_visit(node)
            return
        if not (self.top_level or self.current_scope.allows_more_stmt):
            raise TaichiSyntaxError(
                f'No more statements allowed, at {self.get_error_location(node)}'
            )
        old_top_level = self.top_level
        if old_top_level:
            self._scope_guards.append(self.new_scope())
        # Marking here before the visit has the effect of disallow for-loops in
        # nested blocks. E.g. if |node| is a IfStmt, then the checker would disallow
        # for-loops inside it.
        self.current_scope.mark_no_more_for_loop()
        super().generic_visit(node)
        if old_top_level:
            self._scope_guards.pop()
    @staticmethod
    def visit_for(node):
        # NOTE: deliberately a no-op; the lowercase name means ast.NodeVisitor
        # never dispatches to it, so For nodes go through generic_visit.
        # TODO: since autodiff is enhanced, AST checker rules should be relaxed. This part should be updated.
        # original code is #def visit_For(self, node) without #@staticmethod before fix pylint R0201
        return
        # is_static = (isinstance(node.iter, ast.Call)
        #              and isinstance(node.iter.func, ast.Attribute)
        #              and isinstance(node.iter.func.value, ast.Name)
        #              and node.iter.func.value.id == 'ti'
        #              and node.iter.func.attr == 'static')
        # if not (self.top_level or self.current_scope.allows_for_loop
        #         or is_static):
        #     raise TaichiSyntaxError(
        #         f'No more for loops allowed, at {self.get_error_location(node)}'
        #     )
        # with self.new_scope():
        #     super().generic_visit(node)
        #
        # if not (self.top_level or is_static):
        #     self.current_scope.mark_no_more_stmt()
| yuanming-hu/taichi | python/taichi/lang/ast/checkers.py | Python | mit | 3,791 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import socket
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate, formataddr
from fail2ban.server.actions import ActionBase, CallingMap
# Email body templates keyed by event type.  Values are %-format strings
# filled from SMTPAction.message_values (plus, for bans, the aInfo dict);
# the 'ban' entry is assembled from head + optional matches part + tail.
messages = {}
messages['start'] = \
"""Hi,
The jail %(jailname)s has been started successfully.
Regards,
Fail2Ban"""
messages['stop'] = \
"""Hi,
The jail %(jailname)s has been stopped.
Regards,
Fail2Ban"""
messages['ban'] = {}
messages['ban']['head'] = \
"""Hi,
The IP %(ip)s has just been banned for %(bantime)i seconds
by Fail2Ban after %(failures)i attempts against %(jailname)s.
"""
messages['ban']['tail'] = \
"""
Regards,
Fail2Ban"""
# Optional middle sections, selected by the action's `matches` setting.
messages['ban']['matches'] = \
"""
Matches for this ban:
%(matches)s
"""
messages['ban']['ipmatches'] = \
"""
Matches for %(ip)s:
%(ipmatches)s
"""
messages['ban']['ipjailmatches'] = \
"""
Matches for %(ip)s for jail %(jailname)s:
%(ipjailmatches)s
"""
class SMTPAction(ActionBase):
    """Fail2Ban action which sends emails to inform on jail starting,
    stopping and bans.
    """

    def __init__(
            self, jail, name, host="localhost", user=None, password=None,
            sendername="Fail2Ban", sender="fail2ban", dest="root", matches=None):
        """Initialise action.

        Parameters
        ----------
        jail : Jail
            The jail which the action belongs to.
        name : str
            Named assigned to the action.
        host : str, optional
            SMTP host, of host:port format. Default host "localhost" and
            port "25"
        user : str, optional
            Username used for authentication with SMTP server.
        password : str, optional
            Password used for authentication with SMTP server.
        sendername : str, optional
            Name to use for from address in email. Default "Fail2Ban".
        sender : str, optional
            Email address to use for from address in email.
            Default "fail2ban".
        dest : str, optional
            Email addresses of intended recipient(s) in comma space ", "
            delimited format. Default "root".
        matches : str, optional
            Type of matches to be included from ban in email. Can be one
            of "matches", "ipmatches" or "ipjailmatches". Default None
            (see man jail.conf.5).
        """
        super(SMTPAction, self).__init__(jail, name)
        self.host = host
        #TODO: self.ssl = ssl
        self.user = user
        self.password = password
        self.fromname = sendername
        self.fromaddr = sender
        self.toaddr = dest
        self.matches = matches
        # CallingMap evaluates callables lazily, so hostname/bantime are
        # resolved at message-send time, not at action creation.
        self.message_values = CallingMap(
            jailname = self._jail.name,
            hostname = socket.gethostname,
            bantime = self._jail.actions.getBanTime,
            )

    def _sendMessage(self, subject, text):
        """Sends message based on arguments and instance's properties.

        Parameters
        ----------
        subject : str
            Subject of the email.
        text : str
            Body of the email.

        Raises
        ------
        SMTPConnectionError
            Error on connecting to host.
        SMTPAuthenticationError
            Error authenticating with SMTP server.
        SMTPException
            See Python `smtplib` for full list of other possible
            exceptions.
        """
        msg = MIMEText(text)
        msg['Subject'] = subject
        msg['From'] = formataddr((self.fromname, self.fromaddr))
        msg['To'] = self.toaddr
        msg['Date'] = formatdate()
        smtp = smtplib.SMTP()
        try:
            self._logSys.debug("Connected to SMTP '%s', response: %i: %s",
                               self.host, *smtp.connect(self.host))
            if self.user and self.password:
                smtp.login(self.user, self.password)
            # sendmail() returns a dict of recipients that were refused.
            failed_recipients = smtp.sendmail(
                self.fromaddr, self.toaddr.split(", "), msg.as_string())
        except smtplib.SMTPConnectError:
            self._logSys.error("Error connecting to host '%s'", self.host)
            raise
        except smtplib.SMTPAuthenticationError:
            self._logSys.error(
                "Failed to authenticate with host '%s' user '%s'",
                self.host, self.user)
            raise
        except smtplib.SMTPException:
            self._logSys.error(
                "Error sending mail to host '%s' from '%s' to '%s'",
                self.host, self.fromaddr, self.toaddr)
            raise
        else:
            if failed_recipients:
                self._logSys.warning(
                    "Email to '%s' failed to following recipients: %r",
                    self.toaddr, failed_recipients)
            self._logSys.debug("Email '%s' successfully sent", subject)
        finally:
            try:
                self._logSys.debug("Disconnected from '%s', response %i: %s",
                                   self.host, *smtp.quit())
            except smtplib.SMTPServerDisconnected:
                pass # Not connected

    def start(self):
        """Sends email to recipients informing that the jail has started.
        """
        self._sendMessage(
            "[Fail2Ban] %(jailname)s: started on %(hostname)s" %
                self.message_values,
            messages['start'] % self.message_values)

    def stop(self):
        """Sends email to recipients informing that the jail has stopped.
        """
        self._sendMessage(
            "[Fail2Ban] %(jailname)s: stopped on %(hostname)s" %
                self.message_values,
            messages['stop'] % self.message_values)

    def ban(self, aInfo):
        """Sends email to recipients informing that ban has occurred.

        Parameters
        ----------
        aInfo : dict
            Dictionary which includes information in relation to
            the ban.
        """
        # aInfo gains the lazy jailname/hostname/bantime values so the
        # templates below can interpolate either source.
        aInfo.update(self.message_values)
        message = "".join([
            messages['ban']['head'],
            messages['ban'].get(self.matches, ""),
            messages['ban']['tail']
            ])
        self._sendMessage(
            "[Fail2Ban] %(jailname)s: banned %(ip)s from %(hostname)s" %
                aInfo,
            message % aInfo)
Action = SMTPAction
| ActualizeInMaterial/gentooskyline | system/Z575/OSes/gentoo/in_virtualbox/filesystem_now/gentoo/etc/fail2ban/action.d/smtp.py | Python | unlicense | 6,021 |
from fractions import Fraction
from typing import Tuple
from .base import list_to_number
Representation = Tuple[Tuple[int, int], Tuple[int, int]]
def unit_fraction_to_representation(denominator: int,
                                    base: int=10) -> Representation:
    """Expansion of the unit fraction ``1 / denominator`` in ``base``."""
    unit = Fraction(1, denominator)
    return fraction_to_representation(unit, base)
def fraction_to_representation(fraction: Fraction,
                               base: int=10) -> Representation:
    """Return the base-`base` expansion of ``fraction`` (0 <= x < 1) as
    ``((prefix_digits, prefix_length), (repeat_digits, repeat_length))``,
    i.e. 0.prefix(repeat) with the repeating cycle in parentheses.
    For example 1/6 in base 10 gives ``((1, 1), (6, 1))`` -> 0.1(6).
    Raises ValueError when the fraction is outside [0, 1)."""
    if fraction < 0 or fraction >= 1:
        raise ValueError(f'Cannot find decimal expansion of {fraction}, '
                         f' require 0 <= x < 1.')
    numerator = fraction.numerator
    denominator = fraction.denominator
    # Digits are produced in blocks of `block_length` base-`base` digits;
    # block_size is the smallest power of the base >= denominator, so one
    # divmod step always yields a quotient that fits in one block.
    block_size = 1
    block_length = 0
    while block_size < denominator:
        block_size *= base
        block_length += 1
    # Long division: a repeated remainder marks the start of the repeating
    # cycle; remainder 0 means the expansion terminates.
    remainders = []
    blocks = []
    remainder = block_size * numerator
    while (remainder not in remainders) and remainder != 0:
        remainders.append(remainder)
        block, remainder = divmod(remainder, denominator)
        blocks.append(block)
        remainder *= block_size
    if remainder == 0: # terminating
        index = len(remainders)
    else: # repeating
        index = remainders.index(remainder)
    # Pack prefix and cycle as (digits-as-int, digit-count) pairs; the
    # digit count must be kept separately to preserve leading zeros.
    prefix = list_to_number(blocks[:index], block_size), \
        index * block_length
    repeat = list_to_number(blocks[index:], block_size), \
        (len(blocks) - index) * block_length
    return prefix, repeat
def representation_to_fraction(representation: Representation,
                               base: int=10) -> Fraction:
    """Inverse of ``fraction_to_representation``: rebuild the exact fraction
    from its ((prefix, len), (repeat, len)) expansion in ``base``."""
    (prefix_digits, prefix_length), (repeat_digits, repeat_length) = representation
    shift = base ** prefix_length
    if repeat_length == 0:
        # Terminating expansion: just prefix / base**prefix_length.
        return Fraction(prefix_digits, shift)
    # A cycle of length L contributes repeat / (base**L - 1), shifted
    # past the prefix; combine both parts over a common denominator.
    cycle_denominator = base ** repeat_length - 1
    combined_numerator = prefix_digits * cycle_denominator + repeat_digits
    return Fraction(combined_numerator, shift * cycle_denominator)
| cryvate/project-euler | project_euler/library/fraction_representation.py | Python | mit | 2,055 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import os
import platform
import subprocess
import sys
from distutils.spawn import find_executable
from pipes import quote
# Directories (relative to the repo root) prepended to sys.path in
# bootstrap() so mach can import its command modules and the wpt harness.
SEARCH_PATHS = [
    os.path.join("python", "tidy"),
    os.path.join("tests", "wpt"),
    os.path.join("tests", "wpt", "harness"),
]
# Individual files providing mach commands.
MACH_MODULES = [
    os.path.join('python', 'servo', 'bootstrap_commands.py'),
    os.path.join('python', 'servo', 'build_commands.py'),
    os.path.join('python', 'servo', 'testing_commands.py'),
    os.path.join('python', 'servo', 'post_build_commands.py'),
    os.path.join('python', 'servo', 'devenv_commands.py'),
]
# Mach command categories; higher priority sorts earlier in `mach help`.
CATEGORIES = {
    'bootstrap': {
        'short': 'Bootstrap Commands',
        'long': 'Bootstrap the build system',
        'priority': 90,
    },
    'build': {
        'short': 'Build Commands',
        'long': 'Interact with the build system',
        'priority': 80,
    },
    'post-build': {
        'short': 'Post-build Commands',
        'long': 'Common actions performed after completing a build.',
        'priority': 70,
    },
    'testing': {
        'short': 'Testing',
        'long': 'Run tests.',
        'priority': 60,
    },
    'devenv': {
        'short': 'Development Environment',
        'long': 'Set up and configure your development environment.',
        'priority': 50,
    },
    'build-dev': {
        'short': 'Low-level Build System Interaction',
        'long': 'Interact with specific parts of the build system.',
        'priority': 20,
    },
    'misc': {
        'short': 'Potpourri',
        'long': 'Potent potables and assorted snacks.',
        'priority': 10,
    },
    'disabled': {
        'short': 'Disabled',
        'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable '
                'for your current context, run "mach <command>" to see why.',
        'priority': 0,
    }
}
def _get_exec(*names):
    """Return the path of the first of *names* found on PATH, else None.

    NOTE(review): relies on distutils.spawn.find_executable, which was
    removed from the stdlib in Python 3.12 — acceptable here because
    bootstrap() explicitly requires Python 2.7 (and rejects Python 3).
    """
    for name in names:
        path = find_executable(name)
        if path is not None:
            return path
    return None
def _get_virtualenv_script_dir():
# Virtualenv calls its scripts folder "bin" on linux/OSX/MSYS64 but "Scripts" on Windows
if os.name == "nt" and os.path.sep != "/":
return "Scripts"
return "bin"
# Possible names of executables, sorted from most to least specific;
# _get_exec() returns the first one found on PATH.
PYTHON_NAMES = ["python-2.7", "python2.7", "python2", "python"]
VIRTUALENV_NAMES = ["virtualenv-2.7", "virtualenv2.7", "virtualenv2", "virtualenv"]
PIP_NAMES = ["pip-2.7", "pip2.7", "pip2", "pip"]
def _activate_virtualenv(topdir):
    """Create (if missing) and activate python/_virtualenv under *topdir*,
    then pip-install each pinned requirements file into it.

    A marker file per requirements file records the last install; a
    requirements file is reinstalled when it is newer than its marker
    (with a 10-second grace window).  Exits the process on any failure.
    Python-2 only: uses execfile() to run the venv's activate_this.py.
    """
    virtualenv_path = os.path.join(topdir, "python", "_virtualenv")
    python = _get_exec(*PYTHON_NAMES)
    if python is None:
        sys.exit("Python is not installed. Please install it prior to running mach.")
    script_dir = _get_virtualenv_script_dir()
    activate_path = os.path.join(virtualenv_path, script_dir, "activate_this.py")
    if not (os.path.exists(virtualenv_path) and os.path.exists(activate_path)):
        virtualenv = _get_exec(*VIRTUALENV_NAMES)
        if virtualenv is None:
            sys.exit("Python virtualenv is not installed. Please install it prior to running mach.")
        process = subprocess.Popen(
            [virtualenv, "-p", python, virtualenv_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        process.wait()
        if process.returncode:
            sys.exit("Python virtualenv failed to execute properly: {}"
                     .format(process.communicate()[1]))
    execfile(activate_path, dict(__file__=quote(activate_path)))
    # TODO: Right now, we iteratively install all the requirements by invoking
    # `pip install` each time. If it were the case that there were conflicting
    # requirements, we wouldn't know about them. Once
    # https://github.com/pypa/pip/issues/988 is addressed, then we can just
    # chain each of the requirements files into the same `pip install` call
    # and it will check for conflicts.
    requirements_paths = [
        os.path.join("python", "requirements.txt"),
        os.path.join("tests", "wpt", "harness", "requirements.txt"),
        os.path.join("tests", "wpt", "harness", "requirements_servo.txt"),
    ]
    for req_rel_path in requirements_paths:
        req_path = os.path.join(topdir, req_rel_path)
        # Flatten the relative path into a single marker filename.
        marker_file = req_rel_path.replace(os.path.sep, '-')
        marker_path = os.path.join(virtualenv_path, marker_file)
        try:
            # Skip when the marker is newer than the requirements file.
            if os.path.getmtime(req_path) + 10 < os.path.getmtime(marker_path):
                continue
        except OSError:
            pass
        pip = _get_exec(*PIP_NAMES)
        if pip is None:
            sys.exit("Python pip is not installed. Please install it prior to running mach.")
        process = subprocess.Popen(
            [pip, "install", "-q", "-r", req_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        process.wait()
        if process.returncode:
            sys.exit("Pip failed to execute properly: {}"
                     .format(process.communicate()[1]))
        # Touch the marker so the next run can skip this file.
        open(marker_path, 'w').close()
def bootstrap(topdir):
    """Validate the environment, activate the virtualenv, and return a
    fully configured mach.main.Mach instance for the repo at *topdir*."""
    topdir = os.path.abspath(topdir)
    # We don't support paths with Unicode characters for now
    # https://github.com/servo/servo/issues/10002
    try:
        topdir.decode('ascii')  # Python 2: str.decode; fails on non-ASCII
    except UnicodeDecodeError:
        print('Cannot run mach in a path with Unicode characters.')
        print('Current path:', topdir)
        sys.exit(1)
    # We don't support paths with spaces for now
    # https://github.com/servo/servo/issues/9442
    if ' ' in topdir:
        print('Cannot run mach in a path with spaces.')
        print('Current path:', topdir)
        sys.exit(1)
    # Ensure we are running Python 2.7+. We put this check here so we generate a
    # user-friendly error message rather than a cryptic stack trace on module
    # import.
    if not (3, 0) > sys.version_info >= (2, 7):
        print('Python 2.7 or above (but not Python 3) is required to run mach.')
        print('You are running Python', platform.python_version())
        sys.exit(1)
    _activate_virtualenv(topdir)

    def populate_context(context, key=None):
        # Mach calls this to resolve context attributes; only 'topdir'
        # is provided here.
        if key is None:
            return
        if key == 'topdir':
            return topdir
        raise AttributeError(key)

    sys.path[0:0] = [os.path.join(topdir, path) for path in SEARCH_PATHS]
    import mach.main
    mach = mach.main.Mach(os.getcwd())
    mach.populate_context_handler = populate_context
    for category, meta in CATEGORIES.items():
        mach.define_category(category, meta['short'], meta['long'],
                             meta['priority'])
    for path in MACH_MODULES:
        mach.load_commands_from_file(os.path.join(topdir, path))
    return mach
| catchmrbharath/servo | python/mach_bootstrap.py | Python | mpl-2.0 | 7,079 |
""":mod:`asuka.services.wsgi` --- WSGI server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It supports the following servers through `Green Unicorn`_:
``sync`` (default)
It should handle most 'normal' types of workloads.
You'll want to read http://gunicorn.org/design.html for information
on when you might want to choose one of the other worker classes.
``eventlet``
Eventlet_ is a concurrent networking library for Python that
allows you to change how you run your code, not how you write it.
``gevent``
   gevent_ is a coroutine_-based Python networking library that
uses greenlet_ to provide a high-level synchronous API on top
of the libevent_ event loop.
``tornado``
Tornado_ is an open source version of the scalable,
non-blocking web server and tools that power FriendFeed.
``meinheld``
Meinheld_ is a high-performance WSGI-compliant web server that
takes advantage of greenlet_ and picoev_ to enable asynchronous
network I/O in a light-weight manner.
.. _Green Unicorn: http://gunicorn.org/
.. _Eventlet: http://eventlet.net/
.. _gevent: http://www.gevent.org/
.. _coroutine: http://en.wikipedia.org/wiki/Coroutine
.. _greenlet: http://codespeak.net/py/0.9.2/greenlet.html
.. _libevent: http://monkey.org/~provos/libevent/
.. _Tornado: http://www.tornadoweb.org/
.. _Meinheld: http://meinheld.org/
.. _picoev: http://developer.cybozu.co.jp/kazuho/2009/08/picoev-a-tiny-e.html
"""
import pipes
from ..service import Service
__all__ = ('EventletWorker', 'GeventWorker', 'GunicornService',
'MeinheldWorker', 'SyncWorker', 'TornadoWorker', 'Worker')
class GunicornService(Service):
    """Asuka service that deploys a WSGI application under Green Unicorn.

    The worker type is chosen from WORKERS by the ``server`` config key.
    When ``auth_required`` is set (or a custom ``wsgi_script`` is
    configured), a wrapper module ``web_wsgi.py`` is generated and served
    instead of the configured app.
    """

    def __init__(self, *args, **kwargs):
        super(GunicornService, self).__init__(*args, **kwargs)
        # Instantiate the worker class named by the 'server' config key.
        self.worker = WORKERS[self.config['server']]()

    @property
    def required_apt_packages(self):
        # Base service packages plus gunicorn itself and whatever the
        # selected worker needs.
        packages = set(super(GunicornService, self).required_apt_packages)
        packages.update(['python-setproctitle', 'gunicorn'])
        packages |= self.worker.required_apt_packages
        return packages

    @property
    def required_python_packages(self):
        packages = set(super(GunicornService, self).required_python_packages)
        packages.update(self.worker.required_python_packages)
        if self.auth_required:
            # The generated auth wrapper below is built on Werkzeug.
            packages.add('Werkzeug')
        return packages

    @property
    def wsgi_app(self):
        # When a wrapper module is generated (custom script or auth gate),
        # gunicorn must load it instead of the configured app.
        if 'wsgi_script' in self.config or self.auth_required:
            return 'web_wsgi:application'
        return self.config['wsgi_app']

    @property
    def wsgi_script(self):
        """Source of the generated ``web_wsgi.py`` module, or None.

        With auth enabled, the configured application is wrapped by a
        Werkzeug-based gate that: answers ELB health checks, validates
        HMAC-signed ``asuka_auth``/``asuka_sig`` cookies, accepts fresh
        (one-minute) signed tokens handed out by the delegate URL, and
        otherwise redirects the browser to the delegate to authenticate.
        """
        wsgi_script = self.config.get('wsgi_script')
        if not self.auth_required:
            return wsgi_script
        if wsgi_script is None:
            imp = self.config['wsgi_app'].split(':')
            wsgi_script = 'from {0} import {1} as application'.format(*imp)
        # NOTE: the template deliberately avoids literal braces
        # (dict(...) instead of {...}) so str.format() below only touches
        # the {secret!r}-style placeholders.
        appended_script = '''
import datetime
import hashlib
import hmac

import werkzeug.urls
import werkzeug.wrappers


@werkzeug.wrappers.BaseRequest.application
def auth_application(request):
    environ = request.environ
    if (environ.get('HTTP_USER_AGENT', '').startswith('ELB-HealthChecker/') and
        'X-Forwarded-For' not in request.headers and
        'X-Forwarded-Port' not in request.headers and
        'X-Forwarded-Proto' not in request.headers):
        return werkzeug.wrappers.BaseResponse(
            ['ELB Pong'],
            status=200,
            mimetype='text/plain'
        )
    auth = request.cookies.get('asuka_auth')
    sig = request.cookies.get('asuka_sig')
    if auth and sig:
        secret = {secret!r}
        if sig == hmac.new(secret, auth, hashlib.sha256).hexdigest():
            try:
                auth = datetime.datetime.strptime(auth, '%Y%m%d%H%M%S')
            except ValueError:
                pass
            else:
                if datetime.datetime.utcnow() <= auth:
                    return auth_application.application
    token = request.args.get('token')
    sig = request.args.get('sig')
    if token and sig:
        secret = {consistent_secret!r}
        if sig == hmac.new(secret, token, hashlib.sha256).hexdigest():
            try:
                ts, login, host = token.split('/', 2)
                ts = datetime.datetime.strptime(ts, '%Y%m%d%H%M%S')
            except ValueError:
                pass
            else:
                if host == request.host:
                    gap = datetime.datetime.utcnow() - ts
                    if gap <= datetime.timedelta(minutes=1):
                        back = request.cookies.get('asuka_auth_back',
                                                   request.url)
                        expires = datetime.timedelta(seconds={auth_expires!r})
                        auth_ts = datetime.datetime.utcnow() + expires
                        auth = auth_ts.strftime('%Y%m%d%H%M%S')
                        sig = hmac.new({secret!r}, auth, hashlib.sha256)
                        response = werkzeug.wrappers.Response(
                            ['Authenticated; redirecting to ', back],
                            status=302,
                            headers=dict(Location=back),
                            mimetype='text/plain'
                        )
                        response.delete_cookie('asuka_auth_back')
                        response.set_cookie('asuka_auth', auth,
                                            expires=auth_ts)
                        response.set_cookie('asuka_sig', sig.hexdigest(),
                                            expires=auth_ts)
                        return response
    delegate_url = {delegate_url!r} + '?' + werkzeug.urls.url_encode(
        dict(back=request.url)
    )
    response = werkzeug.wrappers.BaseResponse(
        ['Redirecting to ', delegate_url],
        status=302,
        headers=dict(Location=delegate_url),
        mimetype='text/plain'
    )
    response.set_cookie('asuka_auth_back', request.url)
    return response


auth_application.application = application
application = auth_application
'''
        secret = '.'.join((
            self.app.name,
            self.branch.label,
            self.app.consistent_secret
        ))
        wsgi_script += appended_script.format(
            secret=secret,
            consistent_secret=self.app.consistent_secret,
            delegate_url=self.app.url_base + '/delegate/',
            auth_expires=self.config.get('auth_expires', 3 * 3600)
        )
        return wsgi_script

    @property
    def auth_required(self):
        # Truthy 'auth_required' config flag enables the cookie gate.
        return bool(self.config.get('auth_required'))

    def install(self, instance):
        """Write the generated wsgi module (if any) and an Upstart job
        onto *instance*, then start the service."""
        super(GunicornService, self).install(instance)
        format_args = {
            'service': self,
            'app_name': instance.app.name,
            'service_name': self.name,
            'service_path': instance.app.name + '/' + self.name
        }
        wsgi_script = self.wsgi_script
        if wsgi_script is not None:
            instance.write_file(
                '/etc/{service_path}/web_wsgi.py'.format(**format_args),
                wsgi_script,
                sudo=True
            )
        server_options = self.config.get('server_options', {})
        server_options.setdefault('worker_class', self.worker.worker_class)
        # Render config options as CLI flags: flags that are True become
        # bare switches; False/None options are dropped entirely.
        gunicorn_options = ' '.join(
            '--' + k.replace('_', '-')
            if v is True
            else '--' + k.replace('_', '-') + '=' + pipes.quote(str(v))
            for k, v in server_options.items()
            if v is not False and v is not None
        )
        instance.write_file(
            '/etc/init/{app_name}-{service_name}.conf'.format(**format_args),
            '''\
description "{app_name} {service_name} service"
start on runlevel [2345]
stop on runlevel [06]
env PYTHONPATH="/etc/{service_path}"
pre-start script
    mkdir -p -m0777 /var/run/{app_name} /var/log/{service_path}
    chown {app_name}:{app_name} /var/run/{app_name} /var/log/{app_name}
end script
script
    exec gunicorn --name {app_name}-{service_name} \
                  {gunicorn_options} \
                  --user={app_name} --group={app_name} \
                  --pid /var/run/{service_path}.pid \
                  --access-logfile=/var/log/{service_path}/access.log \
                  --error-logfile=/var/log/{service_path}/error.log \
                  {service.wsgi_app}
end script
post-stop script
    rm -f /var/run/{service_path}.pid
end script
# vim: set et sw=4 ts=4 sts=4
'''.format(gunicorn_options=gunicorn_options, **format_args),
            sudo=True
        )
        instance.sudo([
            'service', instance.app.name + '-' + self.name, 'start'
        ])
class Worker(object):
    """Describes a Gunicorn worker type and the packages it depends on."""

    @property
    def worker_class(self):
        """Value for gunicorn's ``--worker-class``; subclasses must set it."""
        raise NotImplementedError('worker_class has to be provided')

    @property
    def required_apt_packages(self):
        """Debian packages this worker needs; none by default."""
        return frozenset()

    @property
    def required_python_packages(self):
        """PyPI packages this worker needs; none by default."""
        return frozenset()
class SyncWorker(Worker):
    """Default synchronous worker; handles most 'normal' workloads."""
    worker_class = 'sync'
class EventletWorker(Worker):
    """Eventlet-based asynchronous worker."""
    worker_class = 'eventlet'

    @property
    def required_apt_packages(self):
        return frozenset(['python-eventlet'])
class GeventWorker(Worker):
    """gevent-based (greenlet/libevent) asynchronous worker."""
    worker_class = 'gevent'

    @property
    def required_apt_packages(self):
        return frozenset(['python-gevent'])
class TornadoWorker(Worker):
    """Tornado-based non-blocking worker."""
    worker_class = 'tornado'

    @property
    def required_apt_packages(self):
        return frozenset(['python-tornado'])
class MeinheldWorker(Worker):
    """Meinheld (greenlet + picoev) worker; installed from PyPI, so it
    needs the build toolchain and greenlet headers from apt."""
    worker_class = 'egg:meinheld#gunicorn_worker'

    @property
    def required_apt_packages(self):
        return frozenset(['build-essential', 'python-dev', 'python-greenlet',
                          'python-greenlet-dev'])

    @property
    def required_python_packages(self):
        return frozenset(['meinheld'])
#: (:class:`collections.Mapping`) The mapping of server identifier
#: strings (e.g. ``'sync'``, ``'eventlet'``) to worker classes
#: (e.g. :class:`SyncWorker`, :class:`EventletWorker`).
#: GunicornService.__init__ resolves its ``server`` config key here.
WORKERS = {
    'sync': SyncWorker,
    'eventlet': EventletWorker,
    'gevent': GeventWorker,
    'tornado': TornadoWorker,
    'meinheld': MeinheldWorker
}
| crosspop/asuka | asuka/services/wsgi.py | Python | mit | 10,209 |
"""
145. Binary Tree Postorder Traversal
https://leetcode.com/problems/binary-tree-postorder-traversal/
"""
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def postorderTraversal(self, root: 'Optional[TreeNode]') -> 'List[int]':
        """Return the post-order (left, right, root) values of the tree.

        Fix: the original annotated the return as ``List[int]`` although
        only ``Optional`` is imported from typing, so evaluating the
        annotation raised NameError when the class body was created.
        String (deferred) annotations avoid evaluating either name.
        """
        values = []

        def walk(node):
            # Depth-first: children first, then the node itself.
            if node is None:
                return
            walk(node.left)
            walk(node.right)
            values.append(node.val)

        walk(root)
        return values
def main():
    """Demo entry point: traverse a small tree and print the result.

    Fix: the original template called ``s.xxx()``, a method that does
    not exist, so running the script always raised AttributeError.
    """
    s = Solution()
    root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
    print(s.postorderTraversal(root))


if __name__ == '__main__':
    # main() returns None, so SystemExit(None) exits with status 0.
    raise SystemExit(main())
| pisskidney/leetcode | easy/145.py | Python | mit | 737 |
# coding=utf-8
from tkinter import *
import random
WIDTH = 800       # canvas width, px
HEIGHT = 600      # canvas height, px
BALL_RADIUS = 3   # oval bbox is BALL_RADIUS wide (see create_oval below)
BALL_SPEED = 1    # pixels moved per animation step
root = Tk()
root.title("Tkinter demo")
# animation canvas (translated from Russian: "область анимации")
c = Canvas(root, width=WIDTH, height=HEIGHT, background="#ffffff")
c.pack()
# the wandering ball, centred on the canvas
mover = c.create_oval(WIDTH/2-BALL_RADIUS/2,
                      HEIGHT/2-BALL_RADIUS/2,
                      WIDTH/2+BALL_RADIUS/2,
                      HEIGHT/2+BALL_RADIUS/2, fill="black")
def mover_move():
    """Move the ball one step in a uniformly random cardinal direction.

    Fix: the third branch was written ``0. <= rand < 0.75`` (a typo for
    ``0.5``); it only behaved correctly because the earlier ``elif``
    branches had already consumed rand < 0.5, and would silently bias the
    walk if the branches were ever reordered.  The thresholds now
    partition [0, 1) into four explicit, equal quarters.
    """
    rand = random.random()
    if rand < 0.25:
        c.move(mover, 0, BALL_SPEED)
    elif rand < 0.5:
        c.move(mover, 0, -BALL_SPEED)
    elif rand < 0.75:
        c.move(mover, BALL_SPEED, 0)
    else:
        c.move(mover, -BALL_SPEED, 0)
def main():
    # Advance the ball one step, then reschedule ourselves.
    mover_move()
    root.after(25, main) # 40 fps
main()
| sejros/The-Nature-of-Python-Examples | introduction/0 Intro.py | Python | mit | 848 |
from __future__ import division
from __future__ import print_function
import os
import sys
import functools
# Update path
# Make the project root (everything up to and including 'proj1')
# importable so the sibling Simulator/Utils packages resolve.
root = os.path.join(os.getcwd().split('proj1')[0], 'proj1')
if root not in sys.path:
    sys.path.append(root)
import numpy as np
import pandas as pd
import multiprocessing
from pdb import set_trace
from Simulator import simulate
from Utils.PlotsUtils import line, line2
from Utils.RandomUtil import Random
from Utils.MisclUtils import TimeUtil
# Shared RNG and timer instances for the whole module.
rand = Random()
timer = TimeUtil()
# Set seed
rand.set_seed(seed_val=12458)
def customer_loss_rate(customers):
    """Fraction of customers that were NOT serviced (the loss rate).

    Fix: the original returned ``served / total`` — the *service* rate —
    despite the function's name; it also raised ZeroDivisionError on an
    empty list, which now yields 0.0 (no arrivals, nothing lost).
    Each customer is expected to expose a boolean ``serviced`` attribute.
    """
    if not customers:
        return 0.0
    served = np.sum([customer.serviced for customer in customers])
    return 1 - served / len(customers)
def plot_runtime(x=None, y=None):
    # Thin wrapper around Utils.PlotsUtils.line with the task-5 labels.
    line(x, y, x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def plot_runtime_vs_avg(x, y, y_1):
    # Overlay actual run times (y) and expected rho (y_1) on one chart.
    line2(x, y, x, y_1, label_1="Actual Runtimes", label_2="Expected value of $\rho$", x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def task_5():
    """Time simulate() for rho in [0.05, 0.95) step 0.1 and write the
    (rho, elapsed seconds) pairs to tasks/task5.csv."""
    rho_list = np.arange(0.05, 1, 0.1)
    C = 1e5  # customers serviced per simulation run
    elapsed = []
    for rho in rho_list:
        start_time = timer.current_time()
        serviced = simulate(l = rho, server_lim = 40, max_serviced=C, L=1, verbose=False)
        end_time = timer.current_time()
        elapsed.append(end_time-start_time)
    data = pd.DataFrame([[a,b] for a, b in zip(rho_list, elapsed)], columns=["Rho", "Seconds"])
    data.to_csv(os.path.abspath(os.path.join(root,"tasks/task5.csv")))
def task5_plot():
    """Plot the recorded run times against rho from tasks/task5.csv.

    Fix: removed a leftover ``pdb.set_trace()`` breakpoint that dropped
    the script into the debugger right after plotting.
    """
    data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
    plot_runtime(data["Rho"], data["Seconds"])
def compare_plot():
    """Plot the empirical mean of exponential samples for each rho.

    Fix: replaced Python-2-only ``xrange`` with ``range``, matching the
    module's ``from __future__`` py2/py3-compat imports.
    NOTE(review): this reads the CSV for its x values but plots
    ``average_rho`` as y through plot_runtime (whose y-label says "Run
    Times"); presumably plot_runtime_vs_avg was intended — confirm
    before changing behavior.
    """
    rho_list = np.arange(0.05, 1, 0.1)
    average_rho = [np.mean([rand.exponential(lam=p) for _ in range(10000)]) for p in rho_list]
    data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
    plot_runtime(data["Rho"], average_rho)
if __name__ == "__main__":
    # Regenerate the timing data, then render both plots.
    task_5()
    task5_plot()
    compare_plot()
| rahlk/CSC579__Computer_Performance_Modeling | simulation/proj1/tasks/task5.py | Python | mit | 2,063 |
from collections import OrderedDict
from tasks.meta import OBSColumn
from tasks.base_tasks import ColumnsTask
class POIColumns(ColumnsTask):
    """Column metadata for point-of-interest address fields."""

    def version(self):
        # Bump this whenever the column definitions below change.
        return 3

    def columns(self):
        # (column id, human-readable name) pairs, in output order; every
        # column is a plain text OBSColumn.
        specs = (
            ('house_number', 'House number'),
            ('street_name', 'Street name'),
            ('postal_code', 'Postal code'),
            ('address', 'Address'),
        )
        return OrderedDict(
            (col_id, OBSColumn(type='Text', name=label))
            for col_id, label in specs
        )
| CartoDB/bigmetadata | tasks/poi.py | Python | bsd-3-clause | 833 |
#--*-- coding:utf-8 --*--
'''
Created on 2015-05-20
@author: stm
'''
from model.viosmachine import VIOSMachineRecord, IP_ID, NAME_ID, SN_ID, PHASE,\
DATA_COLS, PROGRESS, SEL_ID, IP_FOR_NIMINSTALL, IP_GW_FOR_NIMINSTALL
from utils import DebugLog
from base.msgcodec import MsgCodec
from base.cmdmsg import CMDMsg, SERVERSCAN, SAVESELECTION, GETSTATUS, \
REMVSERVER, CREATEVIOSLPAR, CHECKCREATEVIOSLPAR, UPDATESTATUS
import time
import pickle
import os.path
import string
from base.vdstate import PRGRS_FINISHED, StateBase
from utils.PingTest import PingTest
from utils.NimIPPool import NimIPPool
class DataEntity(object):
'''
classdocs
'''
def __init__(self, vd_config):
'''
Constructor
'''
self.config = vd_config
self.machine_records = []
self.ip_pool_obj = NimIPPool()
self.msg_decoder = MsgCodec()
self.data_created = False
self.data_empty = False
self.file_name = "data_"+time.strftime("%Y_%m_%d_%H_%M_%S.dat", time.gmtime())
records_dir_name = "records"
if not os.path.exists(records_dir_name):
os.mkdir(records_dir_name)
self.file_name = os.path.join(records_dir_name, self.file_name)
DebugLog.info_print("The record file path: %s." % os.path.abspath(self.file_name))
self.file_handler = open(self.file_name, "wb")
self.init_data()
self.cur_data_pointer = 0
def __del__(self):
'''
Destructor
'''
self.file_handler.close()
def init_data(self):
for idx in range(10):
self.machine_records.append(VIOSMachineRecord())
self.machine_records[idx].setIndex(idx)
ip_nim_internal = self.config.get('nimserver_ip_pool', 'ip_pool_startip')
self.ip_nim_internal_prefix, self.ip_nim_internal_base = \
string.rsplit(ip_nim_internal, '.', 1)
self.ip_gw_nim_internal = self.config.get('nimserver_ip_pool',
'ip_pool_gateway_ip')
pickle.dump(self.machine_records, self.file_handler)
def test_existed(self, sn_tested):
sn_existed_list = []
bexisting = False
if 0 == self.cur_data_pointer:
bexisting = False
else:
for indx in range(self.cur_data_pointer):
sn_existed_list.append(self.machine_records[indx].getSerialNm())
bexisting = sn_tested in sn_existed_list
return bexisting
def savedata(self, msg_body):
'''
Data format: name, ip_addr, sn, state
'''
DebugLog.debug_print_level1(msg_body)
_cmd_key, _server_id, data = self.msg_decoder.decodeMsg(msg_body)
#data_list = eval(data)
data_list = self.reformat(data)
#_ret_cmd, msg_body =
# name, ip_addr, serial_nm, stat
isempty = len(data_list[0]) < 2
if CMDMsg.getCMD(SERVERSCAN) == _cmd_key:
if isempty:
return
if not self.data_created:
self.data_created = True
MSG_IDS_MAP = [NAME_ID, IP_ID, SN_ID, PHASE]
for idx in range(len(data_list)):
# 2: SN_ID, 3: PHASE,
phase_info = data_list[idx][3]
if phase_info.endswith("No Connection"):
DebugLog.info_print("Got server %s status: %s" %
(data_list[idx][0], phase_info))
continue
sn_tested = data_list[idx][2]
if self.test_existed(sn_tested):
continue
pointer_indx = self.cur_data_pointer
for n_id in range(len(MSG_IDS_MAP)):
self.machine_records[pointer_indx].updateValue(
MSG_IDS_MAP[n_id],
data_list[idx][n_id])
self.machine_records[pointer_indx].updateValue(
PROGRESS,
StateBase.get_state_progress_const_name(PRGRS_FINISHED))
# ip_nim_internal_temp = "%s.%s" % \
# (self.ip_nim_internal_prefix,
# int(self.ip_nim_internal_base) + pointer_indx*10)
# ip_test_obj = PingTest(ip_nim_internal_temp, 10)
#_ip_nim_internal = self.ip_pool_obj.getAvailableIP(sn_tested)
#ip_pool_obj = NimIPPool()
_ip_nim_internal = NimIPPool.getAvailableIP(sn_tested)
self.machine_records[pointer_indx].updateValue(IP_FOR_NIMINSTALL,
_ip_nim_internal)
self.machine_records[pointer_indx].updateValue(IP_GW_FOR_NIMINSTALL,
self.ip_gw_nim_internal)
self.cur_data_pointer += 1
elif CMDMsg.getCMD(SAVESELECTION) == _cmd_key:
data_row_id = int(_server_id)
data_val = bool(int(data_list[0][0]))
self.machine_records[data_row_id].updateValue(SEL_ID, data_val)
# elif CMDMsg.getCMD(REMVSERVER) == _cmd_key:
# data_row_id = int(_server_id)
# data_val = '[Removed from HMC]'
# self.machine_records[data_row_id].updateValue(PHASE, data_val)
elif CMDMsg.getCMD(CREATEVIOSLPAR) == _cmd_key or \
CMDMsg.getCMD(CHECKCREATEVIOSLPAR) == _cmd_key:
data_row_id = int(_server_id)
_phase, _progress = eval(data)
self.machine_records[data_row_id].updateValue(PHASE, _phase)
self.machine_records[data_row_id].updateValue(PROGRESS, _progress)
elif CMDMsg.getCMD(GETSTATUS) == _cmd_key:
DebugLog.debug_print_level1("In data entity: save data, " + str(msg_body))
if self.data_created:
# update server state or name only if the sn is equal
ret_id_name = 0
ret_id_sn = 2
ret_id_state = 3
for idx in range(len(data_list)):
for server_idx in range(len(self.machine_records)):
if data_list[idx][ret_id_sn] == self.machine_records[server_idx].getSerialNm():
if self.machine_records[server_idx].get_stage_id_val() < CREATEVIOSLPAR:
if not (self.machine_records[server_idx].getState() == data_list[idx][ret_id_state]):
self.machine_records[server_idx].setState(data_list[idx][ret_id_state])
if not (self.machine_records[server_idx].getServerName() == data_list[idx][ret_id_name]):
self.machine_records[server_idx].setServerName(data_list[idx][ret_id_name])
pickle.dump(self.machine_records, self.file_handler)
elif CMDMsg.getCMD(UPDATESTATUS) == _cmd_key:
data_row_id = int(_server_id)
_phase, _progress, _server_name = eval(data)
self.machine_records[data_row_id].updateValue(PHASE, _phase)
self.machine_records[data_row_id].updateValue(PROGRESS, _progress)
self.machine_records[data_row_id].updateValue(NAME_ID, _server_name)
else:
data_row_id = int(_server_id)
_phase, _progress = eval(data)
self.machine_records[data_row_id].updateValue(PHASE, _phase)
self.machine_records[data_row_id].updateValue(PROGRESS, _progress)
def reformat(self, body):
data_list = None
if isinstance(body, str):
data_list = body[2:-2].split("', '")
elif isinstance(body, list):
data_list = body
format_data_list = []
totalcol = 4
row = len(data_list)
for row_indx in range(row):
temp = data_list[row_indx].split(",")
# for indx in range(totalcol):
# temp.append(data_list[row_indx * totalcol + indx])
format_data_list.append(temp)
return format_data_list
def updateAndSaveDataByName(self, body):
'''
Data format: name, state
'''
data_list = self.reformat(body)
# name, ip_addr, serial_nm, stat
for idx in range(len(data_list)):
for machine_record_id in range(DATA_COLS):
if self.machine_records[machine_record_id].getServerName() == data_list[idx][0]:
self.machine_records[machine_record_id].setState(data_list[idx][1])
def retrieve(self):
'''
return the data as:
[ [AUTO_FLAG, SEL_ID, IDX_ID, IP_ID, PORT_ID, SN_ID, PHASE, PROGRESS, NAME_ID, IP_FOR_NIMINSTALL, IP_GW_FOR_NIMINSTALL],
[SEL_ID, IDX_ID, IP_ID, PORT_ID, SN_ID, PHASE, PROGRESS, NAME_ID],
[SEL_ID, IDX_ID, IP_ID, PORT_ID, SN_ID, PHASE, PROGRESS, NAME_ID],
]
'''
data_ret = []
if not self.machine_records:
data_ret = None
for row_idx in range(len(self.machine_records)):
data_temp = []
for col_idx in range(DATA_COLS):
data_temp.append(self.machine_records[row_idx].getValueById(col_idx))
data_ret.append(data_temp)
return data_ret
def get_data_status_key(self, idx):
    """Return the ``(phase, progress)`` status pair for record *idx*.

    The phase value comes from the PHASE column and the progress value
    from the PROGRESS column of the retrieved row.
    """
    assert idx < len(self.machine_records)
    row = self.getServerData(idx)
    return (row[PHASE], row[PROGRESS])
def update_data_status(self, idx, status):
    """Store *status* into the PROGRESS column of machine record *idx*."""
    self.machine_records[idx].updateValue(PROGRESS, status)
def getCheckedItemsList(self):
    """Return shallow copies of the retrieved rows whose SEL_ID flag is set."""
    return [row[:] for row in self.retrieve() if row[SEL_ID]]
def getServerData(self, server_id):
    """Return the retrieved row for the record at index *server_id*."""
    return self.retrieve()[server_id]
def getServerCount(self):
    """Return the number of machine records currently held."""
    return len(self.machine_records)
| xhan-shannon/SystemControlView | model/dataentity.py | Python | gpl-2.0 | 11,293 |
"""
Module Overview
---------------
Generic **views** the extend the Django REST Framework's generics to
easily integrate with Ember Data and the JSON API.
Let's discuss some of the important design choices in this module.
JsonApiView
^^^^^^^^^^^
The ``JsonApiView`` derives from DRF's ``GenericAPIView``. It sets
the parser and renderer to **drf-ember's** JSON API versions. This
is for convenience. Otherwise, the user would have to keep setting
the parser and renderer when using the Framework's generics.
Additionally, ``JsonApiView`` overrides the ``finalize_response`` method
to include the JSON API's bulk extension's requirement that responses
include an accepted media type that denotes ``supported-ext="bulk"`` to
the client.
DRF CRUD class extensions
^^^^^^^^^^^^^^^^^^^^^^^^^
DRF provides many useful generic view classes that greatly simplify
create-retrieve-update-delete functionality for resources/models persisted in
database/data store.
This module swaps the DRF ``GenericAPIView`` for this module's ``JsonApiView``
in each of the following DRF generic view classes:
``CreateAPIView``, ``ListAPIView``, ``ListCreateAPIView``, ``RetrieveAPIView``,
``DestroyAPIView``, ``UpdateAPIView``, ``RetrieveUpdateAPIView``,
``RetrieveDestroyAPIView``, ``RetrieveUpdateDestroyAPIView``.
For DRF generic view classes with update or destroy functionality,
the framework's mixin is replaced with the **drf-ember** mixin. When
updating, the JSON API requires different response formats if the
server performed changes to a resource in addition to client changes.
The **drf-ember** ``UpdateModelMixin`` provides logic to implement said
requirement, as well as a content type that indicates support for the
"bulk" API extension. Similarly, for view classes that destroy (i.e. delete)
resources, **drf-ember's** ``DestroyModelMixin`` replaces the DRF one
in order to include a content type indicating that the JSON API "bulk"
extension is supported.
CollectionWithBulkAPIView
^^^^^^^^^^^^^^^^^^^^^^^^^
The JSON API provides a "bulk" extension that is quite useful for
client applications that want to reduce the overhead of data transmission.
One potentially tricky developer preference would be for a single endpoint,
such as '/api/persons' or '/api/songs' to handle both the single resource
*and* bulk functionality that the DRF ``ListCreateAPIView`` would typically
handle.
The ``CollectionWithBulkAPIView`` is an attempt at satisfying that preference
in a single view class. The view supports JSON API logic for resource
collections. Specifically:
- GET (retrieve) of a list of resources
- single and bulk POST (create)
- bulk PATCH (update)
- bulk DELETE (destroy)
However, it **does not support PUT** as the JSON API does not currently identify
a use for PUT's approach. The view logic will internally prepare a response with
a 405 "Method 'PUT' not allowed" which the default **drf_ember** exception
handler will convert to a 400 to avoid unwanted granularity/information leakage
about the API.
View Classes
------------
The public API for this module's view classes.
"""
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import generics, mixins, status
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from .parsers import JsonApiParser
from .renderers import JsonApiRenderer, JsonApiBulkRenderer
from .mixins import DestroyModelMixin, ListModelMixin, \
RetrieveModelMixin, UpdateModelMixin
class JsonApiView(generics.GenericAPIView):
    """
    JSON API flavoured replacement for DRF's ``GenericAPIView``.

    Sets the parser and renderer classes to the **drf-ember** JSON API
    implementations so views built on the framework's generics are
    compatible with the Ember.js request/response formats without having
    to configure them on every view.

    Also overrides ``finalize_response`` so that every response
    advertises JSON API bulk-extension support by including
    ``supported-ext="bulk"`` in the accepted media type.

    Attributes:
        parser_classes (tuple): Default includes
            :py:class:`~drf_ember.parsers.JsonApiParser`
        renderer_classes (tuple): Default includes
            :py:class:`~drf_ember.renderers.JsonApiRenderer`
    """
    parser_classes = (JsonApiParser,)
    renderer_classes = (JsonApiRenderer,)

    def finalize_response(self, request, response, *args, **kwargs):
        """
        Append ``supported-ext="bulk"`` to the response's accepted media
        type, per the JSON API bulk extension's requirements.

        Returns:
            Response: A DRF response object
        """
        response = super(JsonApiView, self).finalize_response(
            request, response, *args, **kwargs)
        response.accepted_media_type = (
            '%s; supported-ext="bulk"' % response.accepted_media_type)
        return response
class CreateAPIView(mixins.CreateModelMixin, JsonApiView):
    """Concrete view that creates a model instance via POST."""

    def post(self, request, *args, **kwargs):
        """Delegate creation to ``CreateModelMixin.create``."""
        return self.create(request, *args, **kwargs)
class ListAPIView(ListModelMixin, JsonApiView):
    """Concrete view that lists a queryset via GET."""

    def get(self, request, *args, **kwargs):
        """Delegate listing to ``ListModelMixin.list``."""
        return self.list(request, *args, **kwargs)
class RetrieveAPIView(RetrieveModelMixin, JsonApiView):
    """Concrete view that retrieves a single model instance via GET."""

    def get(self, request, *args, **kwargs):
        """Delegate retrieval to ``RetrieveModelMixin.retrieve``."""
        return self.retrieve(request, *args, **kwargs)
class DestroyAPIView(DestroyModelMixin, JsonApiView):
    """Concrete view that deletes a model instance via DELETE."""

    def delete(self, request, *args, **kwargs):
        """Delegate deletion to ``DestroyModelMixin.destroy``."""
        return self.destroy(request, *args, **kwargs)
class UpdateAPIView(UpdateModelMixin, JsonApiView):
    """Concrete view that updates a model instance via PUT or PATCH."""

    def put(self, request, *args, **kwargs):
        """Full update via ``UpdateModelMixin.update``."""
        return self.update(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        """Partial update via ``UpdateModelMixin.partial_update``."""
        return self.partial_update(request, *args, **kwargs)
class ListCreateAPIView(ListModelMixin, mixins.CreateModelMixin, JsonApiView):
    """Concrete view that lists a queryset (GET) or creates an instance (POST)."""

    def get(self, request, *args, **kwargs):
        """Delegate listing to ``ListModelMixin.list``."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Delegate creation to ``CreateModelMixin.create``."""
        return self.create(request, *args, **kwargs)
class CollectionWithBulkAPIView(ListModelMixin,
                                mixins.CreateModelMixin,
                                UpdateModelMixin,
                                DestroyModelMixin,
                                JsonApiView):
    """
    Supports JSON API logic for resource collections. Specifically:

    - GET (retrieve) of a list of resources
    - single and bulk POST (create)
    - bulk PATCH (update)
    - bulk DELETE (destroy)

    It **does not support PUT**: no ``put`` handler is defined, so DRF
    responds with a 405 "Method 'PUT' not allowed" which the default
    **drf_ember** exception handler converts to a 400 to avoid unwanted
    granularity/information leakage about the API.
    """
    renderer_classes = (JsonApiRenderer, JsonApiBulkRenderer)

    # Content types advertising bulk-extension support.  The ``ext="bulk"``
    # parameter is only present when the request actually used the bulk
    # extension (i.e. submitted a list payload).
    _BULK_CONTENT_TYPE = 'application/vnd.api+json; ext="bulk"; supported-ext="bulk"'
    _SINGLE_CONTENT_TYPE = 'application/vnd.api+json; supported-ext="bulk"'

    def get(self, request, *args, **kwargs):
        """
        Lists a queryset for the view.
        """
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """
        Creates an object or a list of objects.

        A single object is created unless a list of objects is passed.

        Returns:
            Response: Includes data, headers and a content type that
            indicates whether or not the bulk extension was utilized.
        """
        is_bulk = isinstance(request.data, list)
        if is_bulk:
            serializer = self.get_serializer(data=request.data, many=True)
            content_type = self._BULK_CONTENT_TYPE
        else:
            serializer = self.get_serializer(data=request.data)
            content_type = self._SINGLE_CONTENT_TYPE
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data,
                        status=status.HTTP_201_CREATED,
                        headers=headers,
                        content_type=content_type)

    def patch(self, request, *args, **kwargs):
        """
        Updates a list of objects.

        Raises:
            ParseError: if the request's data is not a list.

        Returns:
            Response: Includes data and a content type that indicates
            the bulk extension was utilized.
        """
        if not isinstance(request.data, list):
            raise ParseError
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(
            queryset,
            data=request.data,
            many=True,
            partial=True,
        )
        serializer.is_valid(raise_exception=True)
        update = self.perform_update(serializer)
        # Per the JSON API, only echo data back when the server made
        # changes beyond what the client submitted.
        response_data = update['data'] if update['has_server_update'] else None
        return Response(response_data,
                        status=status.HTTP_200_OK,
                        content_type=self._BULK_CONTENT_TYPE)

    def delete(self, request, *args, **kwargs):
        """
        Deletes a list of objects.

        Raises:
            ParseError: if the request's data is not a list.

        Returns:
            Response: 204 with a content type that indicates the bulk
            extension was utilized.
        """
        if not isinstance(request.data, list):
            raise ParseError
        queryset = self.filter_queryset(self.get_queryset())
        primary_key_field = getattr(self.serializer_class.Meta, 'bulk_lookup_field', 'id')
        destroyable = list()
        for candidate_object in request.data:
            try:
                identifier_query = {self.lookup_field: candidate_object[primary_key_field]}
                instance = queryset.get(**identifier_query)
                destroyable.append(instance)
            except ObjectDoesNotExist:
                # Deliberate: since user intent is deletion, a 204 response
                # reflects actual server state even when the object is
                # already gone.  Raise here instead if you disagree.
                pass
        for instance in destroyable:
            self.perform_destroy(instance)
        return Response(data=None,
                        status=status.HTTP_204_NO_CONTENT,
                        content_type=self._BULK_CONTENT_TYPE)
class RetrieveUpdateAPIView(RetrieveModelMixin, UpdateModelMixin, JsonApiView):
    """Concrete view that retrieves (GET) or updates (PUT/PATCH) an instance."""

    def get(self, request, *args, **kwargs):
        """Delegate retrieval to ``RetrieveModelMixin.retrieve``."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Full update via ``UpdateModelMixin.update``."""
        return self.update(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        """Partial update via ``UpdateModelMixin.partial_update``."""
        return self.partial_update(request, *args, **kwargs)
class RetrieveDestroyAPIView(RetrieveModelMixin, DestroyModelMixin, JsonApiView):
    """Concrete view that retrieves (GET) or deletes (DELETE) an instance."""

    def get(self, request, *args, **kwargs):
        """Delegate retrieval to ``RetrieveModelMixin.retrieve``."""
        return self.retrieve(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        """Delegate deletion to ``DestroyModelMixin.destroy``."""
        return self.destroy(request, *args, **kwargs)
class RetrieveUpdateDestroyAPIView(RetrieveModelMixin, UpdateModelMixin,
                                   DestroyModelMixin, JsonApiView):
    """Concrete view that retrieves, updates or deletes a model instance."""

    def get(self, request, *args, **kwargs):
        """Delegate retrieval to ``RetrieveModelMixin.retrieve``."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Full update via ``UpdateModelMixin.update``."""
        return self.update(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        """Partial update via ``UpdateModelMixin.partial_update``."""
        return self.partial_update(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        """Delegate deletion to ``DestroyModelMixin.destroy``."""
        return self.destroy(request, *args, **kwargs)
| symfonico/drf-ember | drf_ember/generics.py | Python | mit | 13,215 |
from tornado import web,ioloop
import os
from pymongo import *
class user_checkHandler(web.RequestHandler):
    """GET /register/user_check?username=<name>

    Replies with the literal string "avaliable" when the username is
    free, "notavaliable" when it is already taken.

    NOTE: the misspelled response strings are part of the wire protocol
    expected by the existing front-end client, so they are preserved
    byte-for-byte.
    """

    # One shared client (connection pool) for the whole process; the
    # original opened — and never closed — a new MongoClient per request.
    _mongo_client = None

    @classmethod
    def _login_collection(cls):
        """Lazily create the shared MongoClient and return livechat.login."""
        if cls._mongo_client is None:
            cls._mongo_client = MongoClient()
        return cls._mongo_client.livechat.login

    def get(self):
        # CORS headers so a browser front end served from another origin
        # can call this endpoint.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        # Robustness fix: the original did get_query_arguments(...)[0],
        # which raised IndexError (HTTP 500) when the parameter was
        # absent; respond 400 instead.  Stray debug print() removed.
        username = self.get_query_argument("username", None)
        if username is None:
            self.set_status(400)
            self.write("missing username")
            return
        query = self._login_collection().find_one(
            {"username": username}, {"username": 1, "_id": 0})
        if query is None:
            self.write("avaliable")
        else:
            self.write("notavaliable")
# Application stays at module level so existing imports of ``app`` keep
# working (e.g. for tests or WSGI-style embedding).
app = web.Application(
    [(r"/register/user_check", user_checkHandler)],
    static_path='../static',
    debug=True,
)


def main():
    """Bind port 7888 and run the IOLoop forever."""
    app.listen(7888)
    ioloop.IOLoop.current().start()


if __name__ == "__main__":
    # Idiom fix: guard the server start so importing this module no longer
    # binds the port and blocks on the event loop.
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.