code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/router/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from nose.plugins.attrib import attr
import time
import os
@attr('wlc')
class CTRexWLC_Test(CStlGeneral_Test):
    """This class tests TRex WLC related code"""

    def get_pkts(self):
        # Return (tx counter of port 0, rx counter of port 1).
        # Traffic is transmitted on port 0 and expected back on port 1.
        stats = self.client.get_stats()
        return stats[0]['opackets'], stats[1]['ipackets']

    def clear_stats(self):
        # Reset only the basic port counters; flow/latency/xstats are untouched.
        self.client.clear_stats(clear_flow_stats = False, clear_latency_stats = False, clear_xstats = False)

    def test_basic_wlc(self):
        ''' Joins 1 AP, 1 client, sends traffic '''
        ap_count = 1
        client_count = 1  # clients per AP
        self.start_trex()
        self.connect()
        if self.elk:
            self.update_elk_obj()
        self.client = CTRexScenario.stl_trex
        # Imported lazily so non-WLC runs do not need the WLC library.
        from trex_stl_lib.trex_stl_wlc import AP_Manager
        ap_manager = AP_Manager(self.client)
        # Base MAC/IP/UDP/radio values from which per-AP/client values are generated.
        base_data = CTRexScenario.config_dict['base']
        ap_manager.set_base_values(
            mac = base_data['ap_mac'],
            ip = base_data['ap_ip'],
            udp = base_data['ap_udp'],
            radio = base_data['ap_radio'],
            client_mac = base_data['client_mac'],
            client_ip = base_data['client_ip'],
            )
        self.client.acquire([0, 1], force = True)
        ap_manager.init(0)
        # Create the APs on port 0, and client_count clients on each AP.
        for _ in range(ap_count):
            ap_params = ap_manager._gen_ap_params()
            ap_manager.create_ap(0, *ap_params)
            for _ in range(client_count):
                client_params = ap_manager._gen_client_params()
                ap_manager.create_client(*client_params, ap_id = ap_params[0])
        try:
            start_time = time.time()
            print('Joining APs')
            ap_manager.join_aps()
            print('Took: %gs' % round(time.time() - start_time, 2))
            start_time = time.time()
            print('Associating clients')
            ap_manager.join_clients()
            print('Took: %gs' % round(time.time() - start_time, 2))
            print('Adding streams')
            profile = os.path.join(CTRexScenario.scripts_path, 'stl', 'imix_wlc.py')
            for client in ap_manager.clients:
                ap_manager.add_profile(client, profile, src = client.ip, dst = '48.0.0.1')
            duration = 10
            print('Starting traffic for %s sec' % duration)
            self.clear_stats()
            self.client.start(ports = [0], mult = '10kpps', force = True)
            time.sleep(duration)
            self.client.stop()
            opackets, ipackets = self.get_pkts()
            print('Sent: %s, received: %s' % (opackets, ipackets))
            # Sanity thresholds: expect at least half of the nominal 10kpps * duration
            # to be sent, and at least half of what was sent to be received.
            if opackets * 2 < duration * 10000:
                self.fail('Too few output packets')
            if ipackets * 2 < opackets:
                self.fail('Too few input packets')
        finally:
            # Always tear down the APs/clients, even if the test failed mid-way.
            ap_manager.close()
| kisel/trex-core | scripts/automation/regression/stateless_tests/stl_wlc_test.py | Python | apache-2.0 | 2,909 |
# $Id$
#
# Copyright (C) 2003-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
from __future__ import print_function
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.DataStructs.TopNContainer import TopNContainer
import bisect
class GenericPicker(object):
  """Abstract base class for pickers.

  Subclasses must implement MakePicks(); the pick list is then built
  lazily the first time the picker is indexed or measured.
  """
  _picks = None

  def _ensurePicks(self):
    # Build the pick list on first access.
    if self._picks is None:
      self.MakePicks()

  def MakePicks(self, force=0):
    raise NotImplementedError("GenericPicker is a virtual base class")

  def __len__(self):
    """Return the number of picks (computing them if needed)."""
    self._ensurePicks()
    return len(self._picks)

  def __getitem__(self, which):
    """Return pick number *which* (computing the picks if needed)."""
    self._ensurePicks()
    return self._picks[which]
class TopNOverallPicker(GenericPicker):
  """ A class for picking the top N overall best matches across a library

  Connect to a database and build molecules:

  >>> from rdkit import Chem
  >>> import os.path
  >>> from rdkit.Dbase.DbConnection import DbConnect
  >>> dbName = RDConfig.RDTestDatabase
  >>> conn = DbConnect(dbName,'simple_mols1')
  >>> [x.upper() for x in conn.GetColumnNames()]
  ['SMILES', 'ID']
  >>> mols = []
  >>> for smi,id in conn.GetData():
  ...   mol = Chem.MolFromSmiles(str(smi))
  ...   mol.SetProp('_Name',str(id))
  ...   mols.append(mol)
  >>> len(mols)
  12

  Calculate fingerprints:

  >>> probefps = []
  >>> for mol in mols:
  ...   fp = Chem.RDKFingerprint(mol)
  ...   fp._id = mol.GetProp('_Name')
  ...   probefps.append(fp)

  Start by finding the top matches for a single probe. This ether should pull
  other ethers from the db:

  >>> mol = Chem.MolFromSmiles('COC')
  >>> probeFp = Chem.RDKFingerprint(mol)
  >>> picker = TopNOverallPicker(numToPick=2,probeFps=[probeFp],dataSet=probefps)
  >>> len(picker)
  2
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0

  The results come back in order:

  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  Now find the top matches for 2 probes. We'll get one ether and one acid:

  >>> fps = []
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('COC')))
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('CC(=O)O')))
  >>> picker = TopNOverallPicker(numToPick=3,probeFps=fps,dataSet=probefps)
  >>> len(picker)
  3
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'acid-1'
  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0
  >>> fp,score = picker[2]
  >>> id = fp._id
  >>> str(id)
  'acid-2'

  """

  def __init__(self, numToPick=10, probeFps=None, dataSet=None,
               simMetric=DataStructs.TanimotoSimilarity):
    """
    dataSet should be a sequence of BitVectors
    """
    self.numToPick = numToPick
    self.probes = probeFps
    self.data = dataSet
    self.simMetric = simMetric
    self._picks = None

  def MakePicks(self, force=0):
    """Fill self._picks with the numToPick (fp, score) pairs whose best
    similarity to any probe is highest, ordered best-first."""
    if self._picks is not None and not force:
      return
    # TopNContainer keeps only the numToPick highest-scoring entries.
    picks = TopNContainer(self.numToPick)
    for fp in self.data:
      origFp = fp
      bestScore = -1.0
      # Score this fp against every probe and keep the best similarity.
      for probeFp in self.probes:
        score = DataStructs.FingerprintSimilarity(origFp, probeFp, self.simMetric)
        bestScore = max(score, bestScore)
      picks.Insert(bestScore, fp)
    self._picks = []
    # The container iterates lowest-score first; reverse for best-first order.
    for score, pt in picks:
      self._picks.append((pt, score))
    self._picks.reverse()
class SpreadPicker(GenericPicker):
  """ A class for picking the best matches across a library

  Connect to a database:

  >>> from rdkit import Chem
  >>> import os.path
  >>> from rdkit.Dbase.DbConnection import DbConnect
  >>> dbName = RDConfig.RDTestDatabase
  >>> conn = DbConnect(dbName,'simple_mols1')
  >>> [x.upper() for x in conn.GetColumnNames()]
  ['SMILES', 'ID']
  >>> mols = []
  >>> for smi,id in conn.GetData():
  ...   mol = Chem.MolFromSmiles(str(smi))
  ...   mol.SetProp('_Name',str(id))
  ...   mols.append(mol)
  >>> len(mols)
  12

  Calculate fingerprints:

  >>> probefps = []
  >>> for mol in mols:
  ...   fp = Chem.RDKFingerprint(mol)
  ...   fp._id = mol.GetProp('_Name')
  ...   probefps.append(fp)

  Start by finding the top matches for a single probe. This ether should pull
  other ethers from the db:

  >>> mol = Chem.MolFromSmiles('COC')
  >>> probeFp = Chem.RDKFingerprint(mol)
  >>> picker = SpreadPicker(numToPick=2,probeFps=[probeFp],dataSet=probefps)
  >>> len(picker)
  2
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0

  The results come back in order:

  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  Now find the top matches for 2 probes. We'll get one ether and one acid:

  >>> fps = []
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('COC')))
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('CC(=O)O')))
  >>> picker = SpreadPicker(numToPick=3,probeFps=fps,dataSet=probefps)
  >>> len(picker)
  3
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0
  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'acid-1'
  >>> score
  1.0
  >>> fp,score = picker[2]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  """

  def __init__(self, numToPick=10, probeFps=None, dataSet=None,
               simMetric=DataStructs.TanimotoSimilarity, expectPickles=True, onlyNames=False):
    """
    dataSet should be a sequence of BitVectors or, if expectPickles
    is False, a set of strings that can be converted to bit vectors
    """
    self.numToPick = numToPick
    self.probes = probeFps
    self.data = dataSet
    self.simMetric = simMetric
    self.expectPickles = expectPickles
    self.onlyNames = onlyNames
    self._picks = None

  def MakePicks(self, force=0, silent=True):
    """Build self._picks by round-robining over the probes: each probe in
    turn contributes its best not-yet-taken match, until numToPick picks
    have been made (or the candidates run out)."""
    if self._picks is not None and not force:
      return

    # start by getting the NxM score matrix
    # (N=num probes, M=num fps); each row is kept sorted ascending by score.
    nProbes = len(self.probes)
    scores = [None] * nProbes
    for i in range(nProbes):
      scores[i] = []
    j = 0
    fps = []
    for origFp in self.data:
      for i in range(nProbes):
        score = DataStructs.FingerprintSimilarity(self.probes[i], origFp, self.simMetric)
        bisect.insort(scores[i], (score, j))
        if len(scores[i]) > 2 * self.numToPick:
          # BUG FIX: the original code did `del scores[self.numToPick:]`,
          # which truncated the list of probe *rows* instead of trimming the
          # row itself (corrupting the matrix whenever numToPick < nProbes,
          # and doing nothing otherwise).  Trim the row, keeping twice
          # numToPick entries: a row may need up to numToPick picks of its
          # own plus up to numToPick-1 skips of entries taken by other rows.
          del scores[i][:-2 * self.numToPick]
      if self.onlyNames and hasattr(origFp, '_fieldsFromDb'):
        fps.append(origFp._fieldsFromDb[0])
      else:
        fps.append(origFp)
      j += 1
      if not silent and not j % 1000:
        print('scored %d fps' % j)

    # now go probe by probe and select the current top entry until we are finished:
    nPicked = 0
    self._picks = []
    taken = [0] * len(fps)
    while nPicked < self.numToPick:
      rowIdx = nPicked % len(scores)
      row = scores[rowIdx]
      # discard entries already picked from another probe's row
      # (rows are ascending, so the current best is at the end):
      while row and taken[row[-1][1]]:
        row.pop()
      if not row:
        # This probe's candidates are exhausted.  The original code would
        # loop forever (or raise IndexError) in this situation; stop
        # gracefully with fewer than numToPick picks instead.
        break
      score, idx = row.pop()
      fp = fps[idx]
      self._picks.append((fp, score))
      taken[idx] = 1
      nPicked += 1
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
  # Run the doctests and use the number of failures as the exit status.
  import sys
  failed, tried = _test()
  sys.exit(failed)
| jandom/rdkit | rdkit/SimDivFilters/SimilarityPickers.py | Python | bsd-3-clause | 7,303 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
from numpy import float32
class age_times_population_per_acre(Variable):
    """ age of head * zone population per acre"""
    # Names of the source attributes this interaction variable combines.
    z_population_per_acre = "population_per_acre"
    hh_age = "age_of_head"

    def dependencies(self):
        """Attributes that must be computed before this variable."""
        deps = ["psrc.zone." + self.z_population_per_acre]
        deps.append("household.age_of_head")
        return deps

    def compute(self, dataset_pool):
        """Element-wise product of household age_of_head and zone density."""
        dataset = self.get_dataset()
        return dataset.multiply("age_of_head", self.z_population_per_acre)
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
    # Fully-qualified name of the variable under test.
    variable_name = "psrc.household_x_zone.age_times_population_per_acre"

    def test_my_inputs(self):
        """Check the interaction variable on a small hand-built dataset."""
        # NOTE(review): the local names are misleading -- 'housing_cost' holds
        # zone population densities and 'income' holds household ages of head.
        housing_cost = array([333.0, 500.55, 1000.26, 459])
        income = array([1, 20, 500])
        values = VariableTestToolbox().compute_variable(self.variable_name, \
            {"zone":{ \
                "population_per_acre":housing_cost}, \
             "household":{ \
                "age_of_head":income}}, \
            dataset = "household_x_zone")
        # Expected result: outer product age_of_head (rows) x density (columns).
        should_be = array([[333.0, 500.55, 1000.26, 459.0], [6660.0, 10011., 20005.2, 9180],
                           [166500., 250275., 500130., 229500.]])
        self.assertEqual(ma.allclose(values, should_be, rtol=1e-3), \
            True, msg = "Error in " + self.variable_name)
if __name__=='__main__':
    # Run the unit tests when executed directly.
    opus_unittest.main()
# Package entry point: re-export the OpenStep property-list decoder so callers
# can simply do `from openstep_parser import OpenStepDecoder`.
from .openstep_parser import OpenStepDecoder

__version__ = '1.5.3'
| kronenthaler/openstep-parser | openstep_parser/__init__.py | Python | bsd-2-clause | 68 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from keystone import exception
@six.add_metaclass(abc.ABCMeta)
class FederationDriverBase(object):
    """Abstract interface for federation backend drivers.

    Defines the CRUD operations a backend must implement for identity
    providers (IdPs), federation protocols, attribute mappings, and
    service providers (SPs).  Every method raises NotImplemented here;
    concrete drivers override them.
    """

    @abc.abstractmethod
    def create_idp(self, idp_id, idp):
        """Create an identity provider.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param idp: idp object
        :type idp: dict
        :returns: idp ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_idp(self, idp_id):
        """Delete an identity provider.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_idp(self, idp_id):
        """Get an identity provider by ID.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :returns: idp ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_idp_from_remote_id(self, remote_id):
        """Get an identity provider by remote ID.

        :param remote_id: ID of remote IdP
        :type remote_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :returns: idp ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_idp(self, idp_id, idp):
        """Update an identity provider by ID.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param idp: idp object
        :type idp: dict
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :returns: idp ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_protocol(self, idp_id, protocol_id, protocol):
        """Add an IdP-Protocol configuration.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param protocol_id: ID of protocol object
        :type protocol_id: string
        :param protocol: protocol object
        :type protocol: dict
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :returns: protocol ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_protocol(self, idp_id, protocol_id, protocol):
        """Change an IdP-Protocol configuration.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param protocol_id: ID of protocol object
        :type protocol_id: string
        :param protocol: protocol object
        :type protocol: dict
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :raises keystone.exception.FederatedProtocolNotFound: If the federated
            protocol cannot be found.
        :returns: protocol ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_protocol(self, idp_id, protocol_id):
        """Get an IdP-Protocol configuration.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param protocol_id: ID of protocol object
        :type protocol_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :raises keystone.exception.FederatedProtocolNotFound: If the federated
            protocol cannot be found.
        :returns: protocol ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_protocols(self, idp_id):
        """List an IdP's supported protocols.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :returns: list of protocol ref
        :rtype: list of dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_protocol(self, idp_id, protocol_id):
        """Delete an IdP-Protocol configuration.

        :param idp_id: ID of IdP object
        :type idp_id: string
        :param protocol_id: ID of protocol object
        :type protocol_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :raises keystone.exception.FederatedProtocolNotFound: If the federated
            protocol cannot be found.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_mapping(self, mapping_id, mapping):
        """Create a mapping.

        :param mapping_id: ID of mapping object
        :type mapping_id: string
        :param mapping: mapping ref with mapping name
        :type mapping: dict
        :returns: mapping ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_mapping(self, mapping_id):
        """Delete a mapping.

        :param mapping_id: id of mapping to delete
        :type mapping_id: string
        :returns: None

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_mapping(self, mapping_id, mapping_ref):
        """Update a mapping.

        :param mapping_id: id of mapping to update
        :type mapping_id: string
        :param mapping_ref: new mapping ref
        :type mapping_ref: dict
        :returns: mapping ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_mappings(self):
        """List all mappings.

        :returns: list of mapping refs
        :rtype: list of dicts

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_mapping(self, mapping_id):
        """Get a mapping, returns the mapping based on mapping_id.

        :param mapping_id: id of mapping to get
        :type mapping_id: string
        :raises keystone.exception.MappingNotFound: If the mapping cannot
            be found.
        :returns: mapping ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
        """Get mapping based on idp_id and protocol_id.

        :param idp_id: id of the identity provider
        :type idp_id: string
        :param protocol_id: id of the protocol
        :type protocol_id: string
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.
        :raises keystone.exception.FederatedProtocolNotFound: If the federated
            protocol cannot be found.
        :returns: mapping ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_sp(self, sp_id, sp):
        """Create a service provider.

        :param sp_id: id of the service provider
        :type sp_id: string
        :param sp: service provider object
        :type sp: dict
        :returns: service provider ref
        :rtype: dict

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_sp(self, sp_id):
        """Delete a service provider.

        :param sp_id: id of the service provider
        :type sp_id: string
        :raises keystone.exception.ServiceProviderNotFound: If the service
            provider doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_sp(self, sp_id):
        """Get a service provider.

        :param sp_id: id of the service provider
        :type sp_id: string
        :returns: service provider ref
        :rtype: dict
        :raises keystone.exception.ServiceProviderNotFound: If the service
            provider doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def update_sp(self, sp_id, sp):
        """Update a service provider.

        :param sp_id: id of the service provider
        :type sp_id: string
        :param sp: service provider object
        :type sp: dict
        :returns: service provider ref
        :rtype: dict
        :raises keystone.exception.ServiceProviderNotFound: If the service
            provider doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_enabled_service_providers(self):
        """List enabled service providers for Service Catalog.

        Service Provider in a catalog contains three attributes: ``id``,
        ``auth_url``, ``sp_url``, where:

        - id is a unique, user defined identifier for service provider object
        - auth_url is an authentication URL of remote Keystone
        - sp_url a URL accessible at the remote service provider where SAML
          assertion is transmitted.

        :returns: list of dictionaries with enabled service providers
        :rtype: list of dicts

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_idps(self, hints):
        """List all identity providers.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: list of idp refs
        :rtype: list of dicts
        :raises keystone.exception.IdentityProviderNotFound: If the IdP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_sps(self, hints):
        """List all service providers.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: List of service provider ref objects
        :rtype: list of dicts
        :raises keystone.exception.ServiceProviderNotFound: If the SP
            doesn't exist.

        """
        raise exception.NotImplemented()  # pragma: no cover
| ilay09/keystone | keystone/federation/backends/base.py | Python | apache-2.0 | 11,167 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
retile.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : mederic dot ribreux at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingOutputFolder,
QgsProcessingParameterFileDestination,
QgsProcessingParameterFolderDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
class retile(GdalAlgorithm):
    """Processing wrapper around GDAL's gdal_retile.py utility.

    Builds the command line that retiles a set of input rasters into a
    directory of tiles (optionally with pyramid levels and a georeferencing
    CSV file).
    """

    INPUT = 'INPUT'
    TILE_SIZE_X = 'TILE_SIZE_X'
    TILE_SIZE_Y = 'TILE_SIZE_Y'
    OVERLAP = 'OVERLAP'
    LEVELS = 'LEVELS'
    SOURCE_CRS = 'SOURCE_CRS'
    FORMAT = 'FORMAT'
    RESAMPLING = 'RESAMPLING'
    OPTIONS = 'OPTIONS'
    DATA_TYPE = 'DATA_TYPE'
    DELIMITER = 'DELIMITER'
    ONLY_PYRAMIDS = 'ONLY_PYRAMIDS'
    DIR_FOR_ROW = 'DIR_FOR_ROW'
    OUTPUT = 'OUTPUT'
    OUTPUT_CSV = 'OUTPUT_CSV'

    TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # (display name, gdal_retile resampling keyword) pairs; the index into
        # this tuple is what the RESAMPLING enum parameter stores.
        self.methods = ((self.tr('Nearest neighbour'), 'near'),
                        (self.tr('Bilinear'), 'bilinear'),
                        (self.tr('Cubic'), 'cubic'),
                        (self.tr('Cubic spline'), 'cubicspline'),
                        (self.tr('Lanczos windowed sinc'), 'lanczos'),)
        self.addParameter(QgsProcessingParameterMultipleLayers(self.INPUT,
                                                               self.tr('Input files'),
                                                               QgsProcessing.TypeRaster))
        self.addParameter(QgsProcessingParameterNumber(self.TILE_SIZE_X,
                                                       self.tr('Tile width'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       defaultValue=256))
        self.addParameter(QgsProcessingParameterNumber(self.TILE_SIZE_Y,
                                                       self.tr('Tile height'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       defaultValue=256))
        self.addParameter(QgsProcessingParameterNumber(self.OVERLAP,
                                                       self.tr('Overlap in pixels between consecutive tiles'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       defaultValue=0))
        self.addParameter(QgsProcessingParameterNumber(self.LEVELS,
                                                       self.tr('Number of pyramids levels to build'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       defaultValue=1))

        # Advanced parameters.
        params = []
        params.append(QgsProcessingParameterCrs(self.SOURCE_CRS,
                                                self.tr('Source coordinate reference system'),
                                                optional=True))
        params.append(QgsProcessingParameterEnum(self.RESAMPLING,
                                                 self.tr('Resampling method'),
                                                 options=[i[0] for i in self.methods],
                                                 allowMultiple=False,
                                                 defaultValue=0))
        params.append(QgsProcessingParameterString(self.DELIMITER,
                                                   self.tr('Column delimiter used in the CSV file'),
                                                   defaultValue=';',
                                                   optional=True))
        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation options'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        params.append(options_param)
        params.append(QgsProcessingParameterEnum(self.DATA_TYPE,
                                                 self.tr('Output data type'),
                                                 self.TYPES,
                                                 allowMultiple=False,
                                                 defaultValue=5))
        params.append(QgsProcessingParameterBoolean(self.ONLY_PYRAMIDS,
                                                    self.tr('Build only the pyramids'),
                                                    defaultValue=False))
        params.append(QgsProcessingParameterBoolean(self.DIR_FOR_ROW,
                                                    self.tr('Use separate directory for each tiles row'),
                                                    defaultValue=False))
        for param in params:
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.addParameter(param)

        self.addParameter(QgsProcessingParameterFolderDestination(self.OUTPUT,
                                                                  self.tr('Output directory')))
        output_csv_param = QgsProcessingParameterFileDestination(self.OUTPUT_CSV,
                                                                 self.tr('CSV file containing the tile(s) georeferencing information'),
                                                                 'CSV files (*.csv)',
                                                                 optional=True)
        output_csv_param.setCreateByDefault(False)
        self.addParameter(output_csv_param)

    def name(self):
        return 'retile'

    def displayName(self):
        return self.tr('Retile')

    def group(self):
        return self.tr('Raster miscellaneous')

    def groupId(self):
        return 'rastermiscellaneous'

    def commandName(self):
        return "gdal_retile"

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Translate the parameter values into a gdal_retile.py command line."""
        arguments = []
        arguments.append('-ps')
        arguments.append(str(self.parameterAsInt(parameters, self.TILE_SIZE_X, context)))
        arguments.append(str(self.parameterAsInt(parameters, self.TILE_SIZE_Y, context)))
        arguments.append('-overlap')
        arguments.append(str(self.parameterAsInt(parameters, self.OVERLAP, context)))
        arguments.append('-levels')
        arguments.append(str(self.parameterAsInt(parameters, self.LEVELS, context)))

        crs = self.parameterAsCrs(parameters, self.SOURCE_CRS, context)
        if crs.isValid():
            arguments.append('-s_srs')
            arguments.append(GdalUtils.gdal_crs_string(crs))

        arguments.append('-r')
        arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1])
        arguments.append('-ot')
        arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])

        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            arguments.extend(GdalUtils.parseCreationOptions(options))

        # BUG FIX: these two flags were swapped -- DIR_FOR_ROW emitted
        # '-pyramidOnly' and ONLY_PYRAMIDS emitted '-useDirForEachRow'.
        # Per the gdal_retile.py documentation, '-pyramidOnly' builds only
        # the pyramids and '-useDirForEachRow' puts each tile row in its
        # own directory.
        if self.parameterAsBool(parameters, self.ONLY_PYRAMIDS, context):
            arguments.append('-pyramidOnly')
        if self.parameterAsBool(parameters, self.DIR_FOR_ROW, context):
            arguments.append('-useDirForEachRow')

        csvFile = self.parameterAsFileOutput(parameters, self.OUTPUT_CSV, context)
        if csvFile:
            arguments.append('-csv')
            arguments.append(csvFile)
            delimiter = self.parameterAsString(parameters, self.DELIMITER, context)
            if delimiter:
                arguments.append('-csvDelim')
                arguments.append('"{}"'.format(delimiter))

        arguments.append('-targetDir')
        arguments.append(self.parameterAsString(parameters, self.OUTPUT, context))

        layers = [l.source() for l in self.parameterAsLayerList(parameters, self.INPUT, context)]
        arguments.extend(layers)

        # gdal_retile is a Python script, so it needs an explicit interpreter
        # wrapper on Windows (.bat) and the .py entry point elsewhere.
        commands = []
        if isWindows():
            commands = ['cmd.exe', '/C ', self.commandName() + '.bat',
                        GdalUtils.escapeAndJoin(arguments)]
        else:
            commands = [self.commandName() + '.py',
                        GdalUtils.escapeAndJoin(arguments)]
        return commands
| dwadler/QGIS | python/plugins/processing/algs/gdal/retile.py | Python | gpl-2.0 | 10,480 |
# get all files from links given from a txt file
import subprocess
import os
# edit this to your own directory i guess
pdir = "/home/nitrous/projects/python/scripts/scripts/"
def get_from_txt(location):
    """Download every file linked from the text file at *location*.

    The text file is fetched with curl; each non-empty line is treated as a
    URL.  Files whose basenames already exist in the local ``get``
    subdirectory are skipped; new files are downloaded with wget into the
    working directory and then moved into ``get``.
    """
    get_dir = os.path.join(pdir, "get")
    # BUG FIX: create the target directory *before* listing it.  The original
    # code called os.listdir() first and only created the directory after the
    # downloads, so a fresh checkout crashed with FileNotFoundError.
    if not os.path.exists(get_dir):
        os.mkdir(get_dir)
    files = os.listdir(get_dir)
    filenames = []
    print("Retrieving textfile..")
    # lets fetch the textfile first
    linklist = subprocess.check_output(["curl", "-G", location]).decode("utf-8").splitlines()
    print("Textfile retrived")
    # get the files and give them an approprate filename
    for link in linklist:
        filename = link.rsplit("/", 1)[-1]  # basename of the URL
        if filename not in files and len(link) != 0:
            filenames.append(filename)
            print("Downloading " + filename)
            subprocess.call(["wget", link])
        else:
            print("Skipped " + filename)
    # move all the newly downloaded files into the get subdirectory
    for filename in filenames:
        os.rename(os.path.join(pdir, filename), os.path.join(get_dir, filename))
if __name__ == "__main__":
location = input("URL for txt -> ")
get_from_txt(location)
| NitrousPG/scripts | scripts/get-txt.py | Python | gpl-3.0 | 1,171 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "raxui.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| rmyers/dtrove-ui | manage.py | Python | mit | 248 |
#! /usr/bin/python
# Copyright (c) 2015 Dave McCoy ([email protected])
#
# This file is part of Nysa.
# (http://wiki.cospandesign.com/index.php?title=Nysa.org)
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from array import array as Array
import collections
from sdb import SDBInfo
from sdb import SDBWarning
from sdb import SDBError
import string
DESCRIPTION = "SDB Component Parser and Generator"
__author__ = "[email protected] (Dave McCoy)"
DESCRIPTION_DICT = collections.OrderedDict()
DESCRIPTION_DICT["SDB_VENDOR_ID"] = "Set the Vendor ID (Hexidecimal 64-bit Number)"
DESCRIPTION_DICT["SDB_DEVICE_ID"] = "Set the Device ID (Hexidecimal 32-bit Number)"
DESCRIPTION_DICT["SDB_CORE_VERSION"] = "Set the Version of the core"
DESCRIPTION_DICT["SDB_NAME"] = "Set the name of the core"
DESCRIPTION_DICT["SDB_ABI_CLASS"] = "Class of the Device"
DESCRIPTION_DICT["SDB_ABI_VERSION_MAJOR"] = "Set ABI Major Version"
DESCRIPTION_DICT["SDB_ABI_VERSION_MINOR"] = "Set ABI Minor Version"
DESCRIPTION_DICT["SDB_ABI_ENDIAN"] = "Set Endian (Big, Little)"
DESCRIPTION_DICT["SDB_ABI_DEVICE_WIDTH"] = "Set Device Width (8 16 32 64)"
DESCRIPTION_DICT["SDB_MODULE_URL"] = "Set the Module URL"
DESCRIPTION_DICT["SDB_DATE"] = "Set the date of module YYYY/MM/DD or date of image build for synth"
DESCRIPTION_DICT["SDB_EXECUTABLE"] = "Device is executable"
DESCRIPTION_DICT["SDB_WRITEABLE"] = "Device is writeable"
DESCRIPTION_DICT["SDB_READABLE"] = "Device is readable"
DESCRIPTION_DICT["SDB_NRECS"] = "Number of Records"
DESCRIPTION_DICT["SDB_VERSION"] = "Version of SDB"
DESCRIPTION_DICT["SDB_BUS_TYPE"] = "Bus Type: Wishbone, Storage"
DESCRIPTION_DICT["SDB_BRIDGE_CHILD_ADDR"] = "Bridge Child SDB Address Location Relative to SDB (Hex)"
DESCRIPTION_DICT["SDB_START_ADDRESS"] = "Start Address (Hex)"
DESCRIPTION_DICT["SDB_LAST_ADDRESS"] = "Last Address (Hex)"
DESCRIPTION_DICT["SDB_SIZE"] = "Number of Registers"
DESCRIPTION_DICT["SDB_SYNTH_NAME"] = "Name of Synthesis Vendor (16 chars)"
DESCRIPTION_DICT["SDB_SYNTH_COMMIT_ID"] = "Commit ID of build Hex"
DESCRIPTION_DICT["SDB_SYNTH_TOOL_NAME"] = "Name of Synthesis Tool (16 chars)"
DESCRIPTION_DICT["SDB_SYNTH_TOOL_VER"] = "Version of Synthesis Tool"
DESCRIPTION_DICT["SDB_SYNTH_USER_NAME"] = "User name of the person who built image"
DESCRIPTION_DICT["SDB_RECORD_TYPE"] = "Type of device"
SDB_ROM_RECORD_LENGTH = 64
SDB_INTERCONNECT_MAGIC = 0x5344422D
SDB_BUS_TYPE_WISHBONE = 0x00
SDB_BUS_TYPE_STORAGE = 0x01
SDB_RECORD_TYPE_INTERCONNECT = 0x00
SDB_RECORD_TYPE_DEVICE = 0x01
SDB_RECORD_TYPE_BRIDGE = 0x02
SDB_RECORD_TYPE_INTEGRATION = 0x80
SDB_RECORD_TYPE_REPO_URL = 0x81
SDB_RECORD_TYPE_SYNTHESIS = 0x82
SDB_RECORD_TYPE_EMPTY = 0xFF
def create_device_record( name = None,
                          vendor_id = None,
                          device_id = None,
                          core_version = None,
                          abi_class = None,
                          version_major = None,
                          version_minor = None,
                          size = None):
    """Build an SDB device record, filling in only the fields supplied.

    Integer-valued fields are stored as hex strings; omitted (None)
    fields are left at the component's defaults.
    """
    sdb = SDBComponent()
    sdb.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_DEVICE
    if name is not None:
        sdb.d["SDB_NAME"] = name
    # All of these are integers that the record stores as hex strings.
    hex_fields = (("SDB_VENDOR_ID", vendor_id),
                  ("SDB_DEVICE_ID", device_id),
                  ("SDB_ABI_CLASS", abi_class),
                  ("SDB_ABI_VERSION_MAJOR", version_major),
                  ("SDB_ABI_VERSION_MINOR", version_minor))
    for key, value in hex_fields:
        if value is not None:
            sdb.d[key] = hex(value)
    if core_version is not None:
        sdb.d["SDB_CORE_VERSION"] = core_version
    if size is not None:
        sdb.set_size(size)
    return sdb
def create_interconnect_record(name=None,
                               vendor_id=None,
                               device_id=None,
                               start_address=None,
                               size=None):
    """Build an SDBComponent describing an interconnect record.

    Only arguments that are not None are written; IDs are stored as hex
    strings, and address/size go through the setters so the last address
    is kept consistent.
    """
    record = SDBComponent()
    record.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_INTERCONNECT
    if name is not None:
        record.d["SDB_NAME"] = name
    for key, value in (("SDB_VENDOR_ID", vendor_id),
                       ("SDB_DEVICE_ID", device_id)):
        if value is not None:
            record.d[key] = hex(value)
    if start_address is not None:
        record.set_start_address(start_address)
    if size is not None:
        record.set_size(size)
    return record
def create_bridge_record(name=None,
                         vendor_id=None,
                         device_id=None,
                         start_address=None,
                         size=None):
    """Build an SDBComponent describing a bridge record.

    Identical field handling to create_interconnect_record(), but tagged
    with the bridge record type.
    """
    record = SDBComponent()
    record.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_BRIDGE
    if name is not None:
        record.d["SDB_NAME"] = name
    for key, value in (("SDB_VENDOR_ID", vendor_id),
                       ("SDB_DEVICE_ID", device_id)):
        if value is not None:
            record.d[key] = hex(value)
    if start_address is not None:
        record.set_start_address(start_address)
    if size is not None:
        record.set_size(size)
    return record
def create_integration_record(information,
                              vendor_id=None,
                              device_id=None):
    """Build an SDBComponent for an integration record.

    *information* becomes the record name; the optional IDs are stored as
    hex strings when given.
    """
    record = SDBComponent()
    record.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_INTEGRATION
    record.d["SDB_NAME"] = information
    for key, value in (("SDB_VENDOR_ID", vendor_id),
                       ("SDB_DEVICE_ID", device_id)):
        if value is not None:
            record.d[key] = hex(value)
    return record
def create_synthesis_record(synthesis_name,
                            commit_id,
                            tool_name,
                            tool_version,
                            user_name):
    """Build an SDBComponent for a synthesis record.

    An integer commit id is stored as its hex-string form; the tool version
    is always coerced to a string.
    """
    record = SDBComponent()
    record.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_SYNTHESIS
    record.d["SDB_SYNTH_NAME"] = synthesis_name
    record.d["SDB_SYNTH_COMMIT_ID"] = (
        hex(commit_id) if isinstance(commit_id, int) else commit_id)
    record.d["SDB_SYNTH_TOOL_NAME"] = tool_name
    record.d["SDB_SYNTH_TOOL_VER"] = (
        tool_version if isinstance(tool_version, str) else str(tool_version))
    record.d["SDB_SYNTH_USER_NAME"] = user_name
    return record
def create_repo_url_record(url):
    """Create an SDBComponent representing a repository-URL record."""
    sdb = SDBComponent()
    sdb.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_REPO_URL
    # NOTE(review): "SDB_RECORD_REPO_URL" is not listed in
    # SDBComponent.ELEMENTS, so this value is invisible to
    # generated_ordered_dict()/generate_slave_template_buffer(), and
    # get_url() reads "SDB_MODULE_URL" instead -- confirm the intended key.
    sdb.d["SDB_RECORD_REPO_URL"] = url
    return sdb
class SDBComponent (object):
    """In-memory representation of a single SDB (Self Describing Bus) record.

    All values are kept as strings in the ``d`` dictionary, keyed by the
    names in ``ELEMENTS`` so they can be parsed from / written to the
    textual template form; the ``*_as_int`` accessors convert on demand.
    A freshly constructed component defaults to an interconnect record on
    a 32-bit, big-endian Wishbone bus.
    """

    SDB_VERSION = 1

    # Every key a record may carry; ``d`` is pre-populated with all of them.
    ELEMENTS = [
        "SDB_VENDOR_ID",
        "SDB_DEVICE_ID",
        "SDB_CORE_VERSION",
        "SDB_NAME",
        "SDB_ABI_CLASS",
        "SDB_ABI_VERSION_MAJOR",
        "SDB_ABI_VERSION_MINOR",
        "SDB_ABI_ENDIAN",
        "SDB_ABI_DEVICE_WIDTH",
        "SDB_MODULE_URL",
        "SDB_DATE",
        "SDB_EXECUTABLE",
        "SDB_READABLE",
        "SDB_WRITEABLE",
        "SDB_NRECS",
        "SDB_BUS_TYPE",
        "SDB_VERSION",
        "SDB_BRIDGE_CHILD_ADDR",
        "SDB_SIZE",
        "SDB_START_ADDRESS",
        "SDB_LAST_ADDRESS",
        "SDB_SYNTH_NAME",
        "SDB_SYNTH_COMMIT_ID",
        "SDB_SYNTH_TOOL_NAME",
        "SDB_SYNTH_TOOL_VER",
        "SDB_SYNTH_USER_NAME",
        "SDB_RECORD_TYPE"
    ]

    def __init__(self):
        # Every element starts as the empty string, then known fields get
        # sensible defaults.
        self.d = {}
        for e in self.ELEMENTS:
            self.d[e] = ""
        self.d["SDB_SIZE"] = hex(0)
        self.d["SDB_START_ADDRESS"] = "0x00"
        self.d["SDB_LAST_ADDRESS"] = "0x00"
        self.d["SDB_NRECS"] = "0"
        self.d["SDB_BUS_TYPE"] = "Wishbone"
        self.d["SDB_VERSION"] = str(self.SDB_VERSION)
        self.d["SDB_CORE_VERSION"] = "0.0.01"
        self.d["SDB_BRIDGE_CHILD_ADDR"] = "0"
        self.d["SDB_RECORD_TYPE"] = SDB_RECORD_TYPE_INTERCONNECT
        self.d["SDB_ABI_CLASS"] = hex(0x00)
        self.d["SDB_ABI_VERSION_MAJOR"] = hex(0x00)
        self.d["SDB_ABI_VERSION_MINOR"] = hex(0x00)
        self.d["SDB_VENDOR_ID"] = hex(0x8000000000000000)
        self.d["SDB_DEVICE_ID"] = hex(0x00000000)
        self.d["SDB_ABI_ENDIAN"] = "BIG"
        self.d["SDB_ABI_DEVICE_WIDTH"] = "32"
        self.d["SDB_EXECUTABLE"] = "True"
        self.d["SDB_WRITEABLE"] = "True"
        self.d["SDB_READABLE"] = "True"
        # Record today's date as YYYY/MM/DD (parsed back by get_date_as_int).
        now = datetime.now()
        self.d["SDB_DATE"] = "%04d/%02d/%02d" % (now.year, now.month, now.day)

    # Verilog Module -> SDB Device
    def parse_buffer(self, in_buffer):
        """Populate ``d`` from a textual buffer of "ELEMENT:value" lines.

        Matching is case insensitive.  NOTE(review): a substring test is
        used and the loop does not stop at the first hit, so a single line
        could in principle update more than one element -- confirm intended.
        """
        for line in in_buffer.splitlines():
            for e in self.ELEMENTS:
                if e.lower() in line.lower():
                    value = line.partition(":")[2]
                    self.d[e] = value.strip()

    # Verilog Module Template
    def generate_slave_template_buffer(self):
        """Return a template buffer: the description comment followed by
        "ELEMENT:value" for every element, blank-line separated."""
        buf = ""
        for e in self.ELEMENTS:
            buf += "%s\n" % DESCRIPTION_DICT[e]
            buf += "%s:%s\n\n" % (e, self.d[e])
        return buf

    # SDB -> Ordered Dict
    def generated_ordered_dict(self):
        """Return the elements as an OrderedDict in ELEMENTS order.

        (sic: method name kept for backward compatibility.)
        """
        od = collections.OrderedDict()
        for e in self.ELEMENTS:
            od[e] = self.d[e]
        return od

    # Utility Functions
    def set_bridge_address(self, addr):
        # Stored in hex-string form, like the other address fields.
        self.d["SDB_BRIDGE_CHILD_ADDR"] = hex(addr)

    def get_bridge_address_as_int(self):
        return int(self.d["SDB_BRIDGE_CHILD_ADDR"], 16)

    def set_start_address(self, addr):
        """Set the start address (int) and recompute the last address."""
        # int() replaces the Python-2-only long(); on Python 2, int()
        # auto-promotes to long when needed, so behavior is unchanged.
        addr = int(addr)
        self.d["SDB_START_ADDRESS"] = hex(addr)
        self.d["SDB_LAST_ADDRESS"] = hex(addr + self.get_size_as_int())

    def get_start_address_as_int(self):
        return int(self.d["SDB_START_ADDRESS"], 16)

    def set_size(self, size):
        """Set the size (number of registers) and recompute last address."""
        self.d["SDB_SIZE"] = hex(size)
        start_addr = self.get_start_address_as_int()
        self.d["SDB_LAST_ADDRESS"] = hex(start_addr + self.get_size_as_int())

    def set_number_of_records(self, nrecs):
        # Stored as a plain decimal string.
        self.d["SDB_NRECS"] = str(nrecs)

    def get_number_of_records_as_int(self):
        # BUGFIX: set_number_of_records() stores a decimal string, but this
        # previously parsed base 16 (so "10" was misread as 16).
        return int(self.d["SDB_NRECS"], 10)

    def is_writeable(self):
        return (self.d["SDB_WRITEABLE"].lower() == "true")

    def enable_read(self, enable):
        self.d["SDB_READABLE"] = str(enable)

    def is_readable(self):
        return (self.d["SDB_READABLE"].lower() == "true")

    def set_name(self, name):
        self.d["SDB_NAME"] = name

    def get_name(self):
        return self.d["SDB_NAME"]

    # Integer representation of values
    def get_size_as_int(self):
        # Base 0 honors the "0x" prefix written by set_size()/__init__.
        return int(self.d["SDB_SIZE"], 0)

    def get_end_address_as_int(self):
        return int(self.d["SDB_LAST_ADDRESS"], 16)

    def get_vendor_id_as_int(self):
        return int(self.d["SDB_VENDOR_ID"], 16)

    def get_device_id_as_int(self):
        return int(self.d["SDB_DEVICE_ID"], 16)

    def get_abi_class_as_int(self):
        return int(self.d["SDB_ABI_CLASS"], 16)

    def get_abi_version_major_as_int(self):
        return int(self.d["SDB_ABI_VERSION_MAJOR"], 16)

    def get_abi_version_minor_as_int(self):
        return int(self.d["SDB_ABI_VERSION_MINOR"], 16)

    def get_endian_as_int(self):
        """Return 1 for little endian, 0 otherwise (big is the default)."""
        # str.upper() replaces string.upper(), which no longer exists on
        # Python 3; same behavior on Python 2.
        if self.d["SDB_ABI_ENDIAN"].upper() == "LITTLE":
            return 1
        return 0

    def get_bus_width_as_int(self):
        return int(self.d["SDB_ABI_DEVICE_WIDTH"])

    def _translate_buf_width_to_rom_version(self):
        """Map the device width in bits to its ROM encoding (8->0 .. 64->3)."""
        value = int(self.d["SDB_ABI_DEVICE_WIDTH"])
        encoding = {8: 0, 16: 1, 32: 2, 64: 3}
        if value not in encoding:
            raise SDBError("Unknown Device Width: %d" % value)
        return encoding[value]

    def get_core_version_as_int(self):
        """Pack "major.minor.patch" into an int:
        (major & 0xF) << 24 | (minor & 0xF) << 16 | (patch & 0xFF)."""
        parts = self.d["SDB_CORE_VERSION"].split(".")
        version = 0
        version |= (0x0F & int(parts[0], 0)) << 24
        version |= (0x0F & int(parts[1], 0)) << 16
        version |= (0xFF & int(parts[2], 0))
        return version

    def get_date_as_int(self):
        """Return (year, month, day) parsed from the YYYY/MM/DD date string."""
        date = self.d["SDB_DATE"]
        year = int(date[0:4])
        month = int(date[5:7])
        # BUGFIX: the day occupies characters 8-9; the old [9:10] slice
        # dropped the first digit of two-digit days.
        day = int(date[8:10])
        return year, month, day

    def enable_executable(self, enable):
        self.d["SDB_EXECUTABLE"] = str(enable)

    def is_executable(self):
        return (self.d["SDB_EXECUTABLE"].lower() == "true")

    def enable_write(self, enable):
        self.d["SDB_WRITEABLE"] = str(enable)

    def get_bus_type_as_int(self):
        """Return 0 for Wishbone, 1 for Storage; raise SDBError otherwise."""
        bus_type = self.d["SDB_BUS_TYPE"].lower()
        if bus_type == "wishbone":
            return 0
        if bus_type == "storage":
            return 1
        raise SDBError("Unknown Bus Type: %s" % self.d["SDB_BUS_TYPE"])

    def get_url(self):
        return self.d["SDB_MODULE_URL"]

    def get_synthesis_name(self):
        return self.d["SDB_SYNTH_NAME"]

    def get_synthesis_commit_id(self):
        return self.d["SDB_SYNTH_COMMIT_ID"]

    def get_synthesis_tool_name(self):
        return self.d["SDB_SYNTH_TOOL_NAME"]

    def get_synthesis_tool_version(self):
        return self.d["SDB_SYNTH_TOOL_VER"]

    def get_synthesis_user_name(self):
        # BUGFIX: the element key is "SDB_SYNTH_USER_NAME"; the previous
        # "SDB_SYNTH_USERNAME" spelling always raised KeyError.
        return self.d["SDB_SYNTH_USER_NAME"]

    def get_version_as_int(self):
        return int(self.d["SDB_VERSION"])

    def set_bridge_child_addr(self, addr):
        self.d["SDB_BRIDGE_CHILD_ADDR"] = hex(addr)

    def get_bridge_child_addr_as_int(self):
        return int(self.d["SDB_BRIDGE_CHILD_ADDR"], 16)

    # Record-type predicates
    def is_device(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_DEVICE

    def is_interconnect(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_INTERCONNECT

    def is_bridge(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_BRIDGE

    def is_integration_record(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_INTEGRATION

    def is_url_record(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_REPO_URL

    def is_synthesis_record(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_SYNTHESIS

    def is_empty_record(self):
        return self.d["SDB_RECORD_TYPE"] == SDB_RECORD_TYPE_EMPTY

    def get_module_record_type(self):
        return self.d["SDB_RECORD_TYPE"]

    def __str__(self):
        buf = ""
        buf += "SDB Component\n"
        buf += "\tName: %s\n" % self.d["SDB_NAME"]
        buf += "\tType: %s\n" % self.d["SDB_RECORD_TYPE"]
        buf += "\tSize: 0x%08X\n" % self.get_size_as_int()
        # Only interconnects carry a meaningful record count.
        if self.is_interconnect():
            buf += "\tNum Devices: %d\n" % self.get_number_of_records_as_int()
        buf += "\tStart Address: 0x%010X\n" % self.get_start_address_as_int()
        buf += "\tEnd Address: 0x%010X\n" % self.get_end_address_as_int()
        return buf
def is_valid_bus_type(bus_type):
    """Return True when *bus_type* is a recognized (lower-case) bus name."""
    return bus_type in ("wishbone", "storage")
def convert_rom_to_32bit_buffer(rom):
    """Format *rom* (a byte sequence whose length is a multiple of 4) as
    newline-separated 32-bit hex words, 8 hex digits per line."""
    words = []
    for offset in range(0, len(rom), 4):
        quad = rom[offset:offset + 4]
        words.append("%02X%02X%02X%02X" % (quad[0], quad[1], quad[2], quad[3]))
    return "\n".join(words)
| CospanDesign/nysa | nysa/cbuilder/sdb_component.py | Python | mit | 16,800 |
#!/usr/bin/env python
import logging
l = logging.getLogger("claripy.frontends.constrained_frontend")
from ..frontend import Frontend
class ConstrainedFrontend(Frontend):  # pylint:disable=abstract-method
    """Frontend that tracks a flat list of constraints and the set of
    variable names those constraints mention.  Actual solving is left to
    subclasses (see the NotImplementedError stubs at the bottom).
    """

    def __init__(self):
        Frontend.__init__(self)
        self.constraints = []    # every constraint added so far
        self.variables = set()   # union of the variables of those constraints
        self._finalized = False  # set once the state must no longer mutate

    def _blank_copy(self, c):
        """Reset *c* to an empty constraint state (blank_copy helper)."""
        super(ConstrainedFrontend, self)._blank_copy(c)
        c.constraints = []
        c.variables = set()
        c._finalized = False

    def _copy(self, c):
        """Copy this frontend's constraint state into *c* (branch helper)."""
        super(ConstrainedFrontend, self)._copy(c)
        c.constraints = list(self.constraints)
        c.variables = set(self.variables)

        # finalize both: after a copy, neither side may be mutated directly.
        self.finalize()
        c.finalize()

    #
    # Storable support
    #

    def _ana_getstate(self):
        # Finalize before serializing so the stored state is stable.
        self.finalize()
        return self.constraints, self.variables, Frontend._ana_getstate(self)

    def _ana_setstate(self, s):
        self.constraints, self.variables, base_state = s
        Frontend._ana_setstate(self, base_state)
        # A deserialized frontend is considered finalized.
        self._finalized = True

    #
    # Constraint management
    #

    def independent_constraints(self):
        """Split self.constraints into groups that share no variables.

        NOTE(review): _split_constraints is not defined in this class --
        presumably provided by Frontend or a subclass; confirm.
        """
        return self._split_constraints(self.constraints)

    #
    # Serialization and such.
    #

    def downsize(self):
        Frontend.downsize(self)

    #
    # Merging and splitting
    #

    def finalize(self):
        """Mark this frontend as finalized (no further direct mutation)."""
        self._finalized = True

    def merge(self, others, merge_conditions, common_ancestor=None):
        """Merge this frontend with *others* under *merge_conditions*.

        Without a common ancestor, each frontend's constraints are AND-ed
        with its own condition and the results OR-ed together; with one,
        only the disjunction of the conditions is added to a branch of the
        ancestor.  Returns (False, merged).  NOTE(review): the False flag
        presumably means the merge is not "exact" -- confirm against the
        Frontend.merge contract.
        """
        if common_ancestor is None:
            merged = self.blank_copy()
            options = []
            for s,v in zip([self] + others, merge_conditions):
                options.append(And(*([v] + s.constraints)))
            merged.add([Or(*options)])
        else:
            merged = common_ancestor.branch()
            merged.add([Or(*merge_conditions)])

        return False, merged

    def combine(self, others):
        """Return a new frontend holding this and all *others*' constraints."""
        combined = self.blank_copy()

        combined.add(self.constraints) # pylint:disable=E1101
        for o in others:
            combined.add(o.constraints)
        return combined

    def split(self):
        """Return one new frontend per independent constraint group."""
        results = []
        l.debug("Splitting!")
        for variables, c_list in self.independent_constraints():
            l.debug("... got %d constraints with %d variables", len(c_list), len(variables))
            s = self.blank_copy()
            s.add(c_list)
            results.append(s)
        return results

    #
    # Light functionality
    #

    def add(self, constraints):
        """Append *constraints*, track their variables, and return them."""
        self.constraints += constraints
        for c in constraints:
            self.variables.update(c.variables)
        return constraints

    def simplify(self):
        """Simplify the tracked constraints in place and return them.

        Constraints carrying a SimplificationAvoidanceAnnotation are kept
        untouched; the rest are AND-ed together, simplified, and split back
        into top-level conjuncts.
        """
        to_simplify = [ c for c in self.constraints if not any(
            isinstance(a, SimplificationAvoidanceAnnotation) for a in c.annotations
        ) ]
        no_simplify = [ c for c in self.constraints if any(
            isinstance(a, SimplificationAvoidanceAnnotation) for a in c.annotations
        ) ]
        if len(to_simplify) == 0:
            return self.constraints
        simplified = simplify(And(*to_simplify)).split(['And']) #pylint:disable=no-member
        self.constraints = no_simplify + simplified
        return self.constraints

    #
    # Stuff that should be implemented by subclasses
    #

    def satisfiable(self, extra_constraints=(), exact=None):
        raise NotImplementedError("satisfiable() is not implemented")

    def batch_eval(self, e, n, extra_constraints=(), exact=None):
        raise NotImplementedError("batch_eval() is not implemented")

    def eval(self, e, n, extra_constraints=(), exact=None):
        raise NotImplementedError("eval() is not implemented")

    def min(self, e, extra_constraints=(), exact=None):
        raise NotImplementedError("min() is not implemented")

    def max(self, e, extra_constraints=(), exact=None):
        raise NotImplementedError("max() is not implemented")

    def solution(self, e, v, extra_constraints=(), exact=None):
        raise NotImplementedError("solution() is not implemented")

    def is_true(self, e, extra_constraints=(), exact=None):
        raise NotImplementedError("is_true() is not implemented")

    def is_false(self, e, extra_constraints=(), exact=None):
        raise NotImplementedError("is_false() is not implemented")
from ..ast.base import simplify
from ..ast.bool import And, Or
from ..annotation import SimplificationAvoidanceAnnotation
| Ruide/angr-dev | claripy/claripy/frontends/constrained_frontend.py | Python | bsd-2-clause | 4,485 |
#!/usr/bin/env python
from bs4 import BeautifulSoup
import sys
import urllib
import urllib2
import json
from nltk.corpus import stopwords
import urlparse
import re
# Crawl state shared by the functions below.
package = ""      # start URL recorded by CrawlPage (not read elsewhere here)
visited = set()   # URLs already fetched
unvisited = set() # URLs discovered but not yet fetched
count = 0         # number of pages crawled so far
# English stopwords used by cleanSoup's bag-of-words normalization.
cachedStopWords = stopwords.words("english")
#Entry point
def CrawlPage (url):
    """Record *url* as the crawl's start/package URL.

    NOTE(review): nothing in this file reads ``package`` afterwards --
    presumably an unfinished entry point; confirm before removing.
    """
    global package
    package = url
#Cleans the soup
def cleanSoup(soup):
    """Reduce *soup* to a normalized bag-of-words string.

    Extracts the visible text, collapses it to a single line, lower-cases
    it, and removes English stopwords (module-level ``cachedStopWords``).

    BUGFIX: the cleaned text used to be computed and then thrown away (the
    function returned the untouched input soup); return the cleaned string
    instead.
    """
    text = soup.get_text()
    single_line = " ".join(line.strip() for line in text.split("\n"))
    lowered = single_line.lower()
    meaningful = ' '.join(
        [word for word in lowered.split() if word not in cachedStopWords])
    return meaningful
def fix_url(url):
    """Return *url* without a single trailing slash.

    BUGFIX: guard against the empty string, which previously raised
    IndexError on ``url[-1]``.
    """
    if url and url[-1] == '/':
        url = url[:-1]
    return url
def is_valid_webpage(url):
    """Heuristic filter: reject URLs whose last three characters match a
    known binary/document extension."""
    skipped = ('pdf', 'png', 'jpg', 'zip', 'tar', 'exe', 'ppt', 'doc')
    return url[-3:] not in skipped
def is_valid_url(url):
    """Return the re.Match (truthy) when *url* points into cse.unl.edu,
    otherwise None."""
    pattern = re.compile(r'http://(?:www\.)?(cse.unl.edu/*)')
    return pattern.search(url)
#Creates the soup
def createSoup(url):
global visited
global unvisited
global count
fixed_url = fix_url(url)
if is_valid_url(fixed_url) and not (fixed_url in visited):
#print "Now crawling: ", fixed_url
print fixed_url
try:
response = urllib.urlopen(fixed_url)
except urllib.error.HTTPError as e:
print( "HTTPError with: ", fixed_url, "\t", e )
return None
#print "BOW for: %s",url
the_page = response.read()
soup = BeautifulSoup(the_page)
cleaned_soup = cleanSoup(soup)
visited.add(fixed_url)
file_name = fixed_url + ".txt"
#print file_name
s = file_name[7:]
s = s.replace("/","_")
#print s
#f = open(s, 'w+')
#f.write(str(cleaned_soup))
#f.close()
#print "Crawled: ", fixed_url
count = count + 1
#print count
for link in soup.find_all('a'):
href = link.get('href')
if href:
#print href
fixed_href = fix_url(href)
if is_valid_url(fixed_href) and is_valid_webpage(fixed_href) and not (fixed_href in visited) and not (fixed_href in unvisited):
#print "Added to unvisited: ",fixed_href
unvisited.add(fixed_href)
createSoup(fixed_href)
#for unvisitedpage in unvisited:
#if unvisitedpage not in visited:
#createSoup(unvisitedpage)
if __name__ == '__main__':
    # Command line: unlCrawler.py <start-url>
    args = sys.argv[1:]
    if not args:
        print >> sys.stderr, 'SYNTAX: unlCrawler.py [webpage]'
        sys.exit(-1)
    # Begin the recursive crawl at the supplied URL.
    createSoup(args[0])
| srikanthmaturu/webpage-classifier | crawler/unlCralwer.py | Python | gpl-2.0 | 2,600 |
from maxixe.api import *
@step(r"there is a feature$")
def step_one(*args, **kwargs):
    # Intentionally empty: matching the step text is all that is required.
    pass

@step(r"I attempt to import that feature like a Python file")
def step_two(*args, **kwargs):
    # Intentionally empty: matching the step text is all that is required.
    pass

@step(r"I can access it as a module")
def step_three(*args, **kwargs):
    # Intentionally empty: matching the step text is all that is required.
    pass
| tswicegood/maxixe | maxixe/features/steps.py | Python | apache-2.0 | 283 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import ssl
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import auth
import fix_encoding
import gclient_utils
import git_cl
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
__version__ = '1.2.1'


# Repository-level settings; populated lazily from the checked-in
# codereview.settings file by GetCodeReviewSetting().
CODEREVIEW_SETTINGS = {
  # To make gcl send reviews to a server, check in a file named
  # "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
  # project's base directory and add the following line to codereview.settings:
  # CODE_REVIEW_SERVER: codereview.yourserver.org
}

# globals that store the root of the current repository and the directory where
# we store information about changelists.  (Cached by GetRepositoryRoot().)
REPOSITORY_ROOT = ""

# Replacement for project name.
SWITCH_TO_GIT = "SWITCH_TO_GIT_ALREADY"

# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
    'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)

# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"

# Global cache of files cached in GetCacheDir(), keyed by filename.
FILES_CACHE = {}

# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
def CheckHomeForFile(filename):
  """Checks the users home dir for the existence of the given file.  Returns
  the path to the file if it's there, or None if it is not.
  """
  # On Windows/cygwin the home directory may live in USERPROFILE instead.
  home_vars = ['HOME']
  if sys.platform in ('cygwin', 'win32'):
    home_vars.append('USERPROFILE')
  for home_var in home_vars:
    home = os.getenv(home_var)
    # Idiom fix: None comparison uses "is not", not "!=".
    if home is not None:
      full_path = os.path.join(home, filename)
      if os.path.exists(full_path):
        return full_path
  return None
def UnknownFiles():
  """Runs svn status and returns unknown files."""
  # A '?' in the first status column marks a file svn does not track.
  return [
      item[1] for item in SVN.CaptureStatus([], GetRepositoryRoot())
      if item[0][0] == '?'
  ]
def GetRepositoryRoot():
  """Returns the top level directory of the current repository.

  The directory is returned as an absolute path.

  Raises gclient_utils.Error when the current directory is not inside a
  checkout.
  """
  global REPOSITORY_ROOT
  # Cached in a module global: the root cannot change within one run.
  if not REPOSITORY_ROOT:
    REPOSITORY_ROOT = SVN.GetCheckoutRoot(os.getcwd())
    if not REPOSITORY_ROOT:
      raise gclient_utils.Error("gcl run outside of repository")
  return REPOSITORY_ROOT
def GetInfoDir():
  """Returns the directory where gcl info files are stored."""
  return os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')


def GetChangesDir():
  """Returns the directory where gcl change files are stored."""
  return os.path.join(GetInfoDir(), 'changes')


def GetCacheDir():
  """Returns the directory where gcl caches files fetched from the repo."""
  return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
  """Retrieves a file from the repository and caches it in GetCacheDir() for
  max_age seconds.

  use_root: If False, look up the arborescence for the first match, otherwise go
  directory to the root repository.

  Note: The cache will be inconsistent if the same file is retrieved with both
  use_root=True and use_root=False. Don't be stupid.
  """
  if filename not in FILES_CACHE:
    # Don't try to look up twice.
    FILES_CACHE[filename] = None
    # First we check if we have a cached version.
    try:
      cached_file = os.path.join(GetCacheDir(), filename)
    except (gclient_utils.Error, subprocess2.CalledProcessError):
      return None
    if (not os.path.exists(cached_file) or
        (time.time() - os.stat(cached_file).st_mtime) > max_age):
      dir_info = SVN.CaptureLocalInfo([], '.')
      repo_root = dir_info['Repository Root']
      if use_root:
        url_path = repo_root
      else:
        url_path = dir_info['URL']
      while True:
        # Look in the repository at the current level for the file.
        # Retry up to 5 times on transient svn failures.
        for _ in range(5):
          content = None
          try:
            # Take advantage of the fact that svn won't output to stderr in case
            # of success but will do in case of failure so don't mind putting
            # stderr into content_array.
            content_array = []
            svn_path = url_path + '/' + filename
            args = ['svn', 'cat', svn_path]
            if sys.platform != 'darwin':
              # MacOSX 10.5.2 has a bug with svn 1.4.4 that will trigger the
              # 'Can\'t get username or password' and can be fixed easily.
              # The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
              # I don't have time to fix their broken stuff.
              args.append('--non-interactive')
            gclient_utils.CheckCallAndFilter(
                args, cwd='.', filter_fn=content_array.append)
            # Exit the loop if the file was found. Override content.
            content = '\n'.join(content_array)
            break
          except (gclient_utils.Error, subprocess2.CalledProcessError):
            if content_array[0].startswith(
                'svn: Can\'t get username or password'):
              ErrorExit('Your svn credentials expired. Please run svn update '
                        'to fix the cached credentials')
            if content_array[0].startswith('svn: Can\'t get password'):
              ErrorExit('If are using a Mac and svn --version shows 1.4.x, '
                        'please hack gcl.py to remove --non-interactive usage, it\'s'
                        'a bug on your installed copy')
            if (content_array[0].startswith('svn: File not found:') or
                content_array[0].endswith('path not found')):
              break
            # Otherwise, fall through to trying again.
        if content:
          break
        if url_path == repo_root:
          # Reached the root. Abandoning search.
          break
        # Go up one level to try again.
        url_path = os.path.dirname(url_path)
      if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
        # Write a cached version even if there isn't a file, so we don't try to
        # fetch it each time. codereview.settings must always be present so do
        # not cache negative.
        gclient_utils.FileWrite(cached_file, content or '')
    else:
      content = gclient_utils.FileRead(cached_file, 'r')
    # Keep the content cached in memory.
    FILES_CACHE[filename] = content
  return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
  """Returns a value for the given key for this repository."""
  # The sentinel key records that the settings were already loaded once.
  sentinel = '__just_initialized'
  if sentinel not in CODEREVIEW_SETTINGS:
    content = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
    if content:
      CODEREVIEW_SETTINGS.update(
          gclient_utils.ParseCodereviewSettingsContent(content))
    CODEREVIEW_SETTINGS.setdefault(sentinel, None)
  return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
  """Prints a warning to stderr without terminating."""
  print >> sys.stderr, msg


def ErrorExit(msg):
  """Prints an error to stderr and exits with a non-zero status."""
  print >> sys.stderr, msg
  sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
  """Executes a command and returns the output and the return code."""
  p = subprocess2.Popen(
      command,
      cwd=GetRepositoryRoot(),
      stdout=subprocess2.PIPE,
      stderr=subprocess2.STDOUT,
      universal_newlines=True)
  if print_output:
    output_array = []
    # Stream line by line so the user sees progress as it happens.
    while True:
      line = p.stdout.readline()
      if not line:
        break
      # (print_output is always true on this path; the inner check is
      # redundant but harmless.)
      if print_output:
        print line.strip('\n')
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  p.stdout.close()
  return output, p.returncode
def RunShell(command, print_output=False):
  """Executes a command and returns its output (return code discarded)."""
  return RunShellWithReturnCode(command, print_output)[0]
def FilterFlag(args, flag):
  """Returns True if the flag is present in args list.

  The flag is removed from args if present.
  """
  try:
    args.remove(flag)
  except ValueError:
    return False
  return True
class ChangeInfo(object):
"""Holds information about a changelist.
name: change name.
issue: the Rietveld issue number or 0 if it hasn't been uploaded yet.
patchset: the Rietveld latest patchset number or 0.
description: the description.
files: a list of 2 tuple containing (status, filename) of changed files,
with paths being relative to the top repository directory.
local_root: Local root directory
rietveld: rietveld server for this change
"""
# Kept for unit test support. This is for the old format, it's deprecated.
SEPARATOR = "\n-----\n"
  def __init__(self, name, issue, patchset, description, files, local_root,
               rietveld_url, needs_upload):
    """Builds a ChangeInfo; see the class docstring for field meanings."""
    # Defer the description processing to git_cl.ChangeDescription.
    self._desc = git_cl.ChangeDescription(description)
    self.name = name
    self.issue = int(issue)
    self.patchset = int(patchset)
    self._files = files or []
    self.patch = None
    self._local_root = local_root
    self.needs_upload = needs_upload
    # Fall back to the repository-level setting when no URL was given;
    # always upgrade to https.
    self.rietveld = gclient_utils.UpgradeToHttps(
        rietveld_url or GetCodeReviewSetting('CODE_REVIEW_SERVER'))
    self._rpc_server = None

  @property
  def description(self):
    return self._desc.description

  def force_description(self, new_description):
    """Replaces the description and marks the change as needing upload."""
    self._desc = git_cl.ChangeDescription(new_description)
    self.needs_upload = True

  def append_footer(self, line):
    self._desc.append_footer(line)

  def get_reviewers(self):
    return self._desc.get_reviewers()

  def update_reviewers(self, reviewers):
    self._desc.update_reviewers(reviewers)

  def NeedsUpload(self):
    return self.needs_upload

  def GetFileNames(self):
    """Returns the list of file names included in this change."""
    return [f[1] for f in self._files]

  def GetFiles(self):
    """Returns the list of files included in this change with their status."""
    return self._files

  def GetLocalRoot(self):
    """Returns the local repository checkout root directory."""
    return self._local_root

  def Exists(self):
    """Returns True if this change already exists (i.e., is not new)."""
    return (self.issue or self.description or self._files)

  def _NonDeletedFileList(self):
    """Returns a list of files in this change, not including deleted files."""
    return [f[1] for f in self.GetFiles()
            if not f[0].startswith("D")]

  def _AddedFileList(self):
    """Returns a list of files added in this change."""
    return [f[1] for f in self.GetFiles() if f[0].startswith("A")]
  def Save(self):
    """Writes the changelist information to disk."""
    data = json.dumps({
          'issue': self.issue,
          'patchset': self.patchset,
          'needs_upload': self.NeedsUpload(),
          'files': self.GetFiles(),
          'description': self.description,
          'rietveld': self.rietveld,
        }, sort_keys=True, indent=2)
    gclient_utils.FileWrite(GetChangelistInfoFile(self.name), data)

  def Delete(self):
    """Removes the changelist information from disk."""
    os.remove(GetChangelistInfoFile(self.name))

  def RpcServer(self):
    """Lazily creates and caches the Rietveld RPC client."""
    if not self._rpc_server:
      if not self.rietveld:
        ErrorExit(CODEREVIEW_SETTINGS_FILE_NOT_FOUND)
      # TODO(vadimsh): glc.py should be deleted soon. Do not bother much about
      # authentication options and always use defaults.
      self._rpc_server = rietveld.CachingRietveld(
          self.rietveld, auth.make_auth_config())
    return self._rpc_server

  def CloseIssue(self):
    """Closes the Rietveld issue for this changelist."""
    # Newer versions of Rietveld require us to pass an XSRF token to POST, so
    # we fetch it from the server.
    xsrf_token = self.SendToRietveld(
        '/xsrf_token',
        extra_headers={'X-Requesting-XSRF-Token': '1'})

    # You cannot close an issue with a GET.
    # We pass an empty string for the data so it is a POST rather than a GET.
    data = [("description", self.description),
            ("xsrf_token", xsrf_token)]
    ctype, body = upload.EncodeMultipartFormData(data, [])
    self.SendToRietveld('/%d/close' % self.issue, payload=body,
        content_type=ctype)

  def UpdateRietveldDescription(self):
    """Sets the description for an issue on Rietveld."""
    data = [("description", self.description),]
    ctype, body = upload.EncodeMultipartFormData(data, [])
    self.SendToRietveld('/%d/description' % self.issue, payload=body,
        content_type=ctype)
    # The server copy is now canonical.
    self.needs_upload = False

  def GetIssueDescription(self):
    """Returns the issue description from Rietveld."""
    # Normalize CRLF line endings coming back from the server.
    return self.SendToRietveld('/%d/description' % self.issue).replace('\r\n',
                                                                       '\n')

  def UpdateDescriptionFromIssue(self):
    """Updates self.description with the issue description from Rietveld."""
    self._desc = git_cl.ChangeDescription(self.GetIssueDescription())

  def GetApprovingReviewers(self):
    """Returns the issue reviewers list from Rietveld."""
    return git_cl.get_approving_reviewers(
        self.RpcServer().get_issue_properties(self.issue, True))

  def AddComment(self, comment):
    """Adds a comment for an issue on Rietveld.
    As a side effect, this will email everyone associated with the issue."""
    return self.RpcServer().add_comment(self.issue, comment)
  def PrimeLint(self):
    """Do background work on Rietveld to lint the file so that the results are
    ready when the issue is viewed."""
    if self.issue and self.patchset:
      try:
        self.SendToRietveld('/lint/issue%s_%s' % (self.issue, self.patchset),
                            timeout=60)
      except ssl.SSLError as e:
        # It takes more than 60 seconds to lint some CLs. Silently ignore
        # the expected timeout.
        if e.message != 'The read operation timed out':
          raise

  def SendToRietveld(self, request_path, timeout=None, **kwargs):
    """Send a POST/GET to Rietveld.  Returns the response body."""
    try:
      return self.RpcServer().Send(request_path, timeout=timeout, **kwargs)
    except urllib2.URLError:
      # With an explicit timeout a None return lets the caller treat the
      # failure as best-effort; without one the failure is fatal.
      if timeout is None:
        ErrorExit('Error accessing url %s' % request_path)
      else:
        return None
  def MissingTests(self):
    """Returns True if the change looks like it needs unit tests but has none.

    A change needs unit tests if it contains any new source files or methods.
    """
    SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
    # Ignore third_party entirely.
    files = [f for f in self._NonDeletedFileList()
             if f.find("third_party") == -1]
    added_files = [f for f in self._AddedFileList()
                   if f.find("third_party") == -1]

    # If the change is entirely in third_party, we're done.
    if len(files) == 0:
      return False

    # Any new or modified test files?
    # A test file's name ends with "test.*" or "tests.*".
    test_files = [test for test in files
                  if os.path.splitext(test)[0].rstrip("s").endswith("test")]
    if len(test_files) > 0:
      return False

    # Any new source files?
    source_files = [item for item in added_files
                    if os.path.splitext(item)[1] in SOURCE_SUFFIXES]
    if len(source_files) > 0:
      return True

    # Do the long test, checking the files for new methods.
    return self._HasNewMethod()

  def _HasNewMethod(self):
    """Returns True if the changeset contains any new functions, or if a
    function signature has been changed.

    A function is identified by starting flush left, containing a "(" before
    the next flush-left line, and either ending with "{" before the next
    flush-left line or being followed by an unindented "{".

    Currently this returns True for new methods, new static functions, and
    methods or functions whose signatures have been changed.

    Inline methods added to header files won't be detected by this. That's
    acceptable for purposes of determining if a unit test is needed, since
    inline methods should be trivial.
    """
    # To check for methods added to source or header files, we need the diffs.
    # We'll generate them all, since there aren't likely to be many files
    # apart from source and headers; besides, we'll want them all if we're
    # uploading anyway.
    if self.patch is None:
      self.patch = GenerateDiff(self.GetFileNames())

    definition = ""
    for line in self.patch.splitlines():
      # Only lines added by the patch are of interest.
      if not line.startswith("+"):
        continue
      line = line.strip("+").rstrip(" \t")
      # Skip empty lines, comments, and preprocessor directives.
      # TODO(pamg): Handle multiline comments if it turns out to be a problem.
      if line == "" or line.startswith("/") or line.startswith("#"):
        continue

      # A possible definition ending with "{" is complete, so check it.
      if definition.endswith("{"):
        if definition.find("(") != -1:
          return True
        definition = ""

      # A { or an indented line, when we're in a definition, continues it.
      if (definition != "" and
          (line == "{" or line.startswith(" ") or line.startswith("\t"))):
        definition += line

      # A flush-left line starts a new possible function definition.
      elif not line.startswith(" ") and not line.startswith("\t"):
        definition = line

    return False
  @staticmethod
  def Load(changename, local_root, fail_on_not_found, update_status):
    """Gets information about a changelist.

    Args:
      fail_on_not_found: if True, this function will quit the program if the
        changelist doesn't exist.
      update_status: if True, the svn status will be updated for all the files
        and unchanged files will be removed.

    Returns: a ChangeInfo object.
    """
    info_file = GetChangelistInfoFile(changename)
    if not os.path.exists(info_file):
      if fail_on_not_found:
        ErrorExit("Changelist " + changename + " not found.")
      return ChangeInfo(changename, 0, 0, '', None, local_root, None, False)
    content = gclient_utils.FileRead(info_file)
    save = False
    # Try the new (json) format first, then fall back to the legacy
    # SEPARATOR-delimited format (which triggers a re-save in new format).
    try:
      values = ChangeInfo._LoadNewFormat(content)
    except ValueError:
      try:
        values = ChangeInfo._LoadOldFormat(content)
        save = True
      except ValueError:
        ErrorExit(
            ('Changelist file %s is corrupt.\n'
             'Either run "gcl delete %s" or manually edit the file') % (
                info_file, changename))

    files = values['files']
    if update_status:
      # Iterate over a copy since stale entries are removed in-place.
      for item in files[:]:
        status_result = SVN.CaptureStatus(item[1], local_root)
        if not status_result or not status_result[0][0]:
          # File has been reverted.
          save = True
          files.remove(item)
          continue
        status = status_result[0][0]
        if status != item[0]:
          save = True
          files[files.index(item)] = (status, item[1])
    change_info = ChangeInfo(
        changename,
        values['issue'],
        values['patchset'],
        values['description'],
        files,
        local_root,
        values.get('rietveld'),
        values['needs_upload'])
    if save:
      change_info.Save()
    return change_info
@staticmethod
def _LoadOldFormat(content):
# The info files have the following format:
# issue_id, patchset\n (, patchset is optional)
# SEPARATOR\n
# filepath1\n
# filepath2\n
# .
# .
# filepathn\n
# SEPARATOR\n
# description
split_data = content.split(ChangeInfo.SEPARATOR, 2)
if len(split_data) != 3:
raise ValueError('Bad change format')
values = {
'issue': 0,
'patchset': 0,
'needs_upload': False,
'files': [],
}
items = split_data[0].split(', ')
if items[0]:
values['issue'] = int(items[0])
if len(items) > 1:
values['patchset'] = int(items[1])
if len(items) > 2:
values['needs_upload'] = (items[2] == "dirty")
for line in split_data[1].splitlines():
status = line[:7]
filename = line[7:]
values['files'].append((status, filename))
values['description'] = split_data[2]
return values
  @staticmethod
  def _LoadNewFormat(content):
    """Parses the json changelist info format; raises ValueError when the
    content is not valid json."""
    return json.loads(content)
  def __str__(self):
    """Debug dump: the class name followed by one line per data attribute."""
    out = ['%s:' % self.__class__.__name__]
    for k in dir(self):
      # Skip dunders.
      if k.startswith('__'):
        continue
      v = getattr(self, k)
      # Skip self-references and methods; only plain values are printed.
      if v is self or callable(getattr(self, k)):
        continue
      out.append(' %s: %r' % (k, v))
    return '\n'.join(out)
def GetChangelistInfoFile(changename):
  """Returns the file that stores information about a changelist."""
  # Reject empty names and any character outside [A-Za-z0-9_-] so the name
  # is always safe to use as a filename.
  is_valid = bool(changename) and not re.search(r'[^\w-]', changename)
  if not is_valid:
    ErrorExit("Invalid changelist name: " + changename)
  return os.path.join(GetChangesDir(), changename)
def LoadChangelistInfoForMultiple(changenames, local_root, fail_on_not_found,
                                  update_status):
  """Merges several changelists into one aggregate pseudo-changelist.

  This is mainly useful to concatenate many changes into one for a 'gcl try'.
  """
  aggregate = ChangeInfo(
      changenames, 0, 0, '', None, local_root, None, False)
  # Accumulate each named change's files onto the aggregate.
  # pylint: disable=W0212 -- touches the protected _files member.
  for name in changenames.split(','):
    loaded = ChangeInfo.Load(name, local_root, fail_on_not_found,
                             update_status)
    aggregate._files += loaded.GetFiles()
  return aggregate
def GetCLs():
  """Returns a list of all the changelists in this repository."""
  # The codereview settings cache lives in the same directory as the
  # changelist files; it is not a changelist, so filter it out.
  return [name for name in os.listdir(GetChangesDir())
          if name != CODEREVIEW_SETTINGS_FILE]
def GenerateChangeName():
  """Generate a random changelist name."""
  random.seed()
  taken = GetCLs()
  # Keep drawing letter-digit-letter-digit candidates until one is unused.
  while True:
    candidate = ''.join([
        random.choice(string.ascii_lowercase),
        random.choice(string.digits),
        random.choice(string.ascii_lowercase),
        random.choice(string.digits)])
    if candidate not in taken:
      return candidate
def GetModifiedFiles():
  """Returns a dict that maps from changelist name to (status,filename) tuples.
  Files not in a changelist have an empty changelist name. Filenames are in
  relation to the top level directory of the current repository. Note that
  only the current directory and subdirectories are scanned, in order to
  improve performance while still being flexible.
  """
  files = {}
  # Since the files are normalized to the root folder of the repository, figure
  # out what we need to add to the paths.
  dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
  # Get a list of all files in changelists.
  files_in_cl = {}
  for cl in GetCLs():
    change_info = ChangeInfo.Load(cl, GetRepositoryRoot(),
                                  fail_on_not_found=True, update_status=False)
    for status, filename in change_info.GetFiles():
      files_in_cl[filename] = change_info.name
  # Get all the modified files down the current directory.
  for line in SVN.CaptureStatus(None, os.getcwd()):
    status = line[0]
    filename = line[1]
    # Unversioned ('?') entries are not modified files.
    if status[0] == "?":
      continue
    if dir_prefix:
      filename = os.path.join(dir_prefix, filename)
    # Files belonging to no changelist are grouped under the empty name.
    change_list_name = ""
    if filename in files_in_cl:
      change_list_name = files_in_cl[filename]
    files.setdefault(change_list_name, []).append((status, filename))
  return files
def GetFilesNotInCL():
  """Returns a list of tuples (status,filename) that aren't in any changelists.
  See docstring of GetModifiedFiles for information about path of files and
  which directories are scanned.
  """
  # GetModifiedFiles keys files outside every changelist under the empty
  # string; default to an empty list when there are none.
  return GetModifiedFiles().get("", [])
def ListFiles(show_unknown_files):
  """Prints each changelist and its files; optionally also unknown files.

  Args:
    show_unknown_files: also print files svn does not know about ('?').
  Returns 0, for use as a command exit code.
  """
  files = GetModifiedFiles()
  cl_keys = files.keys()
  cl_keys.sort()
  for cl_name in cl_keys:
    # The empty key holds files outside every changelist; handled below.
    if not cl_name:
      continue
    note = ""
    change_info = ChangeInfo.Load(cl_name, GetRepositoryRoot(),
                                  fail_on_not_found=True, update_status=False)
    if len(change_info.GetFiles()) != len(files[cl_name]):
      note = " (Note: this changelist contains files outside this directory)"
    print "\n--- Changelist " + cl_name + note + ":"
    for filename in files[cl_name]:
      # Each entry is a (status, path) tuple; join prints them side by side.
      print "".join(filename)
  if show_unknown_files:
    unknown_files = UnknownFiles()
  if (files.get('') or (show_unknown_files and len(unknown_files))):
    print "\n--- Not in any changelist:"
    for item in files.get('', []):
      print "".join(item)
    if show_unknown_files:
      for filename in unknown_files:
        print "? %s" % filename
  return 0
def GenerateDiff(files):
  """Returns a unified svn diff of `files` relative to the repository root."""
  return SVN.GenerateDiff(
      files, GetRepositoryRoot(), full_move=False, revision=None)
def GetTreeStatus():
  """Returns the tree status, or "unset" when no STATUS url is configured."""
  status_url = GetCodeReviewSetting('STATUS')
  if not status_url:
    return "unset"
  return git_cl.GetTreeStatus(status_url)
def OptionallyDoPresubmitChecks(change_info, committing, args):
  """Runs presubmit checks unless --no_presubmit or --force was passed.

  Note: FilterFlag has a side-effect -- it removes the flag from `args`.
  Returns a PresubmitOutput (an empty, passing one when bypassed).
  """
  if FilterFlag(args, "--no_presubmit") or FilterFlag(args, "--force"):
    # Record the bypass so hook skips can be audited server-side.
    breakpad.SendStack(
        breakpad.DEFAULT_URL + '/breakpad',
        'GclHooksBypassedCommit',
        'Issue %s/%s bypassed hook when committing (tree status was "%s")' %
        (change_info.rietveld, change_info.issue, GetTreeStatus()),
        verbose=False)
    return presubmit_support.PresubmitOutput()
  return DoPresubmitChecks(change_info, committing, True)
def defer_attributes(a, b):
"""Copy attributes from an object (like a function) to another."""
for x in dir(a):
if not getattr(b, x, None):
setattr(b, x, getattr(a, x))
def need_change(function):
  """Converts args -> change_info."""
  # pylint: disable=W0612,W0621
  def wrapper(args):
    # Exactly one positional argument (the changelist name) is required.
    if len(args) != 1:
      ErrorExit("You need to pass a change list name")
    change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(), True, True)
    return function(change_info)
  defer_attributes(function, wrapper)
  wrapper.need_change = True
  wrapper.no_args = True
  return wrapper
def need_change_and_args(function):
  """Converts args -> change_info."""
  # pylint: disable=W0612,W0621
  def wrapper(args):
    if not args:
      ErrorExit("You need to pass a change list name")
    # Consume the changelist name; the rest of args goes to the command.
    change_name = args.pop(0)
    change_info = ChangeInfo.Load(change_name, GetRepositoryRoot(), True, True)
    return function(change_info, args)
  defer_attributes(function, wrapper)
  wrapper.need_change = True
  return wrapper
def no_args(function):
  """Make sure no args are passed."""
  # pylint: disable=W0612,W0621
  def wrapper(args):
    # Any leftover argument is an error for this command.
    if len(args) != 0:
      ErrorExit("Doesn't support arguments")
    return function()
  defer_attributes(function, wrapper)
  wrapper.no_args = True
  return wrapper
def attrs(**kwargs):
  """Decorate a function with new attributes."""
  def decorate(function):
    # Every keyword becomes an attribute on the decorated function.
    for key, value in kwargs.items():
      setattr(function, key, value)
    return function
  return decorate
@no_args
def CMDopened():
  """Lists modified files in the current directory down."""
  # False: do not list unknown (svn '?') files.
  return ListFiles(False)
@no_args
def CMDstatus():
  """Lists modified and unknown files in the current directory down."""
  # True: also list unknown (svn '?') files.
  return ListFiles(True)
@need_change_and_args
@attrs(usage='[--no_presubmit] [--no_watchlists]')
def CMDupload(change_info, args):
  """Uploads the changelist to the server for review.
  This does not submit a try job; use gcl try to submit a try job.
  """
  if '-s' in args or '--server' in args:
    ErrorExit('Don\'t use the -s flag, fix codereview.settings instead')
  if not change_info.GetFiles():
    print "Nothing to upload, changelist is empty."
    return 0
  # Note: OptionallyDoPresubmitChecks removes --no_presubmit/--force from args.
  output = OptionallyDoPresubmitChecks(change_info, False, args)
  if not output.should_continue():
    return 1
  no_watchlists = (FilterFlag(args, "--no_watchlists") or
                   FilterFlag(args, "--no-watchlists"))
  # Map --send-mail to --send_mail
  if FilterFlag(args, "--send-mail"):
    args.append("--send_mail")
  # Replace -m with -t and --message with --title, but make sure to
  # preserve anything after the -m/--message.
  # One-element list so the nested function can mutate the flag (no
  # `nonlocal` in python 2).
  found_deprecated_arg = [False]
  def replace_message(a):
    if a.startswith('-m'):
      found_deprecated_arg[0] = True
      return '-t' + a[2:]
    elif a.startswith('--message'):
      found_deprecated_arg[0] = True
      return '--title' + a[9:]
    return a
  args = map(replace_message, args)
  if found_deprecated_arg[0]:
    print >> sys.stderr, (
        '\nWARNING: Use -t or --title to set the title of the patchset.\n'
        'In the near future, -m or --message will send a message instead.\n'
        'See http://goo.gl/JGg0Z for details.\n')
  upload_arg = ["upload.py", "-y"]
  upload_arg.append("--server=%s" % change_info.rietveld.encode('utf-8'))
  reviewers = change_info.get_reviewers() or output.reviewers
  if (reviewers and
      not any(arg.startswith('-r') or arg.startswith('--reviewer') for
              arg in args)):
    upload_arg.append('--reviewers=%s' % ','.join(reviewers))
  upload_arg.extend(args)
  desc_file = None
  try:
    if change_info.issue:
      # Uploading a new patchset.
      upload_arg.append("--issue=%d" % change_info.issue)
      project = GetCodeReviewSetting("PROJECT")
      if project:
        # NOTE(review): appends SWITCH_TO_GIT rather than `project` -- confirm
        # this is intentional.
        upload_arg.append("--project=%s" % SWITCH_TO_GIT)
      if not any(i.startswith('--title') or i.startswith('-t') for i in args):
        upload_arg.append('--title= ')
    else:
      # First time we upload.
      handle, desc_file = tempfile.mkstemp(text=True)
      os.write(handle, change_info.description)
      os.close(handle)
      # Watchlist processing -- CC people interested in this changeset
      # http://dev.chromium.org/developers/contributing-code/watchlists
      if not no_watchlists:
        import watchlists
        watchlist = watchlists.Watchlists(change_info.GetLocalRoot())
        watchers = watchlist.GetWatchersForPaths(change_info.GetFileNames())
      # We check this before applying the "PRIVATE" parameter of codereview
      # settings assuming that the author of the settings file has put
      # addresses which we can send private CLs to, and so we should ignore
      # CC_LIST only when --private is specified explicitly on the command
      # line.
      if "--private" in upload_arg:
        Warn("WARNING: CC_LIST and WATCHLISTS are ignored when --private is "
             "specified. You need to review and add them manually if "
             "necessary.")
        cc_list = ""
        no_watchlists = True
      else:
        cc_list = GetCodeReviewSetting("CC_LIST")
      if not no_watchlists and watchers:
        # Filter out all empty elements and join by ','
        cc_list = ','.join(filter(None, [cc_list] + watchers))
      if cc_list:
        upload_arg.append("--cc=" + cc_list)
      upload_arg.append("--file=%s" % desc_file)
      if GetCodeReviewSetting("PRIVATE") == "True":
        upload_arg.append("--private")
      project = GetCodeReviewSetting("PROJECT")
      if project:
        # NOTE(review): appends SWITCH_TO_GIT rather than `project` -- confirm
        # this is intentional.
        upload_arg.append("--project=%s" % SWITCH_TO_GIT)
    # If we have a lot of files with long paths, then we won't be able to fit
    # the command to "svn diff". Instead, we generate the diff manually for
    # each file and concatenate them before passing it to upload.py.
    if change_info.patch is None:
      change_info.patch = GenerateDiff(change_info.GetFileNames())
    # Change the current working directory before calling upload.py so that it
    # shows the correct base.
    previous_cwd = os.getcwd()
    os.chdir(change_info.GetLocalRoot())
    try:
      try:
        issue, patchset = upload.RealMain(upload_arg, change_info.patch)
      except KeyboardInterrupt:
        sys.exit(1)
      if issue and patchset:
        change_info.issue = int(issue)
        change_info.patchset = int(patchset)
        change_info.Save()
      change_info.PrimeLint()
    finally:
      os.chdir(previous_cwd)
  finally:
    # Always clean up the temporary description file.
    if desc_file:
      os.remove(desc_file)
  print "*** Upload does not submit a try; use gcl try to submit a try. ***"
  return 0
@need_change_and_args
@attrs(usage='[--upload]')
def CMDpresubmit(change_info, args):
  """Runs presubmit checks on the change.
  The actual presubmit code is implemented in presubmit_support.py and looks
  for PRESUBMIT.py files."""
  if not change_info.GetFiles():
    print('Nothing to presubmit check, changelist is empty.')
    return 0
  parser = optparse.OptionParser()
  parser.add_option('--upload', action='store_true')
  options, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % args)
  # Exit code is inverted: 0 when the checks pass, 1 when they fail.
  if options.upload:
    print('*** Presubmit checks for UPLOAD would report: ***')
    return not DoPresubmitChecks(change_info, False, False)
  else:
    print('*** Presubmit checks for COMMIT would report: ***')
    return not DoPresubmitChecks(change_info, True, False)
def TryChange(change_info, args, swallow_exception):
  """Create a diff file of change_info and send it to the try server.

  Args:
    change_info: the ChangeInfo to try, or None to pass `args` straight
      through to trychange.
    swallow_exception: when True, a missing trychange module returns 1
      instead of exiting.
  """
  try:
    import trychange
  except ImportError:
    if swallow_exception:
      return 1
    ErrorExit("You need to install trychange.py to use the try server.")
  trychange_args = []
  if change_info:
    trychange_args.extend(['--name', change_info.name])
    if change_info.issue:
      trychange_args.extend(["--issue", str(change_info.issue)])
    if change_info.patchset:
      trychange_args.extend(["--patchset", str(change_info.patchset)])
    change = presubmit_support.SvnChange(change_info.name,
                                         change_info.description,
                                         change_info.GetLocalRoot(),
                                         change_info.GetFiles(),
                                         change_info.issue,
                                         change_info.patchset,
                                         None)
  else:
    change = None
  trychange_args.extend(args)
  return trychange.TryChange(
      trychange_args,
      change=change,
      swallow_exception=swallow_exception,
      prog='gcl try',
      extra_epilog='\n'
          'When called from gcl, use the format gcl try <change_name>.\n')
@need_change_and_args
@attrs(usage='[--no_presubmit]')
def CMDcommit(change_info, args):
  """Commits the changelist to the repository."""
  if not change_info.GetFiles():
    print "Nothing to commit, changelist is empty."
    return 1
  # OptionallyDoPresubmitChecks has a side-effect which eats these flags.
  bypassed = '--no_presubmit' in args or '--force' in args
  output = OptionallyDoPresubmitChecks(change_info, True, args)
  if not output.should_continue():
    return 1
  # We face a problem with svn here: Let's say change 'bleh' modifies
  # svn:ignore on dir1\. but another unrelated change 'pouet' modifies
  # dir1\foo.cc. When the user `gcl commit bleh`, foo.cc is *also committed*.
  # The only fix is to use --non-recursive but that has its issues too:
  # Let's say if dir1 is deleted, --non-recursive must *not* be used otherwise
  # you'll get "svn: Cannot non-recursively commit a directory deletion of a
  # directory with child nodes". Yay...
  commit_cmd = ["svn", "commit"]
  if change_info.issue:
    # Get the latest description from Rietveld.
    change_info.UpdateDescriptionFromIssue()
    change_info.update_reviewers(change_info.GetApprovingReviewers())
  commit_desc = git_cl.ChangeDescription(change_info.description)
  if change_info.issue:
    server = change_info.rietveld
    if not server.startswith("http://") and not server.startswith("https://"):
      server = "http://" + server
    commit_desc.append_footer('Review URL: %s/%d' % (server, change_info.issue))
  # The commit message and the list of targets are passed to svn via
  # temporary files; both are removed in the finally clauses below.
  handle, commit_filename = tempfile.mkstemp(text=True)
  os.write(handle, commit_desc.description)
  os.close(handle)
  try:
    handle, targets_filename = tempfile.mkstemp(text=True)
    os.write(handle, "\n".join(change_info.GetFileNames()))
    os.close(handle)
    try:
      commit_cmd += ['--file=' + commit_filename]
      commit_cmd += ['--targets=' + targets_filename]
      # Change the current working directory before calling commit.
      output = ''
      try:
        output = RunShell(commit_cmd, True)
      except subprocess2.CalledProcessError, e:
        ErrorExit('Commit failed.\n%s' % e)
    finally:
      os.remove(commit_filename)
  finally:
    os.remove(targets_filename)
  if output.find("Committed revision") != -1:
    change_info.Delete()
    if change_info.issue:
      # Pull the committed revision number out of svn's output.
      revision = re.compile(".*?\nCommitted revision (\d+)",
                            re.DOTALL).match(output).group(1)
      viewvc_url = GetCodeReviewSetting('VIEW_VC')
      if viewvc_url and revision:
        change_info.append_footer('Committed: ' + viewvc_url + revision)
      elif revision:
        change_info.append_footer('Committed: ' + revision)
      change_info.CloseIssue()
      props = change_info.RpcServer().get_issue_properties(
          change_info.issue, False)
      patch_num = len(props['patchsets'])
      comment = "Committed patchset #%d (id:%d) manually as r%s" % (
          patch_num, props['patchsets'][-1], revision)
      if bypassed:
        comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
      else:
        comment += ' (presubmit successful).'
      change_info.AddComment(comment)
  return 0
def CMDchange(args):
"""Creates or edits a changelist.
Only scans the current directory and subdirectories.
"""
# Verify the user is running the change command from a read-write checkout.
svn_info = SVN.CaptureLocalInfo([], '.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 0:
# Generate a random changelist name.
changename = GenerateChangeName()
elif args[0] == '--force':
changename = GenerateChangeName()
else:
changename = args[0]
change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), False, True)
if len(args) == 2:
if not os.path.isfile(args[1]):
ErrorExit('The change "%s" doesn\'t exist.' % args[1])
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = change_info.GetIssueDescription()
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
ErrorExit("Error getting the description from Rietveld: " + err)
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
description = description.rstrip() + '\n'
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
result = gclient_utils.RunEditor(text, False)
if not result:
ErrorExit('Running editor failed')
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n%r" % result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description or override_description:
change_info.force_description(new_description)
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
filename = line[7:]
new_cl_files.append((status, filename))
if (not len(change_info.GetFiles()) and not change_info.issue and
not len(new_description) and not new_cl_files):
ErrorExit("Empty changelist not saved")
change_info._files = new_cl_files
change_info.Save()
if svn_info.get('URL', '').startswith('http:'):
Warn("WARNING: Creating CL in a read-only checkout. You will need to "
"commit using a commit queue!")
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# Update the Rietveld issue.
if change_info.issue and change_info.NeedsUpload():
change_info.UpdateRietveldDescription()
change_info.Save()
return 0
@need_change_and_args
def CMDlint(change_info, args):
  """Runs cpplint.py on all the files in the change list.
  Checks all the files in the changelist for possible style violations.
  """
  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  try:
    import cpplint
    import cpplint_chromium
  except ImportError:
    ErrorExit("You need to install cpplint.py to lint C++ files.")
  # Change the current working directory before calling lint so that it
  # shows the correct base.
  previous_cwd = os.getcwd()
  os.chdir(change_info.GetLocalRoot())
  try:
    # Process cpplints arguments if any.
    filenames = cpplint.ParseArguments(args + change_info.GetFileNames())
    # Only lint files matching the white list but not the black list.
    white_list = GetCodeReviewSetting("LINT_REGEX")
    if not white_list:
      white_list = DEFAULT_LINT_REGEX
    white_regex = re.compile(white_list)
    black_list = GetCodeReviewSetting("LINT_IGNORE_REGEX")
    if not black_list:
      black_list = DEFAULT_LINT_IGNORE_REGEX
    black_regex = re.compile(black_list)
    extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
    for filename in filenames:
      if white_regex.match(filename):
        if black_regex.match(filename):
          print "Ignoring file %s" % filename
        else:
          cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
                              extra_check_functions)
      else:
        print "Skipping file %s" % filename
  finally:
    os.chdir(previous_cwd)
  print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
  # NOTE(review): returns 1 unconditionally, even when error_count is 0 --
  # confirm this is intentional.
  return 1
def DoPresubmitChecks(change_info, committing, may_prompt):
  """Imports presubmit, then calls presubmit.DoPresubmitChecks.

  Returns the PresubmitOutput from presubmit_support; callers inspect
  output.should_continue().
  """
  # The root PRESUBMIT.py applies to every change in the checkout.
  root_presubmit = GetCachedFile('PRESUBMIT.py', use_root=True)
  change = presubmit_support.SvnChange(change_info.name,
                                       change_info.description,
                                       change_info.GetLocalRoot(),
                                       change_info.GetFiles(),
                                       change_info.issue,
                                       change_info.patchset,
                                       None)
  output = presubmit_support.DoPresubmitChecks(
      change=change,
      committing=committing,
      verbose=False,
      output_stream=sys.stdout,
      input_stream=sys.stdin,
      default_presubmit=root_presubmit,
      may_prompt=may_prompt,
      rietveld_obj=change_info.RpcServer())
  if not output.should_continue() and may_prompt:
    # TODO(dpranke): move into DoPresubmitChecks(), unify cmd line args.
    print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
  return output
@no_args
def CMDchanges():
  """Lists all the changelists and their files."""
  for cl in GetCLs():
    change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
    print "\n--- Changelist " + change_info.name + ":"
    for filename in change_info.GetFiles():
      # Each entry is a (status, path) tuple; join prints them side by side.
      print "".join(filename)
  return 0
@no_args
def CMDdeleteempties():
  """Delete all changelists that have no files."""
  print "\n--- Deleting:"
  for cl in GetCLs():
    change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
    # An empty file list marks a stale changelist.
    if not len(change_info.GetFiles()):
      print change_info.name
      change_info.Delete()
  return 0
@no_args
def CMDnothave():
  """Lists files unknown to Subversion."""
  for filename in UnknownFiles():
    # Mimic `svn status` output for unversioned entries.
    print "? " + "".join(filename)
  return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
  """Diffs all files in the changelist or all files that aren't in a CL."""
  files = None
  if args:
    # First argument is the changelist name; the rest are svn options.
    change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
    files = change_info.GetFileNames()
  else:
    files = [f[1] for f in GetFilesNotInCL()]
  root = GetRepositoryRoot()
  cmd = ['svn', 'diff']
  cmd.extend([os.path.join(root, x) for x in files])
  cmd.extend(args)
  # Return svn's exit code.
  return RunShellWithReturnCode(cmd, print_output=True)[1]
@no_args
def CMDsettings():
  """Prints code review settings for this checkout."""
  # Force load settings
  GetCodeReviewSetting("UNKNOWN")
  # Drop the internal initialization sentinel before printing.
  del CODEREVIEW_SETTINGS['__just_initialized']
  print '\n'.join(("%s: %s" % (str(k), str(v))
                   for (k,v) in CODEREVIEW_SETTINGS.iteritems()))
  return 0
@need_change
def CMDdescription(change_info):
  """Prints the description of the specified change to stdout."""
  # @need_change already converted the changelist-name argument for us.
  print change_info.description
  return 0
def CMDdelete(args):
  """Deletes a changelist."""
  # Exactly one argument (the changelist name) is expected.
  if len(args) != 1:
    ErrorExit('You need to pass a change list name')
  info_file = GetChangelistInfoFile(args[0])
  if not os.path.isfile(info_file):
    ErrorExit('You need to pass a valid change list name')
  os.remove(info_file)
  return 0
def CMDtry(args):
  """Sends the change to the tryserver to do a test run on your code.
  To send multiple changes as one path, use a comma-separated list of
  changenames. Use 'gcl help try' for more information!"""
  # When the change contains no file, send the "changename" positional
  # argument to trychange.py.
  # When the command is 'try' and --patchset is used, the patch to try
  # is on the Rietveld server.
  if not args:
    ErrorExit("You need to pass a change list name")
  if args[0].find(',') != -1:
    change_info = LoadChangelistInfoForMultiple(args[0], GetRepositoryRoot(),
                                                True, True)
  else:
    change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(),
                                  True, True)
  props = change_info.RpcServer().get_issue_properties(
      change_info.issue, False)
  if props.get('private'):
    ErrorExit('Cannot use trybots on a private issue')
  if change_info.GetFiles():
    # The name was consumed by loading the changelist above.
    args = args[1:]
  else:
    # Empty changelist: forward all args (incl. the name) to trychange.
    change_info = None
  return TryChange(change_info, args, swallow_exception=False)
@attrs(usage='<old-name> <new-name>')
def CMDrename(args):
  """Renames an existing change."""
  if len(args) != 2:
    ErrorExit("Usage: gcl rename <old-name> <new-name>.")
  src, dst = args
  src_file = GetChangelistInfoFile(src)
  if not os.path.isfile(src_file):
    ErrorExit("Change '%s' does not exist." % src)
  dst_file = GetChangelistInfoFile(dst)
  if os.path.isfile(dst_file):
    ErrorExit("Change '%s' already exists; pick a new name." % dst)
  # Renaming the info file is all that is needed; content is name-agnostic.
  os.rename(src_file, dst_file)
  print "Change '%s' renamed '%s'." % (src, dst)
  return 0
def CMDpassthru(args):
  """Everything else that is passed into gcl we redirect to svn.
  It assumes a change list name is passed and is converted with the files names.
  """
  if not args or len(args) < 2:
    ErrorExit("You need to pass a change list name for this svn fall-through "
              "command")
  cl_name = args[1]
  args = ["svn", args[0]]
  # NOTE(review): `args` was just rebuilt with exactly two elements, so this
  # check is always true; it looks vestigial.
  if len(args) > 1:
    root = GetRepositoryRoot()
    change_info = ChangeInfo.Load(cl_name, root, True, True)
    args.extend([os.path.join(root, x) for x in change_info.GetFileNames()])
  return RunShellWithReturnCode(args, print_output=True)[1]
def Command(name):
  """Returns the CMD<name> handler defined in this module, or None."""
  this_module = sys.modules[__name__]
  return getattr(this_module, 'CMD' + name, None)
def GenUsage(command):
  """Modify an OptParse object with the function's documentation.

  Builds a usage string from attributes set by the command decorators
  (`usage`, `need_change`, `no_args`) plus the command's docstring.
  """
  obj = Command(command)
  display = command
  more = getattr(obj, 'usage', '')
  if command == 'help':
    display = '<command>'
  need_change_val = ''
  if getattr(obj, 'need_change', None):
    need_change_val = ' <change_list>'
  options = ' [options]'
  if getattr(obj, 'no_args', None):
    options = ''
  res = 'Usage: gcl %s%s%s %s\n\n' % (display, need_change_val, options, more)
  # De-indent the command docstring.
  res += re.sub('\n ', '\n', obj.__doc__)
  return res
def CMDhelp(args):
  """Prints this help or help for the given command."""
  if args and 'CMD' + args[0] in dir(sys.modules[__name__]):
    print GenUsage(args[0])
    # These commands defer to external tools so give this info too.
    if args[0] == 'try':
      TryChange(None, ['--help'], swallow_exception=False)
    if args[0] == 'upload':
      upload.RealMain(['upload.py', '--help'])
    return 0
  # No (known) command: print general usage plus the command summary list.
  print GenUsage('help')
  print sys.modules[__name__].__doc__
  print 'version ' + __version__ + '\n'
  print('Commands are:\n' + '\n'.join([
      ' %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
      for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
  return 0
def main(argv):
  """gcl entry point; returns a process exit code."""
  if sys.hexversion < 0x02060000:
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2
  sys.stderr.write('Warning: gcl is going away soon. Get off subversion!\n')
  sys.stderr.write('See http://crbug.com/475321 for more details.\n')
  if not argv:
    argv = ['help']
  command = Command(argv[0])
  # Help can be run from anywhere.
  if command == CMDhelp:
    return command(argv[1:])
  # All other commands require a subversion checkout.
  try:
    GetRepositoryRoot()
  except (gclient_utils.Error, subprocess2.CalledProcessError):
    print >> sys.stderr, 'To use gcl, you need to be in a subversion checkout.'
    return 1
  # Create the directories where we store information about changelists if it
  # doesn't exist.
  try:
    if not os.path.exists(GetInfoDir()):
      os.mkdir(GetInfoDir())
    if not os.path.exists(GetChangesDir()):
      os.mkdir(GetChangesDir())
    if not os.path.exists(GetCacheDir()):
      os.mkdir(GetCacheDir())
    if command:
      return command(argv[1:])
    # Unknown command, try to pass that to svn
    return CMDpassthru(argv)
  except (gclient_utils.Error, subprocess2.CalledProcessError), e:
    print >> sys.stderr, 'Got an exception'
    print >> sys.stderr, str(e)
    return 1
  except upload.ClientLoginError, e:
    print >> sys.stderr, 'Got an exception logging in to Rietveld'
    print >> sys.stderr, str(e)
    return 1
  except urllib2.HTTPError, e:
    # 500s from the server are transient; anything else is a real failure.
    if e.code != 500:
      raise
    print >> sys.stderr, (
        'AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
        'and retry or visit go/isgaeup.\n%s') % (e.code, str(e))
    return 1
if __name__ == "__main__":
  # Normalize stdout/stderr encodings before doing any printing.
  fix_encoding.fix_encoding()
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    # Ctrl-C exits quietly with a conventional non-zero status.
    sys.stderr.write('interrupted\n')
    sys.exit(1)
| chinmaygarde/depot_tools | gcl.py | Python | bsd-3-clause | 51,974 |
from nbodykit import CurrentMPIComm
from nbodykit.binned_statistic import BinnedStatistic
from collections import OrderedDict
import numpy
import logging
import kdcount
class Base3PCF(object):
"""
Base class for implementing common 3PCF calculations.
Users should use :class:`SimulationBox3PCF` or :class:`SurveyData3PCF`.
"""
    def __init__(self, source, poles, edges, required_cols, BoxSize=None, periodic=None):
        """
        Parameters
        ----------
        source :
            the catalog of particles; must provide ``required_cols`` and a
            ``comm`` attribute (MPI communicator)
        poles : list of int
            the multipole numbers to compute
        edges : array_like
            the radial bin edges
        required_cols : list of str
            the column names that must exist on ``source``
        BoxSize : optional
            the box size; only verified and stored when ``periodic`` is given
        periodic : bool, optional
            whether the box is periodic; ``None`` signals the survey-data
            (non-SimulationBox) case
        """
        from .pair_counters.base import verify_input_sources
        # verify the input sources; box size is only inspected for the
        # SimulationBox case (signalled by an explicit `periodic` flag)
        inspect = periodic is not None
        BoxSize = verify_input_sources(source, None, BoxSize, required_cols, inspect_boxsize=inspect)
        self.source = source
        self.comm = self.source.comm
        # save the meta-data
        self.attrs = {}
        self.attrs['poles'] = poles
        self.attrs['edges'] = edges
        # store periodic/BoxSize for SimulationBox
        if periodic is not None:
            self.attrs['BoxSize'] = BoxSize
            self.attrs['periodic'] = periodic
def _run(self, pos, w, pos_sec, w_sec, boxsize=None, bunchsize=10000):
"""
Internal function to run the 3PCF algorithm on the input data and
weights.
The input data/weights have already been domain-decomposed, and
the loads should be balanced on all ranks.
"""
# maximum radius
rmax = numpy.max(self.attrs['edges'])
# the array to hold output values
nbins = len(self.attrs['edges'])-1
Nell = len(self.attrs['poles'])
zeta = numpy.zeros((Nell,nbins,nbins), dtype='f8')
alms = {}
walms = {}
# compute the Ylm expressions we need
if self.comm.rank == 0:
self.logger.info("computing Ylm expressions...")
Ylm_cache = YlmCache(self.attrs['poles'], self.comm)
if self.comm.rank == 0:
self.logger.info("...done")
# make the KD-tree holding the secondaries
tree_sec = kdcount.KDTree(pos_sec, boxsize=boxsize).root
def callback(r, i, j, iprim=None):
# remove self pairs
valid = r > 0.
r = r[valid]; i = i[valid]
# normalized, re-centered position array (periodic)
dpos = (pos_sec[i] - pos[iprim])
# enforce periodicity in dpos
if boxsize is not None:
for axis, col in enumerate(dpos.T):
col[col > boxsize[axis]*0.5] -= boxsize[axis]
col[col <= -boxsize[axis]*0.5] += boxsize[axis]
recen_pos = dpos / r[:,numpy.newaxis]
# find the mapping of r to rbins
dig = numpy.searchsorted(self.attrs['edges'], r, side='left')
# evaluate all Ylms
Ylms = Ylm_cache(recen_pos[:,0]+1j*recen_pos[:,1], recen_pos[:,2])
# sqrt of primary weight
w0 = w[iprim]
# loop over each (l,m) pair
for (l,m) in Ylms:
# the Ylm evaluated at galaxy positions
weights = Ylms[(l,m)] * w_sec[i]
# sum over for each radial bin
alm = alms.setdefault((l, m), numpy.zeros(nbins, dtype='c16'))
walm = walms.setdefault((l, m), numpy.zeros(nbins, dtype='c16'))
r1 = numpy.bincount(dig, weights=weights.real, minlength=nbins+2)[1:-1]
alm[...] += r1
walm[...] += w0 * r1
if m != 0:
i1 = numpy.bincount(dig, weights=weights.imag, minlength=nbins+2)[1:-1]
alm[...] += 1j*i1
walm[...] += w0*1j*i1
# determine rank with largest load
loads = self.comm.allgather(len(pos))
largest_load = numpy.argmax(loads)
chunk_size = max(loads) // 10
# compute multipoles for each primary (s vector in the paper)
for iprim in range(len(pos)):
# alms must be clean for each primary particle; (s) in eq 15 and 8 of arXiv:1506.02040v2
alms.clear()
walms.clear()
tree_prim = kdcount.KDTree(numpy.atleast_2d(pos[iprim]), boxsize=boxsize).root
tree_sec.enum(tree_prim, rmax, process=callback, iprim=iprim, bunch=bunchsize)
if self.comm.rank == largest_load and iprim % chunk_size == 0:
self.logger.info("%d%% done" % (10*iprim//chunk_size))
# combine alms into zeta(s);
# this cannot be done in the callback because
# it is a nonlinear function (outer product) of alm.
for (l, m) in alms:
alm = alms[(l, m)]
walm = walms[(l, m)]
# compute alm * conjugate(alm)
alm_w_alm = numpy.outer(walm, alm.conj())
if m != 0: alm_w_alm += alm_w_alm.T # add in the -m contribution for m != 0
zeta[Ylm_cache.ell_to_iell[l], ...] += alm_w_alm.real
# sum across all ranks
zeta = self.comm.allreduce(zeta)
# normalize according to Eq. 15 of Slepian et al. 2015
# differs by factor of (4 pi)^2 / (2l+1) from the C++ code
zeta /= (4*numpy.pi)
# make a BinnedStatistic
dtype = numpy.dtype([('corr_%d' % ell, zeta.dtype) for ell in self.attrs['poles']])
data = numpy.empty(zeta.shape[-2:], dtype=dtype)
for i, ell in enumerate(self.attrs['poles']):
data['corr_%d' % ell] = zeta[i]
# save the result
edges = self.attrs['edges']
poles = BinnedStatistic(['r1', 'r2'], [edges, edges], data)
return poles
def __getstate__(self):
return {'poles':self.poles.data, 'attrs':self.attrs}
def __setstate__(self, state):
self.__dict__.update(state)
self.poles = BinnedStatistic(['r1', 'r2'], [self.attrs['edges']]*2, self.poles)
def save(self, output):
"""
Save the :attr:`poles` result to a JSON file with name ``output``.
"""
import json
from nbodykit.utils import JSONEncoder
# only the master rank writes
if self.comm.rank == 0:
self.logger.info('measurement done; saving result to %s' % output)
with open(output, 'w') as ff:
json.dump(self.__getstate__(), ff, cls=JSONEncoder)
@classmethod
@CurrentMPIComm.enable
def load(cls, filename, comm=None):
"""
Load a result from ``filename`` that has been saved to
disk with :func:`save`.
"""
import json
from nbodykit.utils import JSONDecoder
if comm.rank == 0:
with open(filename, 'r') as ff:
state = json.load(ff, cls=JSONDecoder)
else:
state = None
state = comm.bcast(state)
self = object.__new__(cls)
self.__setstate__(state)
self.comm = comm
return self
class SimulationBox3PCF(Base3PCF):
    r"""
    Compute the multipoles of the isotropic, three-point correlation function
    in configuration space for data in a simulation box.

    This uses the algorithm of Slepian and Eisenstein, 2015 which scales
    as :math:`\mathcal{O}(N^2)`, where :math:`N` is the number of objects.

    Results are computed when the object is initialized. See the documentation
    of :func:`run` for the attributes storing the results.

    .. note::
        The algorithm expects the positions of objects in a simulation box to
        be the Cartesian ``x``, ``y``, and ``z`` vectors. For survey data,
        in the form of right ascension, declination, and
        redshift, see :class:`~nbodykit.algorithms.SurveyData3PCF`.

    Parameters
    ----------
    source : CatalogSource
        the input source of particles providing the 'Position' column
    poles : list of int
        the list of multipole numbers to compute
    edges : array_like
        the edges of the bins of separation to use; length of nbins+1
    BoxSize : float, 3-vector, optional
        the size of the box; if periodic boundary conditions used, and 'BoxSize'
        not provided in the source :attr:`attrs`, it must be provided here
    periodic : bool, optional
        whether to use periodic boundary conditions when computing separations
        between objects
    weight : str, optional
        the name of the column in the source specifying the particle weights
    position : str, optional
        the name of the column in the source specifying the particle positions

    References
    ----------
    Slepian and Eisenstein, MNRAS 454, 4142-4158 (2015)
    """
    logger = logging.getLogger("SimulationBox3PCF")

    def __init__(self, source, poles, edges, BoxSize=None, periodic=True, weight='Weight', position='Position'):
        # initialize the base class
        required_cols = [position, weight]
        Base3PCF.__init__(self, source, poles, edges, required_cols,
                          BoxSize=BoxSize, periodic=periodic)

        # save the weight/position column names
        self.attrs['weight'] = weight
        self.attrs['position'] = position

        # check largest possible separation: with periodic wrapping,
        # separations are only unique up to half the smallest box side
        if periodic:
            min_box_side = 0.5*self.attrs['BoxSize'].min()
            if numpy.amax(edges) > min_box_side:
                raise ValueError(("periodic pair counts cannot be computed for Rmax > BoxSize/2"))

        # run the algorithm
        self.poles = self.run()

    def run(self, pedantic=False):
        r"""
        Compute the three-point CF multipoles. This attaches the following
        attributes to the class:

        - :attr:`poles`

        Attributes
        ----------
        poles : :class:`~nbodykit.binned_statistic.BinnedStatistic`
            a BinnedStatistic object to hold the multipole results; the
            binned statistics stores the multipoles as variables ``corr_0``,
            ``corr_1``, etc for :math:`\ell=0,1,` etc. The coordinates
            of the binned statistic are ``r1`` and ``r2``, which give the
            separations between the three objects in CF.
        """
        from .pair_counters.domain import decompose_box_data

        # the box size to use (None disables periodic wrapping downstream)
        if self.attrs['periodic']:
            boxsize = self.attrs['BoxSize']
        else:
            boxsize = None

        # domain decompose the data
        smoothing = numpy.max(self.attrs['edges'])
        (pos, w), (pos_sec, w_sec) = decompose_box_data(self.source, None, self.attrs,
                                                        self.logger, smoothing)

        # run the algorithm; ``pedantic`` forces a KD-tree bunch size of 1
        if pedantic:
            return self._run(pos, w, pos_sec, w_sec, boxsize=boxsize, bunchsize=1)
        else:
            return self._run(pos, w, pos_sec, w_sec, boxsize=boxsize)
class SurveyData3PCF(Base3PCF):
    r"""
    Compute the multipoles of the isotropic, three-point correlation function
    in configuration space for observational survey data.

    This uses the algorithm of Slepian and Eisenstein, 2015 which scales
    as :math:`\mathcal{O}(N^2)`, where :math:`N` is the number of objects.

    Results are computed when the object is initialized. See the documentation
    of :func:`run` for the attributes storing the results.

    .. note::
        The algorithm expects the positions of objects from a survey catalog
        be the sky coordinates, right ascension and declination, and redshift.
        For simulation box data in Cartesian coordinates, see
        :class:`~nbodykit.algorithms.SimulationBox3PCF`.

    .. warning::
        The right ascension and declination columns should be specified
        in degrees.

    Parameters
    ----------
    source : CatalogSource
        the input source of particles providing the 'Position' column
    poles : list of int
        the list of multipole numbers to compute
    edges : array_like
        the edges of the bins of separation to use; length of nbins+1
    cosmo : :class:`~nbodykit.cosmology.cosmology.Cosmology`
        the cosmology instance used to convert redshifts into comoving distances
    ra : str, optional
        the name of the column in the source specifying the
        right ascension coordinates in units of degrees; default is 'RA'
    dec : str, optional
        the name of the column in the source specifying the declination
        coordinates; default is 'DEC'
    redshift : str, optional
        the name of the column in the source specifying the redshift
        coordinates; default is 'Redshift'
    weight : str, optional
        the name of the column in the source specifying the object weights
    domain_factor : int, optional
        the integer value by which to oversubscribe the domain decomposition
        mesh before balancing loads; this number can affect the distribution
        of loads on the ranks -- an optimal value will lead to balanced loads

    References
    ----------
    Slepian and Eisenstein, MNRAS 454, 4142-4158 (2015)
    """
    logger = logging.getLogger("SurveyData3PCF")

    def __init__(self, source, poles, edges, cosmo, domain_factor=4,
                 ra='RA', dec='DEC', redshift='Redshift', weight='Weight'):
        # initialize the base class (no BoxSize/periodic for survey data)
        required_cols = [ra, dec, redshift, weight]
        Base3PCF.__init__(self, source, poles, edges, required_cols)

        # save meta-data
        self.attrs['cosmo'] = cosmo
        self.attrs['weight'] = weight
        self.attrs['ra'] = ra
        self.attrs['dec'] = dec
        self.attrs['redshift'] = redshift
        self.attrs['domain_factor'] = domain_factor

        # run the algorithm
        self.poles = self.run()

    def run(self):
        r"""
        Compute the three-point CF multipoles. This attaches the following
        attributes to the class:

        - :attr:`poles`

        Attributes
        ----------
        poles : :class:`~nbodykit.binned_statistic.BinnedStatistic`
            a BinnedStatistic object to hold the multipole results; the
            binned statistics stores the multipoles as variables ``corr_0``,
            ``corr_1``, etc for :math:`\ell=0,1,` etc. The coordinates
            of the binned statistic are ``r1`` and ``r2``, which give the
            separations between the three objects in CF.
        """
        from .pair_counters.domain import decompose_survey_data

        # domain decompose the data
        # NOTE: pos and pos_sec are Cartesian!
        smoothing = numpy.max(self.attrs['edges'])
        (pos, w), (pos_sec, w_sec) = decompose_survey_data(self.source, None,
                                                           self.attrs, self.logger,
                                                           smoothing,
                                                           return_cartesian=True,
                                                           domain_factor=self.attrs['domain_factor'])

        # run the algorithm (no box size: survey data is not periodic)
        return self._run(pos, w, pos_sec, w_sec)
class YlmCache(object):
    r"""
    A class to compute spherical harmonics :math:`Y_{lm}` up
    to a specified maximum :math:`\ell`.

    During calculation, the necessary powers of the Cartesian unit
    vectors are cached in memory to avoid repeated calculations
    for separate harmonics.
    """
    def __init__(self, ells, comm):

        import sympy as sp
        from sympy.utilities.lambdify import implemented_function
        from sympy.parsing.sympy_parser import parse_expr
        from sympy.core import sympify

        self.ells = numpy.asarray(ells).astype(int)
        self.max_ell = max(self.ells)

        # look up table from ell to iell, index for cummulating results.
        self.ell_to_iell = numpy.empty(self.max_ell + 1, dtype=int)
        for iell, ell in enumerate(self.ells):
            self.ell_to_iell[ell] = iell

        # all (l, m) pairs with m >= 0; negative m are handled by symmetry
        lms = [(l,m) for l in ells for m in range(0, l+1)]

        # compute the Ylm string expressions in parallel (round-robin over
        # ranks), then gather the full list on every rank
        exprs = []
        for i in range(comm.rank, len(lms), comm.size):
            lm = lms[i]
            exprs.append((lm, str(self._get_Ylm(*lm))))
        exprs = [x for sublist in comm.allgather(exprs) for x in sublist]

        # determine the powers entering into each expression
        # NOTE(review): powers are detected by substring matching, which
        # assumes sympy prints them literally as e.g. ``zhat**3`` -- verify
        # if the sympy version is changed
        args = {}
        for lm, expr in exprs:
            matches = []
            for var in ['xpyhat', 'zhat']:
                for e in range(2, max(ells)+1):
                    name = var + '**' + str(e)
                    if name in expr:
                        matches.append((sympify(name), 'cached_'+var, str(e)))
            args[lm] = matches

        # define a function to return cached power
        def from_cache(name, pow):
            return self._cache[str(name)+str(pow)]
        # the returned symbol is unused, but the call registers the Python
        # implementation of ``from_cache`` with sympy -- presumably relied
        # on by lambdify below; verify before removing
        f = implemented_function(sp.Function('from_cache'), from_cache)

        # arguments to the sympy functions
        zhat = sp.Symbol('zhat', real=True, positive=True)
        xpyhat = sp.Symbol('xpyhat', complex=True)

        self._cache = {}

        # make the Ylm functions: substitute cached-power lookups into each
        # expression and compile it to a fast callable
        self._Ylms = OrderedDict()
        for lm, expr in exprs:
            expr = parse_expr(expr, local_dict={'zhat':zhat, 'xpyhat':xpyhat})
            for var in args[lm]:
                expr = expr.replace(var[0], sympify('from_cache(%s, %s)' %var[1:]))
            self._Ylms[lm] = sp.lambdify((xpyhat, zhat), expr)

    def __call__(self, xpyhat, zhat):
        """
        Return a dictionary holding Ylm for each (l,m) combination
        required

        Parameters
        ----------
        xpyhat : array_like
            a complex array holding xhat + i * yhat, where xhat and yhat
            are the two cartesian unit vectors
        zhat : array_like
            the third cartesian unit vector

        Returns
        -------
        dict :
            mapping of each (l, m) tuple to the evaluated Ylm array
        """
        # fill the cache first: powers 2..max_ell of both unit vectors,
        # each power computed once from the previous one
        self._cache['cached_xpyhat2'] = xpyhat**2
        self._cache['cached_zhat2'] = zhat**2
        for name,x in zip(['cached_xpyhat', 'cached_zhat'], [xpyhat, zhat]):
            for i in range(3, self.max_ell+1):
                self._cache[name+str(i)] = self._cache[name+str(i-1)]*x

        # return a dictionary for each (l,m) tuple
        toret = {}
        for lm in self._Ylms:
            toret[lm] = self._Ylms[lm](xpyhat, zhat)
        return toret

    def _get_Ylm(self, l, m):
        r"""
        Compute an expression for spherical harmonic of order (l,m)
        in terms of Cartesian unit vectors, :math:`\hat{z}`
        and :math:`\hat{x} + i \hat{y}`

        Parameters
        ----------
        l : int
            the degree of the harmonic
        m : int
            the order of the harmonic; |m| < l

        Returns
        -------
        expr :
            a sympy expression that corresponds to the
            requested Ylm

        References
        ----------
        https://en.wikipedia.org/wiki/Spherical_harmonics
        """
        import sympy as sp

        # the relevant cartesian and spherical symbols
        x, y, z, r = sp.symbols('x y z r', real=True, positive=True)
        xhat, yhat, zhat = sp.symbols('xhat yhat zhat', real=True, positive=True)
        xpyhat = sp.Symbol('xpyhat', complex=True)
        phi, theta = sp.symbols('phi theta')
        defs = [(sp.sin(phi), y/sp.sqrt(x**2+y**2)),
                (sp.cos(phi), x/sp.sqrt(x**2+y**2)),
                (sp.cos(theta), z/sp.sqrt(x**2 + y**2 + z**2))
               ]

        # the cos(theta) dependence encoded by the associated Legendre poly
        expr = sp.assoc_legendre(l, m, sp.cos(theta))

        # the exp(i*m*phi) dependence
        expr *= sp.expand_trig(sp.cos(m*phi)) + sp.I*sp.expand_trig(sp.sin(m*phi))

        # simplifying optimizations: rewrite in terms of Cartesian unit
        # vectors and collapse x^2+y^2+z^2 to r^2
        expr = sp.together(expr.subs(defs)).subs(x**2 + y**2 + z**2, r**2)
        expr = expr.expand().subs([(x/r, xhat), (y/r, yhat), (z/r, zhat)])
        expr = expr.factor().factor(extension=[sp.I]).subs(xhat+sp.I*yhat, xpyhat)
        expr = expr.subs(xhat**2 + yhat**2, 1-zhat**2).factor()

        # and finally add the normalization
        amp = sp.sqrt((2*l+1) / (4*numpy.pi) * sp.factorial(l-m) / sp.factorial(l+m))
        expr *= amp

        return expr
| bccp/nbodykit | nbodykit/algorithms/threeptcf.py | Python | gpl-3.0 | 20,013 |
#!/usr/bin/env python
# vim: fileencoding=utf8:et:sta:ai:sw=4:ts=4:sts=4
import traceback
import greenhouse
from junction import Hub
# Address this hub listens on.
HOST = "127.0.0.1"
PORT = 9100

# Address of the peer hub -- presumably the echo service example; verify
# against the sibling scripts in this example directory.
SERVICE_HOST = HOST
SERVICE_PORT = 9000

# Address of the interactive backdoor (debugging shell).
BDHOST = HOST
BDPORT = 9101

# NOTE(review): unused in this module -- presumably kept in sync with the
# service identifier used by the other echo example scripts.
SERVICE = 1

# Print any uncaught greenlet exceptions instead of dropping them silently.
greenhouse.global_exception_handler(traceback.print_exception)

hub = Hub((HOST, PORT), [(SERVICE_HOST, SERVICE_PORT)])
def main():
    """Start the hub, attach a debugging backdoor, and block until Ctrl-C."""
    hub.start()
    hub.wait_connected()

    # Serve an interactive backdoor shell with the hub pre-loaded into its
    # namespace, running as a scheduled greenlet.
    backdoor_address = (BDHOST, BDPORT)
    backdoor_namespace = {'hub': hub}
    greenhouse.schedule(greenhouse.run_backdoor,
                        args=(backdoor_address, backdoor_namespace))

    # Wait on an event that is never fired; a KeyboardInterrupt is the
    # intended way to shut the relayer down.
    try:
        greenhouse.Event().wait()
    except KeyboardInterrupt:
        pass
# Run the relayer when executed as a script.
if __name__ == '__main__':
    main()
| teepark/junction | examples/echo/relayer.py | Python | bsd-3-clause | 654 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from pybossa.cookies import CookieHandler
from pybossa.password_manager import ProjectPasswdManager
from mock import patch, MagicMock
class TestProjectPasswdManager(object):
    """Unit tests for class ProjectPasswdManager methods.

    The cookie handler and the project are replaced with MagicMock objects,
    so each test exercises only the decision logic in ProjectPasswdManager;
    ``current_user`` is patched per test to simulate anonymous users,
    authenticated users, project owners and admins.
    """

    def setUp(self):
        # Fresh mocks for every test: the manager under test only needs a
        # cookie handler and a project object.
        self.cookie_handler = MagicMock()
        self.psswd_mngr = ProjectPasswdManager(self.cookie_handler)
        self.project = MagicMock()

    def tearDown(self):
        # Drop references so no state can leak between tests.
        self.cookie_handler = None
        self.psswd_mngr = None
        self.project = None

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_anon_passwd_no_ip(self, mock_user):
        """Test password_needed should return True for an anonymous user and
        a project with password, if the cookie does not contain the user IP"""
        mock_user.is_anonymous.return_value = True
        mock_user.admin = False
        self.cookie_handler.get_cookie_from.return_value = []
        self.project.needs_password.return_value = True
        user_ip = '127.0.0.1'

        password_needed = self.psswd_mngr.password_needed(self.project, user_ip)

        self.cookie_handler.get_cookie_from.assert_called_with(self.project)
        assert password_needed is True, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_anon_passwd_ip(self, mock_user):
        """Test password_needed should return False for an anonymous user and
        a project with password, if the cookie contains the user IP"""
        mock_user.is_anonymous.return_value = True
        mock_user.admin = False
        self.cookie_handler.get_cookie_from.return_value = ['127.0.0.1']
        self.project.needs_password.return_value = True
        user_ip = '127.0.0.1'

        password_needed = self.psswd_mngr.password_needed(self.project, user_ip)

        self.cookie_handler.get_cookie_from.assert_called_with(self.project)
        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_anon_no_passwd(self, mock_user):
        """Test password_needed should return False for an anonymous user and
        a project without password"""
        mock_user.is_anonymous.return_value = True
        mock_user.admin = False
        self.project.needs_password.return_value = False
        user_ip = '127.0.0.1'

        password_needed = self.psswd_mngr.password_needed(self.project, user_ip)

        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_auth_passwd_no_id(self, mock_user):
        """Test password_needed should return True for an authenticated user and
        a project with password, if the cookie does not contain the user id"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = False
        mock_user.id = 2
        self.cookie_handler.get_cookie_from.return_value = []
        self.project.needs_password.return_value = True

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        self.cookie_handler.get_cookie_from.assert_called_with(self.project)
        assert password_needed is True, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_auth_passwd_ip(self, mock_user):
        """Test password_needed should return False for an authenticated user and
        a project with password, if the cookie contains the user id"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = False
        mock_user.id = 2
        self.cookie_handler.get_cookie_from.return_value = [2]
        self.project.needs_password.return_value = True

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        self.cookie_handler.get_cookie_from.assert_called_with(self.project)
        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_auth_no_passwd(self, mock_user):
        """Test password_needed should return False for an authenticated user and
        a project without password"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = False
        mock_user.id = 2
        self.project.needs_password.return_value = False

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_owner_no_passwd(self, mock_user):
        """Test password_needed returns False for project owner if it has no
        password"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = False
        mock_user.id = 2
        self.project.needs_password.return_value = False
        self.project.owner_id = 2

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_owner_passwd(self, mock_user):
        """Test password_needed returns False for project owner even it has a
        password"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = False
        mock_user.id = 2
        self.project.needs_password.return_value = True
        self.project.owner_id = 2

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_admin_no_passwd(self, mock_user):
        """Test password_needed returns False for admins if project has no
        password"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = True
        mock_user.id = 1
        self.project.needs_password.return_value = False
        self.project.owner_id = 2

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        assert password_needed is False, password_needed

    @patch('pybossa.password_manager.current_user')
    def test_password_needed_admin_passwd(self, mock_user):
        """Test password_needed returns False for admins even project has
        a password"""
        mock_user.is_anonymous.return_value = False
        mock_user.admin = True
        mock_user.id = 1
        self.project.needs_password.return_value = True
        self.project.owner_id = 2

        password_needed = self.psswd_mngr.password_needed(self.project, mock_user.id)

        assert password_needed is False, password_needed

    def test_validates(self):
        """Test validates tells the project to check the password (not too much
        logic in here)"""
        password = '1234'

        self.psswd_mngr.validates(password, self.project)

        self.project.check_password.assert_called_with(password)

    def test_update_response(self):
        """Test update_response tells the cookie handler to update the response
        with the project and user info"""
        user = MagicMock()
        response = MagicMock()

        self.psswd_mngr.update_response(response, self.project, user)

        self.cookie_handler.add_cookie_to.assert_called_with(response, self.project, user)
| stitchfix/pybossa | test/test_password_manager.py | Python | agpl-3.0 | 8,144 |
"""Test hooks in bash scripts."""
import contextlib
import re
import subprocess
import pytest
@contextlib.contextmanager
def build_image(root):
    """Build a new Docker image with any file in root copied to /.

    The image is built only after the ``with`` block exits, so the image ID
    list yielded to the caller is empty inside the block and populated
    afterwards.

    :param py.path.local root: Root directory of files to copy.

    :return: Same root variable, Dockerfile path, and Docker image ID in a list.
    :rtype: iter
    """
    images = list()

    # Create Dockerfile.
    docker_file = root.ensure('Dockerfile')
    docker_file.write('FROM robpol86/makemkv\n')

    # Let caller add files or modify Dockerfile.
    yield root, docker_file, images

    # Append a COPY line for every file the caller created (mode 'a').
    for path in (p for p in root.listdir() if p.isfile() and p.basename != 'Dockerfile'):
        docker_file.write('COPY {} /\n'.format(path.basename), 'a')

    # Build and collect the resulting image ID(s) from docker's output.
    stdout = pytest.run(['docker', 'build', '.'], cwd=root)[0]
    matches = re.compile(br'^Successfully built ([a-f0-9]+)$', re.MULTILINE).findall(stdout)
    assert matches
    images.extend(m.decode('utf8') for m in matches)
@pytest.mark.usefixtures('cdemu')
def test_success(tmpdir):
    """Test most hooks in one test during a successful rip.

    :param py.path.local tmpdir: pytest fixture.
    """
    hooks = ('post-env', 'pre-prepare', 'post-prepare', 'pre-rip', 'post-title', 'post-rip', 'end')
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        # Each hook dumps its environment so we can assert on _HOOK_SCRIPT.
        for hook in hooks:
            root.ensure('hook-{}.sh'.format(hook)).write('env |sort')

    # Docker run.
    output = tmpdir.ensure_dir('output')
    stdout, stderr = pytest.run(output=output, image_id=image_ids[0])

    # Verify every expected hook fired, ran, and completed.
    for hook in hooks:
        assert b'FIRING HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
        assert b'_HOOK_SCRIPT=/hook-%s.sh' % hook.encode('utf8') in stdout
        assert b'END OF HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
        if hook == 'post-title':
            # post-title additionally exposes the path of the ripped title.
            assert re.compile(br'^TITLE_PATH=/output/Sample[a-zA-Z0-9_/.-]+/title00\.mkv$', re.MULTILINE).search(stdout)
    assert stderr.count(b'\nEND OF HOOK: ') == len(hooks)  # Verify no other hooks fired.
    pytest.verify(output)
@pytest.mark.usefixtures('cdemu_truncated')
def test_failed(tmpdir):
    """Test the error hooks during a failed rip (truncated disc image).

    :param py.path.local tmpdir: pytest fixture.
    """
    hooks = ('pre-on-err', 'post-on-err', 'pre-on-err-touch', 'post-on-err-touch')
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        for hook in hooks:
            root.ensure('hook-{}.sh'.format(hook)).write('env |sort')

    # Docker run; the truncated image makes the container exit non-zero.
    with pytest.raises(subprocess.CalledProcessError) as exc:
        pytest.run(image_id=image_ids[0])
    stdout, stderr = exc.value.output, exc.value.stderr

    # Verify only the error hooks fired, and all of them completed.
    for hook in hooks:
        assert b'\nFIRING HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
        assert b'\n_HOOK_SCRIPT=/hook-%s.sh' % hook.encode('utf8') in stdout
        assert b'\nEND OF HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
    assert stderr.count(b'\nEND OF HOOK: ') == len(hooks)  # Verify no other hooks fired.
@pytest.mark.usefixtures('cdemu')
def test_failed_after_makemkvcon(tmpdir):
    """Test errors that happen after makemkvcon background and foreground trick.

    A failing post-rip hook makes the run fail even though ripping succeeded.

    :param py.path.local tmpdir: pytest fixture.
    """
    hooks = ('pre-on-err', 'post-on-err', 'pre-on-err-touch', 'post-on-err-touch')
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        for hook in hooks:
            root.ensure('hook-{}.sh'.format(hook)).write('env |sort')
        # Force the failure after the rip itself completed.
        root.ensure('hook-post-rip.sh').write('false')

    # Docker run.
    with pytest.raises(subprocess.CalledProcessError) as exc:
        pytest.run(image_id=image_ids[0])
    stdout, stderr = exc.value.output, exc.value.stderr

    # Verify only the error hooks fired, and all of them completed.
    for hook in hooks:
        assert b'\nFIRING HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
        assert b'\n_HOOK_SCRIPT=/hook-%s.sh' % hook.encode('utf8') in stdout
        assert b'\nEND OF HOOK: /hook-%s.sh' % hook.encode('utf8') in stderr
    assert stderr.count(b'\nEND OF HOOK: ') == len(hooks)  # Verify no other hooks fired.
@pytest.mark.parametrize('fail', [False, True])
@pytest.mark.parametrize('no_eject', [False, True])
@pytest.mark.usefixtures('cdemu')
def test_eject(tmpdir, fail, no_eject):
    """Test post and pre eject hooks.

    :param py.path.local tmpdir: pytest fixture.
    :param bool fail: Cause a failure during the run.
    :param bool no_eject: Pass NO_EJECT=true to the container, suppressing ejecting.
    """
    hooks = ('pre-success-eject', 'post-success-eject', 'pre-failed-eject', 'post-failed-eject')
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        for hook in hooks:
            root.ensure('hook-{}.sh'.format(hook)).write('echo eject hook fired!')
        if fail:
            # A failing pre-rip hook forces the failed-eject path.
            root.ensure('hook-pre-rip.sh').write('false')

    # Docker run; FAILED_EJECT makes the container eject even on failure.
    args = ['-e', 'FAILED_EJECT=true'] + (['-e', 'NO_EJECT=true'] if no_eject else [])
    if fail:
        with pytest.raises(subprocess.CalledProcessError) as exc:
            pytest.run(args=args, image_id=image_ids[0])
        stdout, stderr = exc.value.output, exc.value.stderr
    else:
        stdout, stderr = pytest.run(args=args, image_id=image_ids[0])

    # Verify: no eject hooks with NO_EJECT, otherwise exactly the pre/post
    # pair matching the success/failure outcome.
    if no_eject:
        assert stdout.count(b'eject hook fired!') == 0
    else:
        assert stdout.count(b'eject hook fired!') == 2
        if fail:
            assert b'\nFIRING HOOK: /hook-pre-failed-eject.sh' in stderr
            assert b'\nFIRING HOOK: /hook-post-failed-eject.sh' in stderr
        else:
            assert b'\nFIRING HOOK: /hook-pre-success-eject.sh' in stderr
            assert b'\nFIRING HOOK: /hook-post-success-eject.sh' in stderr
@pytest.mark.parametrize('fail', [False, True])
@pytest.mark.usefixtures('cdemu')
def test_wait(tmpdir, fail):
    """Test waiting for background jobs.

    The container must wait for the hook's backgrounded function to finish
    (on success and on failure) before exiting.

    :param py.path.local tmpdir: pytest fixture.
    :param bool fail: Cause a failure during the run.
    """
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        pre_rip = root.ensure('hook-pre-rip.sh')
        pre_rip.write(
            'do_wait () {\n'
            '    sleep 2\n'
            '    echo do_wait done!\n'
            '}\n'
            'do_wait &\n'
        )
        if fail:
            pre_rip.write('false\n', 'a')

    # Docker run.
    if fail:
        with pytest.raises(subprocess.CalledProcessError) as exc:
            pytest.run(image_id=image_ids[0])
        stdout, stderr = exc.value.output, exc.value.stderr
    else:
        stdout, stderr = pytest.run(image_id=image_ids[0])

    # Verify the background job was waited on in both outcomes.
    assert b'do_wait done!' in stdout
@pytest.mark.parametrize('fail', [False, True])
@pytest.mark.usefixtures('cdemu')
def test_wait_nested(tmpdir, fail):
    """Test waiting for background jobs created by background jobs.

    The hook's backgrounded function itself waits for makemkvcon to exit
    before sleeping, so the container must wait transitively.

    :param py.path.local tmpdir: pytest fixture.
    :param bool fail: Cause a failure during the run.
    """
    with build_image(tmpdir.join('root')) as (root, _, image_ids):
        post_title = root.ensure('hook-post-title.sh')
        post_title.write(
            'do_wait () {\n'
            '    for _ in {1..5}; do\n'
            '        if readlink /proc/*/exe |grep -q makemkvcon &> /dev/null; then sleep 1; else break; fi\n'
            '    done\n'
            '    sleep 5\n'
            '    echo do_wait done!\n'
            '}\n'
            'do_wait &\n'
        )
        if fail:
            root.ensure('hook-post-rip.sh').write('false\n', 'a')

    # Docker run.
    if fail:
        with pytest.raises(subprocess.CalledProcessError) as exc:
            pytest.run(image_id=image_ids[0])
        stdout, stderr = exc.value.output, exc.value.stderr
    else:
        stdout, stderr = pytest.run(image_id=image_ids[0])

    # Verify the nested background job was waited on in both outcomes.
    assert b'do_wait done!' in stdout
| Robpol86/makemkv | tests/test_hooks.py | Python | mit | 7,910 |
def golf(r):
    """Count unit grid tiles covered by a circle of radius *r* centered on a
    grid corner, scanning one quadrant column by column.

    Returns a tuple ``(full, partial)`` (as floats, times four for all four
    quadrants): tiles completely inside the circle, and tiles the circle's
    boundary passes through.
    """
    full = partial = 0
    cols = int(-(-r // 1))      # ceil(r): number of unit columns to scan
    prev_ceil = cols            # ceil of the previous column's arc height
    for k in range(1, cols + 1):
        height = max(r * r - k * k, 0) ** 0.5   # arc height at x = k
        whole = height // 1                     # rows fully below the arc
        full += whole
        partial += prev_ceil - whole            # rows the arc cuts through
        prev_ceil = -(-height // 1)             # ceil(height) for next column
    return full * 4, partial * 4
if __name__ == '__main__':
    # These asserts are used only for self-checking and are not necessary
    # for auto-testing.
    assert isinstance(golf(1), (list, tuple))
    assert list(golf(2)) == [4, 12]
    assert list(golf(3)) == [16, 20]
    assert list(golf(2.1)) == [4, 20]
    assert list(golf(2.5)) == [12, 20]
    print("All done? Earn rewards by using the 'Check' button!")
print("All done? Earn rewards by using the 'Check' button!") | edwardzhu/checkio-solution | EmpireOfCode/common/Crystalite Storage/countingTiles.py | Python | mit | 505 |
from __future__ import division, print_function, unicode_literals, absolute_import
import setuptools
# Calendar-based release version (YYYY.M.D).
version = '2018.1.12'

# Third-party runtime requirements.
dependencies = ['sarge', 'ordered-namespace']

# FIX: ``version`` was defined but never passed to setup(), so built
# distributions carried no version metadata.
# NOTE(review): setup() also lacks a ``name`` argument; the intended package
# name cannot be determined from this file, so it is left for the maintainer.
setuptools.setup(version=version,
                 install_requires=dependencies,
                 include_package_data=True,
                 packages=setuptools.find_packages())
| Who8MyLunch/NutMeg | setup.py | Python | mit | 355 |
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case1_2_1(Case):
    """Conformance case 1.2.1: echo of an empty (zero-length) binary message."""

    DESCRIPTION = """Send binary message with payload 0."""

    EXPECTATION = """Receive echo'ed binary message (with empty payload). Clean close with normal code."""

    def onOpen(self):
        # A zero-length binary payload.
        empty_payload = ""
        # The testee must echo the empty binary message back verbatim.
        self.expected[Case.OK] = [("message", empty_payload, True)]
        # We initiate the close and require a clean close with NORMAL code.
        self.expectedClose = {
            "closedByMe": True,
            "closeCode": [self.p.CLOSE_STATUS_CODE_NORMAL],
            "requireClean": True,
        }
        # Opcode 2 == binary frame; give the peer 1 second before killing.
        self.p.sendFrame(opcode=2, payload=empty_payload)
        self.p.killAfter(1)
| crossbario/autobahn-testsuite | autobahntestsuite/autobahntestsuite/case/case1_2_1.py | Python | apache-2.0 | 1,321 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
# from goose import Goose
from pagecrawler.model_article import Articles
class SaveArticle(object):
    """Pipeline stage meant to persist crawled articles.

    Persistence through the ``Articles`` model is currently disabled, so the
    item is only validated for the expected fields and passed through.
    """

    # Fields every article item is expected to carry.
    REQUIRED_FIELDS = ('title', 'link', 'summary', 'keywords', 'text')

    def process_item(self, item, spider):
        """Validate and pass the crawled article to the next pipeline stage.

        :param item: the scraped article; must provide the fields listed in
            :attr:`REQUIRED_FIELDS`.
        :param spider: the spider that produced the item (unused).
        :return: the item, unchanged.
        :raises DropItem: if the item is missing any expected field (instead
            of the bare KeyError the previous dead-code field extraction
            would have raised).
        """
        for field in self.REQUIRED_FIELDS:
            if field not in item:
                raise DropItem("Article item missing field %r: %s" % (field, item))
        # TODO(review): re-enable persistence once the model is ready:
        # Articles.create(title=item['title'], link=item['link'],
        #                 summary=item['summary'], keywords=item['keywords'],
        #                 text=item['text'])
        return item
# class DuplicatesPipeline(object):
#
# def __init__(self):
# self.links_seen = set()
#
# def process_item(self, item, spider):
# if item['link'] in self.links_seen:
# raise DropItem("Duplicate item found: %s" % item)
# else:
# self.links_seen.add(item['link'])
# return item
#
# class DropSelfPostsPipeline(object):
# def process_item(self, item, spider):
# match = re.match("item\?id=[0-9]+", item['link'])
# if match:
# raise DropItem("Excluded self-post: " + item['link'])
#
# return item
#
# class ExtractArticlePipeline(object):
# def __init__(self):
# self.goose = Goose()
#
# def process_item(self, item, spider):
# try:
# article = self.goose.extract(url=item['link'])
# item["text"] = article.cleaned_text
#
# except IndexError:
# raise DropItem("Failed to extract article text from: " + item['link'])
#
# return item
| mayawang/capstone_fetchbot | crawler/pagecrawler/pagecrawler/pipelines.py | Python | mit | 1,732 |
import pytest
from pytest import raises
from viper import compiler
from viper.exceptions import TypeMismatchException
fail_list = [
"""
def foo(inp: bytes <= 10) -> bytes <= 2:
return slice(inp, start=2, len=3)
""",
"""
def foo(inp: num) -> bytes <= 3:
return slice(inp, start=2, len=3)
""",
"""
def foo(inp: bytes <= 10) -> bytes <= 3:
return slice(inp, start=4.0, len=3)
"""
]
@pytest.mark.parametrize('bad_code', fail_list)
def test_slice_fail(bad_code):
    # Each snippet misuses slice(), so compiling it must abort with a
    # TypeMismatchException rather than produce output.
    with pytest.raises(TypeMismatchException):
        compiler.compile(bad_code)
valid_list = [
"""
def foo(inp: bytes <= 10) -> bytes <= 3:
return slice(inp, start=2, len=3)
""",
"""
def foo(inp: bytes <= 10) -> bytes <= 4:
return slice(inp, start=2, len=3)
""",
"""
def foo() -> bytes <= 10:
return slice("badmintonzzz", start=1, len=10)
"""
]
@pytest.mark.parametrize('good_code', valid_list)
def test_slice_success(good_code):
    # Valid slice() usage must compile to a non-None artifact.
    compiled = compiler.compile(good_code)
    assert compiled is not None
| NedYork/viper | tests/parser/syntax/test_slice.py | Python | mit | 1,020 |
# -*- coding: utf-8 -*-
from django.utils import feedgenerator
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404, HttpResponsePermanentRedirect
from django.utils.cache import patch_vary_headers
from django.template import Context, RequestContext, loader
from django.views.generic import RedirectView
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from django.utils.encoding import smart_unicode
from django.views.decorators.http import condition
from feedjack import models, fjlib, fjcache
import itertools as it, operator as op, functools as ft
from datetime import datetime
from collections import defaultdict
from urlparse import urlparse
def ctx_get(ctx, k):
    '''Return ctx[k], calling it first when it is a callable (lazy value).

    Template contexts may store deferred values as zero-argument callables;
    this helper transparently resolves them.'''
    v = ctx[k]
    # Call the already-fetched value instead of looking up ctx[k] a second
    # time, as the original did.
    if callable(v): v = v()
    return v
def cache_etag(request, *argz, **kwz):
    '''Produce an ETag value for a cached page.
    Intended for usage in conditional views (@condition decorator).'''
    cached, site, cachekey = kwz.get('_view_data') or initview(request)
    if not cached:
        return None
    # ETag is a hash over the site id (or 'x' when unknown), the cache key
    # and the cached response's timestamp.
    site_label = site.id if site else 'x'
    stamp = cached[1].strftime('%Y-%m-%d %H:%M:%S%z')
    return fjcache.str2md5('{0}--{1}--{2}'.format(site_label, cachekey, stamp))
def cache_last_modified(request, *argz, **kwz):
    '''Last modification date for a cached page.
    Intended for usage in conditional views (@condition decorator).'''
    cached, site, cachekey = kwz.get('_view_data') or initview(request)
    # The cached tuple is (response, last_modified_datetime).
    return cached[1] if cached else None
def initview(request, response_cache=True):
    '''Retrieves the basic data needed by all feeds (host, feeds, etc).

    Returns a 3-tuple of:
    1. A valid cached (response, last_modified) pair, or None on cache miss
    2. The current site object (None when a cached response is returned)
    3. The cache key for this request

    NOTE(review): the original docstring claimed a 5-tuple including
    subscribers and feed ids; the code returns only the three values above.
    '''
    # Normalize host and path from the request metadata (REQUEST_URI falls
    # back to PATH_INFO when the former is absent).
    http_host, path_info = ( smart_unicode(part.strip('/')) for part in
        [ request.META['HTTP_HOST'],
            request.META.get('REQUEST_URI', request.META.get('PATH_INFO', '/')) ] )
    query_string = request.META['QUERY_STRING']
    url = '{0}/{1}'.format(http_host, path_info)
    # Cache key is path + query string (host is handled via the hostdict).
    cachekey = u'{0}?{1}'.format(*it.imap(smart_unicode, (path_info, query_string)))
    hostdict = fjcache.hostcache_get() or dict()
    site_id = hostdict[url] if url in hostdict else None
    # Fast path: known site and a cached response for this exact request.
    if site_id and response_cache:
        response = fjcache.cache_get(site_id, cachekey)
        if response: return response, None, cachekey
    if site_id: site = models.Site.objects.get(pk=site_id)
    else: # match site from all of them
        sites = list(models.Site.objects.all())
        if not sites:
            # Somebody is requesting something, but the user
            # didn't create a site yet. Creating a default one...
            site = models.Site(
                name='Default Feedjack Site/Planet',
                url=request.build_absolute_uri(request.path),
                title='Feedjack Site Title',
                description='Feedjack Site Description.'
                    ' Please change this in the admin interface.',
                template='bootstrap' )
            site.save()
        else:
            # Select the most matching site possible,
            # preferring "default" when everything else is equal
            results = defaultdict(list)
            for site in sites:
                relevance, site_url = 0, urlparse(site.url)
                if site_url.netloc == http_host: relevance += 10 # host matches
                if path_info.startswith(site_url.path.strip('/')): relevance += 10 # path matches
                if site.default_site: relevance += 5 # marked as "default"
                results[relevance].append((site_url, site))
            # Pick the first site in the highest non-empty relevance bucket.
            for relevance in sorted(results, reverse=True):
                try: site_url, site = results[relevance][0]
                except IndexError: pass
                else: break
            if site_url.netloc != http_host: # redirect to proper site hostname
                response = HttpResponsePermanentRedirect(
                    'http://{0}/{1}{2}'.format( site_url.netloc, path_info,
                        '?{0}'.format(query_string) if query_string.strip() else '') )
                return (response, timezone.now()), None, cachekey
    # Remember the url -> site mapping for subsequent requests.
    hostdict[url] = site_id = site.id
    fjcache.hostcache_set(hostdict)
    # Re-check the response cache now that the site id is known.
    if response_cache:
        response = fjcache.cache_get(site_id, cachekey)
        if response: return response, None, cachekey
    return None, site, cachekey
class RedirectForSite(RedirectView):
    '''Simple permanent redirect prefixed with the current site URL,
    otherwise similar to RedirectView; serves a cached response if any.'''

    def get(self, request, *args, **kwz):
        cached, site, cachekey = initview(request)
        if cached:
            return cached[0]
        target = site.url + self.url
        return HttpResponsePermanentRedirect(target)
def blogroll(request, btype):
    '''Render a blogroll of the given type ("foaf" or "opml") as XML,
    serving from and populating the per-site response cache.'''
    cached, site, cachekey = initview(request)
    if cached:
        return cached[0]
    ctx = dict()
    fjlib.get_extra_context(site, ctx)
    ctx = Context(ctx)
    xml_template = loader.get_template('feedjack/{0}.xml'.format(btype))
    body = xml_template.render(ctx)
    response = HttpResponse(body, content_type='text/xml; charset=utf-8')
    # Responses differ per host, so make caches vary on it.
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(site, cachekey, (response, ctx_get(ctx, 'last_modified')))
    return response
def foaf(request):
    '''Serve the blogroll in FOAF format.'''
    return blogroll(request, btype='foaf')
def opml(request):
    '''Serve the blogroll in OPML format.'''
    return blogroll(request, btype='opml')
def buildfeed(request, feedclass, **criterias):
    '''Entry point for feed views: wraps _buildfeed with HTTP conditional
    handling (ETag / Last-Modified) computed from the shared view data.'''
    view_data = initview(request)

    def bind(func):
        # Pre-bind the resolved view data and filter criteria onto the
        # etag/last-modified helpers expected by @condition.
        return ft.partial(func, _view_data=view_data, **criterias)

    decorated = condition(
        etag_func=bind(cache_etag),
        last_modified_func=bind(cache_last_modified))(_buildfeed)
    return decorated(request, feedclass, view_data, **criterias)
def _buildfeed(request, feedclass, view_data, **criterias):
    '''Build and serve a syndication feed (RSS/Atom via feedclass) for the
    current site, optionally restricted by the given filter criteria.'''
    # TODO: quite a mess, can't it be handled with a default feed-views?
    response, site, cachekey = view_data
    if response: return response[0]
    feed_title = site.title
    # When a single feed is requested, prefix its title; unknown ids -> 404.
    if criterias.get('feed_id'):
        try:
            feed_title = u'{0} - {1}'.format(
                models.Feed.objects.get(id=criterias['feed_id']).title, feed_title )
        except ObjectDoesNotExist: raise Http404 # no such feed
    # Only the first page of posts goes into the feed.
    object_list = fjlib.get_page(site, page=1, **criterias).object_list
    feed = feedclass( title=feed_title, link=site.url,
        description=site.description, feed_url=u'{0}/{1}'.format(site.url, '/feed/rss/') )
    # Track the newest post update time; epoch start as the initial floor.
    last_modified = datetime(1970, 1, 1, 0, 0, 0, 0, timezone.utc)
    for post in object_list:
        # Enclosures are not created here, as these have somewhat unpredictable format,
        # and don't always fit Django's url+length+type style - href+title links, for instance.
        feed.add_item(
            title = u'{0}: {1}'.format(post.feed.name, post.title),
            link = post.link,
            description = fjlib.html_cleaner(post.content),
            author_email = post.author_email,
            author_name = post.author,
            pubdate = post.date_created,
            updateddate = post.date_modified,
            unique_id = post.link,
            categories = [tag.name for tag in post.tags.all()] )
        if post.date_updated > last_modified: last_modified = post.date_updated
    response = HttpResponse(content_type=feed.mime_type)
    # Per-host caching
    patch_vary_headers(response, ['Host'])
    feed.write(response, 'utf-8')
    if site.use_internal_cache:
        fjcache.cache_set(
            site, cachekey, (response, last_modified) )
    return response
def rssfeed(request, **criterias):
    '''Serve the aggregated feed in RSS 2.01 format.'''
    feed_cls = feedgenerator.Rss201rev2Feed
    return buildfeed(request, feed_cls, **criterias)
def atomfeed(request, **criterias):
    '''Serve the aggregated feed in Atom 1.0 format.'''
    feed_cls = feedgenerator.Atom1Feed
    return buildfeed(request, feed_cls, **criterias)
def mainview(request, **criterias):
    '''Entry point for all page requests: wraps _mainview with HTTP
    conditional handling (ETag / Last-Modified) from the shared view data.'''
    view_data = initview(request)

    def bind(func):
        # Pre-bind the resolved view data and filter criteria onto the
        # etag/last-modified helpers expected by @condition.
        return ft.partial(func, _view_data=view_data, **criterias)

    decorated = condition(
        etag_func=bind(cache_etag),
        last_modified_func=bind(cache_last_modified))(_mainview)
    return decorated(request, view_data, **criterias)
def _mainview(request, view_data, **criterias):
    '''Render a site page using the site's template, serving a cached
    response when one exists and caching the fresh render otherwise.'''
    cached, site, cachekey = view_data
    if cached:
        return cached[0]
    ctx = fjlib.page_context(request, site, **criterias)
    response = render_to_response(
        u'feedjack/{0}/post_list.html'.format(site.template),
        ctx, context_instance=RequestContext(request) )
    # per host caching, in case the cache middleware is enabled
    patch_vary_headers(response, ['Host'])
    if site.use_internal_cache:
        fjcache.cache_set( site, cachekey,
            (response, ctx_get(ctx, 'last_modified')) )
    return response
| mk-fg/feedjack | feedjack/views.py | Python | bsd-3-clause | 8,247 |
from hubcheck.pageobjects.po_groups_wiki_base_page import GroupsWikiBasePage
class GroupsWikiArticlePage(GroupsWikiBasePage):
    """groups wiki article page

    Page object for a single wiki article inside a group; every public
    method below simply delegates to the hub-specific article component
    loaded dynamically in __init__.
    """
    def __init__(self,browser,catalog,groupid,articleid):
        super(GroupsWikiArticlePage,self).__init__(browser,catalog,groupid,articleid)
        # self.path is supplied by GroupsWikiBasePage
        # load hub's classes
        # NOTE(review): load_class resolves hub-specific overrides at runtime;
        # the concrete classes are defined elsewhere in the catalog.
        GroupsWikiArticlePage_Locators = self.load_class('GroupsWikiArticlePage_Locators')
        GroupsWikiArticle = self.load_class('GroupsWikiArticle')
        # update this object's locator
        self.locators.update(GroupsWikiArticlePage_Locators.locators)
        # setup page object's components
        self.article = GroupsWikiArticle(self,{'base':'article'})
    def get_tags(self):
        # Return the article's tag names (delegates to the article component).
        return self.article.get_tags()
    def click_tag(self,tagname):
        # Click the named tag link on the article.
        return self.article.click_tag(tagname)
    def get_page_text(self):
        # Return the article body text.
        return self.article.get_page_text()
    def get_authors(self):
        # Return the article's author list.
        return self.article.get_authors()
    def is_created(self):
        # Whether the article page already exists on the hub.
        return self.article.is_created()
    def create_page(self):
        # Create the article page.
        return self.article.create_page()
    def download_attachment(self,attachment):
        # Download the named attachment from the article.
        return self.article.download_attachment(attachment)
    def is_file_attached(self,filepath):
        # Whether the given file is attached to the article.
        return self.article.is_file_attached(filepath)
class GroupsWikiArticlePage_Locators_Base(object):
    """Default element locators for GroupsWikiArticlePage."""

    # Maps component names to their locator strings.
    locators = {'article': "css=#page_content"}
| codedsk/hubcheck | hubcheck/pageobjects/po_groups_wiki_article_page.py | Python | mit | 1,583 |
# -*- coding: utf-8 -*-
# Kodi Addon: Youtube Library
# Copyright 2015 Sleuteltje
#
# This file is part of plugin.video.youtubelibrary
# Description: Functions that will handle updating of playlists in the library (required functions for the service)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon
from resources.lib import vars
from resources.lib import dev
from resources.lib import m_xml
from resources.lib import generators
from resources.lib import ytube
from resources.lib import play
#Outputs the updatevery setting in normal
def updateevery_normal(t, time, scansince, update_gmt):
    """Return the datetime at which the playlist was last due for an update.

    t          -- frequency setting, e.g. 'every 12 hours' or 'every monday'
    time       -- 'HH:MM' time-of-day string for the daily/weekly settings,
                  or None (defaults to 23:59)
    scansince  -- datetime of the previous successful scan (used by the
                  'every N hours' settings)
    update_gmt -- timezone offset in hours; None, 98 and 99 all mean
                  "no offset configured"

    The caller compares the returned datetime against the last scan time to
    decide whether the playlist must be refreshed now.

    Fix: the original compared integers with 'is not' (e.g. 'weekday is not
    0', 'update_gmt is not 99'), which only works by accident through
    CPython's small-int cache and raises a SyntaxWarning on modern Python;
    these are now proper value comparisons.
    """
    import datetime
    if time is None:
        hour = 23
        minute = 59
    else:
        # 'time' is expected in 'HH:MM' form.
        hour = int(time[:2])
        minute = int(time[3:5])
    # Fixed-interval settings: the next due moment is simply the previous
    # scan plus the interval.
    if t == 'every 4 hours':
        return scansince + datetime.timedelta(hours=4)
    if t == 'every 8 hours':
        return scansince + datetime.timedelta(hours=8)
    if t == 'every 12 hours':
        dev.log('12 hours since last scan is: '+str(scansince + datetime.timedelta(hours=12)))
        return scansince + datetime.timedelta(hours=12)
    if t == 'every 24 hours':
        return scansince + datetime.timedelta(hours=24)
    if t == 'every 168 hours':
        return scansince + datetime.timedelta(hours=168)
    dev.log('t is '+t)
    today = datetime.datetime.now()
    weekday = (today.weekday() + 1) % 7 # MON = 0, SUN = 6 -> SUN = 0 .. SAT = 6
    y = today
    # Weekly settings: when today is not the configured weekday, step back to
    # a previous occurrence of it.
    # NOTE(review): 'today - timedelta(7+weekday-N)' can jump back more than
    # one full week for mid-week days -- confirm this is the intended
    # behaviour before changing it; the arithmetic is kept as-is here.
    if t == 'every sunday':
        if weekday != 0:
            y = today - datetime.timedelta(7+weekday)
    if t == 'every monday':
        if weekday != 1:
            y = today - datetime.timedelta(7+weekday-1)
    if t == 'every tuesday':
        if weekday != 2:
            y = today - datetime.timedelta(7+weekday-2)
    if t == 'every wednesday':
        if weekday != 3:
            y = today - datetime.timedelta(7+weekday-3)
    if t == 'every thursday':
        if weekday != 4:
            y = today - datetime.timedelta(7+weekday-4)
    if t == 'every friday':
        if weekday != 5:
            y = today - datetime.timedelta(7+weekday-5)
    if t == 'every saturday':
        if weekday != 6:
            y = today - datetime.timedelta(7+weekday-6)
    if t == 'every day':
        #See if the playlist has been scanned since yesterday
        y = today - datetime.timedelta(days=1)
        y = y.replace(hour=hour, minute=minute)
        if dev.timedelta_total_seconds(y-scansince) < 0:
            dev.log('The time of yesterday is already scanned, so we will send the date&time of today')
            y = today
    # Pin the result to the configured time of day.
    y = y.replace(hour=hour, minute=minute)
    dev.log(t+' ago is: '+str(y))
    if update_gmt not in (None, 99, 98):
        #Offset the time according to the current system timezone and which timezone it should be updated to
        y = y + datetime.timedelta(hours = update_gmt)
        dev.log('with gmt offset ('+str(update_gmt)+'): '+str(y))
    return y
#Writes the nfo & strm files for all playlists
def update_playlists(type=''):
#xbmcgui.Dialog().notification(vars.__addonname__, 'Updating Youtube '+dev.typeName(type)+' Playlists...', vars.__icon__, 3000)
dev.log('Updating All '+type+' Youtube Playlists')
#scan_interval = 'service_interval'
#if type == 'musicvideo':
# scan_interval = 'service_interval_musicvideo'
m_xml.xml_get(type=type)
pl = m_xml.document.findall('playlists/playlist')
if pl is not None:
for child in pl: #Loop through each playlist
if child.attrib['enabled'] == 'yes': #Playlist has to be enabled
dev.log('SERVICE: Checking if playlist '+child.find('title').text+' should be updated...')
#Grab the settings from this playlist
#Loads the correct information from the settings.xml
#settings = m_xml.xml_get_elem('playlists/playlist', 'playlist', {'id': id}, type=type)
#Get the current GMT offset and consider this when updating
import datetime
import time
#Grab when this playlist should be updated
updateat = '23:59'
update_gmt = 99
if child.find('updateevery') is None:
dev.log('NOTICE: Playlist should have an instruction when to be updated!. Asssumed default (12 hours) for now', 1)
updateevery = 'every 12 hours'
else:
updateevery = child.find('updateevery').text
if child.find('updateat') is not None:
updateat = child.find('updateat').text
if child.find('update_gmt') is not None:
if child.find('update_gmt').text is not '':
update_gmt = dev.timezones(child.find('update_gmt').text)
#Check when this playlist was last updated, and if it is time for this playlist to be updated again
try:
s = child.attrib['scansince']
scansince = datetime.datetime.strptime(s,"%d/%m/%Y %H:%M:%S")
except:
scansince = datetime.datetime.now() - datetime.timedelta(days=3*365)
timenow = datetime.datetime.now()
dev.log('Playlist last scanned on: '+str(scansince)+', now: '+str(timenow), 1)
if update_gmt is not None and update_gmt is not 99 and update_gmt is not 98: #If update_gmt is set to any other then the own timezone, consider this when calculating when the playlist should update
offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
offset = offset / 60 / 60 * -1
timenow = timenow + datetime.timedelta(hours = offset)
scansince = scansince + datetime.timedelta(hours = offset)
dev.log('UCT timecorrection (because update_gmt is set): '+str(scansince)+', now: '+str(timenow), 1)
#diff = (timenow-scansince).total_seconds()
#diff = dev.timedelta_total_seconds(timenow-scansince)
#dev.log('Difference from last scan is '+str(diff))
#Get when this playlist should have last been updated
should_update = updateevery_normal(updateevery, updateat, scansince, update_gmt)
#dev.log('The difference between should_update & scansince: '+str(dev.timedelta_total_seconds(should_update-scansince)))
if dev.timedelta_total_seconds(should_update-scansince) > 0:
#The last scan was earlier than when this playlist should have last been updated!
if dev.timedelta_total_seconds(timenow-should_update) > 0:
#The time for updating lies in the past, so update this playlist
dev.log('This playlist should be updated')
if xbmcaddon.Addon("plugin.video.youtubelibrary").getSetting('notify_update'):
xbmcgui.Dialog().notification(vars.__addonname__, 'Updating Playlist: '+child.find('title').text+'... ', vars.__icon__, 3000)
else:
dev.log('Its not time yet to update this playlist')
continue
else:
dev.log('Last update was after the time this playlist should have been updated')
continue
#WITH OLD SCAN INTERVAL:
#if diff < (int(vars.__settings__.getSetting(scan_interval)) * 60 * 60):
# dev.log('Difference '+str(diff)+' was not enough, '+str(int(vars.__settings__.getSetting("service_interval")) * 60 * 60)+' seconds needed. This Playlist will not be updated now.')
# continue
update_playlist(child.attrib['id'], type=type) #Update the nfo & strm files for this playlist
if xbmcaddon.Addon("plugin.video.youtubelibrary").getSetting('notify_update'):
xbmcgui.Dialog().notification(vars.__addonname__, 'Done updating Playlist: '+child.find('title').text+'! ', vars.__icon__, 3000)
#xbmcgui.Dialog().notification(vars.__addonname__, 'Done Updating Youtube '+dev.typeName(type)+' Playlists', vars.__icon__, 3000)
#Should we also update the video library?
if vars.update_videolibrary == "true" and type=='':
update_dir = vars.tv_folder_path
if type == 'musicvideo':
update_dir = vars.musicvideo_folder_path
elif type == 'movies':
update_dir = vars.movies_folder_path
dev.log('Updating video library is enabled. Updating '+type+' librarys directory %s' % update_dir, True)
xbmc.executebuiltin('xbmc.updatelibrary(Video,'+update_dir+')')
#Writes the nfo & strm files for the given playlist
def update_playlist(id, type=''):
    """Write the nfo & strm files for one playlist.

    id   -- playlist id as stored in the settings xml
    type -- '' / 'tv', 'musicvideo' or 'movies'
    Returns True on success, False when the playlist is unknown or the
    per-video update failed.
    """
    settings = m_xml.xml_get_elem('playlists/playlist', 'playlist', {'id': id}, type=type) #Grab the xml settings for this playlist
    if settings is None:
        dev.log('Could not find playlist '+id+' in the '+dev.typeXml(type)+' file', True)
        return False
    else:
        dev.log('Updating playlist %s (Id: %s)' % (settings.find('title').text.encode('utf-8'), id))
    #Check in which folder the show should be added
    folder = settings.find('overwritefolder').text
    if folder is None or folder == '':
        folder = dev.legal_filename(settings.find('title').text) #Overwrite folder is not set in settings.xml, so set the folder to the title of the show
    else:
        folder = dev.legal_filename(folder)
    #Create the tvshow.nfo
    writenfo = settings.find('writenfo').text
    if writenfo != 'no':
        # Show-level metadata: tvshow.nfo for tv, artist nfo for musicvideos.
        if type == '' or type == 'tv':
            generators.write_tvshow_nfo(folder, settings)
        elif type == 'musicvideo':
            generators.write_artist_nfo(folder, settings)
    if update_playlist_vids(id, folder, settings, type=type) == False:
        return False #something failed while updating the videos of the playlist
    #Save the time this playlist got updated in the xml
    import datetime
    d=datetime.datetime.now()
    m_xml.xml_update_playlist_attr(id, 'scansince', d.strftime("%d/%m/%Y %H:%M:%S"), type=type)
    return True
#Updates the videos of a playlist
#the id of the playlist
#the folder where the strm & nfo files should go
#the elementtree element containing the playlist xml settings
#the id of the fist videoId, so it can save that one in the xml if it parsed all videos. Since the newest is the video it should be stopping the next time.
# NOTE(review): the default 'type=type' binds Python's builtin type() at
# definition time; callers appear to always pass type explicitly -- confirm.
def update_playlist_vids(id, folder, settings, nextpage=False, firstvid = False, type=type):
    """Fetch the videos of one playlist from YouTube, apply the playlist's
    filters and write/download a strm+nfo pair per accepted video.

    Returns the playlist's total video count on success, False when the
    YouTube request failed.
    """
    onlygrab = 100 #grab max 100 pages by default
    ##Get all Youtube Videos belonging to this playlist
    #resp = ytube.vids_by_playlist(id, nextpage) #Grab the videos belonging to this playlist
    #vids = resp.get("items", [])
    if settings.find('onlygrab') is not None:
        onlygrab = int(settings.find('onlygrab').text) / 50 #Get the maximum number of pages we should gather
    all_vids = []
    duration = {}
    #First we are going to collect all youtube videos until we come across a list containing a videoId we already got
    uptodate = False
    times = 0 #keep track how many times we grabbed yt videos
    reverse = '0'
    if settings.find('reverse') is not None:
        reverse = settings.find('reverse').text
    total_last_time = settings.find('lastvideoId').text
    if total_last_time == '' or total_last_time == None:
        total_last_time = '0'
    total_last_time = int(total_last_time)
    # Page through the playlist 50 videos at a time until we hit a known
    # video (normal mode), run out of pages, or reach the page limit.
    while uptodate == False:
        all_vidids = []
        resp = ytube.vids_by_playlist(id, nextpage) #Grab the videos belonging to this playlist
        if resp == False:
            return False #Something failed while retrieving the playlist
        amount = int(resp['pageInfo']['totalResults'])
        vids = resp.get("items", [])
        # Reversed playlists track the total count instead of the last id.
        if reverse == '1' and times == 0:
            m_xml.xml_update_playlist_setting(id, 'lastvideoId', str(amount), type=type) #Update the amount of videos to the current one
            if total_last_time < amount: #There are more videos in the playlist now, so time to update
                dev.log('Reversed is enabled and there are more videos ('+str(amount)+' vs '+str(total_last_time)+') then last time.')
            else:
                dev.log('Reversed is enabled, but there are no more videos ('+str(amount)+' vs '+str(total_last_time)+') then last time.')
                return amount #No more videos then last time, so leave it at this
            if amount > 5000:
                dev.log('This playlist is way to big (more then 5000 videos) to be reversed')
                return amount
        if onlygrab <= times:
            #We have grabbed as many videos as allowed by the setting onlygrab
            uptodate = True
            break#quit updating the list
        for vid in vids:
            if m_xml.episode_exists(id, vid['contentDetails']['videoId'], type=type):
                if reverse != '1':
                    #This list contains a videoId we already got, assume we are up to date
                    uptodate = True
                continue #continue to the next video in the list
            # Skip placeholders for private/deleted/unavailable videos.
            if vid['snippet']['title'].lower() != 'private video' and vid['snippet']['title'].lower() != 'deleted video' and vid['snippet']['description'].lower() != 'this video is unavailable.':
                all_vidids.append(vid['contentDetails']['videoId']) #Collect all videoids in one list
                all_vids.append(vid) #Append this video to the all_vids list
        ##Grab the duration of the videos. We will need it for the minlength and maxlength filters, and for the duration tag in the .nfo file
        #We are gonna grab the duration of all 50 videos, saving on youtube api calls.
        dev.log('Grabbing duration of videos')
        duration.update(ytube.get_duration_vids(all_vidids)) #Get all the duration of the videos
        #If there is a nextPagetoken there are more videos to parse, call this function again so it can parse them to
        if 'nextPageToken' in resp:
            if uptodate is not True:
                nextpage = resp['nextPageToken']
        else:
            uptodate = True #Since there are no more pages, we are uptodate
        #update_playlist_vids(id, folder, settings, resp['nextPageToken'], firstvid)
        times = times+1
    dev.log('')
    dev.log('')
    dev.log('( ._.)~~~~~~~~~~ DONE GRABBING VIDS FROM YOUTUBE FOR :'+settings.find('title').text+' ~~~~~~~~~~(._. )')
    dev.log('')
    dev.log('')
    ##Grab settings from the settings.xml for this playlist
    minlength = settings.find('minlength').text
    maxlength = settings.find('maxlength').text
    # NOTE(review): "is not ''"/"is not '00:00'" are identity comparisons on
    # string literals -- fragile; they happen to work on CPython via string
    # interning but should be '!=' comparisons.
    if minlength is not '' and minlength is not None and minlength is not '00:00' and minlength is not '0:00':
        #Recalculate minlength
        dev.log('minlength is turned on: '+minlength)
        minlength = ytube.hms_to_sec(minlength)
        dev.log('minlength in seconds: '+str(minlength))
    else:
        minlength = None
    if maxlength is not '' and maxlength is not None and maxlength is not '00:00' and maxlength is not '0:00':
        #Recalculate maxlength
        dev.log('maxlength is turned on: '+maxlength)
        maxlength = ytube.hms_to_sec(maxlength)
        dev.log('maxlength in seconds: '+str(maxlength))
    else:
        maxlength = None
    # NOTE(review): when reverse=='1' the list is reversed here AND iterated
    # with reversed() below, which restores the original order -- confirm
    # this double reversal is intended.
    if reverse == '1':
        all_vids = list(reversed(all_vids))
    ##Loop through all vids and check with filters if we should add it
    for vid in reversed(all_vids):
        dev.log('')
        #Check if we already had this video, if so we should skip it
        if m_xml.episode_exists(id, vid['contentDetails']['videoId'], type=type):
            dev.log('Episode '+vid['contentDetails']['videoId']+' is already scanned into the library')
            continue
        ##Check if the filters in the settings prevent this video from being added
        #Check if the word has been found, cause if not, we should not add this video to the library
        if onlyinclude(vid, settings) == False:
            continue #Skip this video
        #Check if the word has been found, cause if so, we should not add this video to the library
        if excludewords(vid, settings) == False:
            continue #Skip this video
        #See if this video is smaller or larger than the min-/maxlength specified in the settings
        if minlength is not None:
            if int(minlength) > int(duration[vid['contentDetails']['videoId']]):
                dev.log('Does not match minlength ('+str(minlength)+'): '+vid['snippet']['title']+' (id: '+vid['contentDetails']['videoId']+')')
                continue #Skip this video
            dev.log('Matches minlength: '+vid['snippet']['title']+' (id: '+vid['contentDetails']['videoId']+')')
        if maxlength is not None:
            if int(maxlength) < int(duration[vid['contentDetails']['videoId']]):
                dev.log('Does not match maxlength: '+vid['snippet']['title']+' (id: '+vid['contentDetails']['videoId']+')')
                continue #Skip this video
        #dev.log('TEST duration '+str(duration[vid['contentDetails']['videoId']]))
        downloadSuccess = True
        if type == '' or type == 'tv':
            #Grab the correct season and episode number from this vid
            season, episode, vid = generators.episode_season(vid, settings, resp['pageInfo']['totalResults'], id)
            filename = 's'+season+'e'+episode+' - '+vid['snippet']['title'] #Create the filename for the .strm & .nfo file
            # Either download the actual video or write a .strm pointer.
            if settings.find('download_videos') != None and settings.find('download_videos').text != 'off':
                downloadSuccess = play.downloadYoutubeVid(filename, folder, vid['contentDetails']['videoId'], settings, season=season) #Download the video for episode
                if downloadSuccess == False:
                    dev.log('Skip this video, since the download has failed')
                    continue #Skip this video, since it should have downloaded and failed
            else:
                generators.write_strm(filename, folder, vid['contentDetails']['videoId'], show=settings.find('title').text, episode=episode, season=season) #Write the strm file for this episode
            if settings.find('writenfo').text != 'no':
                generators.write_nfo(filename, folder, vid, settings, season = season, episode = episode, duration = duration[vid['contentDetails']['videoId']]) #Write the nfo file for this episode
        ##Musicvideo
        elif type == 'musicvideo':
            #Grab the musicvideo information from the generator
            musicvideo_info = generators.get_songinfo(vid, settings, duration = duration[vid['contentDetails']['videoId']])
            if musicvideo_info == False:
                continue #Skip this video, it did not make it past the musicvideo filters
            filename = vid['snippet']['title'] #Create the filename for the .strm & .nfo file
            if settings.find('download_videos') != None and settings.find('download_videos').text != 'off':
                downloadSuccess = play.downloadYoutubeVid(filename, folder, vid['contentDetails']['videoId'], settings, type='musicvideo') #Download the video for episode
                if downloadSuccess == False:
                    dev.log('Skip this video, since the download has failed')
                    continue #Skip this video, since it should have downloaded and failed
            else:
                generators.write_strm(filename, folder, vid['contentDetails']['videoId'], artist=musicvideo_info['artist'], song=musicvideo_info['title'], album=musicvideo_info['album'], year=musicvideo_info['year'], type=type) #Write the strm file for this episode
            if settings.find('writenfo').text != 'no':
                generators.write_nfo(filename, folder, vid, settings, musicvideo=musicvideo_info, duration = duration[vid['contentDetails']['videoId']], type=type) #Write the nfo file for this episode
            # Musicvideos are grouped by album, falling back to the artist.
            season = musicvideo_info['album']
            if season == '':
                season = musicvideo_info['artist']
        ##Movies
        elif type == 'movies':
            #Prepare the title as best as we can for the imdb search and stuff
            #title = vid['snippet']['title']
            #description = vid['snippet']['description']
            #title = removetitle(title, settings.find('removetitle').text)
            #title = striptitle(title, settings.find('striptitle').text)
            #if settings.find('smart_search') == '2':
            #title, description = generators.smart_search(title, description, vid, settings)
            filename = vid['snippet']['title'] #Create the filename for the .strm & .nfo file
            if settings.find('writenfo').text != 'no':
                create_strm = generators.write_nfo(filename, folder, vid, settings, duration = duration[vid['contentDetails']['videoId']], type=type) #Write the nfo file for this episode
                if create_strm is False:
                    m_xml.playlist_add_episode(id, '1', vid['contentDetails']['videoId'], type=type) #Add it to the episode list, so it doesnt get picked up again
                    continue #Skip this video, it did not make it past the filters
            if settings.find('download_videos') != None and settings.find('download_videos').text != 'off':
                downloadSuccess = play.downloadYoutubeVid(filename, folder, vid['contentDetails']['videoId'], settings, type='movies') #Download the video for episode
                if downloadSuccess == False:
                    dev.log('Skip this video, since the download has failed')
                    continue #Skip this video, since it should have downloaded and failed
            else:
                generators.write_strm(filename, folder, vid['contentDetails']['videoId'], type=type) #Write the strm file for this episode
            season = '1'
        #Add this episode to the episodenr/playlist.xml file so we can remember we scanned this episode already
        m_xml.playlist_add_episode(id, season, vid['contentDetails']['videoId'], type=type)
    #If there is a nextPagetoken there are more videos to parse, call this function again so it can parse them to
    '''
    if 'nextPageToken' in resp and lastvid is not True:
        #update_playlist_vids(id, folder, settings, resp['nextPageToken'], firstvid)
    else:
        if firstvid != False:
            m_xml.xml_update_playlist_setting(id, 'lastvideoId', firstvid) #Set the lastvideoId to this videoId so the playlist remembers the last video it has. This will save on API calls, since it will quit when it comes across a video that already has been set
    '''
    dev.log('( ._.)========== Done ripping videos from playlist '+settings.find('title').text+' (ID: '+id+') ==========(._. )')
    dev.log('\n\n\n\n')
    return amount
##Helper Functions to check requirements of a youtube video according to the playlist settings
#Check onlyinclude
#vid : The vid from the youtube response its about
#settings: The element containing the playlist settings.xml
#Check onlyinclude
#vid : The vid from the youtube response its about
#settings: The element containing the playlist settings.xml
def onlyinclude(vid, settings):
    """Return True when the video passes the 'onlyinclude' filter.

    The filter is a '|'-separated list of words; the video's title must
    contain at least one of them. An empty/absent setting disables the
    filter (everything passes).

    Fix: the original used "is not ''" identity comparisons on a string
    literal, which only work via CPython string interning and raise a
    SyntaxWarning on modern Python; replaced with a truthiness check.
    """
    text = settings.find('onlyinclude').text
    if not text:
        return True #onlyinclude is not enabled, so return true
    # split('|') also covers the single-word case (returns [text]).
    words = text.split('|')
    title = vid['snippet']['title']
    return any(word in title for word in words)
#Checks for excludewords, returns True if check passed, False if check fails
def excludewords(vid, settings):
    """Check the 'excludewords' playlist setting against a video.

    vid     : dict for one video from the youtube API response
    settings: Element containing the playlist settings.xml
    Returns True when the check passes (setting empty/disabled, or no
    excluded word found) and False when an excluded word (multiple words
    separated by '|') occurs in the video title or description.
    """
    text = settings.find('excludewords').text
    if not text:
        return True  # excludewords is not enabled, so the check passes
    title = vid['snippet']['title'].lower()
    # Bug fix: the description is now lowercased too, so the check is
    # case-insensitive for both fields (previously only the title was
    # lowercased, letting differently-cased words slip through).
    description = vid['snippet']['description'].lower()
    for word in text.split('|'):
        word = word.lower()
        if word in title or word in description:
            return False  # found an excluded word; do not add this video
    return True
| Sleuteltje/plugin.video.youtubelibrary | resources/lib/service.py | Python | gpl-3.0 | 26,342 |
'''
Cache manager
=============
The cache manager can be used to store python object attached to an uniq key.
The cache can be controlled in different manner, with a object limit or a
timeout.
For example, we can create a new cache with a limit of 10 objects and a timeout
of 5 seconds::
# register a new Cache
Cache.register('mycache', limit=10, timeout=5)
# create an object + id
text = 'objectid'
instance = Label(text=text)
Cache.append('mycache', text, instance)
# retrieve the cached object
    instance = Cache.get('mycache', text)
If the instance is None, the cache may have trashed it, because you
haven't used the label for 5 seconds and you've reached the limit.
'''
__all__ = ('Cache', )
from os import environ
from kivy.logger import Logger
from kivy.clock import Clock
class Cache(object):
    '''See module documentation for more information.
    '''
    # category name -> {'limit': int or None, 'timeout': double or None}
    _categories = {}
    # category name -> key -> {'object', 'timeout', 'lastaccess', 'timestamp'}
    _objects = {}

    @staticmethod
    def register(category, limit=None, timeout=None):
        '''Register a new category in cache, with limit

        :Parameters:
            `category` : str
                Identifier of the category
            `limit` : int (optionnal)
                Maximum number of object in the cache.
                If None, no limit is applied.
            `timeout` : double (optionnal)
                Time to delete the object when it's not used.
                if None, no timeout is applied.
        '''
        Cache._categories[category] = {
            'limit': limit,
            'timeout': timeout}
        Cache._objects[category] = {}
        Logger.debug('Cache: register <%s> with limit=%s, timeout=%ss' %
                     (category, str(limit), str(timeout)))

    @staticmethod
    def append(category, key, obj, timeout=None):
        '''Add a new object in the cache.

        :Parameters:
            `category` : str
                Identifier of the category
            `key` : str
                Uniq identifier of the object to store
            `obj` : object
                Object to store in cache
            `timeout` : double (optionnal)
                Custom time to delete the object if it's not used.
        '''
        # check whether obj should not be cached first
        if getattr(obj, '_no_cache', False):
            return
        try:
            cat = Cache._categories[category]
        except KeyError:
            Logger.warning('Cache: category <%s> not exist' % category)
            return
        timeout = timeout or cat['timeout']
        # FIXME: activate purge when limit is hit
        #limit = cat['limit']
        #if limit is not None and len(Cache._objects[category]) >= limit:
        #    Cache._purge_oldest(category)
        Cache._objects[category][key] = {
            'object': obj,
            'timeout': timeout,
            'lastaccess': Clock.get_time(),
            'timestamp': Clock.get_time()}

    @staticmethod
    def get(category, key, default=None):
        '''Get a object in cache.

        :Parameters:
            `category` : str
                Identifier of the category
            `key` : str
                Uniq identifier of the object to store
            `default` : anything, default to None
                Default value to be returned if key is not found
        '''
        try:
            Cache._objects[category][key]['lastaccess'] = Clock.get_time()
            return Cache._objects[category][key]['object']
        except Exception:
            return default

    @staticmethod
    def get_timestamp(category, key, default=None):
        '''Get the object timestamp in cache.

        :Parameters:
            `category` : str
                Identifier of the category
            `key` : str
                Uniq identifier of the object to store
            `default` : anything, default to None
                Default value to be returned if key is not found
        '''
        try:
            return Cache._objects[category][key]['timestamp']
        except Exception:
            return default

    @staticmethod
    def get_lastaccess(category, key, default=None):
        '''Get the object last access time in cache.

        :Parameters:
            `category` : str
                Identifier of the category
            `key` : str
                Uniq identifier of the object to store
            `default` : anything, default to None
                Default value to be returned if key is not found
        '''
        try:
            return Cache._objects[category][key]['lastaccess']
        except Exception:
            return default

    @staticmethod
    def remove(category, key=None):
        '''Purge the cache

        :Parameters:
            `category` : str (optionnal)
                Identifier of the category
            `key` : str (optionnal)
                Uniq identifier of the object to store
        '''
        try:
            if key is not None:
                del Cache._objects[category][key]
            else:
                Cache._objects[category] = {}
        except Exception:
            pass

    @staticmethod
    def _purge_oldest(category, maxpurge=1):
        '''Remove up to `maxpurge` of the least recently accessed entries
        of a category. Entries that were never accessed since creation
        (lastaccess == timestamp) are not considered.'''
        Logger.debug('Cache: purge oldest in <%s>' % category)
        import heapq
        heap_list = []
        for key in Cache._objects[category]:
            obj = Cache._objects[category][key]
            if obj['lastaccess'] == obj['timestamp']:
                continue
            heapq.heappush(heap_list, (obj['lastaccess'], key))
        n = 0
        while n < maxpurge:
            try:
                lastaccess, key = heapq.heappop(heap_list)
            except Exception:
                return
            del Cache._objects[category][key]
            # Bug fix: n was never incremented before, so a single call
            # purged *every* eligible entry instead of `maxpurge` of them.
            n += 1

    @staticmethod
    def _purge_by_timeout(dt):
        '''Clock callback: drop entries whose timeout has elapsed.'''
        curtime = Clock.get_time()
        for category in Cache._objects:
            category_timeout = Cache._categories[category]['timeout']
            if category_timeout is not None and dt > category_timeout:
                # XXX got a lag ! that may be because the frame take lot of
                # time to draw. and the timeout is not adapted to the current
                # framerate. So, increase the timeout by two.
                # ie: if the timeout is 1 sec, and framerate go to 0.7, newly
                # object added will be automaticly trashed.
                category_timeout *= 2
                Cache._categories[category]['timeout'] = category_timeout
                continue
            # iterate over a copy of the keys: entries may be deleted.
            # list(...) also keeps this working on Python 3, where keys()
            # is a view and the old keys()[:] slice would raise.
            for key in list(Cache._objects[category].keys()):
                lastaccess = Cache._objects[category][key]['lastaccess']
                objtimeout = Cache._objects[category][key]['timeout']
                # Bug fix: use a per-entry variable. Previously the object
                # timeout overwrote the shared `timeout` variable, leaking
                # into the checks of all following keys in the loop.
                timeout = objtimeout if objtimeout is not None else category_timeout
                # no timeout, cancel
                if timeout is None:
                    continue
                if curtime - lastaccess > timeout:
                    del Cache._objects[category][key]

    @staticmethod
    def print_usage():
        '''Print the cache usage on the console'''
        # parenthesized single-argument print works on Python 2 and 3
        print('Cache usage :')
        for category in Cache._categories:
            print(' * %s : %d / %s, timeout=%s' % (
                category.capitalize(),
                len(Cache._objects[category]),
                str(Cache._categories[category]['limit']),
                str(Cache._categories[category]['timeout'])))
# When the docs are being built (KIVY_DOC_INCLUDE set) no event loop is
# running, so the periodic purge is not installed.
if 'KIVY_DOC_INCLUDE' not in environ:
    # install the schedule clock for purging
    Clock.schedule_interval(Cache._purge_by_timeout, 1)
| nuigroup/kivy | kivy/cache.py | Python | lgpl-3.0 | 7,641 |
# -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
from lxml import etree, objectify
from pprint import pformat
import time
from urllib import urlencode
import urllib2
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.addons.payment_ogone.data import ogone
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(osv.Model):
    _inherit = 'payment.acquirer'

    def _get_ogone_urls(self, cr, uid, environment, context=None):
        """ Ogone URLS:

         - standard order: POST address for form-based

        @TDETODO: complete me
        """
        return {
            'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
            'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
            'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
            'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
        }

    def _get_providers(self, cr, uid, context=None):
        """Add 'ogone' to the list of supported payment providers."""
        providers = super(PaymentAcquirerOgone, self)._get_providers(cr, uid, context=context)
        providers.append(['ogone', 'Ogone'])
        return providers

    _columns = {
        'ogone_pspid': fields.char('PSPID', required_if_provider='ogone'),
        'ogone_userid': fields.char('API User ID', required_if_provider='ogone'),
        'ogone_password': fields.char('API User Password', required_if_provider='ogone'),
        'ogone_shakey_in': fields.char('SHA Key IN', size=32, required_if_provider='ogone'),
        'ogone_shakey_out': fields.char('SHA Key OUT', size=32, required_if_provider='ogone'),
    }

    def _ogone_generate_shasign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting ogone) or 'out' (ogone
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'ogone'
        key = getattr(acquirer, 'ogone_shakey_' + inout)

        def filter_key(key_name):
            # Renamed parameter (was `key`): it shadowed the SHA key above.
            # For outgoing signatures only a fixed whitelist of fields may
            # enter the signature; incoming signatures take every field.
            if inout == 'in':
                return True
            else:
                # SHA-OUT keys
                # source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
                keys = [
                    'AAVADDRESS',
                    'AAVCHECK',
                    'AAVMAIL',
                    'AAVNAME',
                    'AAVPHONE',
                    'AAVZIP',
                    'ACCEPTANCE',
                    'ALIAS',
                    'AMOUNT',
                    'BIC',
                    'BIN',
                    'BRAND',
                    'CARDNO',
                    'CCCTY',
                    'CN',
                    'COMPLUS',
                    'CREATION_STATUS',
                    'CURRENCY',
                    'CVCCHECK',
                    'DCC_COMMPERCENTAGE',
                    'DCC_CONVAMOUNT',
                    'DCC_CONVCCY',
                    'DCC_EXCHRATE',
                    'DCC_EXCHRATESOURCE',
                    'DCC_EXCHRATETS',
                    'DCC_INDICATOR',
                    'DCC_MARGINPERCENTAGE',
                    'DCC_VALIDHOURS',
                    'DIGESTCARDNO',
                    'ECI',
                    'ED',
                    'ENCCARDNO',
                    'FXAMOUNT',
                    'FXCURRENCY',
                    'IBAN',
                    'IP',
                    'IPCTY',
                    'NBREMAILUSAGE',
                    'NBRIPUSAGE',
                    'NBRIPUSAGE_ALLTX',
                    'NBRUSAGE',
                    'NCERROR',
                    'NCERRORCARDNO',
                    'NCERRORCN',
                    'NCERRORCVC',
                    'NCERRORED',
                    'ORDERID',
                    'PAYID',
                    'PM',
                    'SCO_CATEGORY',
                    'SCORING',
                    'STATUS',
                    'SUBBRAND',
                    'SUBSCRIPTION_ID',
                    'TRXDATE',
                    'VC'
                ]
                return key_name.upper() in keys

        # fields are sorted by (uppercased) name, empty values are skipped
        items = sorted((k.upper(), v) for k, v in values.items())
        sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
        sign = sign.encode("utf-8")
        shasign = sha1(sign).hexdigest()
        return shasign

    def ogone_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST values (including SHASIGN) for the Ogone standard
        order form."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        ogone_tx_values = dict(tx_values)
        temp_ogone_tx_values = {
            'PSPID': acquirer.ogone_pspid,
            'ORDERID': tx_values['reference'],
            # Bug fix: round *after* scaling to cents. The previous
            # int(float_round(amount, 2) * 100) truncated the float product
            # (e.g. 14.75 -> 1474.9999... -> 1474 cents).
            'AMOUNT': '%d' % int(float_round(tx_values['amount'] * 100, 0)),
            'CURRENCY': tx_values['currency'] and tx_values['currency'].name or '',
            'LANGUAGE': partner_values['lang'],
            'CN': partner_values['name'],
            'EMAIL': partner_values['email'],
            'OWNERZIP': partner_values['zip'],
            'OWNERADDRESS': partner_values['address'],
            'OWNERTOWN': partner_values['city'],
            'OWNERCTY': partner_values['country'] and partner_values['country'].name or '',
            'OWNERTELNO': partner_values['phone'],
            'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
        }
        if ogone_tx_values.get('return_url'):
            temp_ogone_tx_values['PARAMPLUS'] = 'return_url=%s' % ogone_tx_values.pop('return_url')
        shasign = self._ogone_generate_shasign(acquirer, 'in', temp_ogone_tx_values)
        temp_ogone_tx_values['SHASIGN'] = shasign
        ogone_tx_values.update(temp_ogone_tx_values)
        return partner_values, ogone_tx_values

    def ogone_get_form_action_url(self, cr, uid, id, context=None):
        """Return the Ogone standard-order URL the payment form posts to."""
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_ogone_urls(cr, uid, acquirer.environment, context=context)['ogone_standard_order_url']
class PaymentTxOgone(osv.Model):
    _inherit = 'payment.transaction'
    # ogone status codes, see https://secure.ogone.com/ncol/paymentinfos1.asp
    _ogone_valid_tx_status = [5, 9]
    _ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
    _ogone_pending_tx_status = [46]  # 3DS HTML response
    _ogone_cancel_tx_status = [1]

    _columns = {
        'ogone_3ds': fields.boolean('3DS Activated'),
        'ogone_3ds_html': fields.html('3DS HTML'),
        'ogone_complus': fields.char('Complus'),
        'ogone_payid': fields.char('PayID', help='Payment ID, generated by Ogone')
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _ogone_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from ogone, verify it and find the related
        transaction record. """
        reference, pay_id, shasign = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN')
        if not reference or not pay_id or not shasign:
            # fixed typo in the message: 'shashign' -> 'shasign'
            error_msg = 'Ogone: received data with missing reference (%s) or pay_id (%s) or shasign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        # find tx -> @TDENOTE use paytid ?
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Ogone: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
        # verify shasign
        shasign_check = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'out', data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Ogone: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        return tx

    def _ogone_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (field, received, expected) tuples for feedback
        values that do not match the stored transaction."""
        invalid_parameters = []
        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
        if tx.acquirer_reference and data.get('PAYID') != tx.acquirer_reference:
            invalid_parameters.append(('PAYID', data.get('PAYID'), tx.acquirer_reference))
        # check what is bought
        if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
        if data.get('currency') != tx.currency_id.name:
            invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))
        return invalid_parameters

    def _ogone_form_validate(self, cr, uid, tx, data, context=None):
        """Update the transaction state from an Ogone form feedback.
        Returns True for a validated payment, False on error feedback."""
        if tx.state == 'done':
            _logger.warning('Ogone: trying to validate an already validated tx (ref %s)' % tx.reference)
            return True
        status = int(data.get('STATUS', '0'))
        if status in self._ogone_valid_tx_status:
            tx.write({
                'state': 'done',
                'date_validate': data['TRXDATE'],
                'acquirer_reference': data['PAYID'],
            })
            return True
        elif status in self._ogone_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'acquirer_reference': data.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            tx.write({
                'state': 'pending',
                'acquirer_reference': data.get('PAYID'),
            })
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': data.get('NCERROR'),
                'error_code': data.get('NCERRORPLUS'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERRORPLUS')),
            }
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('PAYID'),
            })
            return False

    # --------------------------------------------------
    # S2S RELATED METHODS
    # --------------------------------------------------

    def ogone_s2s_create_alias(self, cr, uid, id, values, context=None):
        """ Create an alias at Ogone via batch.

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        assert tx.type == 'server2server', 'Calling s2s dedicated method for a %s acquirer' % tx.type
        alias = 'OPENERP-%d-%d' % (tx.partner_id.id, tx.id)

        expiry_date = '%s%s' % (values['expiry_date_mm'], values['expiry_date_yy'][2:])
        line = 'ADDALIAS;%(alias)s;%(holder_name)s;%(number)s;%(expiry_date)s;%(brand)s;%(pspid)s'
        line = line % dict(values, alias=alias, expiry_date=expiry_date, pspid=tx.acquirer_id.ogone_pspid)

        tx_data = {
            'FILE_REFERENCE': 'OPENERP-NEW-ALIAS-%s' % time.time(),    # something unique,
            'TRANSACTION_CODE': 'ATR',
            'OPERATION': 'SAL',
            'NB_PAYMENTS': 1,   # even if we do not actually have any payment, ogone want it to not be 0
            'FILE': line,
            'REPLY_TYPE': 'XML',
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'PROCESS_MODE': 'CHECKANDPROCESS',
        }

        # TODO: fix URL computation
        request = urllib2.Request(tx.acquirer_id.ogone_afu_agree_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()

        try:
            tree = objectify.fromstring(result)
        except etree.XMLSyntaxError:
            _logger.exception('Invalid xml response from ogone')
            return None

        error_code = error_str = None
        if hasattr(tree, 'PARAMS_ERROR'):
            error_code = tree.NCERROR.text
            error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
        else:
            node = tree.FORMAT_CHECK
            error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
            if error_node is not None:
                error_code = error_node.NCERROR.text
                error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)

        if error_code:
            error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
            error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
            _logger.error(error)
            raise Exception(error)      # TODO specific exception

        tx.write({'partner_reference': alias})
        return True

    def ogone_s2s_generate_values(self, cr, uid, id, custom_values, context=None):
        """ Generate valid Ogone values for a s2s tx.

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        tx_data = {
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'OrderID': tx.reference,
            # Bug fix: round after scaling to cents; int() previously
            # truncated the float product (e.g. 14.75 -> 1474 cents).
            'amount': '%d' % int(float_round(tx.amount * 100, 0)),
            'CURRENCY': tx.currency_id.name,
            'LANGUAGE': tx.partner_lang,
            'OPERATION': 'SAL',
            'ECI': 2,   # Recurring (from MOTO)
            'ALIAS': tx.partner_reference,
            'RTIMEOUT': 30,
        }
        if custom_values.get('ogone_cvc'):
            tx_data['CVC'] = custom_values.get('ogone_cvc')
        if custom_values.pop('ogone_3ds', None):
            tx_data.update({
                'FLAG3D': 'Y',  # YEAH!!
            })
        if custom_values.get('ogone_complus'):
            tx_data['COMPLUS'] = custom_values.get('ogone_complus')
        if custom_values.get('ogone_accept_url'):
            pass
        shasign = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'in', tx_data)
        tx_data['SHASIGN'] = shasign
        return tx_data

    def ogone_s2s_feedback(self, cr, uid, data, context=None):
        """
        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        pass

    def ogone_s2s_execute(self, cr, uid, id, values, context=None):
        """Execute a server-to-server payment and poll Ogone until the
        transaction reaches a final status (or retries are exhausted).

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)

        tx_data = self.ogone_s2s_generate_values(cr, uid, id, values, context=context)
        _logger.info('Generated Ogone s2s data %s', pformat(tx_data))  # debug

        request = urllib2.Request(tx.acquirer_id.ogone_direct_order_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()
        _logger.info('Contacted Ogone direct order; result %s', result)  # debug

        tree = objectify.fromstring(result)
        payid = tree.get('PAYID')

        query_direct_data = dict(
            PSPID=tx.acquirer_id.ogone_pspid,
            USERID=tx.acquirer_id.ogone_userid,
            PSWD=tx.acquirer_id.ogone_password,
            ID=payid,
        )
        query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (tx.acquirer_id.environment,)

        tries = 2
        tx_done = False
        tx_status = False
        # Bug fix: the condition used `or`, which kept polling after the
        # transaction was done and - worse - looped forever when the status
        # never left the WAIT list (tries could go negative while
        # `not tx_done` stayed True).
        while not tx_done and tries > 0:
            try:
                tree = objectify.fromstring(result)
            except etree.XMLSyntaxError:
                # invalid response from ogone
                _logger.exception('Invalid xml response from ogone')
                raise

            # see https://secure.ogone.com/ncol/paymentinfos1.asp
            VALID_TX = [5, 9]
            WAIT_TX = [41, 50, 51, 52, 55, 56, 91, 92, 99]
            PENDING_TX = [46]   # 3DS HTML response
            # other status are errors...

            status = tree.get('STATUS')
            if status == '':
                status = None
            else:
                status = int(status)

            if status in VALID_TX:
                tx_status = True
                tx_done = True
            elif status in PENDING_TX:
                html = str(tree.HTML_ANSWER)
                tx_data.update(ogone_3ds_html=html.decode('base64'))
                tx_status = False
                tx_done = True
            elif status in WAIT_TX:
                # Bug fix: time.sleep() takes seconds; sleep(1500) stalled
                # the worker for 25 minutes (1500 was presumably meant as
                # milliseconds).
                time.sleep(1.5)

                request = urllib2.Request(query_direct_url, urlencode(query_direct_data))
                result = urllib2.urlopen(request).read()
                _logger.debug('Contacted Ogone query direct; result %s', result)
            else:
                error_code = tree.get('NCERROR')
                if not ogone.retryable(error_code):
                    error_str = tree.get('NCERRORPLUS')
                    error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
                    error = 'ERROR: %s\n\n%s: %s' % (error_str, error_code, error_msg)
                    _logger.info(error)
                    raise Exception(error)

            tries = tries - 1

        if not tx_done and tries == 0:
            raise Exception('Cannot get transaction status...')

        return tx_status
| mycodeday/crm-platform | payment_ogone/models/ogone.py | Python | gpl-3.0 | 19,078 |
import json
import time
import os
import argparse
import re
import sys
import logging
import math
import random
import threading
import requests
import queue
from datetime import datetime
import datetime as dt
from models.user import User, UserManager
from models.emote import Emote
from scripts.database import update_database
from apiwrappers import TwitchAPI
import pymysql
import wolframalpha
import tweepy
from dispatch import Dispatch
from kvidata import KVIData
from tbmath import TBMath
from pytz import timezone
from whisperconn import WhisperConn
from tbutil import SyncValue, time_since
import irc.client
from command import Filter, Command
log = logging.getLogger('tyggbot')
class TMI:
    # Max messages the bot may send per throttle window; the counter is
    # reset every 30s by TyggBot.reset_command_throttle (see __init__).
    message_limit = 50
class Setting:
    """Converts raw (string) setting values into typed python objects."""

    def parse(type, value):
        """Parse `value` according to `type` ('int', 'string', 'list' or
        'bool'); returns None when the type is unknown or parsing fails.
        NOTE: defined without self/@staticmethod, call as Setting.parse()."""
        converters = {
            'int': lambda v: int(v),
            'string': lambda v: v,
            'list': lambda v: v.split(','),
            'bool': lambda v: int(v) == 1,
        }
        try:
            converter = converters.get(type)
            if converter is None:
                log.error('Invalid setting type: {0}'.format(type))
            else:
                return converter(value)
        except Exception as e:
            log.error('Exception caught when loading setting: {0}'.format(e))
        return None
class TyggBot:
    """
    Main class for the twitch bot
    """

    version = '0.9.5.2'
    # strftime format used when rendering times (e.g. c_time_norway)
    date_fmt = '%H:%M'
    #date_fmt = '%A %B '

    # trigger -> Command, populated by load_all()
    commands = {}
    filters = []
    settings = {}
    emotes = {}
    twitchapi = False

    # runtime flags, may be overridden by config or CLI args in __init__
    silent = False
    dev = False

    """ Singleton instance of TyggBot, one instance of the script
    should never have two active classes."""
    instance = None

    # fallback values used when the corresponding setting is missing
    default_settings = {
        'broadcaster': 'test_broadcaster',
        'ban_ascii': True,
        'ban_msg_length': True,
    }
    def parse_args():
        # Parse the bot's command-line arguments (--config/-c, --silent).
        # NOTE(review): defined without self/@staticmethod, so it only works
        # when called on the class itself: TyggBot.parse_args().
        parser = argparse.ArgumentParser()
        parser.add_argument('--config', '-c',
                default='config.ini',
                help='Specify which config file to use (default: config.ini)')
        parser.add_argument('--silent',
                action='count',
                help='Decides whether the bot should be silent or not')
        # TODO: Add a log level argument.
        return parser.parse_args()
    def init_twitter(self):
        """Authenticate against twitter using the [twitter] config section;
        on failure self.twitter is set to False."""
        try:
            self.twitter_auth = tweepy.OAuthHandler(self.config['twitter']['consumer_key'], self.config['twitter']['consumer_secret'])
            self.twitter_auth.set_access_token(self.config['twitter']['access_token'], self.config['twitter']['access_token_secret'])

            self.twitter = tweepy.API(self.twitter_auth)

            if self.use_twitter_stream:
                self.connect_to_twitter_stream()
        except:
            log.exception('Twitter authentication failed.')
            self.twitter = False
    def connect_to_twitter_stream(self):
        """Open (or re-open) the tweepy userstream and announce new original
        tweets from `relevant_users` in chat."""
        try:
            class MyStreamListener(tweepy.StreamListener):
                relevant_users = [
                    'tyggbar', 'forsensc2', 'pajtest', 'rubarthasdf'
                ]

                def on_status(self, tweet):
                    # only original tweets: skip retweets and replies
                    if tweet.user.screen_name.lower() in self.relevant_users:
                        if not tweet.text.startswith('RT ') and tweet.in_reply_to_screen_name is None:
                            TyggBot.instance.say('Volcania New tweet from {0}: {1}'.format(tweet.user.screen_name, tweet.text.replace("\n", " ")))

                def on_error(self, status):
                    log.warning('Unhandled in twitter stream: {0}'.format(status))

            if not self.twitter_stream:
                listener = MyStreamListener()
                self.twitter_stream = tweepy.Stream(self.twitter_auth, listener, retry_420=3*60, daemonize_thread=True)

            # NOTE(review): `async=True` is a SyntaxError on Python 3.7+
            # (async became a keyword); newer tweepy renamed it is_async.
            self.twitter_stream.userstream(_with='followings', replies='all', async=True)
        except:
            log.exception('Exception caught while trying to connect to the twitter stream')
    def load_default_phrases(self):
        """Build self.phrases: start from the built-in defaults, override
        with the [phrases] config section. An empty config value disables
        (False) the phrase; missing keys fall back to the default."""
        default_phrases = {
            'welcome': False,
            'quit': False,
            'nl': '{username} has typed {num_lines} messages in this channel!',
            'nl_0': '{username} has not typed any messages in this channel BibleThump',
            'new_sub': 'Sub hype! {username} just subscribed PogChamp',
            'resub': 'Resub hype! {username} just subscribed, {num_months} months in a row PogChamp <3 PogChamp',
        }
        if 'phrases' in self.config:
            self.phrases = {}

            for phrase_key, phrase_value in self.config['phrases'].items():
                log.debug('Including from config {0}: {1}'.format(phrase_key, phrase_value))
                if len(phrase_value.strip()) <= 0:
                    self.phrases[phrase_key] = False
                else:
                    self.phrases[phrase_key] = phrase_value

            for phrase_key, phrase_value in default_phrases.items():
                if phrase_key not in self.phrases:
                    log.debug('Overriding from default {0}: {1}'.format(phrase_key, phrase_value))
                    self.phrases[phrase_key] = phrase_value
        else:
            self.phrases = default_phrases
    def __init__(self, config, args):
        """Wire up all bot subsystems: database, IRC reactor, whisper
        connection, twitter, websocket server and periodic jobs.

        config: parsed config file (configparser-style mapping)
        args  : parsed CLI arguments from TyggBot.parse_args()
        """
        self.config = config
        # database connection (autocommit on); schema is migrated on boot
        self.sqlconn = pymysql.connect(unix_socket=config['sql']['unix_socket'], user=config['sql']['user'], passwd=config['sql']['passwd'], db=config['sql']['db'], charset='utf8')
        self.sqlconn.autocommit(True)
        update_database(self.sqlconn)
        self.load_default_phrases()
        # IRC reactor + main server connection
        self.reactor = irc.client.Reactor()
        self.connection = self.reactor.server()
        self.twitchapi = TwitchAPI(type='api')
        self.reactor.add_global_handler('all_events', self._dispatcher, -10)
        if 'wolfram' in config['main']:
            Dispatch.wolfram = wolframalpha.Client(config['main']['wolfram'])
        else:
            # NOTE(review): dead local; Dispatch.wolfram is left untouched here
            wolfram = None
        self.whisper_conn = None
        # singleton reference used by e.g. the twitter stream listener
        TyggBot.instance = self
        self.base_path = os.path.dirname(os.path.realpath(__file__))
        self.data = {}
        # callbacks resolved lazily by get_value()
        self.data_cb = {}
        self.data_cb['status_length'] = self.c_status_length
        self.data_cb['stream_status'] = self.c_stream_status
        self.data_cb['time_norway'] = self.c_time_norway
        self.data_cb['bot_uptime'] = self.c_uptime
        self.data_cb['time_since_latest_deck'] = self.c_time_since_latest_deck
        self.ignores = []
        self.start_time = datetime.now()
        # channel/streamer can be given either way around in the config
        if 'streamer' in config['main']:
            self.streamer = config['main']['streamer']
            self.channel = '#' + self.streamer
        elif 'target' in config['main']:
            self.channel = config['main']['target']
            self.streamer = self.channel[1:]
        self.kvi = KVIData(self.sqlconn)
        self.tbm = TBMath()
        self.last_sync = time.time()
        self.users = UserManager(self.sqlconn)
        # flags from config can be overridden by the --silent CLI switch
        if 'flags' in config:
            self.silent = True if 'silent' in config['flags'] and config['flags']['silent'] == '1' else self.silent
            self.dev = True if 'dev' in config['flags'] and config['flags']['dev'] == '1' else self.dev
        self.silent = True if args.silent else self.silent
        if self.silent:
            log.info('Silent mode enabled')
        self.sync_from()
        self.nickname = config['main']['nickname']
        self.password = config['main']['password']
        self.reconnection_interval = 5
        self.load_all()
        # separate connection used for twitch whispers
        self.whisper_conn = WhisperConn(self.streamer, self.nickname, self.password, self.reactor)
        self.whisper_conn.connect()
        # message-rate throttle, reset every 30s (see TMI.message_limit)
        self.num_commands_sent = 0
        self.connection.execute_every(30, self.reset_command_throttle)
        self.twitter_stream = False
        if 'twitter' in config:
            self.use_twitter_stream = 'streaming' in config['twitter'] and config['twitter']['streaming'] == '1'
            self.init_twitter()
        else:
            self.twitter = None
        self.connection.execute_every(1, self.shift_emotes)
        # connected websocket clients fed by refresh_emote_data
        self.ws_clients = []
        if 'websocket' in config and config['websocket']['enabled'] == '1':
            self.init_websocket_server()
        self.execute_every(1, self.refresh_emote_data)
        self.urls_to_check = queue.Queue()
        self.connection.execute_every(1, self.check_urls)
    def refresh_emote_data(self):
        """Periodic job: push the current emote statistics as JSON to every
        connected websocket client (no-op when no client is connected)."""
        if len(self.ws_clients) > 0:
            emote_data = {}
            for emote in self.emotes:
                emote_data[emote.code] = {
                    'code': emote.code,
                    'pm': emote.pm,
                    'tm': emote.tm,
                    'count': emote.count,
                }
            payload = json.dumps(emote_data, separators=(',',':')).encode('utf8')
            for client in self.ws_clients:
                client.sendMessage(payload, False)
    def init_websocket_server(self):
        """Start an autobahn/twisted websocket server on a daemon thread;
        connected clients are tracked in TyggBot.instance.ws_clients."""
        import twisted
        from twisted.internet import reactor
        twisted.python.log.startLogging(sys.stdout)

        from autobahn.twisted.websocket import WebSocketServerFactory, \
            WebSocketServerProtocol

        class MyServerProtocol(WebSocketServerProtocol):
            def onConnect(self, request):
                log.info('Client connecting: {0}'.format(request.peer))

            def onOpen(self):
                # register this client so refresh_emote_data can push to it
                log.info('WebSocket connection open. {0}'.format(self))
                TyggBot.instance.ws_clients.append(self)

            def onMessage(self, payload, isBinary):
                if isBinary:
                    log.info('Binary message received: {0} bytes'.format(len(payload)))
                else:
                    TyggBot.instance.me('Recieved message: {0}'.format(payload.decode('utf8')))
                    log.info('Text message received: {0}'.format(payload.decode('utf8')))

            def onClose(self, wasClean, code, reason):
                log.info('WebSocket connection closed: {0}'.format(reason))
                TyggBot.instance.ws_clients.remove(self)

        factory = WebSocketServerFactory()
        factory.protocol = MyServerProtocol

        def reactor_run(reactor, factory, port):
            log.info(reactor)
            log.info(factory)
            log.info(port)
            reactor.listenTCP(port, factory)
            # installSignalHandlers=0: we run outside the main thread
            reactor.run(installSignalHandlers=0)

        reactor_thread = threading.Thread(target=reactor_run, args=(reactor, factory, int(self.config['websocket']['port'])))
        reactor_thread.daemon = True
        reactor_thread.start()

        self.ws_factory = factory
    def shift_emotes(self):
        """Periodic job: advance each emote's rolling statistics window."""
        for emote in self.emotes:
            emote.shift()
    def reset_command_throttle(self):
        """Periodic job (every 30s): reset the outgoing message counter used
        by privmsg() to enforce TMI.message_limit."""
        self.num_commands_sent = 0
    def _dispatcher(self, connection, event):
        """Route an IRC event to the matching on_<event type> handler,
        silently ignoring event types without a handler."""
        do_nothing = lambda c, e: None
        method = getattr(self, "on_" + event.type, do_nothing)
        method(connection, event)
    def start(self):
        """Start the IRC client."""
        # blocks forever, processing IRC events
        self.reactor.process_forever()
    def get_kvi_value(self, key, extra={}):
        """Fetch a value from the key/value store; `extra` is unused here
        but kept for a uniform callback signature (read-only, never mutated)."""
        return self.kvi.get(key)
    def get_last_tweet(self, key, extra={}):
        """Return the latest original tweet (no retweets/replies) of the
        twitter user `key`, formatted with its age; an emote string is
        returned when twitter is not configured or the lookup fails."""
        if self.twitter:
            try:
                public_tweets = self.twitter.user_timeline(key)
                for tweet in public_tweets:
                    # skip retweets and replies; first hit is the newest
                    if not tweet.text.startswith('RT ') and tweet.in_reply_to_screen_name is None:
                        return '{0} ({1} ago)'.format(tweet.text.replace("\n", " "), time_since(datetime.now().timestamp(), tweet.created_at.timestamp(), format='short'))
            except Exception as e:
                log.error('Exception caught {0}'.format(e))
                return 'FeelsBadMan'
        else:
            return 'Twitter not set up FeelsBadMan'

        return 'FeelsBadMan'
def get_emote_pm(self, key, extra={}):
for emote in self.emotes:
if key == emote.code:
return emote.pm
return 0
def get_emote_tm(self, key, extra={}):
for emote in self.emotes:
if key == emote.code:
return emote.tm
return 0
def get_emote_count(self, key, extra={}):
for emote in self.emotes:
if key == emote.code:
return emote.count
return 0
def get_emote_pm_record(self, key, extra={}):
for emote in self.emotes:
if key == emote.code:
return emote.pm_record
return 0
def get_emote_tm_record(self, key, extra={}):
for emote in self.emotes:
if key == emote.code:
return emote.tm_record
return 0
    def get_value(self, key, extra={}):
        """Resolve `key` against, in order: the per-call `extra` dict, the
        static data dict, the registered data callbacks (called here), then
        the settings. Returns '???' (and logs a warning) for unknown keys.
        NOTE: `extra` is a mutable default but is only ever read."""
        if key in extra:
            return extra[key]
        elif key in self.data:
            return self.data[key]
        elif key in self.data_cb:
            return self.data_cb[key]()
        elif key in self.settings:
            return self.settings[key]

        log.warning('Unknown key passed to get_value: {0}'.format(key))
        return '???'
    def get_cursor(self):
        """Return a fresh DB cursor; ping() first to reconnect if the
        MySQL connection has gone away."""
        self.sqlconn.ping()
        return self.sqlconn.cursor()
    def get_dictcursor(self):
        """Return a fresh DictCursor (rows as dicts); ping() first to
        reconnect if the MySQL connection has gone away."""
        self.sqlconn.ping()
        return self.sqlconn.cursor(pymysql.cursors.DictCursor)
    def reload(self):
        """Persist current state, then reload commands/filters/settings."""
        self.sync_to()
        self.load_all()
def privmsg(self, message, priority=False, channel=None):
    """Send *message* to *channel* (default: the bot's main channel) over IRC.

    Applies a soft rate limit against TMI.message_limit: non-prioritized
    messages may only use half the budget, prioritized ones the full budget.
    Returns False when throttled; otherwise returns None.
    """
    # Non-prioritized messages are allowed 50% of the message limit
    if (not priority and self.num_commands_sent > TMI.message_limit/2) or (priority and self.num_commands_sent > TMI.message_limit):
        log.error('Skipping this say, because we are sending too many messages.')
        return False
    try:
        if channel is None:
            channel = self.channel
        self.connection.privmsg(channel, message)
        self.num_commands_sent += 1
    except Exception as e:
        log.error('Exception caught while sending privmsg: {0}'.format(e))
def c_time_norway(self):
return datetime.now(timezone('Europe/Oslo')).strftime(TyggBot.date_fmt)
def c_uptime(self):
return time_since(datetime.now().timestamp(), self.start_time.timestamp())
def c_stream_status(self):
    """Human-readable stream state derived from the 'stream_status' KVI flag."""
    return 'online' if self.kvi.get('stream_status') == 1 else 'offline'
def c_status_length(self):
stream_status = self.kvi.get('stream_status')
if stream_status == 1:
return time_since(time.time(), self.kvi.get('last_offline'))
else:
return time_since(time.time(), self.kvi.get('last_online'))
def c_time_since_latest_deck(self):
return time_since(time.time(), self.kvi.get('latest_deck_time'))
def _ban(self, username):
self.privmsg('.ban {0}'.format(username), True)
def execute_at(self, at, function, arguments=()):
self.reactor.execute_at(at, function, arguments)
def execute_delayed(self, delay, function, arguments=()):
self.reactor.execute_delayed(delay, function, arguments)
def execute_every(self, period, function, arguments=()):
self.reactor.execute_every(period, function, arguments)
def ban(self, username):
self._timeout(username, 30)
self.execute_delayed(1, self._ban, (username, ))
def unban(self, username):
self.privmsg('.unban {0}'.format(username), True)
def _timeout(self, username, duration):
self.privmsg('.timeout {0} {1}'.format(username, duration), True)
def timeout(self, username, duration):
self._timeout(username, duration)
self.execute_delayed(1, self._timeout, (username, duration))
def whisper(self, username, message):
if self.whisper_conn:
log.debug('Sending whisper {0} to {1}'.format(message, username))
self.whisper_conn.whisper(username, message)
else:
log.debug('No whisper conn set up.')
def say(self, message, force=False):
if force or not self.silent:
message = message.strip()
if len(message) >= 1:
if (message[0] == '.' or message[0] == '/') and not message[:3] == '.me':
log.warning('Message we attempted to send started with . or /, skipping.')
return
log.info('Sending message: {0}'.format(message))
self.privmsg(message[:400])
else:
log.warning('Message too short, skipping...')
def me(self, message, force=False):
if force or not self.silent:
message = message.strip()
if len(message) >= 1:
if message[0] == '.' or message[0] == '/':
log.warning('Message we attempted to send started with . or /, skipping.')
return
log.info('Sending message: {0}'.format(message))
self.privmsg('.me ' + message[:400])
else:
log.warning('Message too short, skipping...')
def sync_to(self):
self.sqlconn.ping()
cursor = self.sqlconn.cursor()
log.debug('Syncing data from TyggBot to the database...')
self.users.sync()
for trigger, command in self.commands.items():
if not command.synced:
command.sync(cursor)
command.synced = True
for filter in self.filters:
if not filter.synced:
filter.sync(cursor)
filter.synced = True
for emote in self.emotes:
emote.sync(cursor)
cursor.close()
def sync_from(self):
pass
def load_all(self):
    """Reload every data set from the database, in a fixed order."""
    loaders = (
        self._load_commands,
        self._load_filters,
        self._load_settings,
        self._load_ignores,
        self._load_emotes,
    )
    for loader in loaders:
        loader()
def _load_commands(self):
cursor = self.sqlconn.cursor(pymysql.cursors.DictCursor)
from command import Command
cursor.execute('SELECT * FROM `tb_commands`')
self.commands = {}
self.commands['reload'] = Command.admin_command(self.reload)
self.commands['quit'] = Command.admin_command(self.quit)
self.commands['ignore'] = Command.admin_command(Dispatch.ignore, type='func')
self.commands['unignore'] = Command.admin_command(Dispatch.unignore, type='func')
self.commands['add'] = Command()
self.commands['add'].load_from_db({
'id': -1,
'level': 500,
'action': '{ "type":"multi", "default":"nothing", "args": [ { "level":500, "command":"banphrase", "action": { "type":"func", "cb":"add_banphrase" } }, { "level":500, "command":"win", "action": { "type":"func", "cb":"add_win" } }, { "level":500, "command":"command", "action": { "type":"func", "cb":"add_command" } }, { "level":2000, "command":"funccommand", "action": { "type":"func", "cb":"add_funccommand" } }, { "level":500, "command":"nothing", "action": { "type":"say", "message":"" } } ] }',
'do_sync': False,
'delay_all': 5,
'delay_user': 15,
'enabled': True,
'num_uses': 0,
'extra_args': None,
})
self.commands['remove'] = Command()
self.commands['remove'].load_from_db({
'id': -1,
'level': 500,
'action': '{ "type":"multi", "default":"nothing", "args": [ { "level":500, "command":"banphrase", "action": { "type":"func", "cb":"remove_banphrase" } }, { "level":500, "command":"win", "action": { "type":"func", "cb":"remove_win" } }, { "level":500, "command":"command", "action": { "type":"func", "cb":"remove_command" } }, { "level":500, "command":"nothing", "action": { "type":"say", "message":"" } } ] }',
'do_sync': False,
'delay_all': 5,
'delay_user': 15,
'enabled': True,
'num_uses': 0,
'extra_args': None,
})
self.commands['rem'] = self.commands['remove']
self.commands['del'] = self.commands['remove']
self.commands['delete'] = self.commands['remove']
self.commands['debug'] = Command()
self.commands['debug'].load_from_db({
'id': -1,
'level': 250,
'action': '{ "type":"multi", "default":"nothing", "args": [ { "level":250, "command":"command", "action": { "type":"func", "cb":"debug_command" } }, { "level":250, "command":"user", "action": { "type":"func", "cb":"debug_user" } }, { "level":250, "command":"nothing", "action": { "type":"say", "message":"" } } ] }',
'do_sync': False,
'delay_all': 5,
'delay_user': 15,
'enabled': True,
'num_uses': 0,
'extra_args': None,
})
self.commands['level'] = Command.admin_command(Dispatch.level, type='func')
self.commands['eval'] = Command.admin_command(Dispatch.eval, type='func', level=2000)
num_commands = 0
num_aliases = 0
for row in cursor:
try:
cmd = Command()
cmd.load_from_db(row)
if cmd.is_enabled():
for alias in row['command'].split('|'):
if alias not in self.commands:
self.commands[alias] = cmd
num_aliases += 1
else:
log.error('Command !{0} is already in use'.format(alias))
num_commands += 1
except Exception as e:
log.error('Exception caught when loading command: {0}'.format(e))
continue
log.debug('Loaded {0} commands ({1} aliases)'.format(num_commands, num_aliases))
cursor.close()
def _load_filters(self):
cursor = self.sqlconn.cursor(pymysql.cursors.DictCursor)
cursor.execute('SELECT * FROM `tb_filters`')
self.filters = []
num_filters = 0
for row in cursor:
try:
filter = Filter(row)
if filter.is_enabled():
self.filters.append(filter)
num_filters += 1
except Exception as e:
log.error('Exception caught when loading filter: {0}'.format(e))
continue
log.debug('Loaded {0} filters'.format(num_filters))
cursor.close()
def _load_settings(self):
cursor = self.sqlconn.cursor(pymysql.cursors.DictCursor)
cursor.execute('SELECT * FROM `tb_settings`')
self.settings = {}
for row in cursor:
self.settings[row['setting']] = Setting.parse(row['type'], row['value'])
if self.settings[row['setting']] is None:
log.error('ERROR LOADING SETTING {0}'.format(row['setting']))
for setting in self.default_settings:
if setting not in self.settings:
self.settings[setting] = self.default_settings[setting]
cursor.close()
def _load_ignores(self):
cursor = self.sqlconn.cursor(pymysql.cursors.DictCursor)
cursor.execute('SELECT * FROM `tb_ignores`')
self.ignores = []
for row in cursor:
self.ignores.append(row['username'])
cursor.close()
def _load_emotes(self):
cursor = self.sqlconn.cursor(pymysql.cursors.DictCursor)
cursor.execute('SELECT * FROM `tb_emote`')
self.emotes = []
for row in cursor:
self.emotes.append(Emote.load_from_row(row))
cursor.close()
def on_welcome(self, chatconn, event):
if chatconn == self.connection:
log.debug('Connected to IRC server.')
if irc.client.is_channel(self.channel):
chatconn.join(self.channel)
if self.phrases['welcome']:
phrase_data = {
'nickname': self.nickname,
'version': self.version,
}
try:
self.say(self.phrases['welcome'].format(**phrase_data))
except Exception as e:
log.error(e)
elif chatconn == self.whisper_conn:
log.debug('Connected to Whisper server.')
def _connected_checker(self):
if not self.connection.is_connected():
self.connection.execute_delayed(self.reconnection_interval,
self._connected_checker)
self.connect()
def connect(self):
    """Connect to a randomly chosen Twitch chat server for the streamer.

    Fetches the chat server list from the Twitch API, picks one at random,
    connects and requests the twitch.tv IRC capabilities.  Returns True on
    success; on failure schedules a reconnection attempt and returns False.
    """
    log.debug('Fetching random IRC server...')
    data = self.twitchapi.get(['channels', self.streamer, 'chat_properties'])
    if data and len(data['chat_servers']) > 0:
        server = random.choice(data['chat_servers'])
        ip, port = server.split(':')
        port = int(port)
        log.debug('Fetched {0}:{1}'.format(ip, port))
        try:
            irc.client.SimpleIRCClient.connect(self, ip, port, self.nickname, self.password, self.nickname)
            self.connection.cap('REQ', 'twitch.tv/membership')
            self.connection.cap('REQ', 'twitch.tv/commands')
            self.connection.cap('REQ', 'twitch.tv/tags')
            return True
        except irc.client.ServerConnectionError:
            pass
    # NOTE(review): we only reach this point on failure; the message below
    # actually marks a retry being scheduled, not a first connection attempt.
    log.debug('Connecting to IRC server...')
    self.connection.execute_delayed(self.reconnection_interval,
                                    self._connected_checker)
    return False
def on_disconnect(self, chatconn, event):
if chatconn == self.connection:
log.debug('Disconnected from IRC server')
self.sync_to()
self.connection.execute_delayed(self.reconnection_interval,
self._connected_checker)
elif chatconn == self.whisper_conn:
log.debug('Disconnecting from Whisper server')
self.whisper_conn.execute_delayed(self.whisper_conn.reconnection_interval,
self.whisper_conn._connected_checker)
def check_msg_content(self, source, msg_raw, event):
    """Run *msg_raw* through all loaded filters.

    Returns True when a filter matched (the filter's action is executed),
    False when the message is clean.

    BUGFIX: the original referenced ``msg_lower`` without ever defining it,
    raising NameError as soon as any regex or banphrase filter was
    evaluated; it is now derived from ``msg_raw``.
    """
    msg_lower = msg_raw.lower()  # filters match case-insensitively
    for f in self.filters:
        if f.type == 'regex':
            m = f.search(source, msg_lower)
            if m:
                log.debug('Matched regex filter \'{0}\''.format(f.name))
                f.run(self, source, msg_raw, event, {'match': m})
                return True
        elif f.type == 'banphrase':
            if f.filter in msg_lower:
                log.debug('Matched banphrase filter \'{0}\''.format(f.name))
                f.run(self, source, msg_raw, event)
                return True
    return False  # message was ok
def check_link_content(self, source, content, event):
return self.check_msg_content(source, content, event) # same check as for normal chat messages, probably should be changed
def check_url(self, _url):
    """Fetch a queued URL and run its page content through the link filters.

    BUGFIX: the original called ``self.check_url_content``, which does not
    exist anywhere in this class; the content check lives in
    ``check_link_content``.  The bare ``except`` was also narrowed so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        r = requests.get(_url['url'])
    except Exception as e:  # network errors are expected here; log and skip
        log.error('Exception caught while fetching url: {0}'.format(e))
        return
    self.check_link_content(_url['source'], r.text, _url['event'])
def _check_urls(self):
    """Worker loop: pop queued URLs and check each one until the queue is empty."""
    while True:
        try:
            _url = self.urls_to_check.get(False)  # non-blocking get
        except queue.Empty:
            return
        self.check_url(_url)
def check_urls(self):
    """Drain the queued URLs on a background thread.

    BUGFIX: ``threading.Thread(self._check_urls)`` passed the callable as
    the first positional argument (``group``), which must be None — the
    worker therefore never ran.  It must be passed as ``target=``.
    """
    t = threading.Thread(target=self._check_urls)
    t.daemon = True  # best-effort check must not keep the process alive
    t.start()
def parse_message(self, msg_raw, source=None, event=None, pretend=False, force=False, tags={}):
    """Main chat-message pipeline: sync subscriber state from IRC tags,
    count emote occurrences, run filters, queue URLs for checking and
    dispatch ``!`` commands.

    BUGFIX: the URL scan assigned into an undefined ``_url`` dict, raising
    NameError on any message containing a link; the work item is now built
    as a dict literal.  The ``source is None`` guard was also hoisted above
    the first uses of ``source`` (tag handling / logging).
    """
    if source is None and not event:
        log.error('No nick or event passed to parse_message')
        return False
    msg_lower = msg_raw.lower()
    for tag in tags:
        # Keep the cached subscriber flag in sync with the IRCv3 tag.
        if tag['key'] == 'subscriber':
            if source.subscriber and tag['value'] == '0':
                source.subscriber = False
                source.needs_sync = True
            elif not source.subscriber and tag['value'] == '1':
                source.subscriber = True
                source.needs_sync = True
    for emote in self.emotes:
        num = len(emote.regex.findall(msg_raw))
        if num > 0:
            emote.add(num)
    log.debug('{0}: {1}'.format(source.username, msg_raw))
    if not force:
        if source.level < 500:
            # If we've matched a filter, we should not have to run a command.
            if self.check_msg_content(source, msg_raw, event):
                return
            # probably shit regex, but kind of works
            regex = r'((http:\/\/)|\b)(\w|\.)*\.(((aero|asia|biz|cat|com|coop|edu|gov|info|int|jobs|mil|mobi|museum|name|net|org|pro|tel|travel|[a-zA-Z]{2})\/\S*)|(aero|asia|biz|cat|com|coop|edu|gov|info|int|jobs|mil|mobi|museum|name|net|org|pro|tel|travel|[a-zA-Z]{2}))'
            for i in re.finditer(regex, msg_raw):
                url = i.group(0)
                if not (url.startswith('http://') or url.startswith('https://')):
                    url = 'http://' + url
                # BUGFIX: construct the work item instead of assigning into
                # an undefined ``_url`` dict.
                self.urls_to_check.put({
                    'url': url,
                    'source': source,
                    'event': event,
                })
        # TODO: Change to if source.ignored
        if source.username in self.ignores:
            return
        if msg_lower[:1] == '!':
            msg_lower_parts = msg_lower.split(' ')
            command = msg_lower_parts[0][1:]
            msg_raw_parts = msg_raw.split(' ')
            extra_msg = ' '.join(msg_raw_parts[1:]) if len(msg_raw_parts) > 1 else None
            if command in self.commands:
                if source.level >= self.commands[command].level:
                    self.commands[command].run(self, source, extra_msg, event)
                    return
    source.num_lines += 1
    source.needs_sync = True
def on_whisper(self, chatconn, event):
# We use .lower() in case twitch ever starts sending non-lowercased usernames
source = self.users[event.source.user.lower()]
if source.level >= 500:
# Only moderators and above can send commands through whispers
self.parse_message(event.arguments[0], source, event)
def on_action(self, chatconn, event):
self.on_pubmsg(chatconn, event)
def on_pubmsg(self, chatconn, event):
    """Handle a public chat message: spam heuristics (ascii art / length),
    a periodic DB sync, then hand the message to parse_message()."""
    if event.source.user == self.nickname:
        return False
    # We use .lower() in case twitch ever starts sending non-lowercased usernames
    source = self.users[event.source.user.lower()]
    cur_time = time.time()
    msg = event.arguments[0]
    msg_len = len(msg)
    if msg_len > 70:
        non_alnum = sum(not c.isalnum() for c in msg)
        ratio = non_alnum/msg_len
        log.debug('Ascii ratio: {0}'.format(ratio))
        if self.settings['ban_ascii']:
            # Long messages that are mostly symbols are treated as ascii art.
            if (msg_len > 140 and ratio > 0.8) or ratio > 0.91:
                log.debug('Timeouting {0} because of a high ascii ratio ({1}). Message length: {2}'.format(source.username, ratio, msg_len))
                self.timeout(source.username, 120)
                self.whisper(source.username, 'You have been timed out for 120 seconds because your message contained too many ascii characters.')
                return
    if self.settings['ban_msg_length']:
        if msg_len > 450:
            log.debug('Timeouting {0} because of a message length: {1}'.format(source.username, msg_len))
            self.timeout(source.username, 20)
            self.whisper(source.username, 'You have been timed out for 20 seconds because your message was too long.')
            return
    # Push unsynced state to the DB at most once a minute.
    if cur_time - self.last_sync >= 60:
        self.sync_to()
        self.last_sync = cur_time
    self.parse_message(event.arguments[0], source, event, tags=event.tags)
def quit(self):
    """Shut the bot down: final DB sync, optional goodbye phrase, disconnect
    Twitter/IRC/whisper connections, then exit the process."""
    self.sync_to()
    if self.phrases['quit']:
        phrase_data = {
            'nickname': self.nickname,
            'version': self.version,
        }
        try:
            self.say(self.phrases['quit'].format(**phrase_data))
        except Exception as e:
            log.error(e)
    if self.twitter_stream:
        self.twitter_stream.disconnect()
    self.connection.quit('bye')
    if self.whisper_conn:
        self.whisper_conn.connection.quit('bye')
    sys.exit(0)
| 0rmi/BetterTyggbotKappa | tyggbot.py | Python | mit | 32,794 |
import sys
from operator import add
from pyspark import SparkContext
#import pyspark_csv as pycsv
from csv import reader
import os
import StringIO
import datetime
import numbers
import re
def str2date(x):
    """Parse a ``MM/DD/YYYY`` string into a datetime.

    Returns the parsed datetime, or False when the string does not match.
    BUGFIX: the original parsed the value but never returned it, falling
    off the end of the function and returning None even for valid dates,
    which broke every date comparison downstream.
    """
    x = x.strip()
    try:
        return datetime.datetime.strptime(x, '%m/%d/%Y')
    except ValueError:
        return False
def str2time(x):
    """Parse an ``HH:MM:SS`` string into a datetime (date part is 1900-01-01).

    Returns the parsed datetime, or False when the string does not match.
    BUGFIX: the original parsed the value but never returned it (same
    missing-return defect as ``str2date``).
    """
    x = x.strip()
    try:
        return datetime.datetime.strptime(x, '%H:%M:%S')
    except ValueError:
        return False
def is_date(string):
if re.findall('\s*(([0]\d)|([1][0-2]))/([0-2]\d|[3][0-1])/(20(([0][6-9])|([1][0-5])))', string):
return True
else:
return False
def is_date_outside_range(string):
if re.findall('\s*(([0]\d)|([1][0-2]))/([0-2]\d|[3][0-1])/((200[0-5])|(19\d\d)|(2016))', string):
return True
else:
return False
def is_date_ridiculous(string):
if re.findall('\s*(([0]\d)|([1][0-2]))/([0-2]\d|[3][0-1])/\d\d\d\d', string):
return True
else:
return False
def is_time(string):
if re.findall('(([01]\d)|(2[0-3]))\:([0-5]\d)\:([0-5]\d)', string):
return True
else:
return False
def is_24(string):
if re.findall('(24)\:([0-5]\d)\:([0-5]\d)', string):
return True
else:
return False
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
# Index of the column under inspection (the 'Complaint To Time' /
# CMPLNT_TO_TM field of the input CSV, per the output label below).
idx = 4
if __name__ == "__main__":
    # NOTE(review): Python 2 script — ``map(lambda(k, v): ...)`` uses tuple
    # parameter unpacking, which was removed in Python 3.
    if len(sys.argv) != 2:
        print("Input file error")
        exit(-1)
    sc = SparkContext()
    data = sc.textFile(sys.argv[1], 1)
    # Classify column ``idx`` of every non-header CSV row into a coarse type
    # label, carrying the from/to date columns along for validation.
    line = data.filter(lambda l: not l.startswith('CMPLNT'))\
        .mapPartitions(lambda x: reader(x))\
        .map(lambda x: (
            x[0],  # key
            [x[idx],  # v[0]
             'Empty' if x[idx].strip() == '' else
             'NULL' if x[idx].lower() == 'null' else
             'date' if is_date(x[idx]) else
             'OutDate' if is_date_outside_range(x[idx]) else
             'WayODate' if is_date_ridiculous(x[idx]) else
             'time' if is_time(x[idx]) else
             '24hTime' if is_24(x[idx]) else
             ('int' if abs(int(float(x[idx])) - float(x[idx])) < .0000001 else 'float')
             if isfloat(x[idx]) else
             'tuple' if (re.findall('\(.*[,].*\)', x[idx]) != []
                         and x[idx].strip()[0] == '(' and x[idx].strip()[-1] == ')') else
             type(x[idx]).__name__,  # v[1]
             x[2],  # v[2] CMPLNT_FR_TM
             x[1], x[3]  # v[3]:fr_dt; v[4]:to_dt
             ]))\
        .sortByKey(lambda x: x[0])
    # Emit ``value,basetype,column-name,validity`` lines.
    result = line.map(lambda(k, v): "{0},{1},{2},{3}".format(
        v[0] if v[1] != 'Empty' else '99:99:99',
        'Empty' if v[1] == 'Empty' else 'DATETIME',
        'Complaint To Time',
        'VALID' if (v[1] == 'time' and str2date(v[3]) < str2date(v[4]) or (str2date(v[3]) == str2date(v[4]) and str2time(v[0]) >= str2time(v[2])) ) else ('NULL' if v[1] == 'Empty' else 'INVALID')
        ))\
        .saveAsTextFile("CMPLNT_TO_TM.out")
    sc.stop()
| leedtan/SparklesSunshinePuppies | feature_mapreduce/mapreduce4.py | Python | mit | 2,966 |
import zmqpy as zmq
def main():
    """Benchmark sender: push ten million 10-byte messages over a bound PUSH socket."""
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.bind("tcp://*:3333")
    msg = "aaaaaaaaaa"
    for i in range(10000000):
        socket.send(msg, 0)
    socket.close()
    context.term()
if __name__ == "__main__":
main()
| felipecruz/zmqpy | benchmarks/sender.py | Python | bsd-2-clause | 292 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: purefa_host
version_added: "2.4"
short_description: Create, Delete and Modify Hosts on Pure Storage FlashArray
description:
- This module creates, deletes or modifies hosts on Pure Storage FlashArray.
author: Simon Dodsley (@sdodsley)
options:
host:
description:
- Host Name
required: true
state:
description:
- Creates host.
- When removing host all connected volumes will be disconnected.
required: false
default: present
choices: [ "present", "absent" ]
protocol:
description:
- Defines the host connection protocol for volumes.
required: false
default: iscsi
choices: [ "iscsi", "fc" ]
wwns:
description:
- List of wwns of the host if protocol is fc
required: false
iqn:
description:
- List of IQNs of the host if protocol is iscsi
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- purestorage
'''
EXAMPLES = '''
- name: Create new new host
purefa_host:
host: foo
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Delete host
purefa_host:
host: foo
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Make sure host bar is available with wwn ports
purefa_host:
host: bar
protocol: fc
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Make sure host bar is available with iSCSI ports
purefa_host:
host: bar
protocol: iscsi
iqn:
- "iqn.1994-05.com.redhat:7d366003913"
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Map host foo to volume bar
purefa_host:
host: foo
volume: bar
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
except ImportError:
HAS_PURESTORAGE = False
def get_host(module, array):
    """Return the FlashArray host dict named module.params['host'], or None."""
    wanted = module.params['host']
    return next((h for h in array.list_hosts() if h["name"] == wanted), None)
def make_host(module, array):
    """Create the host, attach any supplied IQNs/WWNs for the chosen protocol,
    connect the optional volume, then exit reporting a change.

    Honors Ansible check mode: no API calls are made, but ``changed`` is
    still reported as True.
    """
    changed = True
    if not module.check_mode:
        # NOTE(review): the returned host object is never used.
        host = array.create_host(module.params['host'])
        if module.params['protocol'] == 'iscsi':
            if module.params['iqn']:
                array.set_host(module.params['host'], addiqnlist=module.params['iqn'])
        if module.params['protocol'] == 'fc':
            if module.params['wwns']:
                array.set_host(module.params['host'], addwwnlist=module.params['wwns'])
        if module.params['volume']:
            array.connect_host(module.params['host'], module.params['volume'])
    module.exit_json(changed=changed)
def update_host(module, array):
    """Placeholder for updating an existing host.

    Currently a no-op that always exits with ``changed=False``.
    NOTE(review): existing hosts are never modified — iqn/wwns/volume
    parameters are silently ignored when the host already exists.
    """
    changed = False
    host = module.params['host']
    module.exit_json(changed=changed)
def delete_host(module, array):
    """Disconnect every volume from the host, delete it, then exit reporting
    a change.  Honors check mode (no API calls, still reports changed)."""
    changed = True
    if not module.check_mode:
        for vol in array.list_host_connections(module.params['host']):
            array.disconnect_host(module.params['host'], vol["vol"])
        array.delete_host(module.params['host'])
    module.exit_json(changed=changed)
def main():
    """Ansible module entry point: create, update or delete the requested host.

    Changes from the original: the bare ``except:`` around the volume lookup
    was narrowed to ``except Exception`` (a bare except also swallows
    SystemExit/KeyboardInterrupt), and the unused ``protocol`` local was
    removed.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(
        dict(
            host=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            protocol=dict(default='iscsi', choices=['iscsi', 'fc']),
            iqn=dict(type='list'),
            wwns=dict(type='list'),
            volume=dict()
        )
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True)
    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in host')
    state = module.params['state']
    array = get_system(module)
    host = get_host(module, array)
    if module.params['volume']:
        try:
            array.get_volume(module.params['volume'])
        except Exception:
            # Any SDK failure here means the volume cannot be mapped.
            module.fail_json(msg='Volume {} not found'.format(module.params['volume']))
    if host and state == 'present':
        update_host(module, array)
    elif host and state == 'absent':
        delete_host(module, array)
    elif host is None and state == 'absent':
        module.exit_json(changed=False)
    else:
        make_host(module, array)
| nrwahl2/ansible | lib/ansible/modules/storage/purestorage/purefa_host.py | Python | gpl-3.0 | 5,132 |
"""The tests for the device tracker component."""
# pylint: disable=protected-access
import asyncio
import json
import logging
import unittest
from unittest.mock import call, patch
from datetime import datetime, timedelta
import os
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.loader import get_component
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_ENTITY_PICTURE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN,
STATE_HOME, STATE_NOT_HOME, CONF_PLATFORM)
import homeassistant.components.device_tracker as device_tracker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.remote import JSONEncoder
from tests.common import (
get_test_home_assistant, fire_time_changed, fire_service_discovered,
patch_yaml_files, assert_setup_component)
from ...test_util.aiohttp import mock_aiohttp_client
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
class TestComponentsDeviceTracker(unittest.TestCase):
"""Test the Device tracker."""
hass = None # HomeAssistant
yaml_devices = None # type: str
# pylint: disable=invalid-name
def setUp(self):
    """Start a test Home Assistant instance before each test."""
    self.hass = get_test_home_assistant()
    # Path of the known-devices YAML file used by the device_tracker component.
    self.yaml_devices = self.hass.config.path(device_tracker.YAML_DEVICES)

# pylint: disable=invalid-name
def tearDown(self):
    """Stop everything that was started and remove the devices file."""
    try:
        os.remove(self.yaml_devices)
    except FileNotFoundError:
        # The test may never have written the file; nothing to clean up.
        pass
    self.hass.stop()
def test_is_on(self):
    """is_on() should reflect the tracker entity's home/not_home state."""
    entity_id = device_tracker.ENTITY_ID_FORMAT.format('test')
    self.hass.states.set(entity_id, STATE_HOME)
    self.assertTrue(device_tracker.is_on(self.hass, entity_id))
    self.hass.states.set(entity_id, STATE_NOT_HOME)
    self.assertFalse(device_tracker.is_on(self.hass, entity_id))
# pylint: disable=no-self-use
def test_reading_broken_yaml_config(self):
"""Test when known devices contains invalid data."""
files = {'empty.yaml': '',
'nodict.yaml': '100',
'badkey.yaml': '@:\n name: Device',
'noname.yaml': 'my_device:\n',
'allok.yaml': 'My Device:\n name: Device',
'oneok.yaml': ('My Device!:\n name: Device\n'
'bad_device:\n nme: Device')}
args = {'hass': self.hass, 'consider_home': timedelta(seconds=60)}
with patch_yaml_files(files):
assert device_tracker.load_config('empty.yaml', **args) == []
assert device_tracker.load_config('nodict.yaml', **args) == []
assert device_tracker.load_config('noname.yaml', **args) == []
assert device_tracker.load_config('badkey.yaml', **args) == []
res = device_tracker.load_config('allok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
res = device_tracker.load_config('oneok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
def test_reading_yaml_config(self):
"""Test the rendering of the YAML configuration."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
hide_if_away=True)
device_tracker.update_config(self.yaml_devices, dev_id, device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
config = device_tracker.load_config(self.yaml_devices, self.hass,
device.consider_home)[0]
self.assertEqual(device.dev_id, config.dev_id)
self.assertEqual(device.track, config.track)
self.assertEqual(device.mac, config.mac)
self.assertEqual(device.config_picture, config.config_picture)
self.assertEqual(device.away_hide, config.away_hide)
self.assertEqual(device.consider_home, config.consider_home)
self.assertEqual(device.vendor, config.vendor)
# pylint: disable=invalid-name
@patch('homeassistant.components.device_tracker._LOGGER.warning')
def test_track_with_duplicate_mac_dev_id(self, mock_warning):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
device_tracker.Device(self.hass, True, True, 'my_device', 'AB:01',
'My device', None, None, False),
device_tracker.Device(self.hass, True, True, 'your_device',
'AB:01', 'Your device', None, None, False)]
device_tracker.DeviceTracker(self.hass, False, True, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device MAC' in args[0], \
'Duplicate MAC warning expected'
mock_warning.reset_mock()
devices = [
device_tracker.Device(self.hass, True, True, 'my_device',
'AB:01', 'My device', None, None, False),
device_tracker.Device(self.hass, True, True, 'my_device',
None, 'Your device', None, None, False)]
device_tracker.DeviceTracker(self.hass, False, True, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device IDs' in args[0], \
'Duplicate device IDs warning expected'
def test_setup_without_yaml_file(self):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
# pylint: disable=invalid-name
def test_adding_unknown_device_to_config(self):
"""Test the adding of unknown devices to configuration file."""
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('DEV1')
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}})
# wait for async calls (macvendor) to finish
self.hass.block_till_done()
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 1
assert config[0].dev_id == 'dev1'
assert config[0].track
def test_gravatar(self):
"""Test the Gravatar generation."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
self.assertEqual(device.config_picture, gravatar_url)
def test_gravatar_and_picture(self):
"""Test that Gravatar overrides picture."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
self.assertEqual(device.config_picture, gravatar_url)
def test_mac_vendor_lookup(self):
"""Test if vendor string is lookup on macvendors API."""
mac = 'B8:27:EB:00:00:00'
vendor_string = 'Raspberry Pi Foundation'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
text=vendor_string)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
assert aioclient_mock.call_count == 1
self.assertEqual(device.vendor, vendor_string)
def test_mac_vendor_mac_formats(self):
"""Verify all variations of MAC addresses are handled correctly."""
vendor_string = 'Raspberry Pi Foundation'
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
text=vendor_string)
aioclient_mock.get('http://api.macvendors.com/00:27:eb',
text=vendor_string)
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
mac = '0:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
mac = 'PREFIXED_B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
def test_mac_vendor_lookup_unknown(self):
"""Prevent another mac vendor lookup if was not found first time."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
status=404)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
def test_mac_vendor_lookup_error(self):
"""Prevent another lookup if failure during API call."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
status=500)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
def test_mac_vendor_lookup_exception(self):
"""Prevent another lookup if exception during API call."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
exc=asyncio.TimeoutError())
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
    def test_mac_vendor_lookup_on_see(self):
        """Test if macvendor is looked up when device is seen."""
        mac = 'B8:27:EB:00:00:00'
        vendor_string = 'Raspberry Pi Foundation'
        tracker = device_tracker.DeviceTracker(
            self.hass, timedelta(seconds=60), 0, [])
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               text=vendor_string)
            # Seeing a brand-new MAC must trigger exactly one vendor lookup
            # for the device that gets created.
            run_coroutine_threadsafe(
                tracker.async_see(mac=mac), self.hass.loop).result()
            assert aioclient_mock.call_count == 1, \
                'No http request for macvendor made!'
        # Devices are keyed by the MAC stripped of colons, lower-cased.
        self.assertEqual(tracker.devices['b827eb000000'].vendor, vendor_string)
    def test_discovery(self):
        """Test discovery."""
        scanner = get_component('device_tracker.test').SCANNER
        # Register the 'test' platform as discoverable, then fire a
        # discovery event and verify the platform's scanner was invoked.
        with patch.dict(device_tracker.DISCOVERY_PLATFORMS, {'test': 'test'}):
            with patch.object(scanner, 'scan_devices',
                              autospec=True) as mock_scan:
                with assert_setup_component(1, device_tracker.DOMAIN):
                    assert setup_component(
                        self.hass, device_tracker.DOMAIN, TEST_PLATFORM)
                fire_service_discovered(self.hass, 'test', {})
                self.assertTrue(mock_scan.called)
    def test_update_stale(self):
        """Test stalled update."""
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        scanner.come_home('DEV1')
        # consider_home is 59s; the second timestamp is one minute later,
        # so a device that stopped reporting must flip to not_home.
        register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
        scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
        with patch('homeassistant.components.device_tracker.dt_util.utcnow',
                   return_value=register_time):
            with assert_setup_component(1, device_tracker.DOMAIN):
                assert setup_component(self.hass, device_tracker.DOMAIN, {
                    device_tracker.DOMAIN: {
                        CONF_PLATFORM: 'test',
                        device_tracker.CONF_CONSIDER_HOME: 59,
                    }})
            self.assertEqual(STATE_HOME,
                             self.hass.states.get('device_tracker.dev1').state)
            scanner.leave_home('DEV1')
        with patch('homeassistant.components.device_tracker.dt_util.utcnow',
                   return_value=scan_time):
            fire_time_changed(self.hass, scan_time)
            self.hass.block_till_done()
            self.assertEqual(STATE_NOT_HOME,
                             self.hass.states.get('device_tracker.dev1').state)
    def test_entity_attributes(self):
        """Test the entity attributes."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        friendly_name = 'Paulus'
        picture = 'http://placehold.it/200x200'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            friendly_name, picture, hide_if_away=True)
        # Persist the device so component setup picks it up from YAML.
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN,
                                   TEST_PLATFORM)
        attrs = self.hass.states.get(entity_id).attributes
        self.assertEqual(friendly_name, attrs.get(ATTR_FRIENDLY_NAME))
        self.assertEqual(picture, attrs.get(ATTR_ENTITY_PICTURE))
    def test_device_hidden(self):
        """Test hidden devices."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN,
                                   TEST_PLATFORM)
        # The device is away (scanner reset) and hide_if_away is set, so
        # the entity state must carry the hidden attribute.
        self.assertTrue(self.hass.states.get(entity_id)
                        .attributes.get(ATTR_HIDDEN))
    def test_group_all_devices(self):
        """Test grouping of devices."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN,
                                   TEST_PLATFORM)
        # The all-devices group entity must exist, reflect the device's
        # not_home state, and contain exactly our tracked entity.
        state = self.hass.states.get(device_tracker.ENTITY_ID_ALL_DEVICES)
        self.assertIsNotNone(state)
        self.assertEqual(STATE_NOT_HOME, state.state)
        self.assertSequenceEqual((entity_id,),
                                 state.attributes.get(ATTR_ENTITY_ID))
@patch('homeassistant.components.device_tracker.DeviceTracker.async_see')
def test_see_service(self, mock_see):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
params = {
'dev_id': 'some_device',
'host_name': 'example.com',
'location_name': 'Work',
'gps': [.3, .8],
'attributes': {
'test': 'test'
}
}
device_tracker.see(self.hass, **params)
self.hass.block_till_done()
assert mock_see.call_count == 1
self.assertEqual(mock_see.call_count, 1)
self.assertEqual(mock_see.call_args, call(**params))
mock_see.reset_mock()
params['dev_id'] += chr(233) # e' acute accent from icloud
device_tracker.see(self.hass, **params)
self.hass.block_till_done()
assert mock_see.call_count == 1
self.assertEqual(mock_see.call_count, 1)
self.assertEqual(mock_see.call_args, call(**params))
    def test_new_device_event_fired(self):
        """Test that the device tracker will fire an event."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN,
                                   TEST_PLATFORM)
        test_events = []

        @callback
        def listener(event):
            """Helper method that will verify our event got called."""
            test_events.append(event)

        self.hass.bus.listen("device_tracker_new_device", listener)
        # Seeing the same device twice must only fire the event once.
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        self.hass.block_till_done()
        assert len(test_events) == 1

        # Assert we can serialize the event
        json.dumps(test_events[0].as_dict(), cls=JSONEncoder)

        assert test_events[0].data == {
            'entity_id': 'device_tracker.hello',
            'host_name': 'hello',
        }
    # pylint: disable=invalid-name
    def test_not_write_duplicate_yaml_keys(self):
        """Test that the device tracker will not generate invalid YAML."""
        with assert_setup_component(1, device_tracker.DOMAIN):
            assert setup_component(self.hass, device_tracker.DOMAIN,
                                   TEST_PLATFORM)
        # Two different MACs share the same host name; both must be written
        # under distinct keys in known_devices.yaml.
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        device_tracker.see(self.hass, 'mac_2', host_name='hello')
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 2
# pylint: disable=invalid-name
def test_not_allow_invalid_dev_id(self):
"""Test that the device tracker will not allow invalid dev ids."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
device_tracker.see(self.hass, dev_id='hello-world')
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 0
    def test_see_state(self):
        """Test device tracker see records state correctly."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        params = {
            'mac': 'AA:BB:CC:DD:EE:FF',
            'dev_id': 'some_device',
            'host_name': 'example.com',
            'location_name': 'Work',
            'gps': [.3, .8],
            'gps_accuracy': 1,
            'battery': 100,
            'attributes': {
                'test': 'test',
                'number': 1,
            },
        }
        device_tracker.see(self.hass, **params)
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 1

        # The entity id is derived from the host name with dots stripped.
        state = self.hass.states.get('device_tracker.examplecom')
        attrs = state.attributes
        self.assertEqual(state.state, 'Work')
        self.assertEqual(state.object_id, 'examplecom')
        self.assertEqual(state.name, 'example.com')
        self.assertEqual(attrs['friendly_name'], 'example.com')
        self.assertEqual(attrs['battery'], 100)
        # gps list is split into latitude/longitude attributes.
        self.assertEqual(attrs['latitude'], 0.3)
        self.assertEqual(attrs['longitude'], 0.8)
        self.assertEqual(attrs['test'], 'test')
        self.assertEqual(attrs['gps_accuracy'], 1)
        self.assertEqual(attrs['number'], 1)
    @patch('homeassistant.components.device_tracker._LOGGER.warning')
    def test_see_failures(self, mock_warning):
        """Test that the device tracker see failures."""
        tracker = device_tracker.DeviceTracker(
            self.hass, timedelta(seconds=60), 0, [])

        # MAC is not a string (but added)
        tracker.see(mac=567, host_name="Number MAC")

        # No device id or MAC(not added)
        with self.assertRaises(HomeAssistantError):
            run_coroutine_threadsafe(
                tracker.async_see(), self.hass.loop).result()
        assert mock_warning.call_count == 0

        # Ignore gps on invalid GPS (both added & warnings)
        tracker.see(mac='mac_1_bad_gps', gps=1)
        tracker.see(mac='mac_2_bad_gps', gps=[1])
        tracker.see(mac='mac_3_bad_gps', gps='gps')
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        # One warning per bad-GPS see; all four devices still persisted.
        assert mock_warning.call_count == 3
        assert len(config) == 4
    @patch('homeassistant.components.device_tracker.async_log_exception')
    def test_config_failure(self, mock_ex):
        """Test that the device tracker see failures."""
        # A negative consider_home must fail config validation, so zero
        # platforms get set up.
        # NOTE(review): mock_ex is patched to silence logging but never
        # asserted on here.
        with assert_setup_component(0, device_tracker.DOMAIN):
            setup_component(self.hass, device_tracker.DOMAIN,
                            {device_tracker.DOMAIN: {
                                device_tracker.CONF_CONSIDER_HOME: -1}})
| florianholzapfel/home-assistant | tests/components/device_tracker/test_init.py | Python | mit | 24,131 |
from django.contrib import admin
from surprise import models
def update_metadata(model_admin, request, queryset):
    """Admin action: re-fetch metadata for every selected surprise."""
    for item in queryset:
        models.update_metadata(item)
# Label shown in the admin action dropdown.
update_metadata.short_description = "Update metadata"
class SurpriseAdmin(admin.ModelAdmin):
    """Admin for Surprise objects with a bulk metadata-refresh action."""
    list_display = ('id', 'link', 'metadata_retrieved', 'link_exists')
    actions = [update_metadata]

admin.site.register(models.Surprise, SurpriseAdmin)
| mirigata/mirigata | mirigata/surprise/admin.py | Python | agpl-3.0 | 441 |
import time
def dumps(s):
    """Pretty-print *s* via the browser's console-json library.

    NOTE(review): ``console`` and ``die`` come from the Transcrypt/JS
    runtime; this module is not importable under CPython.
    """
    if not console.json:
        # Fail loudly when the optional console-json lib is missing.
        # (Fixed typo in the error-type string: "requries" -> "requires".)
        die('dumps requires lib', 'console-json')
    console.json(s)
def out(prefix, *a):
    """Log *a* to the JS console, prefixed, using each object's repr.

    NOTE(review): under Transcrypt ``obj.__repr__`` is a truthy attribute
    test, and ``console.log.apply`` is the JS varargs call — this code only
    works when transpiled, not under CPython.
    """
    p = [prefix]
    for obj in a:
        if obj.__repr__:
            p.append(obj.__repr__())
        else:
            p.append(obj)
    console.log.apply(console, p)
def log(*a):
    """Console-log all arguments with an empty prefix (see ``out``)."""
    out('', *a)
def die(err_type, *a):
    """Log the details with a '!!' prefix, then raise a JS Error.

    NOTE(review): ``throw``/``__new__`` are Transcrypt constructs; this
    line is not valid CPython and only compiles via the transpiler.
    """
    out('!!', *a)
    throw (__new__( Error(err_type)))
def decorate(cls, func, dfunc):
    """
    class : a Transcrypt class
    func  : name of method to decorate
    dfunc : decorator function

    Example:
    e.g. dfunc =
    def mydeco(obj, func, *a): return func(obj, *a)
    class A:
        i = 2
        def foo(self, j, k): return self.i * int(j) * int(k)
    decorate(A, 'foo', dfunc)
    A().foo(4, 5) -> will pass the args and the result through the mydeco

    NOTE(review): relies on JS ``this``, ``Function.bind`` and
    ``__defineGetter__`` — Transcrypt-only, not runnable under CPython.
    """
    def d3(*a):
        # stage 3: call the decorator like known in python (obj, func, args):
        # 'self'/'orig'/'dfunc' were bound onto `this` by d2's bind() call.
        return this['dfunc'](this['self'], this['orig'], *a)

    def d2(f, dfunc):
        # stage2: return stage3 function, with context
        # the getter returns a fresh bound d3 each time the attr is read.
        return lambda: d3.bind({'self': this, 'orig': f, 'dfunc': dfunc})

    # stage1: define the getter, func = name of original function:
    cls.__defineGetter__(func, d2(cls[func], dfunc))
def d(*a):
    """Build a Python dict from an optional mapping argument.

    ``deep = d(foo=d(bar=42))`` => ``deep = {'foo': {'bar': 42}}``
    with the map a pure js obj.

    NOTE(review): under Transcrypt the kwargs arrive as a JS object in
    ``a[0]``; in CPython calling with no args would raise IndexError.
    """
    r = a[0]
    if not r:
        return {}
    return dict(r)
def jd(*a):
    """Like ``d`` but return a *plain JS object* instead of a dict.

    ``deep = d(foo=d(bar=42))`` => ``deep = {'foo': {'bar': 42}}``
    with the map a pure js obj.

    NOTE(review): the ``__pragma__`` emits raw JS (``return {}``), and the
    ``del`` lines strip Transcrypt's bookkeeping keys from the object.
    """
    r = a[0]
    if not r:
        __pragma__('js', '{}', 'return {}')
    del r['constructor']
    del r['__class__']
    return r
def name_value_pairs(l):
    """Turn (key, value) pairs into a list of name/value dicts."""
    result = []
    for key, val in l:
        result.append({'name': key, 'value': val})
    return result
def camelize(s):
    """Convert snake_case to camelCase (kendo wants camelCase)."""
    first, *rest = s.split('_')
    return first + ''.join(part.capitalize() for part in rest)
def jstype(obj, typ):
    """Return True if JS ``typeof(obj)`` equals the string *typ*.

    NOTE(review): ``t`` is introduced by the raw-JS pragma below, so this
    function only makes sense after transpilation.
    """
    __pragma__('js', '{}', '''
    var t = typeof(obj)''')
    if t == typ:
        return True
    return False
class PyDate:
    '''
    Descendants get a self.value property, which is always in sync
    with an internal self.ts = unixtime'''
    # _value: cached JS Date; ts: unix timestamp (seconds) — kept in sync.
    _value = ts = None
    _mod_time = time

    def get_value(self):
        ''' the js Date we return is based on the unixtime ts '''
        if self._value:
            t = self._value.getTime() / 1000
            if t == self.ts:
                return self._value
        # will set the new self._value to self.ts and return it
        # NOTE(review): set_value is called without its `ts` argument —
        # works under Transcrypt (missing arg becomes undefined/falsy) but
        # would be a TypeError in CPython; confirm intended semantics.
        return self.set_value()

    def set_value(self, ts):
        ''' accept none, js data and unix time
            on none our self.ts wins
        '''
        if ts:
            if not jstype(ts, 'number'):
                # ts is a JS Date: cache it and derive the unix time.
                self._value = ts
                self.ts = ts.getTime() / 1000
                return self._value
            # ts = unixtime:
            self.ts = ts
            self._value = __new__(Date(ts * 1000))
            return self._value
        if not self.ts:
            # no timestamp at all yet: default to "now".
            self.ts = time.time()
        return self.set_value(self.ts)

    value = property(get_value, set_value)
| axiros/misc_transcrypt | doc/kendo/src/ch7/tools.py | Python | bsd-3-clause | 3,203 |
from datetime import datetime
from multiprocessing import Process
from uuid import uuid4
import time
from collections import OrderedDict
import logging
import functools
import zmq
from zmq.eventloop.ioloop import IOLoop, PeriodicCallback
from zmq.utils import jsonapi
from .socket_configs import DeferredSocket, SockConfigsTask,\
create_sockets, create_streams
from .utils import cleanup_ipc_uris, log_label, unique_ipc_uri
class Consumer(SockConfigsTask):
    """PULL tagged work items, relay each to a REQ service, PUSH the tagged
    reply back.

    Backend half of the async server: frame 0 of every pulled message is an
    async id; the remaining frames are forwarded verbatim over the REQ
    socket and the (blocking) reply is re-tagged and pushed out.
    """

    def __init__(self, req_uri, push_uri, pull_uri, delay=0, push_bind=True,
                 pull_bind=True, **kwargs):
        # Optional artificial delay (seconds) after each request, e.g. to
        # simulate a slow backend.
        self.delay = delay
        self.uris = OrderedDict([
            ('req', req_uri),
            ('push', push_uri),
            ('pull', pull_uri),
        ])
        self.sock_configs = OrderedDict([
            ('req', DeferredSocket(zmq.REQ).connect(req_uri)),
            ('push', DeferredSocket(zmq.PUSH)),
            ('pull', DeferredSocket(zmq.PULL)
             .stream_callback('on_recv_stream', self.process_input)),
        ])
        # push/pull endpoints may bind or connect depending on which side
        # of the pipeline owns them.
        if push_bind:
            self.sock_configs['push'].bind(push_uri)
        else:
            self.sock_configs['push'].connect(push_uri)
        if pull_bind:
            self.sock_configs['pull'].bind(pull_uri)
        else:
            self.sock_configs['pull'].connect(pull_uri)
        super(Consumer, self).__init__(self.sock_configs, **kwargs)

    def process_input(self, env, stream, multipart_message):
        """Handle one tagged request from the pull stream."""
        logging.getLogger(log_label(self)).debug(
            '%s %s' % (stream, multipart_message,))
        # Frame 0 is the correlation id; the rest is the payload.
        async_id = multipart_message[0]
        message = multipart_message[1:]
        env['socks']['req'].send_multipart(message)
        # REQ/REP is strictly synchronous: this blocks until the reply.
        response = env['socks']['req'].recv_multipart()
        if self.delay > 0:
            time.sleep(self.delay)
        # Re-attach the async id so the producer can correlate the reply.
        env['socks']['push'].send_multipart([async_id] + response)
        logging.getLogger(log_label(self)).debug(
            ' \--> req response: %s' % response)
class Producer(SockConfigsTask):
    """Frontend half of the async server.

    Accepts requests on a REP socket, tags them with a unique async id,
    pushes them to the consumer, and publishes the consumer's tagged
    responses on a PUB socket.
    """

    def __init__(self, rep_uri, pub_uri, push_uri, pull_uri, push_bind=False,
                 pull_bind=False, **kwargs):
        self.uris = OrderedDict([
            ('rep', rep_uri),
            ('pub', pub_uri),
            ('push', push_uri),
            ('pull', pull_uri),
        ])
        self.sock_configs = OrderedDict([
            ('push', DeferredSocket(zmq.PUSH)),
            ('pull', DeferredSocket(zmq.PULL)
             .stream_callback('on_recv_stream', self.process_response)
             ),
            ('rep', DeferredSocket(zmq.REP)
             .bind(rep_uri)
             .stream_callback('on_recv_stream', self.process_request)
             ),
            ('pub', DeferredSocket(zmq.PUB).bind(pub_uri))
        ])
        # push/pull default to connect (the Consumer binds by default).
        if push_bind:
            self.sock_configs['push'].bind(push_uri)
        else:
            self.sock_configs['push'].connect(push_uri)
        if pull_bind:
            self.sock_configs['pull'].bind(pull_uri)
        else:
            self.sock_configs['pull'].connect(pull_uri)
        super(Producer, self).__init__(self.sock_configs, **kwargs)

    def process_response(self, env, stream, multipart_message):
        """Forward a consumer response (still async-id tagged) to PUB."""
        logging.getLogger(log_label(self)).debug('%s %s' % (stream,
                                                            multipart_message,))
        env['socks']['pub'].send_multipart(multipart_message)

    def process_request(self, env, stream, multipart_message):
        """Tag an incoming request and hand it to the consumer.

        The REP reply is only an acknowledgement carrying the async id; the
        real result arrives later on the PUB socket.
        """
        logging.getLogger(log_label(self)).debug('%s %s' % (stream,
                                                            multipart_message,))
        # Timestamp + uuid makes the id unique and roughly sortable.
        async_id = '[%s] %s' % (datetime.now(), uuid4())
        env['socks']['rep'].send_multipart([async_id] + multipart_message)
        env['socks']['push'].send_multipart([async_id] + multipart_message)
class JsonProducer(Producer):
    """Producer variant speaking JSON on the frontend sockets.

    Requests/responses are single-frame JSON messages; the async id is
    embedded in the JSON body instead of a separate frame, and a small
    command protocol (uri queries, add_subscription) is handled inline.
    """

    def process_response(self, env, stream, multipart_message):
        '''
        Extract async_id from first frame of the message, strip the first frame
        from the message, and embed the id into the JSON-encoded message.
        '''
        logging.getLogger(log_label(self)).debug('%s %s' % (stream,
                                                            multipart_message,))
        # multipart_message should have two parts: 1) async_id, 2) JSON-message
        assert(len(multipart_message) == 2)
        async_id = multipart_message[0]
        message = jsonapi.loads(multipart_message[1])
        message['async_id'] = async_id
        env['socks']['pub'].send_json(message)

    def process_subscribed_in(self, label, env, multipart_message):
        '''
        Extract set `__referrer__` field of message before forwarding the
        message on through the `PUB` socket.
        '''
        # Stamp the originating subscription label so downstream consumers
        # can tell which external feed a republished message came from.
        message = jsonapi.loads(multipart_message[0])
        message['__referrer__'] = label
        logging.getLogger(log_label(self)).info('%s' % ((label, env['socks'], ), ))
        env['socks']['pub'].send_json(message)

    def process_request(self, env, stream, multipart_message):
        '''
        Generate a unique async_id and:
            1) Add it as the first frame the message before sending to the PUSH socket
            2) Embed the async_id into the JSON-encoded message before sending
                the response on the REP socket.  The response to the REP socket
                is intended as an acknowledgement of the request (not the
                result).  The result will be published to the PUB socket once
                the request has been processed.
        '''
        logging.getLogger(log_label(self)).debug('%s %s' % (stream,
                                                            multipart_message,))
        assert(len(multipart_message) == 1)
        request = jsonapi.loads(multipart_message[0])
        response = request.copy()
        if request['command'] in ('pub_uri', 'rep_uri', 'pull_uri',
                                  'push_uri'):
            # URI queries are answered synchronously from self.uris.
            response['type'] = 'result'
            response['result'] = self.uris[request['command'].split('_')[0]]
        elif request['command'] in ('add_subscription', ):
            # Dynamically attach a SUB socket that republishes an external
            # PUB feed through our own PUB socket.
            response['type'] = 'result'
            label, sub_uri = request['args']
            logging.getLogger(log_label(self)).info(
                "add_subscription: label=%s sub_uri=%s" % (label, sub_uri))
            # Create a `DeferredSocket` configuration for our new `SUB` socket.
            deferred_socks = OrderedDict([
                (label, DeferredSocket(zmq.SUB)
                 .connect(sub_uri)
                 .setsockopt(zmq.SUBSCRIBE, '')
                 .stream_callback('on_recv',
                                  functools.partial(self.process_subscribed_in,
                                                    label)))
            ])
            # Create the actual socket and stream for the new `DeferredSocket`.
            # N.B. We must use `create_sockets` and `create_streams` separately
            # here.  The reason is that the `create_sockets_and_streams`
            # function passes the newly created set of sockets to
            # `create_streams` internally, which means that only newly created
            # sockets will be passed to any stream callbacks.  However, by
            # explicitly calling `create_sockets` separately, we can simply
            # pass in the up-to-date socket list, i.e., `env['socks']`.
            temp_socks = create_sockets(env['ctx'], deferred_socks)
            # Update current environment's list of sockets with newly created
            # subscribe socket.
            env['socks'].update(temp_socks)
            temp_streams = create_streams(deferred_socks, env['socks'],
                                          env['io_loop'])
            # Update current environment's list of streams with newly created
            # stream.
            env['streams'].update(temp_streams)
            response['result'] = 'SUCCESS'
            response['description'] = 'The subscription has been added '\
                    'successfully with the label: %s' % label
        else:
            # Everything else is an async job: tag it, ack on REP, push to
            # the consumer; the result comes back via PUB later.
            async_id = '[%s] %s' % (datetime.now(), uuid4())
            response['type'] = 'async'
            response['async_id'] = async_id
            env['socks']['push'].send_multipart([async_id] + multipart_message)
        env['socks']['rep'].send_json(response)
class AsyncServerAdapter(object):
    """Wire a Producer and a Consumer together over fresh ipc endpoints and
    run them as child processes until the control pipe signals shutdown."""

    # Subclasses can swap in a different frontend implementation.
    producer_class = Producer

    def __init__(self, backend_rep_uri, frontend_rep_uri, frontend_pub_uri,
                 control_pipe=None):
        self.uris = OrderedDict([
            ('backend_rep', backend_rep_uri),
            # Internal producer<->consumer endpoints get throwaway ipc uris.
            ('consumer_push_be', unique_ipc_uri()),
            ('consumer_pull_be', unique_ipc_uri()),
            ('frontend_rep_uri', frontend_rep_uri),
            ('frontend_pub_uri', frontend_pub_uri)
        ])
        self.control_pipe = control_pipe
        self.done = False
        logging.getLogger(log_label(self)).info("uris: %s", self.uris)

    def watchdog(self):
        # Periodic callback: any message on the control pipe requests stop.
        if self.control_pipe is None:
            return
        elif not self.done and self.control_pipe.poll():
            self.done = True
            self.finish()

    def run(self):
        """Spawn consumer and producer processes and run the IO loop until
        interrupted or the watchdog fires."""
        consumer = Process(target=Consumer(self.uris['backend_rep'],
                                           self.uris['consumer_push_be'],
                                           self.uris['consumer_pull_be']).run
        )
        # Note the crossed uris: the producer pulls from where the consumer
        # pushes, and vice versa.
        producer = Process(target=self.producer_class(
                self.uris['frontend_rep_uri'],
                self.uris['frontend_pub_uri'],
                self.uris['consumer_pull_be'],
                self.uris['consumer_push_be']).run
        )
        self.io_loop = IOLoop()
        periodic_callback = PeriodicCallback(self.watchdog, 500, self.io_loop)
        periodic_callback.start()
        try:
            consumer.start()
            producer.start()
            self.io_loop.start()
        except KeyboardInterrupt:
            pass
        producer.terminate()
        consumer.terminate()
        logging.getLogger(log_label(self)).info('PRODUCER and CONSUMER have '
                                                'been terminated')

    def __del__(self):
        # Best-effort removal of the throwaway ipc socket files.
        uris = [self.uris[label] for label in ('consumer_push_be',
                                               'consumer_pull_be', )]
        cleanup_ipc_uris(uris)

    def finish(self):
        logging.getLogger(log_label(self)).debug('"finish" request received')
        self.io_loop.stop()
class AsyncJsonServerAdapter(AsyncServerAdapter):
    """AsyncServerAdapter whose frontend speaks JSON (see JsonProducer)."""
    producer_class = JsonProducer
def get_uris(sock):
    """Query a producer's REP socket for its rep/push/pull/pub URIs.

    Sends one ``{"command": "<label>_uri"}`` request per label and collects
    each reply's ``result`` field.
    """
    labels = ('rep', 'push', 'pull', 'pub')
    uris = {}
    for label in labels:
        sock.send_json({"command": '%s_uri' % label})
        uris[label] = sock.recv_json()['result']
    return uris
| cfobel/zmq_helpers | zmq_helpers/async.py | Python | gpl-3.0 | 10,902 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first unspent output whose amount equals *amount*.

    Raises AssertionError when no matching output exists.
    """
    match = next((utxo for utxo in listunspent
                  if utxo['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(StarwelsTestFramework):
    def set_test_params(self):
        # Four nodes starting from an empty (freshly mined) chain.
        self.num_nodes = 4
        self.setup_clean_chain = True
    def setup_network(self, split=False):
        """Start the nodes and connect 0-1, 1-2, 0-2, 0-3 bidirectionally."""
        self.setup_nodes()

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid starwels address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 USDH to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
    # Allow running this functional test directly as a script.
    RawTransactionsTest().main()
| starwels/starwels | test/functional/rpc_fundrawtransaction.py | Python | mit | 33,077 |
""":mod:`plastic.config` --- Configuration mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from keyword import iskeyword
from re import VERBOSE, compile
from os.path import abspath
from tokenize import Name
from types import ModuleType
from werkzeug._internal import _DictAccessorProperty
from werkzeug.utils import import_string
__all__ = 'Config', 'config_property', 'get_typename', 'import_instance'
class Config(dict):
    """Mapping object (subtype of :class:`dict`) to store configurations."""

    def update_from_object(self, object_, overwrite=False):
        """Updates configuration from arbitrary ``object_``. ::

            @config.update_from_object
            class default_config:
                debug = False
                database_uri = 'sqlite://'

        It ignores attributes that start with underscore and,
        unless ``overwrite`` is ``True``, keys that already exist.

        :param object_: arbitrary object to update from,
                        or import path of that if it's a string
                        e.g. ``'myapp.configs:prod'``
        :param overwrite: keys that already exist are ignored by default.
                          if ``True`` has given to this parameter,
                          these are overwritten
        :type overwrite: :class:`bool`

        """
        if isinstance(object_, basestring):
            # A string is treated as an import path like 'myapp.configs:prod'.
            object_ = import_string(object_)
        for key in dir(object_):
            if not key.startswith('_') and (overwrite or key not in self):
                self[key] = getattr(object_, key)

    def update_from_file(self, filename, overwrite=False):
        """Updates configuration from Python file.

        For example, if there's :file:`dev.cfg`::

            debug = False
            database_uri = 'sqlite://'

        so you can load it using :meth:`update_from_file()` method::

            config.update_from_file('dev.cfg')

        Like :meth:`update_from_object()` method, it also ignores
        variables that start with underscore.

        :param filename: the path of Python file to load
        :type filename: :class:`basestring`
        :param overwrite: keys that already exist are ignored by default.
                          if ``True`` has given to this parameter,
                          these are overwritten
        :type overwrite: :class:`bool`

        """
        # Execute the file as a throwaway module so its top-level
        # assignments become attributes that update_from_object can copy.
        module = ModuleType(filename)
        module.__file__ = abspath(filename)
        execfile(filename, module.__dict__)
        self.update_from_object(module, overwrite)

    def update_unless_exists(self, mapping=(), **keywords):
        """Almost equivalent to :meth:`~dict.update()` except
        it ignores keys already exist.

        >>> config = Config(a=1, b=2)
        >>> config.update_unless_exists({'b': 5, 'c': 3})
        >>> config
        plastic.config.Config(a=1, b=2, c=3)

        """
        if callable(getattr(mapping, 'keys', None)):
            # Mapping-like argument: iterate its keys directly.
            for key in mapping:
                if key not in self:
                    self[key] = mapping[key]
        else:
            # Otherwise assume an iterable of (key, value) pairs.
            for key, value in mapping:
                if key not in self:
                    self[key] = value
        for key, value in keywords.iteritems():
            if key not in self:
                self[key] = value

    def __repr__(self):
        cls = type(self)
        typename = cls.__module__ + '.' + cls.__name__
        # ``compile`` here is :func:`re.compile` (imported at module level);
        # keys that are valid, non-keyword identifiers are rendered as
        # keyword arguments, everything else inside a dict literal.
        is_ident = compile('^' + Name + '$').match
        keywords = []
        literals = []
        format_keyword = '{0}={1!r}'.format
        format_literal = '{0!r}: {1!r}'.format
        for key, value in self.iteritems():
            if is_ident(key) and not iskeyword(key):
                keywords.append(format_keyword(key, value))
            else:
                literals.append(format_literal(key, value))
        keywords = ', '.join(keywords)
        literals = ', '.join(literals)
        if literals and keywords:
            return '{0}({{{1}}}, {2})'.format(typename, literals, keywords)
        elif literals:
            return '{0}({{{1}}})'.format(typename, literals)
        return '{0}({1})'.format(typename, keywords)
class config_property(_DictAccessorProperty):
    """Descriptor that exposes a configuration value as an attribute of
    the application object, reading from and writing to its ``config``
    mapping.
    """

    # Unlike some Werkzeug accessor properties, assignment is permitted.
    read_only = False

    def lookup(self, obj):
        # The backing store is the application's ``config`` mapping.
        return getattr(obj, 'config')
expression_pattern = compile(
r'^(?P<import_string>' + Name + r'(?:\.' + Name + r')*(?::' + Name + r')?)'
r'\s*(?:\((?P<arguments>.*?)\)\s*)?$'
)
expression_arguments_pattern = compile(r'''
(?: ^ | ,) \s*
(?:(?P<keyword> \w+) \s* = \s*)?
(?P<argument>
(?P<argument_none> None) |
(?P<argument_bool> True | False) |
(?P<argument_float> \d+\.(?:\d+)?) |
(?P<argument_int> \d+) |
(?P<argument_bareword> \w+) |
(?P<argument_string>
(?: [uU][rR] | [uU]|[rR])?
(?: ' (?: [^'\\] | \\. )* ' | " (?: [^"\\] | \\. )* ")
)
)
\s*
''', VERBOSE)
def import_instance(expression, type_):
    """This function provides a minimal language to import a class from
    a package/module and make an instance of it.  For example, the
    following code::

        val = import_instance('abc.defg:ClassName(3.14, hello, world=False)',
                              SomeBaseClass)

    is equivalent to the following normal Python code::

        from abc.defg import ClassName
        val = ClassName(3.14, 'hello', world=False)

    As you can see its syntax is slightly different from normal Python.
    You can pass arguments to class' constructor using its own syntax.
    You can pass limited types of values:

    Booleans
       You can pass ``True`` and ``False``.

    Numbers
       It can take integers and floating numbers e.g. ``123``, ``3.14``.

    Strings
       You can ``'single quote'`` and ``"double quote"`` both for string
       literals, and ``r'raw string literals'`` are also available.
       There are ``u'Unicode string literals'`` as well.
       Moreover, if there are unquoted barewords these are also
       interpreted as strings.

    None
       You can pass ``None``.

    :param expression: the import expression string, or an object that is
                       already an instance of ``type_`` (returned as is)
    :param type_: the expected class; the imported class must be a subtype
                  of it and the created object an instance of it
    :type type_: :class:`type`
    :returns: the created (or passed-through) instance
    :raises TypeError: if ``type_`` is not a class or is a string type, or
                       if the imported object / result does not match it
    :raises ValueError: if ``expression`` cannot be parsed

    """
    if not isinstance(type_, type):
        raise TypeError('type_ must be a class object, not ' + repr(type_))
    if issubclass(type_, basestring):
        # String types are rejected up front: a string argument is always
        # treated as an import expression, so a string *instance* could
        # never be distinguished from one.
        raise TypeError('type_ cannot be basestring nor its subtype because '
                        'these are ambiguious to determine whether the given '
                        'value is an import expression or already an '
                        'instance of type_')
    if isinstance(expression, basestring):
        match = expression_pattern.match(expression)
        if not match:
            raise ValueError('invalid import expression: ' + repr(expression))
        import_name = match.group('import_string')
        cls = import_string(import_name)
        if not callable(cls):
            raise TypeError(
                '{0} ({1!r}) is not callable'.format(import_name, cls)
            )
        # Non-type callables (e.g. factory functions) are allowed; only
        # actual classes are checked for being a subtype of type_ here.
        if isinstance(cls, type) and not issubclass(cls, type_):
            raise TypeError(
                '{0} ({1!r}) is not a subtype of {2} ({3!r})'.format(
                    import_name, cls, get_typename(type_), type_
                )
            )
        arguments = match.group('arguments')
        if arguments:
            arguments_offset = match.start('arguments')
            offset = 0
            args = []
            kwargs = {}
            for match in expression_arguments_pattern.finditer(arguments):
                keyword = match.group('keyword')
                # Reject unparsable gaps between tokens, and positional
                # arguments appearing after keyword arguments.  The caret
                # in the error message points at the offending column.
                if match.start() > offset or not keyword and kwargs:
                    raise ValueError(
                        'invalid expression:\n' + expression + '\n' +
                        ' ' * (offset + arguments_offset) + '^'
                    )
                argument = match.group('argument_bareword')
                if not argument:
                    # Quoted strings and literal tokens (None/bools/numbers)
                    # are evaluated; barewords are already plain strings.
                    argument = eval(match.group('argument'))
                if keyword:
                    kwargs[keyword] = argument
                else:
                    args.append(argument)
                offset = match.end()
            if offset < len(arguments):
                # Trailing junk after the last recognized argument.
                raise ValueError('invalid expression:\n' + expression + '\n' +
                                 ' ' * (offset + arguments_offset) + '^')
            instance = cls(*args, **kwargs)
        else:
            instance = cls()
        if not isinstance(instance, type_):
            raise TypeError(
                '{0} is not an instance of {1}'.format(expression,
                                                       get_typename(type_))
            )
    else:
        # Not a string: treat it as an already-constructed instance.
        instance = expression
        if not isinstance(instance, type_):
            raise TypeError(
                '{0!r} is not an instance of {1}'.format(instance,
                                                         get_typename(type_))
            )
    return instance
def get_typename(cls):
    """Return a human-readable dotted name for the class ``cls``.

    Classes defined in ``__main__`` or in the built-in module are shown
    by their bare name; every other class is qualified with its module
    path.

    :param cls: the class object to find its typename
    :type cls: :class:`type`
    :returns: the typename
    :rtype: :class:`basestring`
    """
    module_name = cls.__module__
    is_bare = module_name in ('__main__', '__builtin__')
    return cls.__name__ if is_bare else '{0}.{1}'.format(module_name,
                                                         cls.__name__)
| dahlia/plastic | plastic/config.py | Python | mit | 9,295 |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Sequence
import pytest
from pants.core.util_rules import stripped_source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFileNames, StrippedSourceFiles
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_SNAPSHOT
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.target import Sources, SourcesPathsRequest, Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
class TargetWithSources(Target):
    # Minimal target type for these tests: it carries only a `sources`
    # field, which is all that source-root stripping operates on.
    alias = "target"
    core_fields = (Sources,)
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with the stripping rules and the query
    rules these tests issue requests through."""
    return RuleRunner(
        rules=[
            *stripped_source_files.rules(),
            QueryRule(SourceFiles, [SourceFilesRequest]),
            QueryRule(StrippedSourceFiles, [SourceFiles]),
            QueryRule(StrippedSourceFileNames, [SourcesPathsRequest]),
        ],
        target_types=[TargetWithSources],
    )
def get_stripped_files(
    rule_runner: RuleRunner,
    request: SourceFiles,
    *,
    source_root_patterns: Sequence[str] = ("src/python", "src/java", "tests/python"),
) -> list[str]:
    """Run the stripping rules over ``request`` and return the stripped paths.

    The given source roots are forwarded to the rule runner as the
    ``--source-root-patterns`` option before the request is made.
    """
    option = f"--source-root-patterns={source_root_patterns!r}"
    rule_runner.set_options([option])
    stripped = rule_runner.request(StrippedSourceFiles, [request])
    return [*stripped.snapshot.files]
def test_strip_snapshot(rule_runner: RuleRunner) -> None:
    """Stripping a snapshot: single and multiple source roots, an
    unrecognized root, a repo-root source root, and an empty snapshot.
    """

    def get_stripped_files_for_snapshot(
        paths: list[str],
        *,
        source_root_patterns: Sequence[str] = ("src/python", "src/java", "tests/python"),
    ) -> list[str]:
        # A snapshot of empty files suffices: stripping only inspects paths.
        input_snapshot = rule_runner.make_snapshot_of_empty_files(paths)
        request = SourceFiles(input_snapshot, ())
        return get_stripped_files(rule_runner, request, source_root_patterns=source_root_patterns)

    # Normal source roots.
    assert get_stripped_files_for_snapshot(["src/python/project/example.py"]) == [
        "project/example.py"
    ]
    assert get_stripped_files_for_snapshot(["src/java/com/project/example.java"]) == [
        "com/project/example.java"
    ]
    assert get_stripped_files_for_snapshot(["tests/python/project_test/example.py"]) == [
        "project_test/example.py"
    ]

    # Unrecognized source root.
    unrecognized_source_root = "no-source-root/example.txt"
    with pytest.raises(ExecutionError) as exc:
        get_stripped_files_for_snapshot([unrecognized_source_root])
    assert f"NoSourceRootError: No source root found for `{unrecognized_source_root}`." in str(
        exc.value
    )

    # Support for multiple source roots.
    file_names = ["src/python/project/example.py", "src/java/com/project/example.java"]
    assert get_stripped_files_for_snapshot(file_names) == [
        "com/project/example.java",
        "project/example.py",
    ]

    # Test a source root at the repo root. We have performance optimizations for this case
    # because there is nothing to strip.
    assert get_stripped_files_for_snapshot(
        ["project/f1.py", "project/f2.py"], source_root_patterns=["/"]
    ) == ["project/f1.py", "project/f2.py"]
    assert get_stripped_files_for_snapshot(
        ["dir1/f.py", "dir2/f.py"], source_root_patterns=["/"]
    ) == ["dir1/f.py", "dir2/f.py"]

    # Gracefully handle an empty snapshot.
    assert get_stripped_files(rule_runner, SourceFiles(EMPTY_SNAPSHOT, ())) == []
def test_strip_source_file_names(rule_runner: RuleRunner) -> None:
    """StrippedSourceFileNames for targets under several source-root layouts."""

    def assert_stripped_source_file_names(
        address: Address, *, source_root: str, expected: list[str]
    ) -> None:
        # Configure the single source root, resolve the target at `address`,
        # and compare its stripped file names (order-insensitively).
        rule_runner.set_options([f"--source-root-patterns=['{source_root}']"])
        tgt = rule_runner.get_target(address)
        result = rule_runner.request(StrippedSourceFileNames, [SourcesPathsRequest(tgt[Sources])])
        assert set(result) == set(expected)

    rule_runner.write_files(
        {
            "src/java/com/project/example.java": "",
            "src/java/com/project/BUILD": "target(sources=['*.java'])",
            "src/python/script.py": "",
            "src/python/BUILD": "target(sources=['*.py'])",
            "data.json": "",
            # Test a source root at the repo root. We have performance optimizations for this case
            # because there is nothing to strip.
            #
            # Also, gracefully handle an empty sources field.
            "BUILD": "target(name='json', sources=['*.json'])\ntarget(name='empty', sources=[])",
        }
    )
    assert_stripped_source_file_names(
        Address("src/java/com/project"),
        source_root="src/java",
        expected=["com/project/example.java"],
    )
    assert_stripped_source_file_names(
        Address("src/python"), source_root="src/python", expected=["script.py"]
    )
    assert_stripped_source_file_names(
        Address("", target_name="json"), source_root="/", expected=["data.json"]
    )
    assert_stripped_source_file_names(
        Address("", target_name="empty"), source_root="/", expected=[]
    )
| benjyw/pants | src/python/pants/core/util_rules/stripped_source_files_test.py | Python | apache-2.0 | 5,370 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import _generate_groundtruth
from test_generate_proposal_labels_op import _bbox_overlaps, _box_to_delta
def rpn_target_assign(anchor_by_gt_overlap,
                      rpn_batch_size_per_im,
                      rpn_positive_overlap,
                      rpn_negative_overlap,
                      rpn_fg_fraction,
                      use_random=True):
    """NumPy reference implementation of RPN anchor target assignment.

    Given the overlap matrix ``anchor_by_gt_overlap`` of shape
    (num_anchors, num_gt), labels each anchor foreground (1),
    background (0) or ignored (-1), subsamples foreground to at most
    ``rpn_fg_fraction * rpn_batch_size_per_im`` anchors, fills the rest
    of the batch with background, and returns
    ``(loc_index, score_index, labels, gt_inds, bbox_inside_weight)``.

    NOTE(review): ``fg_inds[0]`` below assumes at least one foreground
    anchor survives subsampling — confirm for degenerate inputs.
    """
    # For each anchor: the best-matching gt box and its overlap value.
    anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
    anchor_to_gt_max = anchor_by_gt_overlap[np.arange(
        anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax]
    # For each gt box: the best-matching anchor and its overlap value.
    gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
    gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, np.arange(
        anchor_by_gt_overlap.shape[1])]
    # All anchors that achieve some gt's maximum overlap (can be several).
    anchors_with_max_overlap = np.where(
        anchor_by_gt_overlap == gt_to_anchor_max)[0]

    # Start with every anchor ignored (-1); mark foreground: the best
    # anchor(s) per gt plus anchors above the positive-overlap threshold.
    labels = np.ones((anchor_by_gt_overlap.shape[0], ), dtype=np.int32) * -1
    labels[anchors_with_max_overlap] = 1
    labels[anchor_to_gt_max >= rpn_positive_overlap] = 1

    # Subsample foreground down to its allowed fraction of the batch.
    num_fg = int(rpn_fg_fraction * rpn_batch_size_per_im)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg and use_random:
        disable_inds = np.random.choice(
            fg_inds, size=(len(fg_inds) - num_fg), replace=False)
    else:
        # Deterministic path: keep the first num_fg foreground anchors.
        disable_inds = fg_inds[num_fg:]
    labels[disable_inds] = -1
    fg_inds = np.where(labels == 1)[0]
    bbox_inside_weight = np.zeros((len(fg_inds), 4), dtype=np.float32)

    # Background candidates: anchors below the negative threshold,
    # subsampled to fill the remainder of the batch.
    num_bg = rpn_batch_size_per_im - np.sum(labels == 1)
    bg_inds = np.where(anchor_to_gt_max < rpn_negative_overlap)[0]
    if len(bg_inds) > num_bg and use_random:
        enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]
    else:
        enable_inds = bg_inds[:num_bg]

    # Anchors selected as both fg and bg become "fake" foreground: each one
    # adds a placeholder location entry (index fg_inds[0]) whose regression
    # weight stays zero, while the anchor itself is relabelled background.
    fg_fake_inds = np.array([], np.int32)
    fg_value = np.array([fg_inds[0]], np.int32)
    fake_num = 0
    for bg_id in enable_inds:
        if bg_id in fg_inds:
            fake_num += 1
            fg_fake_inds = np.hstack([fg_fake_inds, fg_value])
    labels[enable_inds] = 0

    # Real foreground rows get inside-weight 1; the leading fake rows keep 0.
    bbox_inside_weight[fake_num:, :] = 1

    fg_inds = np.where(labels == 1)[0]
    bg_inds = np.where(labels == 0)[0]
    # Location targets cover fake + real foreground; score targets cover
    # foreground followed by background.
    loc_index = np.hstack([fg_fake_inds, fg_inds])
    score_index = np.hstack([fg_inds, bg_inds])
    labels = labels[score_index]
    assert not np.any(labels == -1), "Wrong labels with -1"
    gt_inds = anchor_to_gt_argmax[loc_index]
    return loc_index, score_index, labels, gt_inds, bbox_inside_weight
def get_anchor(n, c, h, w):
    """Generate reference anchors for a random feature map of shape
    (n, c, h, w), using fixed sizes, ratios, stride and offset."""
    feature_map = np.random.random((n, c, h, w)).astype('float32')
    generated_anchors, _unused_vars = anchor_generator_in_python(
        input_feat=feature_map,
        anchor_sizes=[32., 64.],
        aspect_ratios=[0.5, 1.0],
        variances=[1.0, 1.0, 1.0, 1.0],
        stride=[16.0, 16.0],
        offset=0.5)
    return generated_anchors
def rpn_target_assign_in_python(all_anchors,
                                gt_boxes,
                                is_crowd,
                                im_info,
                                lod,
                                rpn_straddle_thresh,
                                rpn_batch_size_per_im,
                                rpn_positive_overlap,
                                rpn_negative_overlap,
                                rpn_fg_fraction,
                                use_random=True):
    """Batch-level reference for the rpn_target_assign op.

    ``lod`` holds per-image offsets into ``gt_boxes``/``is_crowd``.  For
    each image this filters out border-straddling anchors and crowd gt
    boxes, computes IoU, delegates to :func:`rpn_target_assign`, then
    maps the sampled indices back to the full anchor list (offset by
    ``i * anchor_num`` per image) and concatenates results over the batch.
    """
    anchor_num = all_anchors.shape[0]
    batch_size = len(lod) - 1
    for i in range(batch_size):
        im_height = im_info[i][0]
        im_width = im_info[i][1]
        im_scale = im_info[i][2]
        if rpn_straddle_thresh >= 0:
            # Only keep anchors inside the image by a margin of straddle_thresh
            inds_inside = np.where(
                (all_anchors[:, 0] >= -rpn_straddle_thresh) &
                (all_anchors[:, 1] >= -rpn_straddle_thresh) & (
                    all_anchors[:, 2] < im_width + rpn_straddle_thresh) & (
                        all_anchors[:, 3] < im_height + rpn_straddle_thresh))[0]
            # keep only inside anchors
            inside_anchors = all_anchors[inds_inside, :]
        else:
            # Negative threshold disables the filter: use every anchor.
            inds_inside = np.arange(all_anchors.shape[0])
            inside_anchors = all_anchors

        # This image's gt boxes (scaled to the resized image), crowds removed.
        b, e = lod[i], lod[i + 1]
        gt_boxes_slice = gt_boxes[b:e, :] * im_scale
        is_crowd_slice = is_crowd[b:e]

        not_crowd_inds = np.where(is_crowd_slice == 0)[0]
        gt_boxes_slice = gt_boxes_slice[not_crowd_inds]
        iou = _bbox_overlaps(inside_anchors, gt_boxes_slice)

        loc_inds, score_inds, labels, gt_inds, bbox_inside_weight = \
            rpn_target_assign(iou, rpn_batch_size_per_im,
                              rpn_positive_overlap,
                              rpn_negative_overlap,
                              rpn_fg_fraction,
                              use_random)
        # unmap to all anchor
        loc_inds = inds_inside[loc_inds]
        score_inds = inds_inside[score_inds]

        # Regression targets: deltas from each sampled anchor to its gt box.
        sampled_gt = gt_boxes_slice[gt_inds]
        sampled_anchor = all_anchors[loc_inds]
        box_deltas = _box_to_delta(sampled_anchor, sampled_gt, [1., 1., 1., 1.])

        if i == 0:
            loc_indexes = loc_inds
            score_indexes = score_inds
            tgt_labels = labels
            tgt_bboxes = box_deltas
            bbox_inside_weights = bbox_inside_weight
        else:
            # Offset indices so they address the flattened per-batch
            # anchor array (anchor_num anchors per image).
            loc_indexes = np.concatenate(
                [loc_indexes, loc_inds + i * anchor_num])
            score_indexes = np.concatenate(
                [score_indexes, score_inds + i * anchor_num])
            tgt_labels = np.concatenate([tgt_labels, labels])
            tgt_bboxes = np.vstack([tgt_bboxes, box_deltas])
            bbox_inside_weights = np.vstack([bbox_inside_weights, \
                                             bbox_inside_weight])

    return loc_indexes, score_indexes, tgt_bboxes, tgt_labels, bbox_inside_weights
class TestRpnTargetAssignOp(OpTest):
    """Checks the C++ ``rpn_target_assign`` op against the pure-Python
    reference implementation (rpn_target_assign_in_python) on a fixed
    two-image batch with deterministic sampling (use_random=False)."""

    def setUp(self):
        # Anchor grid: 2 images, 4 anchors per location on a 14x14 feature map.
        n, c, h, w = 2, 4, 14, 14
        all_anchors = get_anchor(n, c, h, w)
        gt_num = 10
        all_anchors = all_anchors.reshape(-1, 4)
        anchor_num = all_anchors.shape[0]
        images_shape = [[64, 64], [64, 64]]
        #images_shape = [[64, 64]]
        groundtruth, lod = _generate_groundtruth(images_shape, 3, 4)
        # Hard-coded LoD: exactly 4 ground-truth boxes per image.
        lod = [0, 4, 8]
        #lod = [0, 4]
        # im_info rows: [height, width, scale].
        im_info = np.ones((len(images_shape), 3)).astype(np.float32)
        for i in range(len(images_shape)):
            im_info[i, 0] = images_shape[i][0]
            im_info[i, 1] = images_shape[i][1]
            im_info[i, 2] = 0.8  # scale
        gt_boxes = np.vstack([v['boxes'] for v in groundtruth])
        is_crowd = np.hstack([v['is_crowd'] for v in groundtruth])
        all_anchors = all_anchors.astype('float32')
        gt_boxes = gt_boxes.astype('float32')
        rpn_straddle_thresh = 0.0
        rpn_batch_size_per_im = 256
        rpn_positive_overlap = 0.7
        rpn_negative_overlap = 0.3
        rpn_fg_fraction = 0.5
        use_random = False
        # Expected outputs computed by the NumPy reference implementation.
        loc_index, score_index, tgt_bbox, labels, bbox_inside_weights = \
            rpn_target_assign_in_python(all_anchors, gt_boxes, is_crowd,
                                        im_info, lod, rpn_straddle_thresh,
                                        rpn_batch_size_per_im, rpn_positive_overlap,
                                        rpn_negative_overlap,
                                        rpn_fg_fraction, use_random)
        labels = labels[:, np.newaxis]
        self.op_type = "rpn_target_assign"
        self.inputs = {
            'Anchor': all_anchors,
            'GtBoxes': (gt_boxes, [[4, 4]]),
            'IsCrowd': (is_crowd, [[4, 4]]),
            'ImInfo': (im_info, [[1, 1]])
        }
        self.attrs = {
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        }
        self.outputs = {
            'LocationIndex': loc_index.astype('int32'),
            'ScoreIndex': score_index.astype('int32'),
            'TargetBBox': tgt_bbox.astype('float32'),
            'TargetLabel': labels.astype('int32'),
            'BBoxInsideWeight': bbox_inside_weights.astype('float32')
        }

    def test_check_output(self):
        self.check_output()
if __name__ == '__main__':
    # Run the op test under the standard unittest runner.
    unittest.main()
| reyoung/Paddle | python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py | Python | apache-2.0 | 9,366 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # BUG FIX: a pattern passed to include() must not be anchored with '$'.
    # The previous r'^$' only ever matched the empty path, so any URL defined
    # inside Blog.urls other than '' was unreachable.
    url(r'^', include('Blog.urls')),

    # Django Admin
    url(r'^admin/', include(admin.site.urls)),

    # User management
    url(r'^users/', include("Blog.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view references ('django.views.defaults.x') were
    # deprecated in Django 1.8 and removed in 1.10 -- confirm the targeted
    # Django version, or import and pass the view callables instead.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| 2083008/blog | config/urls.py | Python | bsd-3-clause | 1,269 |
__version__="0.1.0"
# ToDo:
# [] process mult dimacs.trees to hrg
import sys
import traceback
import argparse
import os
import glob
import networkx as nx
import pandas as pd
from PHRG import graph_checks
import subprocess
import math
import graph_sampler as gs
global args
def get_parser ():
    """Build the command-line parser for the tree-decomposition driver.

    Options:
        --orig     (required) reference graph in edge-list format
        --version  print the module version and exit
    """
    cli = argparse.ArgumentParser(
        description='Given an edgelist and PEO heuristic perform tree decomposition')
    cli.add_argument('--orig', required=True,
                     help='input the reference graph in edgelist format')
    cli.add_argument('--version', action='version', version=__version__)
    return cli
def dimacs_nddgo_tree(dimacsfnm_lst, heuristic):
    """Run the nddgo ``serial_wis`` binary on each DIMACS file to produce a
    tree decomposition with the given elimination-order heuristic.

    NOTE(review): only the ``.tree`` path of the *last* file in the list is
    returned -- confirm whether a list of all generated tree files was
    intended.
    """
    # print heuristic,dimacsfnm_lst
    for dimacsfname in dimacsfnm_lst:
        nddgoout = ""
        args = ["bin/mac/serial_wis -f {} -nice -{} -w {}.tree".format(dimacsfname, heuristic, dimacsfname)]
        # Retry until the binary produces some stdout output.
        while not nddgoout:
            popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
            popen.wait()
            # output = popen.stdout.read()
            out, err = popen.communicate()
            nddgoout = out.split('\n')
        print nddgoout
    return dimacsfname+".tree"
def load_edgelist(gfname):
    """Load an edge-list file into a networkx Graph.

    Lines starting with '%' are comments.  A tab separator is tried first;
    if only one column results, whitespace separation is retried.  Exits
    the process on a read error.
    """
    import pandas as pd
    try:
        edglst = pd.read_csv(gfname, comment='%', delimiter='\t')
        # print edglst.shape
        # A single column means tab was the wrong separator; retry on whitespace.
        if edglst.shape[1]==1: edglst = pd.read_csv(gfname, comment='%', delimiter="\s+")
    except Exception, e:
        print "EXCEPTION:",str(e)
        traceback.print_exc()
        sys.exit(1)
    # Name the columns according to their count: src/trg [+ weight [+ timestamp]].
    if edglst.shape[1] == 3:
        edglst.columns = ['src', 'trg', 'wt']
    elif edglst.shape[1] == 4:
        edglst.columns = ['src', 'trg', 'wt','ts']
    else:
        edglst.columns = ['src', 'trg']
    g = nx.from_pandas_dataframe(edglst,source='src',target='trg')
    g.name = os.path.basename(gfname)
    return g
def nx_edges_to_nddgo_graph (G,n,m, sampling=False, peoh=""):
    """Write graph ``G`` to ``datasets/<name>_<peoh>.dimacs`` in DIMACS
    edge format and return the filename in a one-element list.

    :param G: graph providing ``.name`` and ``.edges()``.
    :param n: node count written into the DIMACS ``p`` line.
    :param m: edge count written into the DIMACS ``p`` line.
    :param sampling: kept for backward compatibility; see NOTE below.
    :param peoh: heuristic tag appended to the output file name.

    NOTE(review): the original ``if sampling: ... else: ...`` contained two
    byte-identical branches, so the flag had no effect; the duplicate branch
    is collapsed here.  Confirm whether sampling was meant to delegate to
    nx_edges_to_nddgo_graph_sampling().
    """
    ofname = 'datasets/{}_{}.dimacs'.format(G.name, peoh)
    # print '...', ofname
    edges = [(int(u), int(v)) for u, v in G.edges()]
    df = pd.DataFrame(edges)
    df.sort_values(by=[0], inplace=True)
    with open(ofname, 'w') as f:
        # DIMACS header: comment line with the graph name, then "p edge n m".
        f.write('c {}\n'.format(G.name))
        f.write('p edge\t{}\t{}\n'.format(n, m))
        output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
        df.apply(output_edges, axis=1)
    # Single-argument print() works identically under Python 2 and 3.
    if os.path.exists(ofname): print('Wrote: ./{}'.format(ofname))
    return [ofname]
def nx_edges_to_nddgo_graph_sampling(graph, n, m, peo_h):
    """Sample subgraphs by random walk with restart and write each as a
    DIMACS edge-format file.

    Writes ceil(0.25*|V|/256) samples of 256 nodes each to
    ``datasets/<name>_<peo_h>_<j>.dimacs`` and returns the common base
    filename (without the ``_<j>.dimacs`` suffix).
    """
    G = graph
    if n is None and m is None: return
    # n = G.number_of_nodes()
    # m = G.number_of_edges()
    nbr_nodes = 256
    basefname = 'datasets/{}_{}'.format(G.name, peo_h)
    # K = number of samples covering roughly a quarter of the nodes.
    K = int(math.ceil(.25*G.number_of_nodes()/nbr_nodes))
    print "--", nbr_nodes, K, '--';
    for j,Gprime in enumerate(gs.rwr_sample(G, K, nbr_nodes)):
        # if gname is "":
        #   # nx.write_edgelist(Gprime, '/tmp/sampled_subgraph_200_{}.tsv'.format(j), delimiter="\t", data=False)
        #   gprime_lst.append(Gprime)
        # else:
        #   # nx.write_edgelist(Gprime, '/tmp/{}{}.tsv'.format(gname, j), delimiter="\t", data=False)
        #   gprime_lst.append(Gprime)
        #   # print "... files written: /tmp/{}{}.tsv".format(gname, j)
        edges = Gprime.edges()
        edges = [(int(e[0]), int(e[1])) for e in edges]
        df = pd.DataFrame(edges)
        df.sort_values(by=[0], inplace=True)
        ofname = basefname+"_{}.dimacs".format(j)
        with open(ofname, 'w') as f:
            # NOTE(review): the header uses the *full* graph's n and m, not
            # the sample's node/edge counts -- confirm this is intended.
            f.write('c {}\n'.format(G.name))
            f.write('p edge\t{}\t{}\n'.format(n,m))
            # for e in df.iterrows():
            output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
            df.apply(output_edges, axis=1)
            # f.write("e\t{}\t{}\n".format(e[0]+1,e[1]+1))
        if os.path.exists(ofname): print 'Wrote: {}'.format(ofname)
    return basefname
def edgelist_dimacs_graph(orig_graph, peo_h):
    """Read an edge list, reduce it to its giant connected component, and
    write it out as a DIMACS file.

    Returns ``([dimacs_filename], graph_name)`` where the name is the
    longest dot-separated component of the input basename.
    """
    fname = orig_graph
    gname = os.path.basename(fname).split(".")
    gname = sorted(gname,reverse=True, key=len)[0]
    G = nx.read_edgelist(fname, comments="%", data=False, nodetype=int)
    # print "...", G.number_of_nodes(), G.number_of_edges()
    # from numpy import max
    # print "...", max(G.nodes()) ## to handle larger 300K+ nodes with much larger labels
    # N is the largest node label (not the node count), used for the DIMACS header.
    N = max(G.nodes())
    M = G.number_of_edges()
    # +++ Graph Checks
    if G is None: sys.exit(1)
    G.remove_edges_from(G.selfloop_edges())
    giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
    G = nx.subgraph(G, giant_nodes)
    graph_checks(G)
    # --- graph checks
    G.name = gname
    # print "...", G.number_of_nodes(), G.number_of_edges()
    #if G.number_of_nodes() > 500:
    #    return (nx_edges_to_nddgo_graph_sampling(G, n=N, m=M, peo_h=peo_h), gname)
    #else:
    return (nx_edges_to_nddgo_graph(G, n=N, m=M, peoh=peo_h), gname)
def print_treewidth (in_dimacs, var_elim):
    """Run ``serial_wis`` to print the treewidth of a DIMACS graph using
    the given variable-elimination heuristic.

    Returns the tool's stdout split into lines (after printing it).
    """
    nddgoout = ""
    args = ["bin/mac/serial_wis -f {} -nice -{} -width".format(in_dimacs, var_elim)]
    # Retry until the binary produces some stdout output.
    while not nddgoout:
        popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
        popen.wait()
        # output = popen.stdout.read()
        out, err = popen.communicate()
        nddgoout = out.split('\n')
    print nddgoout
    return nddgoout
def main ():
    """Entry point: parse CLI arguments and convert the edge list to DIMACS."""
    parser = get_parser()
    args = vars(parser.parse_args())
    # Empty string = no PEO-heuristic tag in the output filename.
    dimacs_g, gname = edgelist_dimacs_graph(args['orig'],"")
    if len(dimacs_g) == 1:
        print "dimacs_g", dimacs_g
if __name__ == '__main__':
    try:
        main()
    except Exception, e:
        # Report any failure with a traceback and exit non-zero.
        print str(e)
        traceback.print_exc()
        sys.exit(1)
    sys.exit(0)
| nddsg/TreeDecomps | xplodnTree/tdec/tredec.edgelist_dimacs.py | Python | mit | 6,413 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github util API."""
from tensorflow_datasets.core.github_api.github_path import GithubPath
# Explicit public API of the github_api subpackage.
__all__ = [
    'GithubPath',
]
| tensorflow/datasets | tensorflow_datasets/core/github_api/__init__.py | Python | apache-2.0 | 740 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""File reader utility."""
from __future__ import absolute_import, print_function
from os.path import basename, splitext
from flask import url_for
class PreviewFile(object):
    """Default adapter exposing a record file to the previewer extensions."""

    def __init__(self, pid, record, fileobj):
        """Store the persistent identifier, record and file object.

        :param pid: Persistent identifier of the record.
        :param record: Record the file belongs to.
        :param fileobj: ObjectVersion instance from Invenio-Files-REST.
        """
        self.file = fileobj
        self.pid = pid
        self.record = record

    @property
    def size(self):
        """Size of the file in bytes."""
        return self.file['size']

    @property
    def filename(self):
        """Base name of the file, without directory components."""
        return basename(self.file.key)

    @property
    def bucket(self):
        """Identifier of the bucket holding the file."""
        return self.file.bucket_id

    @property
    def uri(self):
        """Download link for the file.

        .. note::
            The URI generation assumes that you can download the file using
            the view ``invenio_records_ui.<pid_type>_files``.
        """
        endpoint = '.{0}_files'.format(self.pid.pid_type)
        return url_for(endpoint,
                       pid_value=self.pid.pid_value,
                       filename=self.file.key)

    def is_local(self):
        """Return ``True``; files handled by this class are always local."""
        return True

    def has_extensions(self, *exts):
        """Return whether the file name ends in one of ``exts``.

        The comparison uses the lower-cased extension, dot included.
        """
        extension = splitext(self.filename)[1].lower()
        return extension in exts

    def open(self):
        """Open the underlying file via its storage backend."""
        return self.file.file.storage().open()
| hachreak/invenio-previewer | invenio_previewer/api.py | Python | gpl-2.0 | 2,592 |
############################################################################
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
############################################################################
# flake8: noqa: E501
from typing import List, Tuple
from docutils import nodes
from docutils.nodes import Node, system_message
from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.util.docutils import ReferenceRole
GITLAB_BASE_URL = 'https://gitlab.isc.org/isc-projects/bind9/-/'
# Custom Sphinx role enabling automatic hyperlinking to GitLab issues/MRs.
class GitLabRefRole(ReferenceRole):
    """Sphinx role that renders ``[GL #123]`` / ``[GL !456]`` as links to
    GitLab issues and merge requests, recording an index entry for each."""

    def __init__(self, base_url: str) -> None:
        # base_url: GitLab project URL prefix the issue/MR path is appended to.
        self.base_url = base_url
        super().__init__()

    def run(self) -> Tuple[List[Node], List[system_message]]:
        gl_identifier = '[GL %s]' % self.target
        # Register an index entry so references are listed under "GitLab".
        target_id = 'index-%s' % self.env.new_serialno('index')
        entries = [('single', 'GitLab; ' + gl_identifier, target_id, '', None)]
        index = addnodes.index(entries=entries)
        target = nodes.target('', '', ids=[target_id])
        self.inliner.document.note_explicit_target(target)
        try:
            refuri = self.build_uri()
            reference = nodes.reference('', '', internal=False, refuri=refuri,
                                        classes=['gl'])
            # Explicit title (":gl:`title <#123>`") wins over the GL tag.
            if self.has_explicit_title:
                reference += nodes.strong(self.title, self.title)
            else:
                reference += nodes.strong(gl_identifier, gl_identifier)
        except ValueError:
            # Malformed target (not '#<int>' or '!<int>'): report an error and
            # emit a "problematic" node instead of a link.
            error_text = 'invalid GitLab identifier %s' % self.target
            msg = self.inliner.reporter.error(error_text, line=self.lineno)
            prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
            return [prb], [msg]
        return [index, target, reference], []

    def build_uri(self):
        """Map '#<n>' to an issue URL and '!<n>' to a merge-request URL.

        Raises ValueError for any other target format.
        """
        if self.target[0] == '#':
            return self.base_url + 'issues/%d' % int(self.target[1:])
        if self.target[0] == '!':
            return self.base_url + 'merge_requests/%d' % int(self.target[1:])
        raise ValueError
def setup(_):
    """Sphinx extension hook: register the custom ``:gl:`` role globally."""
    roles.register_local_role('gl', GitLabRefRole(GITLAB_BASE_URL))
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'BIND 9 管理员参考手册'
copyright = u'2021, Internet Systems Consortium'
author = u"Internet Systems Consortium \\and 翻译: [email protected]"

# The full version, including alpha/beta/rc tags
release = 'BIND 9.16.18(稳定版)'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    '_build',
    'Thumbs.db',
    '.DS_Store',
    '*.grammar.rst',
    '*.zoneopts.rst',
    'catz.rst',
    'dlz.rst',
    'dnssec.rst',
    'dyndb.rst',
    'logging-cattegories.rst',
    'managed-keys.rst',
    'pkcs11.rst',
    'plugins.rst'
]

# The master toctree document.
master_doc = 'index'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# -- Options for LaTeX/PDF output --------------------------------------------
# xelatex is required for the CJK (Source Han) fonts used below.
latex_engine = 'xelatex'
latex_elements = {
    'fontpkg': r'''
\setmainfont{Source Han Serif CN:style=Regular}
\setsansfont{Source Han Sans CN Medium:style=Medium,Regular}
\setmonofont{Source Han Sans CN:style=Regular}
\setCJKfamilyfont{song}{Source Han Serif CN:style=Regular}
\setCJKfamilyfont{heiti}{Source Han Sans CN:style=Regular}
''',
    'pointsize': '11pt',
    'preamble': r'\input{../mystyle.tex.txt}'
}
latex_documents = [
    (master_doc, 'Bv9ARM.tex', u'BIND 9管理员参考手册', author, 'manual'),
]
latex_logo = "isc-logo.pdf"
| perlang/bv9arm-chinese | branches/9.16.18/arm/conf.py | Python | mpl-2.0 | 5,717 |
from setuptools import setup
import sys
# Package metadata for HTTPLang, including the console-script entry point.
setup(name='HTTPLang',
      version='2.0.0',
      author='Frankie Primerano',
      author_email='[email protected]',
      packages=['httplang'],
      # Installs an `httplang` command that runs httplang.console_main().
      entry_points={
          'console_scripts': ['httplang=httplang:console_main'],
      },
      url='https://github.com/Max00355/HTTPLang',
      description='A scripting language to do HTTP routines.',
      classifiers=[
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'License :: OSI Approved :: MIT License',
          'Topic :: Utilities'
      ],
      )
| Max00355/HTTPLang | setup.py | Python | mit | 853 |
import tensorflow as tf
from kentf.scoping import adapt_name
def merge_grads(tower_grads, name=None):
    """Average per-tower (gradient, variable) lists into a single list.

    Each element of ``tower_grads`` is one tower's grads-and-vars list.
    Gradients that are ``None`` are skipped when averaging; if no tower
    produced a gradient for a variable, ``None`` is paired with it.
    """
    scope_name = adapt_name(name, "merge-grads")
    with tf.name_scope(scope_name):
        merged = []
        for pairs in zip(*tower_grads):
            variable = pairs[0][1]
            present = [g for g, _ in pairs if g is not None]
            if present:
                averaged = tf.reduce_mean(tf.stack(present, 0), 0)
                merged.append((averaged, variable))
            else:
                merged.append((None, variable))
        return merged
def split_and_recombined(inps, fn, num_splits, name=None):
    """Split each input into ``num_splits`` pieces, apply ``fn`` per piece,
    and regroup the outputs.

    inps: list of inputs.  Lists/tuples are split by slicing; tensors with a
        statically known leading dimension are split along axis 0; anything
        else is broadcast unchanged to every split.
    fn: callable taking one piece of each input; must return a sequence.
    Returns a list whose entry j collects fn's j-th output from every split.
    Raises RuntimeError when a list/tensor length is not divisible by
    ``num_splits`` or a tensor's batch dimension is dynamic.
    """
    name = adapt_name(name, "split-and-recombine")
    with tf.name_scope(name):
        adapted_inps = []
        # Split inputs:
        with tf.name_scope("preprocessing"):
            for inp in inps:
                if isinstance(inp, list) or isinstance(inp, tuple):
                    if len(inp) % num_splits != 0:
                        raise RuntimeError("List not divisible by number of splits: %s" % repr(inp))
                    stride = len(inp) // num_splits
                    # A length-1 slice is unwrapped to its single element.
                    squeeze = lambda x: x[0] if len(x) == 1 else x
                    adapted_inps.append([squeeze(inp[i:(i+stride)]) for i in range(0, len(inp), stride)])
                elif (isinstance(inp, tf.Variable) or isinstance(inp, tf.Tensor))\
                        and len(inp.shape) > 0:
                    if inp.shape[0].value is None:
                        raise RuntimeError("Batch index must be defined for tensor")
                    leng = int(inp.shape[0])
                    if leng % num_splits != 0:
                        raise RuntimeError("Tensor not divisible by number of splits (%d): %s" % (num_splits, inp.shape))
                    stride = leng // num_splits
                    # Slice `stride` rows along axis 0, keep all other axes.
                    adapted_inps.append([
                        tf.slice(inp,
                                 [i if j == 0 else 0 for j in range(len(inp.shape))],
                                 [stride if j == 0 else -1 for j in range(len(inp.shape))])
                        for i in range(0, leng, stride)])
                else:
                    # Scalars and other objects are shared by every split.
                    adapted_inps.append([inp] * num_splits)
        # Zip inputs to divide work:
        adapted_inps = list(zip(*adapted_inps))
        # Do work
        raw_outputs = []
        for split, args in enumerate(adapted_inps):
            with tf.name_scope("bin%d" % split):
                raw_outputs.append(fn(*args))
        # Post-process outputs
        outputs = []
        with tf.name_scope("postprocessing"):
            # Transpose [split][output_idx] -> [output_idx][split].
            for i, group in enumerate(raw_outputs):
                for j, var in enumerate(group):
                    if i == 0:
                        outputs.append([var])
                    else:
                        outputs[j].append(var)
        return outputs
# Useful for breaking up very large batch sizes to avoid allocating large tensors:
def splitfn(inp, fn, maxbatch=None, allow_unrolling=True, name=None):
    """Apply ``fn`` over ``inp`` in chunks of at most ``maxbatch`` rows
    along axis 0, then concatenate the per-chunk results.

    With a statically known batch size (and ``allow_unrolling``) the split
    is built at graph-construction time; otherwise dynamic shapes and
    ``tf.case`` are used.  Variables created by ``fn`` are shared between
    chunks via ``scope.reuse_variables()``.
    """
    name = adapt_name(name, "splitfn")
    with tf.variable_scope(name) as scope:
        if not allow_unrolling or inp.shape[0].value is None:
            # Dynamic batch size: decide at run time whether to split.
            leng = tf.shape(inp)[0]
            def minibatch():
                scope.reuse_variables()
                # Evenly sized chunks, plus a remainder chunk processed alone.
                remainder = tf.mod(leng, maxbatch, name="remainder")
                splits = tf.identity(tf.floor_div(leng - remainder, maxbatch), "splits")
                remainder_inp = tf.slice(inp,
                    [leng - remainder if i == 0 else 0 for i in range(len(inp.shape))],
                    [-1 for i in range(len(inp.shape))])
                majority_inp = tf.slice(inp,
                    [0 for i in range(len(inp.shape))],
                    [leng - remainder if i == 0 else -1 for i in range(len(inp.shape))])
                # Reshape to [splits, maxbatch, ...] so map_fn runs fn per chunk.
                split_inp = tf.reshape(
                    majority_inp,
                    tf.concat([[splits, maxbatch], tf.shape(inp)[1:]], 0))
                majority_out = tf.map_fn(fn, split_inp)
                scope.reuse_variables()
                remainder_out = fn(remainder_inp)
                # Flatten chunk results back to the batch axis and append the remainder.
                out = tf.concat([
                    tf.reshape(majority_out,
                        tf.concat([[leng - remainder], tf.shape(majority_out)[2:]], 0)),
                    remainder_out], 0)
                if inp.shape[0].value is not None:
                    # Restore the static batch dimension when it is known.
                    out = tf.reshape(out, tf.concat([[int(inp.shape[0])], tf.shape(out)[1:]], 0))
                return out
            if maxbatch is None:
                out = fn(inp)
            else:
                # Only split when the actual batch exceeds maxbatch.
                out = tf.case([(maxbatch < leng, minibatch)], lambda: fn(inp))
        else:
            # Static batch size: unroll the same split logic at graph-build time.
            leng = int(inp.shape[0])
            if maxbatch is not None and maxbatch < leng:
                remainder = leng % maxbatch
                splits = (leng - remainder) // maxbatch
                remainder_inp = tf.slice(inp,
                    [leng - remainder if i == 0 else 0 for i in range(len(inp.shape))],
                    [-1 for i in range(len(inp.shape))])
                majority_inp = tf.slice(inp,
                    [0 for i in range(len(inp.shape))],
                    [leng - remainder if i == 0 else -1 for i in range(len(inp.shape))])
                split_inp = tf.reshape(
                    majority_inp,
                    tf.concat([[splits, maxbatch], tf.shape(inp)[1:]], 0))
                majority_out = tf.map_fn(fn, split_inp)
                scope.reuse_variables()
                remainder_out = fn(remainder_inp)
                out = tf.concat([
                    tf.reshape(majority_out,
                        tf.concat([[leng - remainder], tf.shape(majority_out)[2:]], 0)),
                    remainder_out], 0)
            else:
                out = fn(inp)
        return tf.identity(out, name)
| kkleidal/kentf | splitfn.py | Python | mit | 5,794 |
# -*- coding: utf-8 -*-
# Este arquivo é parte do programa Blocos
# Blocos é um software livre; você pode redistribui-lo e/ou
# modifica-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 3 da
# Licença, ou (na sua opinião) qualquer versão.
#
# Este programa é distribuido na esperança que possa ser util,
# mas SEM NENHUMA GARANTIA; sem uma garantia implicita de ADEQUAÇÂO a qualquer
# MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, escreva para a Fundação do Software
# Livre(FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk, gtk.glade
import block
import container
import os
import string
import tempfile
from geom import *
from commands import *
from config import *
## Check whether two pixbufs collide (share an overlapping opaque pixel)
# @param x1 x position of pix1
# @param y1 y position of pix1
# @param pix1 gtk.gdk.Pixbuf
# @param x2 x position of pix2
# @param y2 y position of pix2
# @param pix2 gtk.gdk.Pixbuf
# @return bool telling whether they collide
def collide_pixbuf(x1, y1, pix1, x2, y2, pix2):
    p1 = Vector(x1, y1)
    p2 = Vector(x2, y2)
    # p3/p4 are the bottom-right corners of the two bounding boxes.
    p3 = p1 + Vector( pix1.get_width(), pix1.get_height() )
    p4 = p2 + Vector( pix2.get_width(), pix2.get_height() )
    img1 = pix1.get_pixels()
    img2 = pix2.get_pixels()
    # c1..c2 is the intersection rectangle of the two bounding boxes.
    c1 = Vector( max(p1.x, p2.x), max(p1.y, p2.y) )
    c2 = Vector( min(p3.x, p4.x), min(p3.y, p4.y) )
    (w, h) = (c2.x - c1.x, c2.y - c1.y)
    # Offsets of the intersection inside each pixbuf.
    r1 = c1 - p1
    r2 = c1 - p2
    collide = False
    y = 0
    x = 0
    # Scan the intersection; a collision is a pixel where both alpha bytes
    # are non-zero.  NOTE(review): indexing assumes 4 bytes per pixel with a
    # rowstride of width*4 (RGBA, no padding) -- confirm for all pixbufs
    # passed in.
    while( y < h and collide == False ):
        x = 0
        while( x < w and collide == False ):
            if ord(img1[((y+int( r1.y ))*pix1.get_width()+(x+int( r1.x )))*4+3]) != 0\
            and ord(img2[((y+int( r2.y ))*pix2.get_width()+(x+int( r2.x )))*4+3]) != 0:
                collide = True
            x = x+1
        y = y+1
    return collide
class Canvas( container.Block_container, gtk.Layout ):
"""Classe que implementa um container de blocos em um gtk.Layout"""
def __init__(self, prop_box):
    """Create the canvas, wire up the GTK event handlers and reset state.

    prop_box: gtk.Box where the selected block's properties are shown.
    """
    container.Block_container.__init__(self)
    gtk.Layout.__init__(self, None, None)
    # Enable every event needed for drag/selection behaviour.
    self.add_events(gtk.gdk.KEY_PRESS_MASK)
    self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
    self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
    self.add_events(gtk.gdk.POINTER_MOTION_MASK)
    self.add_events(gtk.gdk.LEAVE_NOTIFY_MASK)
    self.add_events(gtk.gdk.ENTER_NOTIFY_MASK)
    self.set_flags(gtk.CAN_FOCUS)
    self.connect("motion_notify_event", self.on_motion_notify)
    self.connect("leave_notify_event", self.on_leave_notify)
    self.connect("enter_notify_event", self.on_enter_notify)
    self.connect("button_press_event", self.on_button_press)
    self.connect("button_release_event", self.on_button_release)
    self.connect("expose-event", self.on_expose)
    self.connect("key-press-event", self.on_key_press)
    # Scrollbars
    self.get_hadjustment().connect("value-changed", self.scrollscreen)
    self.get_vadjustment().connect("value-changed", self.scrollscreen)
    self.change_stack = []       # stack of saved states used by undo()
    self.undo_flag = False       # True while an undo is being applied
    self.clicked = None          # currently clicked/selected block, or None
    self.create_block = None     # prototype of the block to be created;
                                 # None means we are not in creation mode
    self.last_saved = ""         # last saved file, used to check whether the
                                 # program changed before closing
    self.first_expose = False    # have I been shown already?
    self.prop_box = prop_box     # gtk.Box where the selected block's
                                 # properties will appear
    self.drawable = None         # gtk.gdk.Drawable
    self.gc = None               # gtk.gdk.GC
    self.cm = None               # gtk.gdk.Colormap
    self.bg = None               # background color
    self.startblock = None       # points to the start block
    self.mousex = -1             # mouse X position in the canvas; -1 -> outside
    self.mousey = -1             # mouse Y position in the canvas; -1 -> outside
    self.selectrect_on = False   # whether the selection rectangle is active
    self.selectrect_p1 = None    # leftmost point of the selection rectangle
    self.selectrect_p2 = None    # rightmost point of the selection rectangle
    self.selectrect = None       # selection rectangle [geometric.Rect]
    # pixbuf of the arrow that follows the selected block
    self.arrow = gtk.gdk.pixbuf_new_from_file('images/arrow.png')
## Show the property bar of the block that was clicked
# @param block the clicked block, or None to clear the selection
def set_clicked(self, block):
    # Hide the GUI of the previously selected block before switching.
    if self.clicked is not None:
        self.arrow_mark_dirty()
        self.clicked.hide_gui()
    self.clicked = block
    if self.clicked is not None:
        self.arrow_mark_dirty()
        self.clicked.show_gui(self.prop_box)
## Save the current canvas state on a stack, for undo
def change_done(self):
    # Do not record states produced while an undo is being applied.
    if self.undo_flag == True:
        return
    k = tempfile.TemporaryFile('w+', 0)
    # Cap the stack at ~10 entries by recycling the oldest temp file.
    if len(self.change_stack) > 10:
        k = self.change_stack.pop(0)
    self.change_stack.append( k )
    # NOTE(review): a recycled temp file is not truncated before saving --
    # confirm that save() fully overwrites previous contents.
    self.save( k )
    k.flush()
    k.seek(0)
## Return the last state saved on the undo stack
def get_last_state(self):
    return self.change_stack[-1]
## Restore the canvas to the last state saved on the undo stack
def undo(self):
    try:
        k = self.change_stack.pop()
    except IndexError:
        # Nothing to undo.
        return
    # undo_flag prevents the reload from pushing new undo states.
    self.undo_flag = True
    self.load( k )
    k.close()
    self.undo_flag = False
    self.load_update()
## Clear the canvas (removes all blocks; undoable)
def clear(self):
    self.change_done()
    container.Block_container.clear(self)
    self.first_expose = False
    self.set_clicked(None)
def remove_selected(self):
    """Delete the currently selected blocks (undoable) and refit scrollbars."""
    self.change_done()
    # Drop the clicked reference if it is among the blocks being removed.
    if self.clicked in self.get_selected():
        self.set_clicked(None)
    container.Block_container.remove_selected(self)
    self.set_adjustment()
def load(self, file):
    """Load a saved canvas state from `file` and refit the scrollbars."""
    container.Block_container.load(self, file)
    self.set_adjustment()
def paste(self):
    """Paste the copied blocks onto the canvas (undoable)."""
    self.change_done()
    container.Block_container.paste(self)
    self.set_adjustment()
def get_drawable(self):
    """Return the gtk.gdk.Drawable used for rendering (may be None)."""
    return self.drawable
def get_gc(self):
    """Return the gtk.gdk.GC graphics context (may be None)."""
    return self.gc
## Fit the scrollbars (and the canvas size) to the blocks on the canvas
def set_adjustment(self):
    maxx = 0
    maxy = 0
    # max value of the scrolls should be the block more distant + 50 in x and y
    for block in self.get_children():
        if block.get_x()+block.get_width() > maxx:
            maxx = block.get_x()+block.get_width()
        if block.get_y()+block.get_height() > maxy:
            maxy = block.get_y()+block.get_height()
    self.get_hadjustment().set_all(self.get_hadjustment().value, 0, maxx+50, 1)
    self.get_vadjustment().set_all(self.get_vadjustment().value, 0, maxy+50, 1)
    # resize canvas
    self.set_size(maxx+10, maxy+10)
def scrollscreen(self, adjustment):
    """Scrollbar value-changed handler: force a redraw of the canvas."""
    self.on_expose(None, None)
## Check that everything is fine before sending the code to the board
def code_ok(self):
    """Validate block connections before code generation.

    Returns 0 when everything is fine; 1 when a block has a missing
    connection (the offending blocks are selected); 2 when an enabled
    set-variable block has no variable chosen (that block is selected).
    """
    self.unselect( self.get_children() )
    l = []
    ans = True
    # Check the start block first, then every user-defined procedure block.
    ans, b = container.Block_container.dock_ok(self, self.startblock)
    l.append(b)
    if ans == True:
        for npblock in newprocedure_block.get_list():
            ans, b = container.Block_container.dock_ok(self, npblock)
            l.append(b)
            if ans == False:
                break
    if ans == False:
        # Highlight every block with a missing connection.
        self.unselect( self.get_children() )
        for missing_connections_blocks in l:
            self.select( missing_connections_blocks )
        return 1
    for block in self.get_children():
        if type(block) == set_variable_block and \
           not block.dock_parent.get_enabled():
            if block.get_variable() == "":
                self.unselect( self.get_children() )
                self.select( [block,] )
                return 2
    return 0
## Return the code generated by the blocks
# @return string with the code
def get_code(self):
    txt=""
    # list with every variable name
    #for var in variable_block.get_names():
    #    txt+= "global "+var+"\n"
    # Generate code for the start block, then each user-defined procedure.
    txt += self.code(self.startblock)+'\n'
    for npblock in newprocedure_block.get_list():
        txt += self.code(npblock) + '\n'
    # indent the code: '[' opens a level, ']' closes one (dropping the tab
    # just emitted), and each newline is followed by one tab per open level
    s=[]
    indent_level=0
    for i in txt:
        if i=='[':
            indent_level+=1
        if i==']':
            indent_level-=1
            s=s[:-1]
        s.append(i)
        if i=='\n':
            s+=['\t']*indent_level
    return string.join(s,'')
## Invalidate the screen area of the arrow next to the selected block
def arrow_mark_dirty(self):
    if self.clicked:
        # The arrow sits immediately to the left of the clicked block.
        self.get_drawable().invalidate_rect(gtk.gdk.Rectangle( self.clicked.get_x()-self.arrow.get_width(), self.clicked.get_y(), self.arrow.get_width(), self.clicked.get_height()), False)
## Invalidate the screen area of the selection rectangle (plus a 2px border)
def selectrect_mark_dirty(self):
    self.get_drawable().invalidate_rect(gtk.gdk.Rectangle( self.selectrect.x-2, self.selectrect.y-2, self.selectrect.w+4, self.selectrect.h+4 ), False )
## Invalidate the screen area of the floating new-block image at the mouse
def create_block_mark_dirty(self):
    self.get_drawable().invalidate_rect(gtk.gdk.Rectangle( self.mousex, self.mousey, self.create_block.get_width(), self.create_block.get_height() ), False )
def on_button_press(self, widget, event):
    """Handle a mouse press: select/drag a block, start the selection
    rectangle on empty space, or drop a new block when in create mode."""
    self.grab_focus()
    if self.create_block is None:
        block = self.find( Vector(event.x, event.y) )
        if block == None:
            # Clicked empty space: clear the selection and start the
            # rubber-band selection rectangle.
            self.unselect( self.get_children() )
            self.set_clicked(None)
            self.selectrect_on = True
            self.selectrect_p1 = Vector(self.mousex, self.mousey)
            self.selectrect = Rect(0, 0, 1 , 1)
            return True
        else:
            # Clicked a block: select it and forward the press (with
            # double-click flag) to the block; show the move cursor.
            self.set_clicked(block)
            block.bpress( Vector(event.x, event.y), event.type == gtk.gdk._2BUTTON_PRESS )
            self.get_toplevel().window.set_cursor( gtk.gdk.Cursor(gtk.gdk.FLEUR) )
    else:
        self.change_done()
        # create new block
        newblock = self.create_block.copy()
        newblock.put(self, event.x, event.y)
        self.set_clicked(newblock)
        self.create_block = None
        self.get_toplevel().window.set_cursor(None)
    return True
def on_key_press(self, widget, event):
    """Keyboard shortcuts: delete selection, cancel modes, refresh, undo."""
    keyname = gtk.gdk.keyval_name(event.keyval)
    if keyname in ("BackSpace", "Delete"):
        self.remove_selected()
    elif keyname == "Escape":
        print 'esc'
        #cancel create-mode
        if not self.create_block is None:
            self.create_block_mark_dirty()
            self.create_block = None
            self.get_toplevel().window.set_cursor(None)
        # Also cancel an in-progress selection rectangle.
        if self.selectrect_on:
            self.selectrect_mark_dirty()
            self.selectrect_on = False
    elif keyname == "r":
        self.refresh()
    elif keyname == "":
        # NOTE(review): keyval_name() does not return an empty string, so
        # this undo branch appears unreachable -- presumably a key binding
        # (e.g. Ctrl+Z) was intended here; confirm and fix the binding.
        self.undo()
    def on_button_release(self, widget, event):
        """Mouse-release handler: finishes a block drag or resolves the
        rubber-band rectangle into a selected group of blocks."""
        if self.clicked:
            # Invalidate before and after: the block may move on release.
            self.arrow_mark_dirty()
            self.clicked.brelease( Vector(event.x, event.y) )
            self.arrow_mark_dirty()
        if self.selectrect_on == True:
            if self.selectrect.w > 0 and self.selectrect.h > 0:
                self.selectrect_mark_dirty()
                # Off-screen pixbuf used for pixel-level collision with blocks
                # that merely overlap (rather than sit fully inside) the rectangle.
                collide_pix = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, self.selectrect.w, self.selectrect.h )
                collide_pix.fill( 0x00000000 )
                collide_pix = collide_pix.add_alpha(False, chr(255), chr(255), chr(255) )
                group = set()
                for block in self.get_children():
                    if self.selectrect.pos_inside( block.get_position() ) or collide_pixbuf(block.get_x(), block.get_y(), block.get_image()[0], self.selectrect.x, self.selectrect.y, collide_pix):
                        group.add( block )
                self.unselect( self.get_children() )
                self.select( group )
                self.set_group( self.clicked )
            self.selectrect_on = False
        self.get_toplevel().window.set_cursor(None)
        self.set_adjustment()
    def on_motion_notify(self, widget, event):
        """Mouse-move handler: keeps the create-block preview, the rubber-band
        rectangle and the dragged block in sync with the pointer."""
        if (not self.create_block is None) and self.mouse_in:
            #refresh the moving image of the create-block
            self.create_block_mark_dirty()
        if self.selectrect_on == True:
            #refresh the selection rectangle
            self.selectrect_mark_dirty()
            p1 = self.selectrect_p1
            p2 = Vector(int(event.x), int(event.y))
            self.selectrect = Rect.create_rect_from_vector(p1, p2)
        self.mousex = int(event.x)
        self.mousey = int(event.y)
        if self.clicked:
            # Invalidate the arrow before and after the drag moves the block.
            self.arrow_mark_dirty()
            self.clicked.drag( Vector(event.x, event.y) )
            self.arrow_mark_dirty()
    def on_enter_notify(self, widget, event):
        # Pointer entered the canvas: the floating create-block preview may be shown.
        self.mouse_in = True
def on_leave_notify(self, widget, event):
self.mouse_in = False
if self.create_block != None:
self.create_block_mark_dirty()
self.mousex = -1
self.mousey = -1
## encontra o bloco inicio
# @return bloco início
def find_startblock(self):
for block in self.get_children():
if type(block) == start_block:
return block
return None
    ## Force a full redraw of the canvas.
    def refresh(self):
        if self.drawable != None:
            self.drawable.clear()
        # Re-resolve the start block; it may have been removed or replaced.
        self.startblock = self.find_startblock()
        if self.startblock == None:
            print "error - start block not found!"
        # (None, None) skips on_expose's first-expose initialization branch.
        self.on_expose(None, None)
    ## Reset the canvas to an empty program with a fresh start block.
    def restart(self):
        self.clear()
        # Force on_expose to run its first-expose initialization again.
        self.first_expose = False
        self.startblock = start_block()
        self.startblock.put(self, 10, 10)
        self.refresh()
    def on_expose(self, widget, event):
        """Expose handler: lazily initializes drawing resources on the first
        real expose, then paints the blocks, the selection arrow, the floating
        create-block preview and the rubber-band rectangle."""
        if self.first_expose == False and event and widget:
            # First real expose: capture the drawable/GC and make sure a
            # start block exists on the canvas.
            self.drawable = event.window
            self.gc = self.drawable.new_gc()
            self.cm = self.gc.get_colormap()
            self.bg = self.cm.alloc_color("#ffffff")
            if self.find_startblock() == None:
                self.startblock = start_block()
                self.startblock.put(self, 10, 10)
                self.set_clicked( self.startblock )
                self.change_done()
            self.first_expose = True
            self.set_adjustment()
        for block in self.get_children()[:]:
            if block.was_put == False:
                block.put(self, block.get_x(), block.get_y() )
            #blocks at the end of the list are drawn on top
            block.draw(self.drawable, self.gc)
        if self.clicked:
            #arrow beside the selected block
            self.drawable.draw_pixbuf( self.gc, self.arrow, 0, 0, self.clicked.get_x()-self.arrow.get_width(), self.clicked.get_y()+self.clicked.get_height()/2-self.arrow.get_height()/2 )
        if not self.create_block is None:
            if self.mousex != -1 and self.mousey != -1:
                #a floating preview of the new block about to be created
                self.drawable.draw_pixbuf( self.gc, self.create_block.get_image()[0], 0, 0, self.mousex, self.mousey )
        # selection rectangle
        if self.selectrect_on == True:
            self.gc.set_line_attributes(1, gtk.gdk.LINE_DOUBLE_DASH, gtk.gdk.CAP_ROUND, gtk.gdk.JOIN_ROUND)
            self.drawable.draw_rectangle( self.gc, False, self.selectrect.x, self.selectrect.y, self.selectrect.w-1, self.selectrect.h-1 )
        return True
## carrega um projeto
# @param projectfile arquivo com o projeto
def is_saved(self):
k = tempfile.TemporaryFile('w+', 0)
current_project = self.save(k)
if current_project == self.last_saved :
k.flush()
k.seek(0)
return True
k.flush()
k.seek(0)
return False
    ## Load a project file into the canvas.
    # @param projectfile path of the project file
    def load_proj(self, projectfile):
        if os.path.exists(projectfile):
            p=open(projectfile,"r")
            self.load(p)
            p.close()
            # Immediately re-save so last_saved reflects the loaded state.
            self.save_proj(projectfile)
        else:
            print "file "+projectfile+" doesn't exist!"
        self.load_update()
        self.change_done()
## Função executada sempre após fazer um load no canvas
def load_update(self):
self.first_expose == False
self.startblock = self.find_startblock()
self.unselect( self.get_children() )
self.set_clicked(None)
## salva o estado atual do canvas em um arquivo de projeto
# @param projectfile arquivo de projeto
def save_proj(self, projectfile):
f = open(projectfile,"w")
self.last_saved =self.save(f)
f.close()
    ## Arm create-mode: the given block will be instantiated on the next click.
    # @param block prototype block to copy onto the canvas (see on_button_press)
    def set_create_block(self, block):
        self.create_block = block
        # invalid position until the mouse moves over the canvas
        self.mousex = -1
        self.mousey = -1
| yantrabuddhi/blocos | blockcanvas/canvas.py | Python | gpl-3.0 | 19,312 |
from uno.base import UnoBaseFeature, Element
# standard bs3 field follows...
class TextInputBs3(Element):
    # Bootstrap-3 "form-group": a label followed by a placeholder text input.
    # NOTE(review): `Css` and `Payload` are used below but only
    # `UnoBaseFeature` and `Element` are imported in this module -- confirm
    # they are in scope, otherwise this class body fails at import time.
    div_0 = Element('element_0', 'div')
    div_0.class_1 = Css('CLASS','form-group')
    div_0.label_4 = Element('element_4', 'label')
    div_0.label_4.payload_6 = Payload('payload_6', "Text Input with Placeholder")
    div_0.input_8 = Element('element_8', 'input')
    div_0.input_8.class_9 = Css('CLASS','form-control')
    div_0.input_8.placeholder_10 = Css('placeholder','Enter text')
| elbow-jason/Uno | uno/field.py | Python | mit | 517 |
from cosmos.lib.ezflow.tool import Tool
from cosmos.Workflow.models import TaskFile
from cosmos.session import settings as cosmos_settings
import os
def list2input(l):
    """Render an iterable of BAM paths as GATK ``-I`` arguments.

    :param l: iterable of paths (each converted with ``str``)
    :return: e.g. ``"-I a.bam -I b.bam"`` (``"-I "`` for an empty iterable,
        matching the historical behavior)
    """
    # generator expression instead of map(lambda x: str(x), ...)
    return "-I " + " -I ".join(str(x) for x in l)
def get_interval(param_dict):
    """
    :param param_dict: parameter dictionary
    :return: '' when param_dict has no 'interval' key, otherwise ``-L <interval>``
    """
    # Guard clause instead of if/else; key presence (not truthiness) decides.
    if 'interval' not in param_dict:
        return ''
    return '-L {0}'.format(param_dict['interval'])
def get_sleep(settings_dict):
    """
    Work around Orchestra's short-queue policy: jobs finishing in under ten
    minutes get auto-suspended, so capture runs on Orchestra get a trailing
    sleep appended to pad their runtime.

    :param settings_dict: settings dictionary (reads the 'capture' flag)
    :return: ``' && sleep 480'`` for capture runs on Orchestra, else ``''``
    """
    # Short-circuit kept: cosmos_settings is only consulted for capture runs.
    if settings_dict['capture'] and cosmos_settings['server_name'] == 'orchestra':
        return ' && sleep 480'
    return ''
def get_pedigree(settings_dict):
    """
    :param settings_dict: settings dictionary (reads the 'pedigree' path)
    :return: ``' --pedigree <path>'`` when a pedigree is configured, else ''
    """
    if settings_dict['pedigree']:
        return ' --pedigree {0}'.format(settings_dict['pedigree'])
    return ''
class GATK(Tool):
    """Base class for GATK command-line tools.

    Provides the shared ``java -jar`` invocation (``bin``) and appends the
    optional ``--pedigree`` argument to every generated command via
    ``post_cmd``. Removed a leftover commented-out ipdb debugging line.
    """
    time_req = 12*60   # minutes
    mem_req = 5*1024   # MB

    @property
    def bin(self):
        """Java invocation string; the heap is capped at 90% of mem_req."""
        return 'java -Xmx{mem_req}m -Djava.io.tmpdir={s[tmp_dir]} -jar {s[GATK_path]}'.format(
            self=self,s=self.settings,
            mem_req=int(self.mem_req*.9)
        )

    def post_cmd(self,cmd_str,format_dict):
        """Append the configured pedigree argument (if any) to the command."""
        new_cmd_str = cmd_str + ' ' + get_pedigree(format_dict['s'])
        return new_cmd_str,format_dict
class BQSRGatherer(Tool):
    """Merges per-lane BQSR recalibration tables into one, by running the
    external BQSRGathererMain class on the GATK Queue classpath."""
    name="BQSR Gatherer"
    time_req=10
    mem_req=3*1024
    inputs = ['bam','recal']
    outputs = ['recal']
    forward_input = True
    persist=True
    def cmd(self,i, s, p):
        # log4j configuration must be supplied as a file:// URL system property
        return r"""
            java -Dlog4j.configuration="file://{log4j_props}"
            -cp "{s[queue_path]}:{s[bqsr_gatherer_path]}"
            BQSRGathererMain
            $OUT.recal
            {input_recals}
        """, {
        'input_recals': ' '.join(map(str,i['recal'])),
        'log4j_props': os.path.join(s['bqsr_gatherer_path'],'log4j.properties')
        }
class RealignerTargetCreator(GATK):
    """GATK RealignerTargetCreator: emits the intervals that need indel
    realignment, using 1000G phase1 and Mills known-indel resources."""
    name = "Indel Realigner Target Creator"
    mem_req = 8*1024
    #cpu_req = 4
    cpu_req = 2
    inputs = ['bam']
    outputs = ['intervals']
    forward_input = True
    persist=True
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T RealignerTargetCreator
            -R {s[reference_fasta_path]}
            -I {i[bam][0]}
            -o $OUT.intervals
            --known {s[indels_1000g_phase1_path]}
            --known {s[mills_path]}
            -nt {self.cpu_req}
            {interval}
            {sleep}
        """,{'interval':get_interval(p),
            'sleep': get_sleep(s)}
class IndelRealigner(GATK):
    """GATK IndelRealigner: locally realigns reads around the intervals
    produced by RealignerTargetCreator."""
    name = "Indel Realigner"
    mem_req = 8*1024
    inputs = ['bam','intervals']
    outputs = ['bam']
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T IndelRealigner
            -R {s[reference_fasta_path]}
            -I {i[bam][0]}
            -o $OUT.bam
            -targetIntervals {i[intervals][0]}
            -known {s[indels_1000g_phase1_path]}
            -known {s[mills_path]}
            -model USE_READS
            {interval} {sleep}
        """,{'interval':get_interval(p),
            'sleep': get_sleep(s)}
class BQSR(GATK):
    """GATK BaseRecalibrator: builds a base-quality recalibration table from
    known-sites resources (1000G phase1 indels, Mills)."""
    name = "Base Quality Score Recalibration"
    #cpu_req = 8
    cpu_req = 4
    mem_req = 9*1024
    inputs = ['bam']
    outputs = ['recal']
    persist=True
    forward_input = True
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T BaseRecalibrator
            -R {s[reference_fasta_path]}
            {inputs}
            -o $OUT.recal
            -knownSites {s[indels_1000g_phase1_path]}
            -knownSites {s[mills_path]}
            --disable_indel_quals
            -cov ReadGroupCovariate
            -cov QualityScoreCovariate
            -cov CycleCovariate
            -cov ContextCovariate
            -nct {nct}
            {sleep}
        """, {
            'inputs' : list2input(i['bam']),
            # one more data thread than allocated CPUs
            'nct': self.cpu_req +1,
            'sleep': get_sleep(s)
        }
class ApplyBQSR(GATK):
    """GATK PrintReads with -BQSR: writes the recalibrated BAM.

    The dependency on the sample's BQSRGatherer output is wired lazily in
    cmd() (see TODOs). Removed a commented-out map_inputs implementation
    that was dead code.
    """
    name = "Apply BQSR"
    mem_req = 8*1024
    inputs = ['bam','recal']
    outputs = ['bam']
    added_edge = False
    def cmd(self,i,s,p):
        if not self.added_edge:
            #TODO fix this hack. Also there might be duplicate edges being added on reload which doesn't matter but is ugly.
            #TODO this also forces ApplyBQSR to expect a ReduceBQSR
            bqsrG_tool = self.dag.get_tools_by([BQSRGatherer.name],tags={'sample_name':self.tags['sample_name']})[0]
            self.dag.G.add_edge(bqsrG_tool, self)
            self.added_edge = True
        return r"""
            {self.bin}
            -T PrintReads
            -R {s[reference_fasta_path]}
            {inputs}
            -o $OUT.bam
            -BQSR {i[recal][0]}
            {sleep}
        """, {
            'inputs' : list2input(i['bam']),
            'sleep': get_sleep(s)
        }
class ReduceReads(GATK):
    """GATK ReduceReads: compresses a BAM by collapsing uninformative reads."""
    name = "Reduce Reads"
    mem_req = 30*1024
    cpu_req = 1
    inputs = ['bam']
    outputs = ['bam']
    reduce_reads=True
    time_req = 12*60
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T ReduceReads
            -R {s[reference_fasta_path]}
            {inputs}
            -o $OUT.bam
            {interval}
        """, {
            'inputs' : list2input(i['bam']),
            'interval': get_interval(p)
        }
class HaplotypeCaller(GATK):
    """GATK HaplotypeCaller: calls variants per interval (``p[interval]``),
    annotating each call with the listed -A annotations."""
    name = "Haplotype Caller"
    mem_req = 5.5*1024
    cpu_req = 1
    inputs = ['bam']
    outputs = ['vcf']
    time_req = 12*60
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T HaplotypeCaller
            -R {s[reference_fasta_path]}
            --dbsnp {s[dbsnp_path]}
            {inputs}
            -minPruning 3
            -o $OUT.vcf
            -A Coverage
            -A AlleleBalance
            -A AlleleBalanceBySample
            -A DepthPerAlleleBySample
            -A HaplotypeScore
            -A InbreedingCoeff
            -A QualByDepth
            -A FisherStrand
            -A MappingQualityRankSumTest
            -L {p[interval]}
        """, {
            'inputs' : list2input(i['bam'])
        }
class UnifiedGenotyper(GATK):
    """GATK UnifiedGenotyper: calls variants of type ``p[glm]`` (SNP/INDEL)
    per interval, multi-threaded via -nt."""
    name = "Unified Genotyper"
    mem_req = 6.5*1024
    cpu_req = 6
    inputs = ['bam']
    outputs = ['vcf']
    time_req = 12*60
    def cmd(self,i,s,p):
        return r"""
            {self.bin}
            -T UnifiedGenotyper
            -R {s[reference_fasta_path]}
            --dbsnp {s[dbsnp_path]}
            -glm {p[glm]}
            {inputs}
            -o $OUT.vcf
            -A Coverage
            -A AlleleBalance
            -A AlleleBalanceBySample
            -A DepthPerAlleleBySample
            -A HaplotypeScore
            -A InbreedingCoeff
            -A QualByDepth
            -A FisherStrand
            -A MappingQualityRankSumTest
            -baq CALCULATE_AS_NECESSARY
            -L {p[interval]}
            -nt {self.cpu_req}
        """, {
            'inputs' : list2input(i['bam'])
        }
class CombineVariants(GATK):
    """GATK CombineVariants: merges the per-interval VCFs into master.vcf."""
    name = "Combine Variants"
    mem_req = 3*1024
    time_req = 12*60
    inputs = ['vcf']
    outputs = [TaskFile(name='vcf',basename='master.vcf')]
    persist = True
    default_params = {
        'genotypeMergeOptions':'UNSORTED'
    }
    def cmd(self,i,s,p):
        """
        :param genotypemergeoptions: select from the following:
            UNIQUIFY - Make all sample genotypes unique by file. Each sample shared across RODs gets named sample.ROD.
            PRIORITIZE - Take genotypes in priority order (see the priority argument).
            UNSORTED - Take the genotypes in any order.
            REQUIRE_UNIQUE - Require that all samples/genotypes be unique between all inputs.
        """
        return r"""
            {self.bin}
            -T CombineVariants
            -R {s[reference_fasta_path]}
            {inputs}
            -o $OUT.vcf
            -genotypeMergeOptions {p[genotypeMergeOptions]}
        """, {
            'inputs' : "\n".join(["--variant {0}".format(vcf) for vcf in i['vcf']])
        }
class VQSR(GATK):
    """
    VQSR
    100G_phase1_highconfidence is missing from bundle, but referenced in VQSR faq:
    -resource:1000G,known=false,training=true,truth=false,prior=10.0 {s[1000G_phase1_highconfidence_path]}
    Might want to set different values for capture vs whole genome of
    i don't understand vqsr well enough yet
    --maxGaussians 4 -percentBad 0.01 -minNumBad 1000
    Note that HaplotypeScore is no longer applicable to indels
    see http://gatkforums.broadinstitute.org/discussion/2463/unified-genotyper-no-haplotype-score-annotated-for-indels
    """
    name = "Variant Quality Score Recalibration"
    mem_req = 8*1024
    cpu_req = 6
    time_req = 12*60
    inputs = ['vcf']
    outputs = ['recal','tranches','R']
    persist=True
    forward_input = True
    default_params = {
        'inbreeding_coeff' : False
    }
    def cmd(self,i,s,p):
        # DP is unreliable for capture data; InbreedingCoeff is opt-in;
        # QD/HaplotypeScore only apply to SNPs (see class docstring).
        annotations = ['MQRankSum','ReadPosRankSum','FS',]
        if not s['capture']:
            annotations.append('DP')
        if p['inbreeding_coeff']:
            annotations.append('InbreedingCoeff')
        if p['glm'] == 'SNP':
            annotations.extend(['QD','HaplotypeScore'])
            cmd = r"""
                {self.bin}
                -T VariantRecalibrator
                -R {s[reference_fasta_path]}
                --maxGaussians 6
                -input {i[vcf][0]}
                -resource:hapmap,known=false,training=true,truth=true,prior=15.0 {s[hapmap_path]}
                -resource:omni,known=false,training=true,truth=true,prior=12.0 {s[omni_path]}
                -resource:dbsnp,known=true,training=false,truth=false,prior=2.0 {s[dbsnp_path]}
                -an {an}
                -mode SNP
                -recalFile $OUT.recal
                -tranchesFile $OUT.tranches
                -rscriptFile $OUT.R
                -nt {self.cpu_req}
            """
        elif p['glm'] == 'INDEL':
            cmd = r"""
                {self.bin}
                -T VariantRecalibrator
                -R {s[reference_fasta_path]}
                -input {i[vcf][0]}
                --maxGaussians 4 -percentBad 0.01 -minNumBad 1000
                -resource:mills,known=false,training=true,truth=true,prior=12.0 {s[mills_path]}
                -resource:dbsnp,known=true,training=false,truth=false,prior=2.0 {s[dbsnp_path]}
                -an {an}
                -mode INDEL
                -recalFile $OUT.recal
                -tranchesFile $OUT.tranches
                -rscriptFile $OUT.R
            """
        else:
            # BUG FIX: an unrecognized glm previously fell through and raised
            # NameError on the unbound `cmd`; fail fast with a clear message.
            raise ValueError('Unsupported glm: {0}'.format(p['glm']))
        return cmd, {'an':' -an '.join(annotations)}
class Apply_VQSR(GATK):
    """GATK ApplyRecalibration: filters variants with the VQSR model
    (recal/tranches from the VQSR step) at ts_filter_level 99.9."""
    name = "Apply VQSR"
    mem_req = 8*1024
    time_req = 12*60
    persist=True
    inputs = ['vcf','recal','tranches']
    outputs = [TaskFile(name='vcf',persist=True)]
    def cmd(self,i,s,p):
        if p['glm'] == 'SNP':
            cmd = r"""
                {self.bin}
                -T ApplyRecalibration
                -R {s[reference_fasta_path]}
                -input {i[vcf][0]}
                -tranchesFile {i[tranches][0]}
                -recalFile {i[recal][0]}
                -o $OUT.vcf
                --ts_filter_level 99.9
                -mode SNP
            """
        elif p['glm'] == 'INDEL':
            cmd = r"""
                {self.bin}
                -T ApplyRecalibration
                -R {s[reference_fasta_path]}
                -input {i[vcf][0]}
                -tranchesFile {i[tranches][0]}
                -recalFile {i[recal][0]}
                -o $OUT.vcf
                --ts_filter_level 99.9
                -mode INDEL
            """
        else:
            # BUG FIX: an unrecognized glm previously raised NameError on the
            # unbound `cmd`; fail fast with a clear message instead.
            raise ValueError('Unsupported glm: {0}'.format(p['glm']))
        return cmd
| LPM-HMS/GenomeKey | obsolete/genomekey/tools/gatk.py | Python | mit | 12,514 |
from practicum import McuBoard
# USB control-request IDs understood by the peripheral-board firmware.
# Switch requests return one byte (1 = pressed); RQ_GET_LIGHT returns two
# bytes, combined as low + high*256 by PeriBoard.getLight().
RQ_GET_SWITCH_UP = 4
RQ_GET_SWITCH_DOWN = 1
RQ_GET_SWITCH_LEFT = 3
RQ_GET_SWITCH_RIGHT = 5
RQ_GET_SWITCH_BOMB = 0
RQ_GET_LIGHT = 2
# Usage example:
# from practicum import *
# from peri import PeriBoard
# devs = findDevices()
# b = PeriBoard(devs[0])
####################################
class PeriBoard(McuBoard):
    '''
    Peripheral board with five push switches (up/down/left/right/bomb) and a
    light sensor, accessed over USB control requests (see the RQ_* constants).
    '''

    ################################
    def _read_switch(self, request):
        '''
        Issue a one-byte USB read for the given request ID and report whether
        the corresponding switch is currently pressed (firmware returns 1
        while the switch is held down).
        '''
        return self.usbRead(request = request, length = 1)[0] == 1

    ################################
    def getSwitchUp(self):
        '''
        Return a boolean value indicating whether the up switch on the
        peripheral board is currently pressed
        '''
        return self._read_switch(RQ_GET_SWITCH_UP)

    ################################
    def getSwitchDown(self):
        '''
        Return a boolean value indicating whether the down switch on the
        peripheral board is currently pressed
        '''
        return self._read_switch(RQ_GET_SWITCH_DOWN)

    ################################
    def getSwitchLeft(self):
        '''
        Return a boolean value indicating whether the left switch on the
        peripheral board is currently pressed
        '''
        return self._read_switch(RQ_GET_SWITCH_LEFT)

    ################################
    def getSwitchRight(self):
        '''
        Return a boolean value indicating whether the right switch on the
        peripheral board is currently pressed
        '''
        return self._read_switch(RQ_GET_SWITCH_RIGHT)

    ################################
    def getSwitchBomb(self):
        '''
        Return a boolean value indicating whether the bomb switch on the
        peripheral board is currently pressed
        '''
        return self._read_switch(RQ_GET_SWITCH_BOMB)

    ################################
    def getLight(self):
        '''
        Return the current reading of light sensor on peripheral board
        '''
        # BUG FIX: the old code issued TWO separate USB reads and combined the
        # low byte of one with the high byte of the other, so the two halves
        # could come from different sensor samples. Read once and combine
        # little-endian: low + high*256.
        data = self.usbRead(request = RQ_GET_LIGHT, length = 2)
        return data[0] + data[1] * 256
| vuun/PyGame_OOP | board/peri.py | Python | mit | 2,252 |
"""
Helper functions and classes for discussion tests.
"""
from uuid import uuid4
from ...fixtures.discussion import (
SingleThreadViewFixture,
Thread,
Response,
)
class BaseDiscussionMixin(object):
    """
    A mixin containing methods common to discussion tests.
    """
    def setup_thread(self, num_responses, **thread_kwargs):
        """
        Build a thread fixture with ``num_responses`` responses (forwarding
        all keyword arguments to the Thread fixture), push it to the server,
        and open the thread page via ``setup_thread_page``.
        """
        thread_id = "test_thread_{}".format(uuid4().hex)
        fixture = SingleThreadViewFixture(
            Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
        )
        for index in range(num_responses):
            fixture.addResponse(Response(id=str(index), body=str(index)))
        fixture.push()
        self.setup_thread_page(thread_id)
| LearnEra/LearnEraPlaftform | common/test/acceptance/tests/discussion/helpers.py | Python | agpl-3.0 | 933 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import cint, today, flt
from erpnext.setup.utils import get_company_currency, get_exchange_rate
from erpnext.accounts.utils import get_fiscal_year, validate_fiscal_year
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.controllers.recurring_document import convert_to_recurring, validate_recurring_document
import json
class AccountsController(TransactionBase):
def validate(self):
if self.get("_action") and self._action != "update_after_submit":
self.set_missing_values(for_validate=True)
self.validate_date_with_fiscal_year()
if self.meta.get_field("currency"):
self.calculate_taxes_and_totals()
self.validate_value("grand_total", ">=", 0)
self.set_total_in_words()
self.validate_for_freezed_account()
if self.meta.get_field("is_recurring"):
validate_recurring_document(self)
def on_submit(self):
if self.meta.get_field("is_recurring"):
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
def on_update_after_submit(self):
if self.meta.get_field("is_recurring"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
def before_recurring(self):
self.fiscal_year = None
for fieldname in ("due_date", "aging_date"):
if self.meta.get_field(fieldname):
self.set(fieldname, None)
def set_missing_values(self, for_validate=False):
for fieldname in ["posting_date", "transaction_date"]:
if not self.get(fieldname) and self.meta.get_field(fieldname):
self.set(fieldname, today())
if not self.fiscal_year:
self.fiscal_year = get_fiscal_year(self.get(fieldname))[0]
break
def validate_date_with_fiscal_year(self):
if self.meta.get_field("fiscal_year") :
date_field = ""
if self.meta.get_field("posting_date"):
date_field = "posting_date"
elif self.meta.get_field("transaction_date"):
date_field = "transaction_date"
if date_field and self.get(date_field):
validate_fiscal_year(self.get(date_field), self.fiscal_year,
label=self.meta.get_label(date_field))
def validate_for_freezed_account(self):
for fieldname in ["customer", "supplier"]:
if self.meta.get_field(fieldname) and self.get(fieldname):
accounts = frappe.db.get_values("Account",
{"master_type": fieldname.title(), "master_name": self.get(fieldname),
"company": self.company}, "name")
if accounts:
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_frozen_account
for account in accounts:
validate_frozen_account(account[0])
def set_price_list_currency(self, buying_or_selling):
if self.meta.get_field("currency"):
company_currency = get_company_currency(self.company)
# price list part
fieldname = "selling_price_list" if buying_or_selling.lower() == "selling" \
else "buying_price_list"
if self.meta.get_field(fieldname) and self.get(fieldname):
self.price_list_currency = frappe.db.get_value("Price List",
self.get(fieldname), "currency")
if self.price_list_currency == company_currency:
self.plc_conversion_rate = 1.0
elif not self.plc_conversion_rate:
self.plc_conversion_rate = get_exchange_rate(
self.price_list_currency, company_currency)
# currency
if not self.currency:
self.currency = self.price_list_currency
self.conversion_rate = self.plc_conversion_rate
elif self.currency == company_currency:
self.conversion_rate = 1.0
elif not self.conversion_rate:
self.conversion_rate = get_exchange_rate(self.currency,
company_currency)
def set_missing_item_details(self):
"""set missing item values"""
from erpnext.stock.get_item_details import get_item_details
if hasattr(self, "fname"):
parent_dict = {}
for fieldname in self.meta.get_valid_columns():
parent_dict[fieldname] = self.get(fieldname)
for item in self.get(self.fname):
if item.get("item_code"):
args = parent_dict.copy()
args.update(item.as_dict())
ret = get_item_details(args)
for fieldname, value in ret.items():
if item.meta.get_field(fieldname) and \
item.get(fieldname) is None and value is not None:
item.set(fieldname, value)
if ret.get("pricing_rule"):
for field in ["base_price_list_rate", "price_list_rate",
"discount_percentage", "base_rate", "rate"]:
item.set(field, ret.get(field))
def set_taxes(self, tax_parentfield, tax_master_field):
if not self.meta.get_field(tax_parentfield):
return
tax_master_doctype = self.meta.get_field(tax_master_field).options
if not self.get(tax_parentfield):
if not self.get(tax_master_field):
# get the default tax master
self.set(tax_master_field, frappe.db.get_value(tax_master_doctype, {"is_default": 1}))
self.append_taxes_from_master(tax_parentfield, tax_master_field, tax_master_doctype)
def append_taxes_from_master(self, tax_parentfield, tax_master_field, tax_master_doctype=None):
if self.get(tax_master_field):
if not tax_master_doctype:
tax_master_doctype = self.meta.get_field(tax_master_field).options
self.extend(tax_parentfield,
get_taxes_and_charges(tax_master_doctype, self.get(tax_master_field), tax_parentfield))
def set_other_charges(self):
self.set("other_charges", [])
self.set_taxes("other_charges", "taxes_and_charges")
def calculate_taxes_and_totals(self):
self.discount_amount_applied = False
self._calculate_taxes_and_totals()
if self.meta.get_field("discount_amount"):
self.apply_discount_amount()
def _calculate_taxes_and_totals(self):
# validate conversion rate
company_currency = get_company_currency(self.company)
if not self.currency or self.currency == company_currency:
self.currency = company_currency
self.conversion_rate = 1.0
else:
from erpnext.setup.doctype.currency.currency import validate_conversion_rate
validate_conversion_rate(self.currency, self.conversion_rate,
self.meta.get_label("conversion_rate"), self.company)
self.conversion_rate = flt(self.conversion_rate)
self.item_doclist = self.get(self.fname)
self.tax_doclist = self.get(self.other_fname)
self.calculate_item_values()
self.initialize_taxes()
if hasattr(self, "determine_exclusive_rate"):
self.determine_exclusive_rate()
self.calculate_net_total()
self.calculate_taxes()
self.calculate_totals()
self._cleanup()
def initialize_taxes(self):
for tax in self.tax_doclist:
tax.item_wise_tax_detail = {}
tax_fields = ["total", "tax_amount_after_discount_amount",
"tax_amount_for_current_item", "grand_total_for_current_item",
"tax_fraction_for_current_item", "grand_total_fraction_for_current_item"]
if not self.discount_amount_applied:
tax_fields.append("tax_amount")
for fieldname in tax_fields:
tax.set(fieldname, 0.0)
self.validate_on_previous_row(tax)
self.validate_inclusive_tax(tax)
self.round_floats_in(tax)
def validate_on_previous_row(self, tax):
"""
validate if a valid row id is mentioned in case of
On Previous Row Amount and On Previous Row Total
"""
if tax.charge_type in ["On Previous Row Amount", "On Previous Row Total"] and \
(not tax.row_id or cint(tax.row_id) >= tax.idx):
throw(_("Please specify a valid Row ID for {0} in row {1}").format(_(tax.doctype), tax.idx))
def validate_inclusive_tax(self, tax):
def _on_previous_row_error(row_range):
throw(_("To include tax in row {0} in Item rate, taxes in rows {1} must also be included").format(tax.idx,
row_range))
if cint(getattr(tax, "included_in_print_rate", None)):
if tax.charge_type == "Actual":
# inclusive tax cannot be of type Actual
throw(_("Charge of type 'Actual' in row {0} cannot be included in Item Rate").format(tax.idx))
elif tax.charge_type == "On Previous Row Amount" and \
not cint(self.tax_doclist[cint(tax.row_id) - 1].included_in_print_rate):
# referred row should also be inclusive
_on_previous_row_error(tax.row_id)
elif tax.charge_type == "On Previous Row Total" and \
not all([cint(t.included_in_print_rate) for t in self.tax_doclist[:cint(tax.row_id) - 1]]):
# all rows about the reffered tax should be inclusive
_on_previous_row_error("1 - %d" % (tax.row_id,))
def calculate_taxes(self):
# maintain actual tax rate based on idx
actual_tax_dict = dict([[tax.idx, flt(tax.rate, self.precision("tax_amount", tax))] for tax in self.tax_doclist
if tax.charge_type == "Actual"])
for n, item in enumerate(self.item_doclist):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
for i, tax in enumerate(self.tax_doclist):
# tax_amount represents the amount of tax for the current step
current_tax_amount = self.get_current_tax_amount(item, tax, item_tax_map)
# Adjust divisional loss to the last item
if tax.charge_type == "Actual":
actual_tax_dict[tax.idx] -= current_tax_amount
if n == len(self.item_doclist) - 1:
current_tax_amount += actual_tax_dict[tax.idx]
# store tax_amount for current item as it will be used for
# charge type = 'On Previous Row Amount'
tax.tax_amount_for_current_item = current_tax_amount
# accumulate tax amount into tax.tax_amount
if not self.discount_amount_applied:
tax.tax_amount += current_tax_amount
tax.tax_amount_after_discount_amount += current_tax_amount
if getattr(tax, "category", None):
# if just for valuation, do not add the tax amount in total
# hence, setting it as 0 for further steps
current_tax_amount = 0.0 if (tax.category == "Valuation") \
else current_tax_amount
current_tax_amount *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
# Calculate tax.total viz. grand total till that step
# note: grand_total_for_current_item contains the contribution of
# item's amount, previously applied tax and the current tax on that item
if i==0:
tax.grand_total_for_current_item = flt(item.base_amount + current_tax_amount,
self.precision("total", tax))
else:
tax.grand_total_for_current_item = \
flt(self.tax_doclist[i-1].grand_total_for_current_item +
current_tax_amount, self.precision("total", tax))
# in tax.total, accumulate grand total of each item
tax.total += tax.grand_total_for_current_item
# set precision in the last item iteration
if n == len(self.item_doclist) - 1:
self.round_off_totals(tax)
# adjust Discount Amount loss in last tax iteration
if i == (len(self.tax_doclist) - 1) and self.discount_amount_applied:
self.adjust_discount_amount_loss(tax)
def round_off_totals(self, tax):
tax.total = flt(tax.total, self.precision("total", tax))
tax.tax_amount = flt(tax.tax_amount, self.precision("tax_amount", tax))
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount,
self.precision("tax_amount", tax))
def adjust_discount_amount_loss(self, tax):
discount_amount_loss = self.grand_total - flt(self.discount_amount) - tax.total
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount +
discount_amount_loss, self.precision("tax_amount", tax))
tax.total = flt(tax.total + discount_amount_loss, self.precision("total", tax))
def get_current_tax_amount(self, item, tax, item_tax_map):
tax_rate = self._get_tax_rate(tax, item_tax_map)
current_tax_amount = 0.0
if tax.charge_type == "Actual":
# distribute the tax amount proportionally to each item row
actual = flt(tax.rate, self.precision("tax_amount", tax))
current_tax_amount = (self.net_total
and ((item.base_amount / self.net_total) * actual)
or 0)
elif tax.charge_type == "On Net Total":
current_tax_amount = (tax_rate / 100.0) * item.base_amount
elif tax.charge_type == "On Previous Row Amount":
current_tax_amount = (tax_rate / 100.0) * \
self.tax_doclist[cint(tax.row_id) - 1].tax_amount_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_amount = (tax_rate / 100.0) * \
self.tax_doclist[cint(tax.row_id) - 1].grand_total_for_current_item
current_tax_amount = flt(current_tax_amount, self.precision("tax_amount", tax))
# store tax breakup for each item
key = item.item_code or item.item_name
if tax.item_wise_tax_detail.get(key):
item_wise_tax_amount = tax.item_wise_tax_detail[key][1] + current_tax_amount
tax.item_wise_tax_detail[key] = [tax_rate,item_wise_tax_amount]
else:
tax.item_wise_tax_detail[key] = [tax_rate,current_tax_amount]
return current_tax_amount
def _load_item_tax_rate(self, item_tax_rate):
return json.loads(item_tax_rate) if item_tax_rate else {}
def _get_tax_rate(self, tax, item_tax_map):
if item_tax_map.has_key(tax.account_head):
return flt(item_tax_map.get(tax.account_head), self.precision("rate", tax))
else:
return tax.rate
def _cleanup(self):
for tax in self.tax_doclist:
tax.item_wise_tax_detail = json.dumps(tax.item_wise_tax_detail, separators=(',', ':'))
def _set_in_company_currency(self, item, print_field, base_field):
"""set values in base currency"""
value_in_company_currency = flt(self.conversion_rate *
flt(item.get(print_field), self.precision(print_field, item)),
self.precision(base_field, item))
item.set(base_field, value_in_company_currency)
def calculate_total_advance(self, parenttype, advance_parentfield):
if self.doctype == parenttype and self.docstatus < 2:
sum_of_allocated_amount = sum([flt(adv.allocated_amount, self.precision("allocated_amount", adv))
for adv in self.get(advance_parentfield)])
self.total_advance = flt(sum_of_allocated_amount, self.precision("total_advance"))
self.calculate_outstanding_amount()
	def get_gl_dict(self, args):
		"""this method populates the common properties of a gl entry record"""
		gl_dict = frappe._dict({
			'company': self.company,
			'posting_date': self.posting_date,
			'voucher_type': self.doctype,
			'voucher_no': self.name,
			# fall back to posting_date when the doctype has no aging_date
			'aging_date': self.get("aging_date") or self.posting_date,
			'remarks': self.get("remarks"),
			'fiscal_year': self.fiscal_year,
			'debit': 0,
			'credit': 0,
			'is_opening': self.get("is_opening") or "No",
		})
		# caller-supplied values override the defaults above
		gl_dict.update(args)
		return gl_dict
	def clear_unallocated_advances(self, childtype, parentfield):
		# Keep only child rows with a non-zero allocated_amount in memory...
		self.set(parentfield, self.get(parentfield, {"allocated_amount": ["not in", [0, None, ""]]}))
		# ...and delete the zero/unset rows from the database. The table name is
		# interpolated (identifiers cannot be bound); the values use placeholders.
		frappe.db.sql("""delete from `tab%s` where parentfield=%s and parent = %s
			and ifnull(allocated_amount, 0) = 0""" % (childtype, '%s', '%s'), (parentfield, self.name))
def get_advances(self, account_head, child_doctype, parentfield, dr_or_cr, against_order_field):
    """Pull advance Journal Voucher rows on *account_head* into *parentfield*.

    Selects submitted JV detail rows flagged as advances that are either not
    adjusted against anything yet, or adjusted against one of this document's
    orders (resolved through *against_order_field*).
    """
    # Distinct order names referenced by this document's entries.
    so_list = list(set([d.get(against_order_field) for d in self.get("entries") if d.get(against_order_field)]))
    cond = ""
    if so_list:
        # Extra OR-clause matching JV rows already tied to one of our orders;
        # one %s placeholder is generated per order name.
        cond = "or (ifnull(t2.%s, '') in (%s))" % ("against_" + against_order_field, ', '.join(['%s']*len(so_list)))
    # The outer %-formatting injects column names and the literal '%s'
    # placeholder for account_head; values are passed separately below.
    res = frappe.db.sql("""
        select
            t1.name as jv_no, t1.remark, t2.%s as amount, t2.name as jv_detail_no, `against_%s` as against_order
        from
            `tabJournal Voucher` t1, `tabJournal Voucher Detail` t2
        where
            t1.name = t2.parent and t2.account = %s and t2.is_advance = 'Yes' and t1.docstatus = 1
            and ((
                ifnull(t2.against_voucher, '') = ''
                and ifnull(t2.against_invoice, '') = ''
                and ifnull(t2.against_jv, '') = ''
                and ifnull(t2.against_sales_order, '') = ''
                and ifnull(t2.against_purchase_order, '') = ''
            ) %s)
        order by t1.posting_date""" %
        (dr_or_cr, against_order_field, '%s', cond),
        tuple([account_head] + so_list), as_dict= True)
    # Rebuild the advances table from scratch.
    self.set(parentfield, [])
    for d in res:
        self.append(parentfield, {
            "doctype": child_doctype,
            "journal_voucher": d.jv_no,
            "jv_detail_no": d.jv_detail_no,
            "remarks": d.remark,
            "advance_amount": flt(d.amount),
            # Auto-allocate only when the JV was made against one of our orders.
            "allocated_amount": flt(d.amount) if d.against_order else 0
        })
def validate_advance_jv(self, advance_table_fieldname, against_order_field):
    """Warn when a submitted advance Journal Voucher is linked against one of
    this document's orders but was not pulled into its advances table.

    *against_order_field* is e.g. ``sales_order`` or ``purchase_order``; the
    JV detail column checked is ``against_<against_order_field>``.
    """
    order_list = list(set([d.get(against_order_field) for d in self.get("entries") if d.get(against_order_field)]))
    if order_list:
        account = self.get("debit_to" if self.doctype=="Sales Invoice" else "credit_to")
        against_field = "against_" + against_order_field
        # Bug fix: the WHERE and GROUP BY clauses previously hard-coded
        # `against_sales_order`, so purchase documents were matched against
        # the wrong column and the warning never fired (or fired wrongly).
        jv_against_order = frappe.db.sql("""select parent, %s as against_order
            from `tabJournal Voucher Detail`
            where docstatus=1 and account=%s and ifnull(is_advance, 'No') = 'Yes'
            and ifnull(%s, '') in (%s)
            group by parent, %s""" %
            (against_field, '%s', against_field,
                ', '.join(['%s']*len(order_list)), against_field),
            tuple([account] + order_list), as_dict=1)
        if jv_against_order:
            order_jv_map = {}
            for d in jv_against_order:
                order_jv_map.setdefault(d.against_order, []).append(d.parent)
            advance_jv_against_si = [d.journal_voucher for d in self.get(advance_table_fieldname)]
            for order, jv_list in order_jv_map.items():
                for jv in jv_list:
                    if not advance_jv_against_si or jv not in advance_jv_against_si:
                        frappe.msgprint(_("Journal Voucher {0} is linked against Order {1}, check if it should be pulled as advance in this invoice.")
                            .format(jv, order))
def validate_multiple_billing(self, ref_dt, item_ref_dn, based_on, parentfield):
    """Block billing an item beyond its reference amount plus tolerance.

    For every entry linked to a *ref_dt* item row, sums what other submitted
    documents already billed and throws if this document would exceed the
    allowed maximum.
    """
    from erpnext.controllers.status_updater import get_tolerance_for
    item_tolerance = {}
    global_tolerance = None
    for item in self.get("entries"):
        if item.get(item_ref_dn):
            ref_amt = flt(frappe.db.get_value(ref_dt + " Item",
                item.get(item_ref_dn), based_on), self.precision(based_on, item))
            if not ref_amt:
                frappe.msgprint(_("Warning: System will not check overbilling since amount for Item {0} in {1} is zero").format(item.item_code, ref_dt))
            else:
                # Amount already billed by other submitted documents.
                already_billed = frappe.db.sql("""select sum(%s) from `tab%s`
                    where %s=%s and docstatus=1 and parent != %s""" %
                    (based_on, self.tname, item_ref_dn, '%s', '%s'),
                    (item.get(item_ref_dn), self.name))[0][0]
                total_billed_amt = flt(flt(already_billed) + flt(item.get(based_on)),
                    self.precision(based_on, item))
                tolerance, item_tolerance, global_tolerance = get_tolerance_for(item.item_code,
                    item_tolerance, global_tolerance)
                max_allowed_amt = flt(ref_amt * (100 + tolerance) / 100)
                # Small epsilon avoids false positives from float rounding.
                if total_billed_amt - max_allowed_amt > 0.01:
                    # Bug fix: the message used {0} twice and never {2}, so it
                    # printed the item code where the row number belonged and
                    # the row number where the amount belonged.
                    frappe.throw(_("Cannot overbill for Item {0} in row {1} more than {2}. To allow overbilling, please set in Stock Settings").format(item.item_code, item.idx, max_allowed_amt))
def get_company_default(self, fieldname):
    """Look up *fieldname* among this document's company-level defaults."""
    from erpnext.accounts.utils import get_company_default as _company_default
    return _company_default(self.company, fieldname)
def get_stock_items(self):
    """Return the subset of this document's item codes that are stock items."""
    stock_items = []
    # Distinct item codes from the item table named by self.fname.
    item_codes = list(set(item.item_code for item in self.get(self.fname)))
    if item_codes:
        # One %s placeholder is generated per item code; values are passed
        # separately, so this stays parameterized despite the %-join.
        stock_items = [r[0] for r in frappe.db.sql("""select name
            from `tabItem` where name in (%s) and is_stock_item='Yes'""" % \
            (", ".join((["%s"]*len(item_codes))),), item_codes)]
    return stock_items
def set_total_advance_paid(self):
    """Recompute and store the total advance paid against this order.

    Sums submitted advance Journal Voucher rows linked to the order (credit
    side for Sales Orders, debit side for Purchase Orders) and writes it to
    the ``advance_paid`` field, refusing totals above the grand total.
    """
    if self.doctype == "Sales Order":
        dr_or_cr = "credit"
        against_field = "against_sales_order"
    else:
        dr_or_cr = "debit"
        against_field = "against_purchase_order"
    # Column names come from the fixed strings above, so .format() here is safe.
    advance_paid = frappe.db.sql("""
        select
            sum(ifnull({dr_or_cr}, 0))
        from
            `tabJournal Voucher Detail`
        where
            {against_field} = %s and docstatus = 1 and is_advance = "Yes" """.format(dr_or_cr=dr_or_cr, \
        against_field=against_field), self.name)
    if advance_paid:
        advance_paid = flt(advance_paid[0][0], self.precision("advance_paid"))
    if flt(self.grand_total) >= advance_paid:
        frappe.db.set_value(self.doctype, self.name, "advance_paid", advance_paid)
    else:
        frappe.throw(_("Total advance ({0}) against Order {1} cannot be greater \
            than the Grand Total ({2})")
            .format(advance_paid, self.name, self.grand_total))
@property
def company_abbr(self):
    """Company abbreviation, fetched from the DB once and cached per instance."""
    try:
        return self._abbr
    except AttributeError:
        self._abbr = frappe.db.get_value("Company", self.company, "abbr")
        return self._abbr
def check_credit_limit(self, account):
    """Delegate the credit-limit check for *account* to its Account doc,
    using the account's current net outstanding (debits minus credits)."""
    total_outstanding = frappe.db.sql("""
        select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
        from `tabGL Entry` where account = %s""", account)
    # sql() returns a tuple of rows; an account with no GL entries yields None.
    total_outstanding = total_outstanding[0][0] if total_outstanding else 0
    if total_outstanding:
        frappe.get_doc('Account', account).check_credit_limit(total_outstanding)
@frappe.whitelist()
def get_tax_rate(account_head):
    """Return the ``tax_rate`` field of the given Account.

    Whitelisted so client-side scripts can call it directly.
    """
    return frappe.db.get_value("Account", account_head, "tax_rate")
@frappe.whitelist()
def get_taxes_and_charges(master_doctype, master_name, tax_parentfield):
    """Return the tax rows of a tax template, stripped of standard
    metadata fields (name, owner, timestamps, ...)."""
    from frappe.model import default_fields
    tax_master = frappe.get_doc(master_doctype, master_name)
    taxes_and_charges = []
    for tax_row in tax_master.get(tax_parentfield):
        row = tax_row.as_dict()
        for meta_field in default_fields:
            if meta_field in row:
                del row[meta_field]
        taxes_and_charges.append(row)
    return taxes_and_charges
| mbauskar/phrerp | erpnext/controllers/accounts_controller.py | Python | agpl-3.0 | 21,173 |
import os
import re
import tempfile
import unittest
from pyprint.ClosableObject import close_objects
from pyprint.NullPrinter import NullPrinter
from coalib.misc import Constants
from coalib.misc.ContextManagers import make_temp, change_directory
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.parsing.StringProcessing import escape
from coalib.settings.ConfigurationGathering import (
find_user_config, gather_configuration, get_config_directory,
load_configuration)
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
class ConfigurationGatheringTest(unittest.TestCase):
    """Integration tests for coalib.settings.ConfigurationGathering."""

    def setUp(self):
        # Log printer that swallows all output; closed again in tearDown.
        self.log_printer = LogPrinter(NullPrinter())

    def tearDown(self):
        close_objects(self.log_printer)

    def test_gather_configuration(self):
        """Bad coafile names, CLI settings and bear lists are handled."""
        args = (lambda *args: True, self.log_printer)
        # Passing the default coafile name only triggers a warning.
        gather_configuration(*args, arg_list=["-c abcdefghi/invalid/.coafile"])
        # Using a bad filename explicitly exits coala.
        with self.assertRaises(SystemExit):
            gather_configuration(
                *args,
                arg_list=["-S", "test=5", "-c", "some_bad_filename"])
        with make_temp() as temporary:
            sections, local_bears, global_bears, targets = (
                gather_configuration(
                    *args,
                    arg_list=["-S",
                              "test=5",
                              "-c",
                              escape(temporary, "\\"),
                              "-s"]))
            self.assertEqual(str(sections["default"]),
                             "Default {config : " +
                             repr(temporary) + ", save : 'True', test : '5'}")
        with make_temp() as temporary:
            sections, local_bears, global_bears, targets = (
                gather_configuration(*args,
                                     arg_list=["-S test=5",
                                               "-c " + escape(temporary, "\\"),
                                               "-b LineCountBear -s"]))
            self.assertEqual(len(local_bears["default"]), 0)

    def test_default_coafile_parsing(self):
        """The system-wide coafile is parsed into sections."""
        tmp = Constants.system_coafile
        Constants.system_coafile = os.path.abspath(os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "section_manager_test_files",
            "default_coafile"))
        sections, local_bears, global_bears, targets = gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=[])
        self.assertEqual(str(sections["test"]),
                         "test {value : '1', testval : '5'}")
        Constants.system_coafile = tmp

    def test_user_coafile_parsing(self):
        """The per-user coafile is parsed into sections."""
        tmp = Constants.user_coafile
        Constants.user_coafile = os.path.abspath(os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "section_manager_test_files",
            "default_coafile"))
        sections, local_bears, global_bears, targets = gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=[])
        self.assertEqual(str(sections["test"]),
                         "test {value : '1', testval : '5'}")
        Constants.user_coafile = tmp

    def test_nonexistent_file(self):
        """A nonexistent config file exits coala, via CLI or system coafile."""
        filename = "bad.one/test\neven with bad chars in it"
        with self.assertRaises(SystemExit):
            gather_configuration(lambda *args: True,
                                 self.log_printer,
                                 arg_list=['-S', "config=" + filename])
        tmp = Constants.system_coafile
        Constants.system_coafile = filename
        with self.assertRaises(SystemExit):
            gather_configuration(lambda *args: True,
                                 self.log_printer,
                                 arg_list=[])
        Constants.system_coafile = tmp

    def test_merge(self):
        """default_coafile, .coafile and CLI merge with increasing priority."""
        tmp = Constants.system_coafile
        Constants.system_coafile = os.path.abspath(os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "section_manager_test_files",
            "default_coafile"))
        config = os.path.abspath(os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "section_manager_test_files",
            ".coafile"))
        # Check merging of default_coafile and .coafile
        sections, local_bears, global_bears, targets = gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=["-c", re.escape(config)])
        self.assertEqual(str(sections["test"]),
                         "test {value : '2'}")
        self.assertEqual(str(sections["test-2"]),
                         "test-2 {files : '.', bears : 'LineCountBear'}")
        # Check merging of default_coafile, .coafile and cli
        sections, local_bears, global_bears, targets = gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=["-c",
                      re.escape(config),
                      "-S",
                      "test.value=3",
                      "test-2.bears=",
                      "test-5.bears=TestBear2"])
        self.assertEqual(str(sections["test"]), "test {value : '3'}")
        self.assertEqual(str(sections["test-2"]),
                         "test-2 {files : '.', bears : ''}")
        self.assertEqual(str(sections["test-3"]),
                         "test-3 {files : 'MakeFile'}")
        self.assertEqual(str(sections["test-4"]),
                         "test-4 {bears : 'TestBear'}")
        self.assertEqual(str(sections["test-5"]),
                         "test-5 {bears : 'TestBear2'}")
        Constants.system_coafile = tmp

    def test_merge_defaults(self):
        """CLI default-section settings become the defaults of other sections."""
        with make_temp() as temporary:
            sections, local_bears, global_bears, targets = (
                gather_configuration(lambda *args: True,
                                     self.log_printer,
                                     arg_list=["-S",
                                               "value=1",
                                               "test.value=2",
                                               "-c",
                                               escape(temporary, "\\")]))
            self.assertEqual(sections["default"],
                             sections["test"].defaults)

    def test_back_saving(self):
        """The 'save' setting writes the gathered configuration back to disk."""
        filename = os.path.join(tempfile.gettempdir(),
                                "SectionManagerTestFile")
        # We need to use a bad filename or this will parse coalas .coafile
        gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=['-S',
                      "save=" + escape(filename, '\\'),
                      "-c=some_bad_filename"])
        with open(filename, "r") as f:
            lines = f.readlines()
        self.assertEqual(["[Default]\n", "config = some_bad_filename\n"], lines)
        gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=['-S',
                      "save=true",
                      "config=" + escape(filename, '\\'),
                      "test.value=5"])
        with open(filename, "r") as f:
            lines = f.readlines()
        os.remove(filename)
        if os.path.sep == '\\':
            filename = escape(filename, '\\')
        self.assertEqual(["[Default]\n",
                          "config = " + filename + "\n",
                          "\n",
                          "[test]\n",
                          "value = 5\n"], lines)

    def test_targets(self):
        """Positional CLI arguments are returned as run targets."""
        sections, local_bears, global_bears, targets = gather_configuration(
            lambda *args: True,
            self.log_printer,
            arg_list=["default", "test1", "test2"])
        self.assertEqual(targets, ["default", "test1", "test2"])

    def test_find_user_config(self):
        """find_user_config walks up at most the given number of directories."""
        current_dir = os.path.abspath(os.path.dirname(__file__))
        c_file = os.path.join(current_dir,
                              "section_manager_test_files",
                              "project",
                              "test.c")
        retval = find_user_config(c_file, 1)
        self.assertEqual("", retval)
        retval = find_user_config(c_file, 2)
        self.assertEqual(os.path.join(current_dir,
                                      "section_manager_test_files",
                                      ".coafile"), retval)
        child_dir = os.path.join(current_dir,
                                 "section_manager_test_files",
                                 "child_dir")
        retval = find_user_config(child_dir, 2)
        self.assertEqual(os.path.join(current_dir,
                                      "section_manager_test_files",
                                      "child_dir",
                                      ".coafile"), retval)
        with change_directory(child_dir):
            sections, _, _, _ = gather_configuration(
                lambda *args: True,
                self.log_printer,
                arg_list=["--find-config"])
            self.assertEqual(bool(sections["default"]['find_config']), True)

    def test_no_config(self):
        """--no-config skips config files and conflicts with --save/--find-config."""
        current_dir = os.path.abspath(os.path.dirname(__file__))
        child_dir = os.path.join(current_dir,
                                 "section_manager_test_files",
                                 "child_dir")
        with change_directory(child_dir):
            sections, targets = load_configuration([], self.log_printer)
            self.assertIn('value', sections["default"])
            sections, targets = load_configuration(
                ['--no-config'],
                self.log_printer)
            self.assertNotIn('value', sections["default"])
            sections, targets = load_configuration(
                ['--no-config', '-S', 'use_spaces=True'],
                self.log_printer)
            self.assertIn('use_spaces', sections["default"])
            self.assertNotIn('values', sections["default"])
            sections, targets = load_configuration(
                ['--no-config', 'False', '-S', 'use_spaces=True'],
                self.log_printer)
            self.assertIn('use_spaces', sections["default"])
            self.assertIn('value', sections["default"])
            with self.assertRaises(SystemExit) as cm:
                sections, target = load_configuration(
                    ['--no-config', '--save'],
                    self.log_printer)
            self.assertEqual(cm.exception.code, 2)
            with self.assertRaises(SystemExit) as cm:
                sections, target = load_configuration(
                    ['--no-config', '--find-config'],
                    self.log_printer)
            self.assertEqual(cm.exception.code, 2)

    def test_get_config_directory(self):
        """get_config_directory resolves the directory for several setups."""
        # os.path predicates are monkey-patched below; restored at the end.
        old_isfile = os.path.isfile
        old_isdir = os.path.isdir
        section = Section("default")
        # Without section
        config_dir = get_config_directory(None)
        self.assertEqual(config_dir, os.getcwd())
        # With section, but without "config"
        os.path.isfile = lambda *args: True
        config_dir = get_config_directory(section)
        self.assertEqual(config_dir, os.getcwd())
        os.path.isfile = lambda *args: False
        config_dir = get_config_directory(section)
        self.assertEqual(config_dir, None)
        # With "config" in section
        section.append(Setting("config", "/path/to/dir/config"))
        os.path.isdir = lambda *args: True
        config_dir = get_config_directory(section)
        self.assertEqual(config_dir, "/path/to/dir/config")
        os.path.isdir = lambda *args: False
        config_dir = get_config_directory(section)
        self.assertEqual(config_dir, "/path/to/dir")
        os.path.isdir = old_isdir
        os.path.isfile = old_isfile

    def test_autoapply_arg(self):
        """The autoapply keyword sets the section value only when False."""
        sections, _, _, _ = gather_configuration(
            lambda *args: True,
            self.log_printer,
            autoapply=False,
            arg_list=[])
        self.assertEqual(str(sections['default'].get('autoapply', None)),
                         'False')
        sections, _, _, _ = gather_configuration(
            lambda *args: True,
            self.log_printer,
            autoapply=True,
            arg_list=[])
        self.assertEqual(str(sections['default'].get('autoapply', None)),
                         'None')
| djkonro/coala | tests/settings/ConfigurationGatheringTest.py | Python | agpl-3.0 | 12,712 |
#!/usr/bin/python
import unittest
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared.test_utils import mock
from autotest.client import harness, harness_standalone, harness_ABAT
class harness_unittest(unittest.TestCase):
    """Verify that harness.select() instantiates the harness matching its name."""

    def setUp(self):
        self.god = mock.mock_god()

    def tearDown(self):
        self.god.unstub_all()

    def _check_select(self, which, module, class_name):
        """Stub *class_name* in *module*, run harness.select(*which*) and
        check that exactly that class was constructed with the job and args.

        Consolidates the identical bodies of the three tests below (DRY).
        """
        job = object()
        self.god.stub_class(module, class_name)
        harness_args = ''
        getattr(module, class_name).expect_new(job, harness_args)
        harness.select(which, job, harness_args)
        self.god.check_playback()

    def test_select_none(self):
        # No name falls back to the standalone harness.
        self._check_select(None, harness_standalone, "harness_standalone")

    def test_select_standalone(self):
        self._check_select('standalone', harness_standalone, "harness_standalone")

    def test_select_ABAT(self):
        self._check_select('ABAT', harness_ABAT, "harness_ABAT")
if __name__ == "__main__":
unittest.main()
| nacc/autotest | client/harness_unittest.py | Python | gpl-2.0 | 1,370 |
from django.contrib.auth import get_user_model
from elasticsearch_dsl import field
class ContributorField(field.Object):
    """Elasticsearch mapping field for a contributor (Django auth user)."""

    def __init__(self, *args, **kwargs):
        super(ContributorField, self).__init__(*args, **kwargs)
        # username is used for exact-match filtering, so skip analysis.
        self.properties['username'] = field.String(index='not_analyzed')
        self.properties['is_freelance'] = field.Boolean()

    def to_es(self, obj):
        """Serialize a user instance into its ES document payload."""
        data = {
            'id': obj.id,
            'username': obj.username,
            'first_name': obj.first_name,
            'last_name': obj.last_name
        }
        profile = getattr(obj, 'freelanceprofile', None)
        if profile:
            data['is_freelance'] = profile.is_freelance
        return data

    def to_python(self, data):
        """Resolve the stored id back to a user instance.

        Returns None when the user no longer exists (previously implicit;
        also previously issued two queries via exists() + first() — first()
        alone performs a single query and already returns None on no match).
        """
        User = get_user_model()
        return User.objects.filter(id=data['id']).first()
class ContentContributionsField(field.Nested):
    """
    This needs a better long-term solution, but alas, time is cruel.
    """
    def __init__(self, *args, **kwargs):
        super(ContentContributionsField, self).__init__(*args, **kwargs)
        self.properties['contributor'] = ContributorField()

    def to_es(self, obj):
        # Intentionally serializes nothing; contributions are excluded from
        # the ES payload for now (see class docstring).
        data = {}
        return data

    def to_dict(self, **kwargs):
        # Likewise emit an empty mapping definition.
        return {}
| theonion/django-bulbs | bulbs/contributions/fields.py | Python | mit | 1,311 |
from django.conf import settings
from rest_framework.settings import APISettings
# Optional user overrides, read from Django settings under this key.
USER_SETTINGS = getattr(settings, 'REST_FRAMEWORK_EXTENSIONS', None)

# Default values for every extension setting. Entries listed in
# IMPORT_STRINGS below hold dotted paths that APISettings resolves to
# callables on access.
DEFAULTS = {
    # caching
    'DEFAULT_USE_CACHE': 'default',
    'DEFAULT_CACHE_RESPONSE_TIMEOUT': None,
    'DEFAULT_CACHE_ERRORS': True,
    'DEFAULT_CACHE_KEY_FUNC': 'rest_framework_extensions.utils.default_cache_key_func',
    'DEFAULT_OBJECT_CACHE_KEY_FUNC': 'rest_framework_extensions.utils.default_object_cache_key_func',
    'DEFAULT_LIST_CACHE_KEY_FUNC': 'rest_framework_extensions.utils.default_list_cache_key_func',

    # ETAG
    'DEFAULT_ETAG_FUNC': 'rest_framework_extensions.utils.default_etag_func',
    'DEFAULT_OBJECT_ETAG_FUNC': 'rest_framework_extensions.utils.default_object_etag_func',
    'DEFAULT_LIST_ETAG_FUNC': 'rest_framework_extensions.utils.default_list_etag_func',

    # API - ETAG
    'DEFAULT_API_OBJECT_ETAG_FUNC': 'rest_framework_extensions.utils.default_api_object_etag_func',
    'DEFAULT_API_LIST_ETAG_FUNC': 'rest_framework_extensions.utils.default_api_list_etag_func',

    # other
    'DEFAULT_KEY_CONSTRUCTOR_MEMOIZE_FOR_REQUEST': False,
    'DEFAULT_BULK_OPERATION_HEADER_NAME': 'X-BULK-OPERATION',
    'DEFAULT_PARENT_LOOKUP_KWARG_NAME_PREFIX': 'parent_lookup_'
}

# Settings whose string values must be importable dotted paths.
IMPORT_STRINGS = [
    'DEFAULT_CACHE_KEY_FUNC',
    'DEFAULT_OBJECT_CACHE_KEY_FUNC',
    'DEFAULT_LIST_CACHE_KEY_FUNC',
    'DEFAULT_ETAG_FUNC',
    'DEFAULT_OBJECT_ETAG_FUNC',
    'DEFAULT_LIST_ETAG_FUNC',

    # API - ETAG
    'DEFAULT_API_OBJECT_ETAG_FUNC',
    'DEFAULT_API_LIST_ETAG_FUNC',
]

# Singleton accessor used throughout the package.
extensions_api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
| chibisov/drf-extensions | rest_framework_extensions/settings.py | Python | mit | 1,645 |
#!/usr/bin/python
# Copyright (c) 2015 Ansible, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vca_vapp
short_description: Manages vCloud Air vApp instances.
description:
- This module will actively managed vCloud Air vApp instances. Instances
can be created and deleted as well as both deployed and undeployed.
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
vapp_name:
description:
- The name of the vCloud Air vApp instance
required: yes
template_name:
description:
- The name of the vApp template to use to create the vApp instance. If
the I(state) is not `absent` then the I(template_name) value must be
provided. The I(template_name) must be previously uploaded to the
catalog specified by I(catalog_name)
required: no
default: None
network_name:
description:
- The name of the network that should be attached to the virtual machine
in the vApp. The virtual network specified must already be created in
the vCloud Air VDC. If the I(state) is not 'absent' then the
I(network_name) argument must be provided.
required: no
default: None
network_mode:
description:
- Configures the mode of the network connection.
required: no
default: pool
choices: ['pool', 'dhcp', 'static']
vm_name:
description:
- The name of the virtual machine instance in the vApp to manage.
required: no
default: None
vm_cpus:
description:
- The number of vCPUs to configure for the VM in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
otherwise it is applied to all VMs in the vApp.
required: no
default: None
vm_memory:
description:
- The amount of memory in MB to allocate to VMs in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
otherise it is applied to all VMs in the vApp.
required: no
default: None
operation:
description:
- Specifies an operation to be performed on the vApp.
required: no
default: noop
choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
state:
description:
- Configures the state of the vApp.
required: no
default: present
choices: ['present', 'absent', 'deployed', 'undeployed']
username:
description:
- The vCloud Air username to use during authentication
required: false
default: None
password:
description:
- The vCloud Air password to use during authentication
required: false
default: None
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
required: false
default: None
instance_id:
description:
- The instance id in a vchs environment to be used for creating the vapp
required: false
default: None
host:
description:
- The authentication host to be used when service type is vcd.
required: false
default: None
api_version:
description:
- The api version to be used with the vca
required: false
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
required: false
default: vca
choices: [ "vca", "vchs", "vcd" ]
vdc_name:
description:
- The name of the virtual data center (VDC) where the vm should be created or contains the vAPP.
required: false
default: None
'''
EXAMPLES = '''
- name: Creates a new vApp in a VCA instance
vca_vapp:
vapp_name: tower
state: present
template_name: 'Ubuntu Server 12.04 LTS (amd64 20150127)'
vdc_name: VDC1
instance_id: '<your instance id here>'
username: '<your username here>'
password: '<your password here>'
'''
# Operation applied when the caller requests no power action.
DEFAULT_VAPP_OPERATION = 'noop'

# Map from vCloud status strings to this module's operation names.
VAPP_STATUS = {
    'Powered off': 'poweroff',
    'Powered on': 'poweron',
    'Suspended': 'suspend'
}

# Valid values for the 'state' and 'operation' module arguments.
VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
                   'reboot', 'reset', 'noop']
def get_instance(module):
    """Describe the current vApp as a dict with 'vapp_name', 'state' and,
    when it exists, 'status'.

    Returns state 'absent' when the vApp is missing or the lookup raises
    VcaError.
    """
    vapp_name = module.params['vapp_name']
    inst = dict(vapp_name=vapp_name, state='absent')
    try:
        vapp = module.get_vapp(vapp_name)
        if vapp:
            status = module.vca.get_status(vapp.me.get_status())
            # Unrecognized vCloud statuses are reported as 'unknown'.
            inst['status'] = VAPP_STATUS.get(status, 'unknown')
            inst['state'] = 'deployed' if vapp.me.deployed else 'undeployed'
        return inst
    except VcaError:
        return inst
def create(module):
    """Create the vApp described by the module parameters and block until
    the underlying vCloud task completes."""
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    template_name = module.params['template_name']
    catalog_name = module.params['catalog_name']
    network_name = module.params['network_name']
    network_mode = module.params['network_mode']
    vm_name = module.params['vm_name']
    vm_cpus = module.params['vm_cpus']
    vm_memory = module.params['vm_memory']
    # Bug fix: 'deploy' is not a valid state (choices are present/absent/
    # deployed/undeployed), so the old comparison `state == 'deploy'` was
    # always False and newly created vApps were never marked for deployment.
    deploy = module.params['state'] == 'deployed'
    poweron = module.params['operation'] == 'poweron'
    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
                                  catalog_name, network_name, network_mode,
                                  vm_name, vm_cpus, vm_memory, deploy, poweron)
    module.vca.block_until_completed(task)
def delete(module):
    """Remove the named vApp from its VDC via the vca client."""
    params = module.params
    module.vca.delete_vapp(params['vdc_name'], params['vapp_name'])
def do_operation(module):
    """Execute a power operation on the vApp (or a single VM within it).

    Translates the module's lowercase operation names to the vCloud API's
    camel-case forms and posts a 'power:<op>' command.
    """
    vapp_name = module.params['vapp_name']
    operation = module.params['operation']
    vm_name = module.params.get('vm_name')
    vm = None
    # When a VM name is given, target just that VM instead of the whole vApp.
    if vm_name:
        vm = module.get_vm(vapp_name, vm_name)
    if operation == 'poweron':
        operation = 'powerOn'
    elif operation == 'poweroff':
        operation = 'powerOff'
    cmd = 'power:%s' % operation
    module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=vm)
def set_state(module):
    """Deploy or undeploy the vApp according to the requested state.

    On deploy, power-on is requested when operation == 'poweron'. On
    undeploy, the vCloud API accepts 'powerOff' or 'suspend' as the
    shutdown action; any other operation maps to None (default action).
    """
    state = module.params['state']
    vapp = module.get_vapp(module.params['vapp_name'])
    if state == 'deployed':
        action = module.params['operation'] == 'poweron'
        if not vapp.deploy(action):
            module.fail('unable to deploy vapp')
    elif state == 'undeployed':
        action = module.params['operation']
        if action == 'poweroff':
            action = 'powerOff'
        elif action != 'suspend':
            action = None
        if not vapp.undeploy(action):
            module.fail('unable to undeploy vapp')
def main():
    """Ansible module entry point: reconcile the vApp toward the requested
    state and operation, honoring check mode throughout."""
    argument_spec = dict(
        vapp_name=dict(required=True),
        vdc_name=dict(required=True),
        template_name=dict(),
        catalog_name=dict(default='Public Catalog'),
        network_name=dict(),
        network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
        vm_name=dict(),
        vm_cpus=dict(),
        vm_memory=dict(),
        operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
        state=dict(default='present', choices=VAPP_STATES)
    )

    module = VcaAnsibleModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    state = module.params['state']
    operation = module.params['operation']

    # Current state of the vApp ('absent' when it does not exist).
    instance = get_instance(module)

    result = dict(changed=False)

    if instance and state == 'absent':
        if not module.check_mode:
            delete(module)
        result['changed'] = True

    elif state != 'absent':
        if instance['state'] == 'absent':
            if not module.check_mode:
                create(module)
            result['changed'] = True

        elif instance['state'] != state and state != 'present':
            # Deployed/undeployed mismatch: converge via deploy/undeploy.
            if not module.check_mode:
                set_state(module)
            result['changed'] = True

        # Apply a power operation only when it differs from the current status.
        if operation != instance.get('status') and operation != 'noop':
            if not module.check_mode:
                do_operation(module)
            result['changed'] = True

    return module.exit(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()
| elventear/ansible | lib/ansible/modules/cloud/vmware/vca_vapp.py | Python | gpl-3.0 | 9,031 |
def get_position(pag):
    """Locate the 'start.png' template on screen via pyautogui.

    Returns the matched region, or None when the template is not found.
    """
    template = 'start.png'
    return pag.locateOnScreen(template)
def get_screen(region,win32gui, win32ui, win32con, win32api):
    """Capture a screen region via the Win32 GDI and return raw pixel bytes.

    region is (left, top, width, height). Returns (intsarray, height, width)
    where intsarray is the bitmap's raw byte buffer.
    NOTE(review): win32api is accepted but unused here — presumably kept for
    call-site symmetry; confirm before removing.
    """
    left,top,width,height = region

    # Device context of the whole desktop, wrapped for PyWin32 use.
    hwin = win32gui.GetDesktopWindow()
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    # In-memory DC + bitmap to receive the copied pixels.
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    # Blit the requested region from screen into the memory bitmap.
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
    intsarray = bmp.GetBitmapBits(True)

    # Release GDI resources in the reverse order of acquisition.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())

    return intsarray,height,width
| Akatsuki06/AutonomousCarAI | lib/game_position.py | Python | mit | 762 |
'''One of the enemies, the spider, which climbs along walls and shoots at the player.'''
import pygame
import os
from pygame.locals import *
from locals import *
import data
from object import Gameobject
from sound import play_sound
from animation import Animation
from projectile import Projectile
from util import cycle_clockwise, get_direction
from sound import play_sound
class Spider(Gameobject):
    """A wall-crawling enemy: clings to a surface, tracks the player along
    that surface, and fires projectiles when lined up with them."""

    def __init__(self, screen, x = None, y = None, attached = RIGHT):
        Gameobject.__init__(self, screen, True, False, x, y, 30, True)
        self.animations["default"] = Animation("spider", "standing")
        self.animations["walking"] = Animation("spider", "walking")
        self.image = self.animations[self.current_animation].update_and_get_image()
        self.rect = self.image.get_rect()
        self.life = 10
        self.itemclass = "spider"
        # Which surface the spider clings to: RIGHT/LEFT wall, UP/DOWN floor-ceiling.
        self.attached = attached
        # Direction the spider wants to crawl this frame (STAY/UP/DOWN/LEFT/RIGHT).
        self.move_target = STAY
        # Cooldown (in update ticks) until the spider may fire again.
        self.fire_delay = 0
        return

    def get_orientation(self):
        # The sprite is drawn oriented by the surface it is attached to.
        return self.attached

    def update(self, level = None):
        """Per-frame AI: stick to the wall, crawl toward the player's
        coordinate along it, and fire when aligned. Returns effect objects
        (e.g. blood) produced by the base-class update."""
        blood = Gameobject.update(self, level)
        # Off-screen or mid-flip: no AI this frame.
        if self.x < 0 or self.y < 0 or self.flipping:
            return []
        # Leg attach points probe the tile 2px beyond the rect on the
        # attached side, inset by SPIDER_TOO_WIDE from the corners.
        if self.attached == RIGHT:
            self.top_leg_attach_point = (self.rect.right + 2, self.rect.top + SPIDER_TOO_WIDE)
            self.bottom_leg_attach_point = (self.rect.right + 2, self.rect.bottom - SPIDER_TOO_WIDE)
        if self.attached == LEFT:
            self.top_leg_attach_point = (self.rect.left - 2, self.rect.top + SPIDER_TOO_WIDE)
            self.bottom_leg_attach_point = (self.rect.left - 2, self.rect.bottom - SPIDER_TOO_WIDE)
        if self.attached == DOWN:
            self.top_leg_attach_point = (self.rect.left + SPIDER_TOO_WIDE, self.rect.bottom + 2)
            self.bottom_leg_attach_point = (self.rect.right - SPIDER_TOO_WIDE, self.rect.bottom + 2)
        if self.attached == UP:
            self.top_leg_attach_point = (self.rect.left + SPIDER_TOO_WIDE, self.rect.top - 2)
            self.bottom_leg_attach_point = (self.rect.top - 2, self.rect.top - 2) if False else (self.rect.right - SPIDER_TOO_WIDE, self.rect.top - 2)
        # Fall when neither leg touches ground (the wall ended).
        if (not level.ground_check(self.top_leg_attach_point[0], self.top_leg_attach_point[1])) and (not level.ground_check(self.bottom_leg_attach_point[0], self.bottom_leg_attach_point[1])):
            self.gravity = True
        else:
            self.gravity = False
        # Chase the player's position along the attached axis, with a
        # 2px dead zone so the spider settles instead of jittering.
        self.move_target = STAY
        if self.attached == RIGHT or self.attached == LEFT:
            if (level.player.rect.top > (self.y - 2)):
                self.move_target = DOWN
            if (level.player.rect.bottom < (self.y + 2)):
                self.move_target = UP
        if self.attached == DOWN or self.attached == UP:
            if (level.player.rect.left > (self.x - 2)):
                self.move_target = RIGHT
            if (level.player.rect.right < (self.x + 2)):
                self.move_target = LEFT
        if not self.gravity:
            if self.fire_delay > 0:
                self.fire_delay -= 1
            self.dy = 0
            self.dx = 0
            # Crawl only while the leading leg still has ground to grip.
            if self.move_target == UP:
                if (level.ground_check(self.top_leg_attach_point[0], self.top_leg_attach_point[1] - 1)):
                    self.dy = -1
            if self.move_target == DOWN:
                if (level.ground_check(self.bottom_leg_attach_point[0], self.bottom_leg_attach_point[1] + 1)):
                    self.dy = 1
            if self.move_target == LEFT:
                if (level.ground_check(self.top_leg_attach_point[0] - 1, self.top_leg_attach_point[1])):
                    self.dx = -1
            if self.move_target == RIGHT:
                if (level.ground_check(self.bottom_leg_attach_point[0] + 1, self.bottom_leg_attach_point[1])):
                    self.dx = 1
            # Aligned with a living player: shoot.
            if self.move_target == STAY and not level.player.dead:
                self.fire(level)
        # Animation bookkeeping: restart finished loops, switch between
        # standing and walking based on movement.
        if self.animations[self.current_animation].finished and self.current_animation != "dying":
            self.animations[self.current_animation].reset()
            self.current_animation = "default"
        if self.dx != 0 and self.dy != 0 and self.current_animation == "default":
            self.current_animation = "walking"
        if self.dx == 0 and self.dy == 0 and self.current_animation == "walking":
            self.current_animation = "default"
        return blood

    def flip(self):
        # A level flip rotates the world, so the attached surface rotates too.
        self.attached = cycle_clockwise(self.attached)
        Gameobject.flip(self)
        return

    def fire(self, level):
        """Spawn a projectile away from the attached wall, respecting cooldown."""
        if self.fire_delay == 0:
            play_sound("fire")
            self.fire_delay = SPIDER_FIRE_DELAY
            # get_direction points toward the wall; negate to shoot away from it.
            fire_direction = get_direction(self.attached)
            level.objects.append(Projectile(self.screen, self.x, self.y, fire_direction[0]*-SPIDER_PROJECTILE_SPEED, fire_direction[1]*-SPIDER_PROJECTILE_SPEED, SPIDER_DAMAGE, "energy"))
        return
#!/usr/bin/env python
""" Record a few seconds of audio and plot the fft. """
import pyaudio
import wave
import sys
import random
import LightController
from scipy import *
#from pylab import *
from time import sleep
# --- Audio capture parameters ---
chunk = 1024                      # samples read per loop iteration
FORMAT = pyaudio.paInt16          # 16-bit signed samples
CHANNELS = 1                      # mono capture
RATE = 22050                      # sample rate in Hz
RECORD_SECONDS = 1000             # NOTE(review): not referenced in the visible code
WAVE_OUTPUT_FILENAME = "output.wav"  # NOTE(review): not referenced in the visible code

# --- Beat-detection parameters ---
NBINS = 64;        # number of FFT bins the spectrum is collapsed into
NUMAVE = 22;       # length of the running-average window (in loop iterations)
TRIGGERLEVEL = 1.2;  # trigger when the bin exceeds this multiple of its average
TRIGGERBIN = 0;    # which bin is watched for beats (0 = lowest frequencies)
TRIGGERDWELL = 10; # Number of cycles to wait before another trigger is possible
bool_BeatLasts = False;  # True while the watched bin stays above threshold
MIN_VOLUME = 6000;       # absolute floor below which no trigger fires
dwell = 0;               # countdown since last trigger (see TRIGGERDWELL)

# --- Shared state ---
p = pyaudio.PyAudio()
lc = LightController.lightcontroller();
all = []           # NOTE(review): shadows builtin all(); unused in the visible code
counter1 = 0;      # current step of the 8-step light show (see trigger())
counter2 = 0;      # NOTE(review): unused in the visible code
def trigger(counter1):
    """Advance the light show one step and return the next step index.

    Each step switches off the previous step's lights and switches on its
    own pattern.  The index wraps back to 0 after step 7.
    """
    # (lights to switch off, lights to switch on) for each of the 8 steps.
    steps = [
        ([16, 18, 6], [3, 6, 11, 0, 7, 12, 4]),
        ([3, 6, 11, 0, 7, 12, 4], [16, 18, 5, 15, 22, 9]),
        ([16, 18, 5, 15, 22, 9], [11, 18, 6, 16, 0, 5, 7, 12, 15, 22, 4, 9, 14]),
        ([11, 18, 6, 16, 0, 5, 7, 12, 15, 22, 4, 9, 14], [3, 6, 18, 0, 5, 7, 12, 15, 22, 4, 9, 14]),
        ([3, 6, 18, 0, 5, 7, 12, 15, 22, 4, 9, 14], [3, 11, 16, 7, 5, 15, 4, 9]),
        ([3, 11, 16, 7, 5, 15, 4, 9], [6, 18, 7, 22, 12, 14, 9]),
        ([6, 18, 7, 22, 12, 14, 9], [3, 16, 11, 0, 22, 14]),
        ([3, 16, 11, 0, 22, 14], [3, 18, 6]),
    ]
    if 0 <= counter1 < len(steps):
        off_lights, on_lights = steps[counter1]
        turnOffMulti(off_lights)
        turnOnMulti(on_lights)
    counter1 = counter1 + 1
    if counter1 == 8:
        counter1 = 0
    return counter1
def turnOnMulti(lights, level=25.5):
    """Turn on every light in *lights*.

    :param lights: iterable of light channel numbers
    :param level: brightness passed to the controller (default 25.5, the
        value previously hard-coded here; made a parameter so callers can
        dim without touching this helper)
    """
    for light in lights:
        lc.lightOn(light, level)
def turnOffMulti(lights):
    """Turn off every light in *lights* by driving its brightness to 0."""
    for light_id in lights:
        lc.lightOn(light_id, 0)
avesamples = zeros(NUMAVE);   # ring buffer of recent trigger-bin levels
a = 0;                        # write index into avesamples
print "Opening stream...";
stream = p.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = chunk)
print "done.";

# Main capture/analyse/trigger loop: grab one chunk of audio, take its
# spectrum, and advance the light show when the watched bin jumps above
# its running average (i.e. on a beat).
while True:
    print("counter1: "+str(counter1))
    stream.start_stream();
    data = stream.read(chunk)
    stream.stop_stream();
    arr = zeros(len(data));
    arr = fromstring(data, dtype=short);   # reinterpret raw bytes as 16-bit samples
    arr = arr;   # NOTE(review): no-op; probably leftover from a removed scaling step
    # Frequency domain: keep only the lowest 129 FFT coefficients (magnitudes)
    arr_fft = abs(fft(arr)[0:129]);
    # plot(abs(fft(arr)[0:128]), '-', [0, 2000000], '-', hold=False);
    # Divide the spectrum into NBINS equal-width bins (averaged magnitude)
    binwidth = chunk/(2*NBINS);
    bins = zeros(NBINS);
    # NOTE(review): range(0, NBINS-1) never fills the last bin, and with only
    # 129 FFT points most high bins sum empty slices (stay 0) - confirm intent.
    for i in range(0, NBINS-1):
        bins[i] = sum(arr_fft[i*binwidth:(1+i)*binwidth]) / binwidth;
    #plot(bins[0:10], 'o', [400000, 0], 'o', hold=False, aa=False);
    # Tally running average: overwrite the oldest slot in the ring buffer
    a = a + 1;
    if a == NUMAVE:
        a = 0;
    avesamples[a] = bins[TRIGGERBIN];
    # Deincrement the dwell
    # NOTE(review): dwell is decremented and reset below but never tested,
    # so TRIGGERDWELL currently has no effect - confirm intent.
    dwell = dwell - 1;
    # Check the bin signal level: beat = watched bin well above its average
    # AND above the absolute volume floor
    print bins[TRIGGERBIN]
    if bins[TRIGGERBIN] > TRIGGERLEVEL * average(avesamples) and bins[TRIGGERBIN] > MIN_VOLUME:
        if not bool_BeatLasts: # Only trigger on the start of the beat
            print bins[0];
            # Change the light color
            counter1 = trigger(counter1);
            # Set the dwell counter
            dwell = TRIGGERDWELL;
        bool_BeatLasts = True;
    else:
        bool_BeatLasts = False;
| KappaEtaKappa/Sound-2-Disco | partylights_Dan.py | Python | mit | 3,080 |
from __future__ import absolute_import
from nlpaug.model.audio.audio import *
from nlpaug.model.audio.noise import *
from nlpaug.model.audio.shift import *
from nlpaug.model.audio.speed import *
from nlpaug.model.audio.pitch import *
from nlpaug.model.audio.loudness import *
from nlpaug.model.audio.crop import *
from nlpaug.model.audio.mask import *
from nlpaug.model.audio.vtlp import *
from nlpaug.model.audio.normalization import *
from nlpaug.model.audio.inversion import *
| makcedward/nlpaug | nlpaug/model/audio/__init__.py | Python | mit | 480 |
"""
Redis Backends
------------------
Provides backends for talking to `Redis <http://redis.io>`_.
"""
from __future__ import absolute_import
from dogpile.cache.api import CacheBackend, NO_VALUE
from dogpile.cache.compat import pickle, u
redis = None
__all__ = 'RedisBackend',
class RedisBackend(CacheBackend):
    """A `Redis <http://redis.io/>`_ backend, using the
    `redis-py <http://pypi.python.org/pypi/redis/>`_ backend.

    Example configuration::

        from dogpile.cache import make_region

        region = make_region().configure(
            'dogpile.cache.redis',
            arguments = {
                'host': 'localhost',
                'port': 6379,
                'db': 0,
                'redis_expiration_time': 60*60*2, # 2 hours
                'distributed_lock':True
            }
        )

    Arguments accepted in the arguments dictionary:

    :param url: string. If provided, will override separate host/port/db
     params.  The format is that accepted by ``StrictRedis.from_url()``.

     .. versionadded:: 0.4.1

    :param host: string, default is ``localhost``.

    :param password: string, default is no password.

     .. versionadded:: 0.4.1

    :param port: integer, default is ``6379``.

    :param db: integer, default is ``0``.

    :param redis_expiration_time: integer, number of seconds after setting
     a value that Redis should expire it.  This should be larger than dogpile's
     cache expiration.  By default no expiration is set.

    :param distributed_lock: boolean, when True, will use a
     redis-lock as the dogpile lock.  Use this when multiple processes will
     be talking to the same redis instance.  When left at False, dogpile
     will coordinate on a regular threading mutex.

    :param lock_timeout: integer, number of seconds after acquiring a lock that
     Redis should expire it.  This argument is only valid when
     ``distributed_lock`` is ``True``.

     .. versionadded:: 0.5.0

    :param lock_sleep: integer, number of seconds to sleep when failed to
     acquire a lock.  This argument is only valid when
     ``distributed_lock`` is ``True``.

     .. versionadded:: 0.5.0

    """

    def __init__(self, arguments):
        self._imports()
        # Connection parameters are pop()ed off; lock tuning parameters are
        # read with get() and intentionally left in the dictionary.
        self.url = arguments.pop('url', None)
        self.host = arguments.pop('host', 'localhost')
        self.password = arguments.pop('password', None)
        self.port = arguments.pop('port', 6379)
        self.db = arguments.pop('db', 0)
        self.distributed_lock = arguments.get('distributed_lock', False)
        self.lock_timeout = arguments.get('lock_timeout', None)
        self.lock_sleep = arguments.get('lock_sleep', 0.1)

        self.redis_expiration_time = arguments.pop('redis_expiration_time', 0)
        self.client = self._create_client()

    def _imports(self):
        # defer imports until backend is used
        global redis
        import redis

    def _create_client(self):
        """Build the StrictRedis client; a full URL wins over host/port/db."""
        if self.url is not None:
            return redis.StrictRedis.from_url(url=self.url)
        else:
            return redis.StrictRedis(host=self.host, password=self.password,
                                     port=self.port, db=self.db)

    def get_mutex(self, key):
        """Return a redis distributed lock for *key*, or None so dogpile
        falls back to its in-process threading mutex."""
        if self.distributed_lock:
            return self.client.lock(u('_lock{}').format(key),
                                    self.lock_timeout, self.lock_sleep)
        else:
            return None

    def get(self, key):
        """Fetch and unpickle a single value; NO_VALUE when the key is absent."""
        value = self.client.get(key)
        if value is None:
            return NO_VALUE
        return pickle.loads(value)

    def get_multi(self, keys):
        """Fetch many keys in one MGET round trip, preserving order;
        missing keys become NO_VALUE."""
        values = self.client.mget(keys)
        return [pickle.loads(v) if v is not None else NO_VALUE
                for v in values]

    def set(self, key, value):
        """Pickle and store one value, honouring redis_expiration_time."""
        if self.redis_expiration_time:
            self.client.setex(key, self.redis_expiration_time,
                              pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        else:
            self.client.set(key, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))

    def set_multi(self, mapping):
        """Store many values; uses MSET when no expiration is configured,
        otherwise a pipeline of SETEX commands."""
        # Use HIGHEST_PROTOCOL to stay consistent with set(); previously this
        # pickled with the default protocol, so the same value was encoded
        # differently depending on which write path stored it.
        mapping = dict(
            (k, pickle.dumps(v, pickle.HIGHEST_PROTOCOL))
            for k, v in mapping.items()
        )

        if not self.redis_expiration_time:
            self.client.mset(mapping)
        else:
            pipe = self.client.pipeline()
            for key, value in mapping.items():
                pipe.setex(key, self.redis_expiration_time, value)
            pipe.execute()

    def delete(self, key):
        """Remove a single key."""
        self.client.delete(key)

    def delete_multi(self, keys):
        """Remove many keys with a single DEL command."""
        self.client.delete(*keys)
| dprince/dogpile.cache | dogpile/cache/backends/redis.py | Python | bsd-3-clause | 4,596 |
# -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
import subprocess
from distutils.version import LooseVersion
from subprocess import check_output
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
'maintainers_include', 'sphinx.ext.autosectionlabel',
'kernel_abi']
#
# cdomain is badly broken in Sphinx 3+. Leaving it out generates *most*
# of the docs correctly, but not all. Scream bloody murder but allow
# the process to proceed; hopefully somebody will fix this properly soon.
#
if major >= 3:
sys.stderr.write('''WARNING: The kernel documentation build process
support for Sphinx v3.0 and above is brand new. Be prepared for
possible issues in the generated output.
''')
if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
# Those needed to be scaped by using c_id_attributes[] array
c_id_attributes = [
# GCC Compiler types not parsed by Sphinx:
"__restrict__",
# include/linux/compiler_types.h:
"__iomem",
"__kernel",
"noinstr",
"notrace",
"__percpu",
"__rcu",
"__user",
# include/linux/compiler_attributes.h:
"__alias",
"__aligned",
"__aligned_largest",
"__always_inline",
"__assume_aligned",
"__cold",
"__attribute_const__",
"__copy",
"__pure",
"__designated_init",
"__visible",
"__printf",
"__scanf",
"__gnu_inline",
"__malloc",
"__mode",
"__no_caller_saved_registers",
"__noclone",
"__nonstring",
"__noreturn",
"__packed",
"__pure",
"__section",
"__always_unused",
"__maybe_unused",
"__used",
"__weak",
"noinline",
# include/linux/memblock.h:
"__init_memblock",
"__meminit",
# include/linux/init.h:
"__init",
"__ref",
# include/linux/linkage.h:
"asmlinkage",
]
else:
extensions.append('cdomain')
# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# The name of the math extension changed on Sphinx 1.4
if (major == 1 and minor > 3) or (major > 1):
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
# Enable the hieroglyph slide extension when a new-enough version is
# installed; building docs must still work without it.
try:
    # check_output() returns bytes on Python 3; decode before handing the
    # string to LooseVersion, otherwise the comparison raises and the
    # extension was silently never enabled.
    hglyph_ver = subprocess.check_output(
        ["hieroglyph", "--version"]).decode('utf-8')
    if LooseVersion(hglyph_ver) > LooseVersion("1.0.0"):
        extensions.append('hieroglyph')
except Exception:
    # hieroglyph is optional: missing binary or unparsable version output
    # simply means no slide support.
    pass
extensions.append("ditaa")
extensions.append("asciicast")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'The Linux Kernel'
copyright = 'The kernel development community'
author = 'The kernel development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
    makefile_version = None
    makefile_patchlevel = None
    for line in open('../Makefile'):
        # Tolerate Makefile lines without '=' (comments, rules, recipes):
        # the previous "key, val = line.split('=', 2)" raised ValueError on
        # the very first such line, aborting the scan before VERSION was seen.
        key, sep, val = line.partition('=')
        if not sep:
            continue
        key = key.strip()
        val = val.strip()
        if key == 'VERSION':
            makefile_version = val
        elif key == 'PATCHLEVEL':
            makefile_patchlevel = val
        if makefile_version and makefile_patchlevel:
            break
except Exception:
    # Missing or unreadable Makefile: fall through to the default below.
    pass
finally:
    if makefile_version and makefile_patchlevel:
        version = release = makefile_version + '.' + makefile_patchlevel
    else:
        version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'c'
highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The Read the Docs theme is available from
# - https://github.com/snide/sphinx_rtd_theme
# - https://pypi.python.org/pypi/sphinx_rtd_theme
# - python-sphinx-rtd-theme package (on Debian)
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
def setup(app):
app.add_stylesheet('theme_overrides.css')
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheLinuxKerneldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# Don't mangle with UTF-8 chars
'inputenc': '',
'utf8extra': '',
# Additional stuff for the LaTeX preamble.
'preamble': '''
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
'''
}
# At least one book (translations) may have Asian characters
# with are only displayed if xeCJK is used
cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore')
if cjk_cmd.find("Noto Sans CJK SC") >= 0:
print ("enabling CJK for LaTeX builder")
latex_elements['preamble'] += '''
% This is needed for translations
\\usepackage{xeCJK}
\\setCJKmainfont{Noto Sans CJK SC}
'''
# Fix reference escape troubles with Sphinx 1.4.x
if major == 1 and minor > 3:
latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
if major == 1 and minor <= 4:
latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}'
elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)):
latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in'
latex_elements['preamble'] += '\\fvset{fontsize=auto}\n'
# Customize notice background colors on Sphinx < 1.6:
if major == 1 and minor < 6:
latex_elements['preamble'] += '''
\\usepackage{ifthen}
% Put notes in color and let them be inside a table
\\definecolor{NoteColor}{RGB}{204,255,255}
\\definecolor{WarningColor}{RGB}{255,204,204}
\\definecolor{AttentionColor}{RGB}{255,255,204}
\\definecolor{ImportantColor}{RGB}{192,255,204}
\\definecolor{OtherColor}{RGB}{204,204,204}
\\newlength{\\mynoticelength}
\\makeatletter\\newenvironment{coloredbox}[1]{%
\\setlength{\\fboxrule}{1pt}
\\setlength{\\fboxsep}{7pt}
\\setlength{\\mynoticelength}{\\linewidth}
\\addtolength{\\mynoticelength}{-2\\fboxsep}
\\addtolength{\\mynoticelength}{-2\\fboxrule}
\\begin{lrbox}{\\@tempboxa}\\begin{minipage}{\\mynoticelength}}{\\end{minipage}\\end{lrbox}%
\\ifthenelse%
{\\equal{\\py@noticetype}{note}}%
{\\colorbox{NoteColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{warning}}%
{\\colorbox{WarningColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{attention}}%
{\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{important}}%
{\\colorbox{ImportantColor}{\\usebox{\\@tempboxa}}}%
{\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
}%
}%
}%
}\\makeatother
\\makeatletter
\\renewenvironment{notice}[2]{%
\\def\\py@noticetype{#1}
\\begin{coloredbox}{#1}
\\bf\\it
\\par\\strong{#2}
\\csname py@noticestart@#1\\endcsname
}
{
\\csname py@noticeend@\\py@noticetype\\endcsname
\\end{coloredbox}
}
\\makeatother
'''
# With Sphinx 1.6, it is possible to change the Bg color directly
# by using:
# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
# \definecolor{sphinximportantBgColor}{RGB}{192,255,204}
#
# However, it require to use sphinx heavy box with:
#
# \renewenvironment{sphinxlightbox} {%
# \\begin{sphinxheavybox}
# }
# \\end{sphinxheavybox}
# }
#
# Unfortunately, the implementation is buggy: if a note is inside a
# table, it isn't displayed well. So, for now, let's use boring
# black and white notes.
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Sorted in alphabetical order
latex_documents = [
]
# Add all other index files from Documentation/ subdirectories
# Add all other index files from Documentation/ subdirectories that are
# not already listed explicitly in latex_documents above.
for fn in os.listdir('.'):
    doc = os.path.join(fn, "index")
    if not os.path.exists(doc + ".rst"):
        continue
    if any(entry[0] == doc for entry in latex_documents):
        continue
    latex_documents.append((doc, fn + '.tex',
                            'Linux %s Documentation' % fn.capitalize(),
                            'The kernel development community',
                            'manual'))
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
author, 'TheLinuxKernel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
loadConfig(globals())
| walac/linux | Documentation/conf.py | Python | gpl-2.0 | 20,998 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.validation import assert_list
logger = logging.getLogger(__name__)
class JavaWireLibrary(ExportableJvmLibrary):
  """A Java library generated from Wire IDL files.

  Supports Wire 1.x only.

  For an example Wire 2.x interface that generates service stubs see:
  https://github.com/ericzundel/mvn2pants/tree/master/src/python/squarepants/plugins/sake_wire_codegen

  But note this requires you to write a custom wire code generator with a command line interface.
  """

  def __init__(self,
               payload=None,
               service_writer=None,
               service_writer_options=None,
               roots=None,
               registry_class=None,
               enum_options=None,
               no_options=None,
               **kwargs):
    """
    :param string service_writer: the name of the class to pass as the --service_writer option to
      the Wire compiler (For wire 1.0 only)
    :param list service_writer_options: A list of options to pass to the service writer (For
      wire 1.x only)
    :param list roots: passed through to the --roots option of the Wire compiler
    :param string registry_class: fully qualified class name of RegistryClass to create. If in
      doubt, specify com.squareup.wire.SimpleServiceWriter
    :param list enum_options: list of enums to pass to as the --enum-enum_options option, # optional
    :param boolean no_options: boolean that determines if --no_options flag is passed
    """
    # Writer options are meaningless without a writer: fail fast at target
    # definition time rather than silently ignoring them at codegen time.
    if not service_writer and service_writer_options:
      raise TargetDefinitionException(self,
                                      'service_writer_options requires setting service_writer')

    # Record every codegen setting as a payload field.
    # NOTE(review): payload fields appear to feed target fingerprinting
    # (invalidation) - confirm against the pants Payload docs.
    payload = payload or Payload()
    payload.add_fields({
      'service_writer': PrimitiveField(service_writer or None),
      # assert_list normalizes None to [] and raises TargetDefinitionException
      # for non-list values.
      'service_writer_options': PrimitiveField(
          assert_list(service_writer_options, key_arg='service_writer_options',
                      raise_type=TargetDefinitionException)),
      'roots': PrimitiveField(roots or []),
      'registry_class': PrimitiveField(registry_class or None),
      'enum_options': PrimitiveField(enum_options or []),
      'no_options': PrimitiveField(no_options or False),
    })

    super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)
    self.add_labels('codegen')
| jtrobec/pants | src/python/pants/backend/codegen/targets/java_wire_library.py | Python | apache-2.0 | 2,849 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:si:et:enc=utf-8
# Author: Ivan A-R <[email protected]>
# Project page: http://tuxotronic.org/wiki/projects/stm32loader
#
# This file is part of stm32loader.
#
# stm32loader is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# stm32loader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with stm32loader; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys, getopt
import serial
import time
import glob
import time
import tempfile
import os
import subprocess
try:
from progressbar import *
usepbar = 1
except:
usepbar = 0
# Verbose level: messages with a level at or below QUIET are printed.
QUIET = 5

def mdebug(level, message):
    """Emit *message* on stderr when the global QUIET verbosity allows it."""
    if level <= QUIET:
        print(message, file=sys.stderr)
# Takes chip IDs (obtained via Get ID command) to human-readable names
CHIP_ID_STRS = {0x410: 'STM32F1, performance, medium-density',
0x411: 'STM32F2',
0x412: 'STM32F1, performance, low-density',
0x413: 'STM32F4',
0x414: 'STM32F1, performance, high-density',
0x416: 'STM32L1, performance, medium-density',
0x418: 'STM32F1, connectivity',
0x420: 'STM32F1, value, medium-density',
0x428: 'STM32F1, value, high-density',
0x430: 'STM32F1, performance, XL-density'}
class CmdException(Exception):
    """Raised when the bootloader NACKs a command, fails to answer, or
    replies with an unrecognised byte."""
    pass
class CommandInterface(object):
    def open(self, aport='/dev/tty.usbserial-FTD3TMCH', abaudrate=115200) :
        """Open the serial port used to talk to the bootloader.

        :param aport: serial device path
        :param abaudrate: baud rate
        """
        self.sp = serial.Serial(
            port=aport,
            baudrate=abaudrate, # baudrate
            bytesize=8, # number of databits
            parity=serial.PARITY_EVEN, # even parity, as the bootloader expects
            stopbits=1,
            xonxoff=0, # software flow control disabled
            rtscts=0, # RTS/CTS flow control disabled (RTS/DTR are driven manually below)
            timeout=0.5 # set a timeout value, None for waiting forever
        )
    def _wait_for_ack(self, info="", timeout=0):
        """Read one byte from the chip and interpret it as ACK/NACK.

        :param info: text used in exception messages to identify the command
        :param timeout: extra seconds to keep retrying empty reads; with the
            default of 0 the loop still performs one read (itself bounded by
            the serial port's 0.5 s timeout) before giving up
        :returns: 1 on ACK (0x79)
        :raises CmdException: on no response, NACK (0x1F), or any other byte
        """
        stop = time.time() + timeout
        got = None
        while not got:
            got = self.sp.read(1)
            if time.time() > stop:
                break

        if not got:
            raise CmdException("No response to %s" % info)

        # wait for ask
        ask = ord(got)

        if ask == 0x79:
            # ACK
            return 1
        elif ask == 0x1F:
            # NACK
            raise CmdException("Chip replied with a NACK during %s" % info)

        # Unknown response
        raise CmdException("Unrecognised response 0x%x to %s" % (ask, info))
    def reset(self):
        """Pulse DTR low for 100 ms, then wait 500 ms for the chip to boot.

        NOTE(review): assumes DTR is wired to the target's reset line -
        confirm for the serial adapter in use.
        """
        self.sp.setDTR(0)
        time.sleep(0.1)
        self.sp.setDTR(1)
        time.sleep(0.5)
    def initChip(self):
        """Put the chip into bootloader mode and synchronise with it.

        Drives RTS low while resetting (NOTE(review): presumably RTS is wired
        to the boot-mode pin - confirm), then repeatedly sends the 0x7F sync
        byte for up to 5 seconds.  Either an ACK (0x79) or a NACK (0x1F)
        means the bootloader is listening.

        :raises CmdException: if no recognisable reply arrives within 5 s
        """
        # Set boot
        self.sp.setRTS(0)
        self.reset()

        # Be a bit more persistent when trying to initialise the chip
        stop = time.time() + 5.0
        while time.time() <= stop:
            self.sp.write('\x7f')
            got = self.sp.read()

            # The chip will ACK a sync the very first time and
            # NACK it every time afterwards
            if got and got in '\x79\x1f':
                # Synced up
                return

        raise CmdException('No response while trying to sync')
def releaseChip(self):
self.sp.setRTS(1)
self.reset()
def cmdGeneric(self, cmd):
self.sp.write(chr(cmd))
self.sp.write(chr(cmd ^ 0xFF)) # Control byte
return self._wait_for_ack(hex(cmd))
def cmdGet(self):
if self.cmdGeneric(0x00):
mdebug(10, "*** Get command");
len = ord(self.sp.read())
version = ord(self.sp.read())
mdebug(10, " Bootloader version: "+hex(version))
dat = map(lambda c: hex(ord(c)), self.sp.read(len))
mdebug(10, " Available commands: "+str(dat))
self._wait_for_ack("0x00 end")
return version
else:
raise CmdException("Get (0x00) failed")
def cmdGetVersion(self):
if self.cmdGeneric(0x01):
mdebug(10, "*** GetVersion command")
version = ord(self.sp.read())
self.sp.read(2)
self._wait_for_ack("0x01 end")
mdebug(10, " Bootloader version: "+hex(version))
return version
else:
raise CmdException("GetVersion (0x01) failed")
def cmdGetID(self):
if self.cmdGeneric(0x02):
mdebug(10, "*** GetID command")
len = ord(self.sp.read())
id = self.sp.read(len+1)
self._wait_for_ack("0x02 end")
return id
else:
raise CmdException("GetID (0x02) failed")
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
crc = byte0 ^ byte1 ^ byte2 ^ byte3
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3) + chr(crc))
def cmdReadMemory(self, addr, lng):
assert(lng <= 256)
if self.cmdGeneric(0x11):
mdebug(10, "*** ReadMemory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ack("0x11 address failed")
N = (lng - 1) & 0xFF
crc = N ^ 0xFF
self.sp.write(chr(N) + chr(crc))
self._wait_for_ack("0x11 length failed")
return map(lambda c: ord(c), self.sp.read(lng))
else:
raise CmdException("ReadMemory (0x11) failed")
def cmdGo(self, addr):
if self.cmdGeneric(0x21):
mdebug(10, "*** Go command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ack("0x21 go failed")
else:
raise CmdException("Go (0x21) failed")
def cmdWriteMemory(self, addr, data):
assert(len(data) <= 256)
if self.cmdGeneric(0x31):
mdebug(10, "*** Write memory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ack("0x31 address failed")
#map(lambda c: hex(ord(c)), data)
lng = (len(data)-1) & 0xFF
mdebug(10, " %s bytes to write" % [lng+1]);
self.sp.write(chr(lng)) # len really
crc = 0xFF
for c in data:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ack("0x31 programming failed")
mdebug(10, " Write memory done")
else:
raise CmdException("Write memory (0x31) failed")
def cmdEraseMemory(self, sectors = None):
if self.cmdGeneric(0x43):
mdebug(10, "*** Erase memory command")
if sectors is None:
# Global erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0x00))
else:
# Sectors erase
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ack("0x43 erasing failed")
mdebug(10, " Erase memory done")
else:
raise CmdException("Erase memory (0x43) failed")
# TODO support for non-global mass erase
GLOBAL_ERASE_TIMEOUT_SECONDS = 20 # This takes a while
def cmdExtendedEraseMemory(self):
if self.cmdGeneric(0x44):
mdebug(10, "*** Extended erase memory command")
# Global mass erase
mdebug(5, "Global mass erase; this may take a while")
self.sp.write(chr(0xFF))
self.sp.write(chr(0xFF))
# Checksum
self.sp.write(chr(0x00))
self._wait_for_ack("0x44 extended erase failed",
timeout=self.GLOBAL_ERASE_TIMEOUT_SECONDS)
mdebug(10, " Extended erase memory done")
else:
raise CmdException("Extended erase memory (0x44) failed")
def cmdWriteProtect(self, sectors):
if self.cmdGeneric(0x63):
mdebug(10, "*** Write protect command")
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ack("0x63 write protect failed")
mdebug(10, " Write protect done")
else:
raise CmdException("Write Protect memory (0x63) failed")
def cmdWriteUnprotect(self):
if self.cmdGeneric(0x73):
mdebug(10, "*** Write Unprotect command")
self._wait_for_ack("0x73 write unprotect failed")
self._wait_for_ack("0x73 write unprotect 2 failed")
mdebug(10, " Write Unprotect done")
else:
raise CmdException("Write Unprotect (0x73) failed")
def cmdReadoutProtect(self):
if self.cmdGeneric(0x82):
mdebug(10, "*** Readout protect command")
self._wait_for_ack("0x82 readout protect failed")
self._wait_for_ack("0x82 readout protect 2 failed")
mdebug(10, " Read protect done")
else:
raise CmdException("Readout protect (0x82) failed")
def cmdReadoutUnprotect(self):
if self.cmdGeneric(0x92):
mdebug(10, "*** Readout Unprotect command")
self._wait_for_ack("0x92 readout unprotect failed")
self._wait_for_ack("0x92 readout unprotect 2 failed")
mdebug(10, " Read Unprotect done")
else:
raise CmdException("Readout unprotect (0x92) failed")
# Complex commands section
def readMemory(self, addr, lng):
data = []
if usepbar:
widgets = ['Reading: ', Percentage(),', ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets,maxval=lng, term_width=79).start()
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, 256)
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, lng)
return data
def writeMemory(self, addr, data):
lng = len(data)
mdebug(5, "Writing %(lng)d bytes to start address 0x%(addr)X" %
{ 'lng': lng, 'addr': addr})
if usepbar:
widgets = ['Writing: ', Percentage(),' ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=lng, term_width=79).start()
offs = 0
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+256])
offs = offs + 256
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+lng] + ([0xFF] * (256-lng)) )
def usage():
    """Print command-line usage help to stdout."""
    print("""Usage: %s [-hqVewvr] [-l length] [-p port] [-b baud] [-a addr] [file.bin]
    -h          This help
    -q          Quiet
    -V          Verbose
    -e          Erase
    -w          Write
    -v          Verify
    -r          Read
    -l length   Length of read
    -p port     Serial port (default: first USB-like port in /dev)
    -b baud     Baud speed (default: 115200)
    -a addr     Target address
    ./stm32loader.py -e -w -v example/main.bin
    """ % sys.argv[0])
def read(filename):
    """Read the file to be programmed and return it as a list of byte values.

    ELF inputs are detected by magic number and converted to raw binary
    with objcopy first.

    Raises Exception when an ELF cannot be converted.
    """
    with open(filename, 'rb') as f:
        contents = f.read()
    if not contents.startswith(b'\x7fELF'):
        # Plain binary: return the bytes as a list of integers.
        # (isinstance check keeps this working on both Python 2 and 3.)
        if isinstance(contents, str):
            return [ord(x) for x in contents]
        return list(contents)
    # Actually an ELF file. Convert to binary
    handle, path = tempfile.mkstemp(suffix='.bin', prefix='stm32loader')
    try:
        os.close(handle)
        code = None
        # Try a couple of options for objcopy
        for name in ['arm-none-eabi-objcopy', 'arm-linux-gnueabi-objcopy']:
            try:
                code = subprocess.call([name, '-Obinary', filename, path])
                if code == 0:
                    return read(path)
            except OSError:
                # This objcopy variant is not installed; try the next one.
                pass
        if code is None:
            # Every candidate was missing: previously this raised a
            # confusing NameError because `code` was never assigned.
            raise Exception('No working objcopy found to convert %s' % filename)
        raise Exception('Error %d while converting to a binary file' % code)
    finally:
        # Remove the temporary file
        os.unlink(path)
if __name__ == "__main__":
    # Default configuration, overridden by the command-line options below.
    conf = {
        'port': 'auto',
        'baud': 115200,
        'address': 0x08000000,
        'erase': 0,
        'write': 0,
        'verify': 0,
        'read': 0,
        'len': 1000,
        'fname':'',
    }
    # http://www.python.org/doc/2.5.2/lib/module-getopt.html
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hqVewvrp:b:a:l:")
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err)) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-V':
            QUIET = 10
        elif o == '-q':
            QUIET = 0
        elif o == '-h':
            usage()
            sys.exit(0)
        elif o == '-e':
            conf['erase'] = 1
        elif o == '-w':
            conf['write'] = 1
        elif o == '-v':
            conf['verify'] = 1
        elif o == '-r':
            conf['read'] = 1
        elif o == '-p':
            conf['port'] = a
        elif o == '-b':
            # NOTE(review): eval() of a command-line value is unsafe and
            # fragile; int(a, 0) would accept decimal/hex and nothing else.
            conf['baud'] = eval(a)
        elif o == '-a':
            conf['address'] = eval(a)
        elif o == '-l':
            conf['len'] = eval(a)
        else:
            assert False, "unhandled option"
    # Try and find the port automatically
    if conf['port'] == 'auto':
        ports = []
        # Get a list of all USB-like names in /dev
        for name in ['tty.usbserial', 'ttyUSB']:
            ports.extend(glob.glob('/dev/%s*' % name))
        ports = sorted(ports)
        if ports:
            # Found something - take it
            conf['port'] = ports[0]
    cmd = CommandInterface()
    cmd.open(conf['port'], conf['baud'])
    mdebug(10, "Open port %(port)s, baud %(baud)d" % {'port':conf['port'],
                                                      'baud':conf['baud']})
    try:
        if (conf['write'] or conf['verify']):
            mdebug(5, "Reading data from %s" % args[0])
            data = read(args[0])
        try:
            cmd.initChip()
        except CmdException:
            # Sync failure is only reported here; the cmdGet() below will
            # then fail on its own if the chip really is unreachable.
            print("Can't init. Ensure BOOT0=1, BOOT1=0, and reset device")
        bootversion = cmd.cmdGet()
        mdebug(0, "Bootloader version 0x%X" % bootversion)
        if bootversion < 20 or bootversion >= 100:
            raise Exception('Unreasonable bootloader version %d' % bootversion)
        chip_id = cmd.cmdGetID()
        assert len(chip_id) == 2, "Unreasonable chip id: %s" % repr(chip_id)
        chip_id_num = (ord(chip_id[0]) << 8) | ord(chip_id[1])
        chip_id_str = CHIP_ID_STRS.get(chip_id_num, None)
        if chip_id_str is None:
            mdebug(0, 'Warning: unrecognised chip ID 0x%x' % chip_id_num)
        else:
            mdebug(0, "Chip id 0x%x, %s" % (chip_id_num, chip_id_str))
        if conf['erase']:
            # Pre-3.0 bootloaders use the erase memory
            # command. Starting with 3.0, extended erase memory
            # replaced this command.
            if bootversion < 0x30:
                cmd.cmdEraseMemory()
            else:
                cmd.cmdExtendedEraseMemory()
        if conf['write']:
            cmd.writeMemory(conf['address'], data)
        if conf['verify']:
            verify = cmd.readMemory(conf['address'], len(data))
            if(data == verify):
                print("Verification OK")
            else:
                print("Verification FAILED")
                print(str(len(data)) + ' vs ' + str(len(verify)))
                # NOTE(review): xrange is Python 2 only; range under Python 3.
                for i in xrange(0, len(data)):
                    if data[i] != verify[i]:
                        print(hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]))
        if not conf['write'] and conf['read']:
            rdata = cmd.readMemory(conf['address'], conf['len'])
            # NOTE(review): file() is Python 2 only and the handle is never
            # closed explicitly; prefer "with open(..., 'wb')".
            file(args[0], 'wb').write(''.join(map(chr,rdata)))
    finally:
        # Always take the chip out of bootloader mode, even on errors.
        cmd.releaseChip()
| fabriciopk/SEA_Firmware | firmware/support/stm32loader.py | Python | mit | 17,493 |
# -*- coding: utf-8 -*-
"""
Sop.config
~~~~~~~~~~~~~~
Configure file
:copyright: (c) 2017 by 陶先森.
:license: MIT, see LICENSE for more details.
"""
import os
GLOBAL={
    "ProcessName": "Sop",
    #Custom process name; you can see it with "ps aux|grep ProcessName".
    "Host": os.getenv("sop_host", "0.0.0.0"),
    #Application network address to bind: e.g. `0.0.0.0`, `127.0.0.1`, ``.
    "Port": int(os.getenv("sop_port", 13142)),
    #Application port. Cast to int so the type is consistent whether the
    #value comes from the environment (a string) or the default.
    "LogLevel": os.getenv("sop_loglevel", "DEBUG"),
    #Application log level: DEBUG, INFO, WARNING, ERROR or CRITICAL.
}
REDIS=os.getenv("sop_redis_url", "redis://")
#Redis connection URL, format:
#redis://[:password]@host:port/db
#host and port are required; if a password is set, remember the leading
#colon. Everything else defaults to localhost:6379/0.
SITE={
"title": "SaintIC Sop",
} | staugur/Sop | src/config.py | Python | mit | 899 |
from .misc import (
camel_to_underscore,
convert_date,
convert_datetime,
dict_from_dataframe,
dir_list,
download_if_new,
get_ulmo_dir,
mkdir_if_doesnt_exist,
module_with_dependency_errors,
module_with_deprecation_warnings,
open_file_for_url,
parse_fwf,
raise_dependency_error,
save_pretty_printed_xml,
)
from .raster import (
extract_from_zip,
mosaic_and_clip,
download_tiles,
generate_raster_uid,
)
# PyTables-backed helpers are optional: when the dependency is missing,
# bind each name to raise_dependency_error (imported from .misc above) so
# a clear error surfaces on first use instead of at import time.
try:
    from .pytables import (
        get_default_h5file_path,
        get_or_create_group,
        get_or_create_table,
        open_h5file,
        update_or_append_sortable,
    )
except ImportError:
    get_default_h5file_path = raise_dependency_error
    get_or_create_group = raise_dependency_error
    get_or_create_table = raise_dependency_error
    open_h5file = raise_dependency_error
    update_or_append_sortable = raise_dependency_error
| cameronbracken/ulmo | ulmo/util/__init__.py | Python | bsd-3-clause | 1,031 |
import unittest
"""
Given an array of positive and negative numbers, arrange them in alternate fashion
such that every positive number is followed by a negative number and vice-versa. You should
maintain the order of appearance.
Input: 1 2 3 -4 -1 4
Output: -4 1 -1 2 3 4
"""
"""
Approach 1:
1. Scan the array from left to right. Stop when there are two adjacent numbers with opposite sign.
2. Keep swapping until adjacent numbers have different signs.
"""
"""
Approach 2:
1. Scan the array from left to right.
2. Find the first number which is out of place - a negative number at odd index or a positive number at even index.
3. After finding out of place number, find the first element to its right with opposite sign.
4. Right rotate the array between these two numbers by 1 position.
"""
def same_sign(x, y):
    """Return True when *x* and *y* are both positive or both negative.

    Zero never shares a sign with anything (including another zero).
    """
    return x * y > 0
def rearrange_no_rotation(arr):
    """Alternate positive/negative numbers in *arr* in place.

    Scans left to right; whenever two neighbours break the alternation, a
    number of the opposite sign is bubbled leftwards one swap at a time.
    Relative order of appearance is NOT fully preserved by this variant.
    """
    def _like_signs(a, b):
        # True when a and b are both positive or both negative.
        return (a > 0 and b > 0) or (a < 0 and b < 0)

    for idx in range(len(arr) - 1):
        if _like_signs(arr[idx], arr[idx + 1]):
            continue
        front = idx + 1
        back = idx - 1
        while front >= 0 and back >= 0 and not _like_signs(arr[front], arr[back]):
            arr[front], arr[front - 1] = arr[front - 1], arr[front]
            front -= 1
            back -= 1
def is_out_of_place(number, index):
    """Report whether *number* violates the alternation rule at *index*.

    Negative numbers belong at even indices, positive numbers at odd
    indices; zero is never considered out of place.
    """
    if number < 0:
        return index % 2 != 0
    if number > 0:
        return index % 2 == 0
    return False
def right_rotate(list_of_numbers, start, end, how_many):
    """Right-rotate list_of_numbers[start:end + 1] in place, one position
    at a time, *how_many* times."""
    for _ in range(how_many):
        last = list_of_numbers[end]
        # Shift the segment right by one, then drop the old tail at the front.
        list_of_numbers[start + 1:end + 1] = list_of_numbers[start:end]
        list_of_numbers[start] = last
def rearrange_with_rotation(list_of_numbers):
    """Alternate positive/negative in place while preserving relative order.

    Scan left to right; when a misplaced element is found, right-rotate the
    sub-array from it up to the next element of the opposite sign by one
    position (approach 2 in the header comment).
    """
    end = len(list_of_numbers)
    out_of_place = -1
    for i in range(end):
        # check if current number is out of place
        if out_of_place == -1 and is_out_of_place(list_of_numbers[i], i):
            out_of_place = i
        elif out_of_place >= 0 and not same_sign(list_of_numbers[i], list_of_numbers[out_of_place]):
            right_rotate(list_of_numbers, out_of_place, i, 1)
            # now out_of_place and the number next to it are no longer out of place (Think about it!)
            # so, next potential out_of_place can be two steps ahead from current
            if i - out_of_place > 2:
                out_of_place += 2
            else:
                out_of_place = -1
class TestRearrange(unittest.TestCase):
    """Unit tests for both rearrangement strategies."""
    def test_rearrange_no_rotation(self):
        # The swap-based variant alternates signs but does not preserve
        # the original relative order.
        arr = [1, 2, 3, -4, -1, 4]
        rearrange_no_rotation(arr)
        self.assertEqual(arr, [1, -4, 2, -1, 3, 4])
        arr = [-5, -2, 5, 2, 4, 7, 1, 8, -10, -8]
        rearrange_no_rotation(arr)
        self.assertEqual(arr, [-5, 5, -2, 2, -10, 4, -8, 7, 1, 8])
    def test_rearrange_rotation(self):
        # The rotation-based variant preserves order of appearance.
        arr = [1, 2, 3, -4, -1, 4]
        rearrange_with_rotation(arr)
        self.assertEqual(arr, [-4, 1, -1, 2, 3, 4])
        arr = [-5, -2, 5, 2, 4, 7, 1, 8, -10, -8]
        rearrange_with_rotation(arr)
        self.assertEqual(arr, [-5, 5, -2, 2, -10, 4, -8, 7, 1, 8])
| prathamtandon/g4gproblems | Arrays/rearrange_positive_and_negative_same_order.py | Python | mit | 3,022 |
from django import forms
from a2b_satchmo.customer.function_def import *
from django.forms import ModelForm
from django.contrib import *
from django.contrib.admin.widgets import *
from uni_form.helpers import *
from django.utils.translation import ugettext_lazy as _
from satchmo_store.accounts.forms import RegistrationForm
#from django.shortcuts import render_to_response
#from datetime import *
class PostPaidForm(forms.Form):
    """Form collecting the destination number for a post-paid call."""
    # NOTE(review): IntegerField drops leading zeros and rejects '+', so it
    # is a poor fit for phone numbers; a validated CharField may be better
    # -- confirm with the callers before changing.
    dial_no = forms.IntegerField(required=True, help_text=_("Enter Destination Phone Number"))
| Star2Billing/a2b-satchmo | a2b_satchmo/localsite/forms.py | Python | agpl-3.0 | 527 |
from PoolBasedTripletMDS import PoolBasedTripletMDS
| lalitkumarj/NEXT-psych | gui/base/app_manager/PoolBasedTripletMDS/__init__.py | Python | apache-2.0 | 52 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import six
import tempfile
import time
import unittest2
from gcloud import exceptions
from gcloud import storage
from gcloud import _helpers
from gcloud.storage._helpers import _base64_md5hash
# Plain HTTP client, used to exercise signed URLs outside the storage client.
HTTP = httplib2.Http()
# Buckets shared by the whole module; created in setUpModule and removed
# again in tearDownModule.
SHARED_BUCKETS = {}
# Resolve the project id from the test-specific environment variable.
_helpers._PROJECT_ENV_VAR_NAME = 'GCLOUD_TESTS_PROJECT_ID'
CLIENT = storage.Client()
def setUpModule():
    """Create the single bucket shared by every test in this module."""
    if 'test_bucket' not in SHARED_BUCKETS:
        # %d rounds milliseconds to nearest integer.
        bucket_name = 'new%d' % (1000 * time.time(),)
        # In the **very** rare case the bucket name is reserved, this
        # fails with a ConnectionError.
        SHARED_BUCKETS['test_bucket'] = CLIENT.create_bucket(bucket_name)
def tearDownModule():
    """Delete the shared buckets (force=True also removes their blobs)."""
    for bucket in SHARED_BUCKETS.values():
        bucket.delete(force=True)
class TestStorageBuckets(unittest2.TestCase):
    """Bucket create/list system tests against the live storage API."""
    def setUp(self):
        # Bucket names created by a test case, removed again in tearDown().
        self.case_buckets_to_delete = []
    def tearDown(self):
        # Batch the deletions into a single API round trip.
        with storage.Batch():
            for bucket_name in self.case_buckets_to_delete:
                storage.Bucket(bucket_name).delete()
    def test_create_bucket(self):
        new_bucket_name = 'a-new-bucket'
        self.assertRaises(exceptions.NotFound,
                          CLIENT.get_bucket, new_bucket_name)
        created = CLIENT.create_bucket(new_bucket_name)
        self.case_buckets_to_delete.append(new_bucket_name)
        self.assertEqual(created.name, new_bucket_name)
    def test_list_buckets(self):
        # Timestamp suffixes keep names unique across concurrent runs.
        buckets_to_create = [
            'new%d' % (1000 * time.time(),),
            'newer%d' % (1000 * time.time(),),
            'newest%d' % (1000 * time.time(),),
        ]
        created_buckets = []
        for bucket_name in buckets_to_create:
            bucket = CLIENT.create_bucket(bucket_name)
            self.case_buckets_to_delete.append(bucket_name)
        # Retrieve the buckets.
        all_buckets = CLIENT.list_buckets()
        created_buckets = [bucket for bucket in all_buckets
                           if bucket.name in buckets_to_create]
        self.assertEqual(len(created_buckets), len(buckets_to_create))
class TestStorageFiles(unittest2.TestCase):
    """Shared fixture data and per-test blob cleanup for file tests."""
    # Local fixture files; each entry gains a 'hash' key in setUpClass.
    FILES = {
        'logo': {
            'path': 'system_tests/data/CloudPlatform_128px_Retina.png',
        },
        'big': {
            'path': 'system_tests/data/five-point-one-mb-file.zip',
        },
        'simple': {
            'path': 'system_tests/data/simple.txt',
        }
    }
    @classmethod
    def setUpClass(cls):
        super(TestStorageFiles, cls).setUpClass()
        # Pre-compute the base64 MD5 of every fixture for upload checks.
        for file_data in cls.FILES.values():
            with open(file_data['path'], 'rb') as file_obj:
                file_data['hash'] = _base64_md5hash(file_obj)
        cls.bucket = SHARED_BUCKETS['test_bucket']
    def setUp(self):
        # Blobs created by a test case, removed again in tearDown().
        self.case_blobs_to_delete = []
    def tearDown(self):
        for blob in self.case_blobs_to_delete:
            blob.delete()
class TestStorageWriteFiles(TestStorageFiles):
    """Upload/download round-trip tests for blobs."""
    def test_large_file_write_from_stream(self):
        blob = storage.Blob(bucket=self.bucket, name='LargeFile')
        self.assertEqual(blob._properties, {})
        file_data = self.FILES['big']
        with open(file_data['path'], 'rb') as file_obj:
            blob.upload_from_file(file_obj)
            self.case_blobs_to_delete.append(blob)
        # Normalize the server-reported hash to bytes before comparing.
        md5_hash = blob.md5_hash
        if not isinstance(md5_hash, six.binary_type):
            md5_hash = md5_hash.encode('utf-8')
        self.assertEqual(md5_hash, file_data['hash'])
    def test_small_file_write_from_filename(self):
        blob = storage.Blob(bucket=self.bucket, name='SmallFile')
        self.assertEqual(blob._properties, {})
        file_data = self.FILES['simple']
        blob.upload_from_filename(file_data['path'])
        self.case_blobs_to_delete.append(blob)
        md5_hash = blob.md5_hash
        if not isinstance(md5_hash, six.binary_type):
            md5_hash = md5_hash.encode('utf-8')
        self.assertEqual(md5_hash, file_data['hash'])
    def test_write_metadata(self):
        blob = self.bucket.upload_file(self.FILES['logo']['path'])
        self.case_blobs_to_delete.append(blob)
        # NOTE: This should not be necessary. We should be able to pass
        # it in to upload_file and also to upload_from_string.
        blob.content_type = 'image/png'
        self.assertEqual(blob.content_type, 'image/png')
    def test_direct_write_and_read_into_file(self):
        blob = storage.Blob(bucket=self.bucket, name='MyBuffer')
        file_contents = b'Hello World'
        blob.upload_from_string(file_contents)
        self.case_blobs_to_delete.append(blob)
        same_blob = storage.Blob(bucket=self.bucket, name='MyBuffer')
        same_blob.reload()  # Initialize properties.
        # NOTE(review): mktemp is race-prone and the file is never removed;
        # tempfile.NamedTemporaryFile would be safer.
        temp_filename = tempfile.mktemp()
        with open(temp_filename, 'wb') as file_obj:
            same_blob.download_to_file(file_obj)
        with open(temp_filename, 'rb') as file_obj:
            stored_contents = file_obj.read()
        self.assertEqual(file_contents, stored_contents)
    def test_copy_existing_file(self):
        blob = self.bucket.upload_file(self.FILES['logo']['path'],
                                       blob_name='CloudLogo')
        self.case_blobs_to_delete.append(blob)
        new_blob = self.bucket.copy_blob(blob, self.bucket, 'CloudLogoCopy')
        self.case_blobs_to_delete.append(new_blob)
        base_contents = blob.download_as_string()
        copied_contents = new_blob.download_as_string()
        self.assertEqual(base_contents, copied_contents)
class TestStorageListFiles(TestStorageFiles):
    """Listing and pagination tests over copies of one uploaded blob."""
    FILENAMES = ['CloudLogo1', 'CloudLogo2', 'CloudLogo3']
    @classmethod
    def setUpClass(cls):
        super(TestStorageListFiles, cls).setUpClass()
        # Make sure bucket empty before beginning.
        for blob in cls.bucket.list_blobs():
            blob.delete()
        logo_path = cls.FILES['logo']['path']
        blob = cls.bucket.upload_file(logo_path, blob_name=cls.FILENAMES[0])
        cls.suite_blobs_to_delete = [blob]
        # Copy main blob onto remaining in FILENAMES.
        for filename in cls.FILENAMES[1:]:
            new_blob = cls.bucket.copy_blob(blob, cls.bucket, filename)
            cls.suite_blobs_to_delete.append(new_blob)
    @classmethod
    def tearDownClass(cls):
        for blob in cls.suite_blobs_to_delete:
            blob.delete()
    def test_list_files(self):
        all_blobs = list(self.bucket.list_blobs())
        self.assertEqual(len(all_blobs), len(self.FILENAMES))
    def test_paginate_files(self):
        # Request one fewer than the total so a second page is needed.
        truncation_size = 1
        count = len(self.FILENAMES) - truncation_size
        iterator = self.bucket.list_blobs(max_results=count)
        response = iterator.get_next_page_response()
        blobs = list(iterator.get_items_from_response(response))
        self.assertEqual(len(blobs), count)
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is not None)
        # Fetch the remaining blob on the second page.
        response = iterator.get_next_page_response()
        last_blobs = list(iterator.get_items_from_response(response))
        self.assertEqual(len(last_blobs), truncation_size)
class TestStoragePseudoHierarchy(TestStorageFiles):
    """Delimiter/prefix listing tests simulating a directory hierarchy."""
    FILENAMES = [
        'file01.txt',
        'parent/file11.txt',
        'parent/child/file21.txt',
        'parent/child/file22.txt',
        'parent/child/grand/file31.txt',
        'parent/child/other/file32.txt',
    ]
    @classmethod
    def setUpClass(cls):
        super(TestStoragePseudoHierarchy, cls).setUpClass()
        # Make sure bucket empty before beginning.
        for blob in cls.bucket.list_blobs():
            blob.delete()
        simple_path = cls.FILES['simple']['path']
        blob = cls.bucket.upload_file(simple_path, blob_name=cls.FILENAMES[0])
        cls.suite_blobs_to_delete = [blob]
        # Clone the first blob under every hierarchical name.
        for filename in cls.FILENAMES[1:]:
            new_blob = cls.bucket.copy_blob(blob, cls.bucket, filename)
            cls.suite_blobs_to_delete.append(new_blob)
    @classmethod
    def tearDownClass(cls):
        for blob in cls.suite_blobs_to_delete:
            blob.delete()
    def test_root_level_w_delimiter(self):
        iterator = self.bucket.list_blobs(delimiter='/')
        response = iterator.get_next_page_response()
        blobs = list(iterator.get_items_from_response(response))
        self.assertEqual([blob.name for blob in blobs], ['file01.txt'])
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is None)
        self.assertEqual(iterator.prefixes, set(['parent/']))
    def test_first_level(self):
        iterator = self.bucket.list_blobs(delimiter='/', prefix='parent/')
        response = iterator.get_next_page_response()
        blobs = list(iterator.get_items_from_response(response))
        self.assertEqual([blob.name for blob in blobs], ['parent/file11.txt'])
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is None)
        self.assertEqual(iterator.prefixes, set(['parent/child/']))
    def test_second_level(self):
        iterator = self.bucket.list_blobs(delimiter='/',
                                          prefix='parent/child/')
        response = iterator.get_next_page_response()
        blobs = list(iterator.get_items_from_response(response))
        self.assertEqual([blob.name for blob in blobs],
                         ['parent/child/file21.txt',
                          'parent/child/file22.txt'])
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is None)
        self.assertEqual(iterator.prefixes,
                         set(['parent/child/grand/', 'parent/child/other/']))
    def test_third_level(self):
        # Pseudo-hierarchy can be arbitrarily deep, subject to the limit
        # of 1024 characters in the UTF-8 encoded name:
        # https://cloud.google.com/storage/docs/bucketnaming#objectnames
        # Exercise a layer deeper to illustrate this.
        iterator = self.bucket.list_blobs(delimiter='/',
                                          prefix='parent/child/grand/')
        response = iterator.get_next_page_response()
        blobs = list(iterator.get_items_from_response(response))
        self.assertEqual([blob.name for blob in blobs],
                         ['parent/child/grand/file31.txt'])
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is None)
        self.assertEqual(iterator.prefixes, set())
class TestStorageSignURLs(TestStorageFiles):
    """Signed-URL GET/DELETE tests using the raw httplib2 client."""
    def setUp(self):
        super(TestStorageSignURLs, self).setUp()
        logo_path = self.FILES['logo']['path']
        with open(logo_path, 'rb') as file_obj:
            self.LOCAL_FILE = file_obj.read()
        blob = storage.Blob(bucket=self.bucket, name='LogoToSign.jpg')
        blob.upload_from_string(self.LOCAL_FILE)
        self.case_blobs_to_delete.append(blob)
    def tearDown(self):
        # The DELETE test removes its own blob; only delete what remains.
        for blob in self.case_blobs_to_delete:
            if blob.exists():
                blob.delete()
    def test_create_signed_read_url(self):
        blob = storage.Blob(bucket=self.bucket, name='LogoToSign.jpg')
        # Expiration is an absolute POSIX timestamp.
        expiration = int(time.time() + 5)
        signed_url = blob.generate_signed_url(expiration, method='GET',
                                              client=CLIENT)
        response, content = HTTP.request(signed_url, method='GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, self.LOCAL_FILE)
    def test_create_signed_delete_url(self):
        blob = storage.Blob(bucket=self.bucket, name='LogoToSign.jpg')
        expiration = int(time.time() + 283473274)
        signed_delete_url = blob.generate_signed_url(expiration,
                                                     method='DELETE',
                                                     client=CLIENT)
        response, content = HTTP.request(signed_delete_url, method='DELETE')
        self.assertEqual(response.status, 204)
        self.assertEqual(content, b'')
        # Check that the blob has actually been deleted.
        self.assertFalse(blob.exists())
| GrimDerp/gcloud-python | system_tests/storage.py | Python | apache-2.0 | 12,846 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Include this to load these fixtures. """
def import_fixtures(self):
    """ Create simple fixture entries..."""
    # NOTE(review): the positional zadd(name, member, score) form matches
    # redis-py < 3.0; redis-py 3.x requires a {member: score} mapping --
    # confirm the pinned client version.
    self.redis.zadd('flaw_scope', '{ "name":"major" , "score":100 }', 100)
    self.redis.zadd('flaw_quality', '{ "name":"poor" , "score":100 }', 100)
    self.redis.lpush('flaw_allergy', 'horses')
    self.redis.lpush('flaw_enemytrait', 'vicious')
    self.redis.lpush('flaw_template', "You have {{params.scope['name']|article}} an allergy to {{params.allergy}}")
| CityGenerator/Megacosm-Generator | fixtures/flaw.py | Python | gpl-2.0 | 538 |
# Adapted from the book "The Blender Python API: Precision 3D Modeling and Add-on Development"
# by Chris Conlan
bl_info = {
    "name": "Add-on Template",
    "author": "Alex Martinelli",
    "location": "View3D > Tools > Simple Addon",
    "version": (1, 0, 0),
    # Minimum Blender version as (major, minor, release): Blender 2.78 is
    # spelled (2, 78, 0), not (2, 7, 8).
    "blender": (2, 78, 0),
    "description": "Template",
    "category": "Development"
}
import bpy
# Panel takes care of the UI components
class SimplePanel(bpy.types.Panel):
    """Tool-shelf panel exposing the template operator and an int property."""
    # Hierarchically define location of the add-on in the Blender UI
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_context = "objectmode"
    bl_category = "Test Add-On"
    bl_label = "Template"
    # define panel UI components
    def draw(self, context):
        # sample button
        self.layout.operator("object.simple_operator",
                             text="Template operator")
        # sample int value
        self.layout.prop(context.scene, 'my_int_prop')
    @classmethod
    def register(cls):
        print("Registered class: %s " % cls.bl_label)
        # Register properties related to the class here.
    @classmethod
    def unregister(cls):
        print("Unregistered class: %s " % cls.bl_label)
        # Delete parameters related to the class here
# Operator is the actual logic behind the add-of
class SimpleOperator(bpy.types.Operator):
    """Operator that adds a monkey mesh sized by the scene's my_int_prop."""
    bl_idname = "object.simple_operator"
    bl_label = "Template"
    def execute(self, context):
        # example of adding a monkey to the scene
        bpy.ops.mesh.primitive_monkey_add(
            radius=context.scene.my_int_prop,
            location=(0, 0, 0))
        # better to return this string when done with the execution work
        return {'FINISHED'}
    @classmethod
    def register(cls):
        print("Registered operator: %s " % cls.bl_label)
        # Register properties related to the class here
        bpy.types.Scene.my_int_prop = bpy.props.IntProperty(name="My Int",
                                    description="Sample integer property to print to user",
                                    default=123,
                                    min=100,
                                    max=200)
    @classmethod
    def unregister(cls):
        print("Unregistered operator: %s " % cls.bl_label)
        # Delete parameters related to the class here
def register():
    """Register the add-on's Panel and Operator classes with Blender."""
    # Implicitly register objects inheriting bpy.types in current file and scope
    #bpy.utils.register_module(__name__)
    # Or explicitly register objects
    bpy.utils.register_class(SimpleOperator)
    bpy.utils.register_class(SimplePanel)
    print("%s registration complete\n" % bl_info.get('name'))
def unregister():
    """Unregister the add-on's classes."""
    # Always unregister in reverse order to prevent error due to
    # interdependencies
    # NOTE(review): this is actually the SAME order as register(); true
    # reverse order would unregister SimplePanel first.
    # Explicitly unregister objects
    bpy.utils.unregister_class(SimpleOperator)
    bpy.utils.unregister_class(SimplePanel)
    # Or unregister objects inheriting bpy.types in current file and scope
    #bpy.utils.unregister_module(__name__)
    print("%s unregister complete\n" % bl_info.get('name'))
# Called only when running the script from Blender
# when distributed as plugin register() and unregister() are used
if __name__ == "__main__":
    try:
        unregister()
    except Exception as e:
        # Nothing registered yet on a first run; report and carry on.
        print(e)
        pass
    register()
| 5agado/data-science-learning | graphics/blender_addon_template.py | Python | apache-2.0 | 3,418 |
import logging
from django.db import models
from django.db import connection
import os
from django.conf import settings
logger = logging.getLogger(__name__)
class QuerySetManager(models.Manager):
    """
    Manager that proxies attribute access to the model's custom QuerySet,
    so queryset methods chain directly off the manager.

    @see http://djangosnippets.org/snippets/734/
    @see http://hunterford.me/django-custom-model-manager-chaining/
    """
    def get_queryset(self):
        # The model is expected to define a nested QuerySet class.
        return self.model.QuerySet(self.model)
    def __getattr__(self, name, *args):
        # Don't hijack private/dunder lookups (e.g. copy/pickle internals).
        if name.startswith('_'):
            raise AttributeError
        return getattr(self.get_queryset(), name, *args)
class LockingManager(models.Manager):
    """
    Add lock/unlock functionality to manager.

    Uses MySQL's LOCK TABLES / UNLOCK TABLES statements, so this manager
    is MySQL-specific; always pair lock() with unlock() in a finally block.

    Example::

        class Job(models.Model):
            manager = LockingManager()
            counter = models.IntegerField(null=True, default=0)
            @staticmethod
            def do_atomic_update(job_id)
                ''' Updates job integer, keeping it below 5 '''
                try:
                    # Ensure only one HTTP request can do this update at once.
                    Job.objects.lock()
                    job = Job.object.get(id=job_id)
                    # If we don't lock the tables two simultanous
                    # requests might both increase the counter
                    # going over 5
                    if job.counter < 5:
                        job.counter += 1
                        job.save()
                finally:
                    Job.objects.unlock()

    @see http://djangosnippets.org/snippets/833/
    """
    def lock(self, *args):
        """
        Lock table(s).

        Locks the object model table so that atomic update is possible.
        Simultaneous database access requests pend until the lock is unlock()'ed.

        See http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html

        @param *args: Models to be locked - if None then self.model is used.
        """
        if not args:
            args = [self.model]
        cursor = connection.cursor()
        # Quote each table name and request a WRITE lock on all of them.
        tables = ", ".join(['%s WRITE' % connection.ops.quote_name(model._meta.db_table) for model in args])
        logger.debug('LOCK TABLES %s' % tables)
        cursor.execute("LOCK TABLES %s" % tables)
        row = cursor.fetchone()
        return row
    def unlock(self):
        """
        Unlock the table(s)
        """
        cursor = connection.cursor()
        cursor.execute("UNLOCK TABLES")
        logger.debug('Unlocked tables')
        row = cursor.fetchone()
        return row
class LockingQuerySetManager(QuerySetManager, LockingManager):
    """Manager combining QuerySet chaining with table lock/unlock support."""
    pass
def upload_to(path, filename):
    """Return the relative upload path *path*/*filename*, first ensuring the
    target directory exists under MEDIA_ROOT.

    Suitable as a helper for Django ``upload_to`` callables.
    """
    _dir = os.path.join(settings.MEDIA_ROOT, path)
    try:
        # BUG FIX: previously this passed the *builtin* ``dir`` instead of
        # the computed ``_dir``, raising TypeError on every call.
        os.makedirs(_dir, settings.FILE_UPLOAD_PERMISSIONS)
    except OSError:
        # Someone beat us to the punch
        if not os.path.isdir(_dir):
            # Nope, must be something else...
            raise
    return os.path.join(path, filename)
| alexhayes/django-toolkit | django_toolkit/db/models.py | Python | mit | 2,944 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
from lib.workflow import Workflow3 as Workflow, ICON_WARNING, ICON_INFO, web, MATCH_ATOM, MATCH_SUBSTRING
def main(wf):
    """Alfred workflow entry point: search airports by IATA/ICAO/name/city.

    Modes (driven by the joined argv query):
      * '--update'  -- refresh the cached airport CSV from ourairports.com
      * otherwise   -- fuzzy-filter the cached airports and emit Alfred items.
    Returns 1 on short/empty queries, 0 after an update, None otherwise.
    """
    # Imports go here.
    import pycountry
    import unicodedata
    import unicodecsv as csv
    from dotmap import DotMap as Map
    from cStringIO import StringIO
    from workflow.background import run_in_background, is_running
    import re

    # Function to retrieve CSV from an URL and p
    def get_web_data():
        # Download and parse the full airport database; returns a list of
        # DotMap records sorted by (type, iata_code).
        r = web.get('http://ourairports.com/data/airports.csv')
        r.raise_for_status()
        # Read the whole CSV
        csvstring = StringIO(r.text)
        result = csv.DictReader(csvstring, encoding='utf-8', delimiter=',', quotechar='"')
        # Skip the header row
        headers = result.fieldnames
        airports = []
        # Go over each row and cache the airport data.
        for index, row in enumerate(result, start=1):
            iata_code = row['iata_code'].upper().strip()
            airport_type = row['type'].strip()
            airport_id = int(row['id'])
            # We're only interested in airports with IATA code.
            if not airport_id: continue
            if len(iata_code) == 0 or iata_code == '-': continue
            # Keep only real airports (drops heliports, closed fields, ...).
            if airport_type != 'large_airport' and airport_type != 'small_airport' and airport_type != 'medium_airport': continue
            # 'large_airport' -> 'large', etc.
            airport_type = airport_type.split("_")[0]
            airport_name = row['name'].strip()
            airport_coords = { 'lat': float(row['latitude_deg']), 'lon': float(row['longitude_deg']) }
            airport_city = row['municipality'].strip()
            airport_url = row['home_link'].strip()
            airport_wiki = row['wikipedia_link'].strip()
            airport_icao = row['ident'].upper().strip()
            # Add a '.' after single uppercase letters
            airport_name = re.sub( r"\b([A-Z])(?![\w\-\.])", r"\1.", airport_name)
            country_iso_code = row['iso_country'].strip().upper()[:2]
            try :
                # Resolve the ISO alpha-2 code to a country name.
                country = pycountry.countries.get(alpha_2=country_iso_code)
                country = country.name
            except (KeyError, AttributeError) as err:
                # Fall back to the raw ISO code on unknown countries.
                wf.logger.error("Error: {0} (Country: {1})".format(err, country_iso_code))
                country = country_iso_code
            airport_country = country
            # Build our airport object.
            airports.append( Map( id = airport_id, iata_code = iata_code, icao_code = airport_icao,
                                  type = airport_type, name = airport_name, coords = airport_coords,
                                  country = airport_country, city = airport_city, url = airport_url, wiki = airport_wiki ) )
        # Sort the list by airport_type. Snce it's only 'Large', 'Medium' and 'Small', they should be sorted correctly.
        airports = sorted(airports, key=lambda k: (k.type, k.iata_code))
        return airports

    # Build a search key given an airport object.
    def key_for_airports(airport):
        # Fields searched by wf.filter: code, name, ICAO, country, city.
        searchkey = u'{},{},{},{},{}'.format(airport.iata_code, airport.name, airport.icao_code, airport.country, airport.city)
        wf.logger.debug('Search key: ' + searchkey)
        return searchkey

    # ==========================================================================
    # ================================= MAIN ===================================
    # ==========================================================================
    wf.magic_prefix = 'wf:'
    # Get args from Workflow, already in normalized Unicode
    if not wf.args or not len(wf.args) >= 1 :
        wf.add_item('Searching...')
        wf.send_feedback()
        return 1
    airportquery = u' '.join(wf.args).strip().encode('utf-8')
    # If no query, return.
    # NOTE(review): '<= 2' rejects 2-character queries although the message
    # says "2 or more characters" -- confirm which is intended.
    if not airportquery or len(airportquery) <= 2 :
        wf.add_item('Please enter 2 or more characters!')
        wf.send_feedback()
        return 1
    # If '--update' is passed as parameter, update cached data.
    if airportquery == '--update':
        wf.logger.info('Updating airport data...')
        airportdata = get_web_data()
        wf.cache_data('airports', airportdata)
        wf.logger.info('Updated airport data in cache')
        return 0
    wf.logger.info('Searching airports for \'%s\'...' % airportquery)
    # Update workflow if a new version is available.
    if wf.update_available == True:
        wf.add_item('New version available', 'Press enter to install the update.',
                    autocomplete='workflow:update',
                    icon=ICON_INFO)
    # Is cache over 10 days old or non-existent?
    if not wf.cached_data_fresh('airports', max_age=60*60*24*10):
        wf.logger.info('Airport data is stale, updating in the background...')
        run_in_background('update', ['/usr/bin/env', 'python', wf.workflowfile('airports.py'), '--update'])
    if is_running('update'):
        wf.logger.debug('Waiting for update to finish.')
        # Rerun every 1/2 second until the `update` job is complete
        wf.rerun = 0.5
        # Add a notification if the script is running
        wf.add_item('Updating airport data...', valid=False, icon=ICON_INFO)
    # Get cached airports object, max_age=0 will load any cached data regardless of age
    all_airports = wf.cached_data('airports', None, max_age=0)
    if not all_airports or len(all_airports) == 0:
        wf.send_feedback()
        return
    # Find all airports that match the filter query.
    filtered_airports = wf.filter(airportquery, all_airports, key_for_airports, min_score=70, match_on=MATCH_ATOM | MATCH_SUBSTRING, include_score=True)
    if not filtered_airports or len(filtered_airports) == 0:
        wf.add_item('No airport found that matches your query of \'%s\'!' % airportquery, icon=ICON_WARNING)
        wf.send_feedback()
        return
    # Build response
    for airport, score, wffilter in filtered_airports:
        title = '(%s) %s' % (airport.iata_code, airport.name)
        # NOTE(review): the conditional binds to the *whole* concatenation,
        # so when city is '' the ICAO prefix is dropped entirely -- confirm
        # this is the intended layout.
        subtitle = airport.icao_code + ' ' + ('(%s, %s)' % (airport.city, airport.country)) if airport.city != '' else airport.country
        item = wf.add_item( title, subtitle,
                            autocomplete=airport.iata_code,
                            icon='icon.png',
                            valid=True,
                            copytext=airport.name,
                            largetext='(%s) %s' % (airport.iata_code, airport.name),
                            uid='alfred-airport-codes-%s' % airport.id
                            )
        item.setvar('WF_URL', '')
        # CMD modifier for Maps.app
        mod = item.add_modifier('cmd', 'Show the airport in Maps.app')
        geo_url = 'http://maps.apple.com/?q=%f,%f' % ( airport.coords['lat'], airport.coords['lon'] );
        mod.setvar('WF_URL', geo_url )
        item.arg = geo_url
        # CTRL modifier for Wikipedia page.
        if airport.wiki :
            mod = item.add_modifier('ctrl', 'Open the airports Wikipedia page (%s)' % airport.wiki)
            mod.setvar('WF_URL', airport.wiki)
            # Overwrite main action.
            item.arg = airport.wiki
        else :
            mod = item.add_modifier('ctrl', 'The %s airport has no Wikipedia entry.' % airport.iata_code )
        # ALT modifier for URL.
        if airport.url :
            mod = item.add_modifier('alt', 'Open the airports website (%s)' % airport.url)
            mod.setvar('WF_URL', airport.url)
            # Overwrite main action.
            item.arg = airport.url
        else :
            mod = item.add_modifier('alt', 'The %s airport has no website.' % airport.iata_code )
    # Send output to Alfred.
    wf.send_feedback()
if __name__ == u'__main__':
    # Create a global `Workflow` object
    # (self-update checked against the GitHub releases of this repo).
    wf = Workflow(libraries=['./lib'], update_settings={
        'github_slug': 'otherguy/alfred-airports-workflow',
        'frequency': 1
    })
    # Call your entry function via `Workflow.run()` to enable its helper
    # functions, like exception catching, ARGV normalization, magic
    # arguments etc.
    sys.exit(wf.run(main))
| darkwinternight/alfred-airports-workflow | airports.py | Python | mit | 8,021 |
#!/usr/bin/python
"""
AirRace competition in Vienna. See robotchallenge.org
usage:
./airrace.py <TODO>
"""
import sys
import datetime
import multiprocessing
import cv2
import math
from pave import PaVE, isIFrame, frameNumber, timestamp, correctTimePeriod
from airrace import processFrame, allStripPoses
from sourcelogger import SourceLogger
from ardrone2 import ARDrone2, ManualControlException, manualControl, normalizeAnglePIPI, distance
import viewlog
from line import Line
from pose import Pose
from striploc import StripsLocalisation, REF_CIRCLE_RADIUS
from striploc import PATH_STRAIGHT, PATH_TURN_LEFT, PATH_TURN_RIGHT
# Controller tuning constants used by competeAirRace() below.
MAX_ALLOWED_SPEED = 0.8
STRIPS_FAILURE_SPEED = 0.5 # ??? speed when localisation is not updated from last image ... maybe two images??
TRANSITION_SPEED = 0.4
NUM_FAST_STRIPS = 4 # now the same number for straight and turn segments -> TODO split
MAX_ALLOWED_VIDEO_DELAY = 2.0 # in seconds, then it will wait (desiredSpeed = 0.0)
def timeName( prefix, ext ):
    """Return prefix + current local timestamp ('yymmdd_HHMMSS.') + ext."""
    stamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S.")
    return prefix + stamp + ext
g_pave = None
def wrapper( packet ):
    """Video-stream callback: feed one PaVE packet, decode the next I-frame.

    Returns ((frameNumber, timestamp), processFrame(frame)) for the first
    I-frame extracted from the buffered stream, or None when no complete
    I-frame is available yet.
    """
    global g_pave
    if g_pave == None:
        g_pave = PaVE()  # parser persists across calls via the module global
    g_pave.append( packet )
    header,payload = g_pave.extract()
    while payload:
        if isIFrame( header ):
            # Decode via a temp file because cv2.VideoCapture takes a filename.
            tmpFile = open( "tmp.bin", "wb" )
            tmpFile.write( payload )
            tmpFile.flush()
            tmpFile.close()
            cap = cv2.VideoCapture( "tmp.bin" )
            ret, frame = cap.read()
            assert ret
            if ret:
                return (frameNumber( header ), timestamp(header)), processFrame( frame, debug=False )
        header,payload = g_pave.extract()
g_queueResults = multiprocessing.Queue()
def getOrNone():
    """Non-blocking read of the shared results queue; None when empty."""
    return None if g_queueResults.empty() else g_queueResults.get()
class AirRaceDrone( ARDrone2 ):
    """ARDrone2 with bottom-camera video processing wired to processFrame.

    Adds `lastImageResult`, refreshed on every update() from the logged
    video-processing queue (live run) or from the replay log.
    """

    def __init__( self, replayLog=None, speed = 0.2, skipConfigure=False, metaLog=None, console=None ):
        # Attributes must exist before ARDrone2.__init__ runs.
        self.loggedVideoResult = None
        self.lastImageResult = None
        self.videoHighResolution = False
        ARDrone2.__init__( self, replayLog, speed, skipConfigure, metaLog, console )
        if replayLog == None:
            # Live run: log cv2 results to a timestamped source log and
            # start the video pipeline with the frame-processing wrapper.
            name = timeName( "logs/src_cv2_", "log" )
            metaLog.write("cv2: "+name+'\n' )
            self.loggedVideoResult = SourceLogger( getOrNone, name ).get
            self.startVideo( wrapper, g_queueResults, record=True, highResolution=self.videoHighResolution )
        else:
            # Replay: read previously logged cv2 results from the meta log.
            assert metaLog
            self.loggedVideoResult = SourceLogger( None, metaLog.getLog("cv2:") ).get
            self.startVideo( record=True, highResolution=self.videoHighResolution )

    def update( self, cmd="AT*COMWDG=%i,\r" ):
        # Regular drone update plus a refresh of the latest vision result.
        ARDrone2.update( self, cmd )
        if self.loggedVideoResult != None:
            self.lastImageResult = self.loggedVideoResult()
def competeAirRace( drone, desiredHeight = 1.7 ):
    """Fly the AirRace task: follow the strip track using vision localisation.

    Takes off, holds `desiredHeight`, follows circles/lines suggested by
    StripsLocalisation, counts completed loops, and lands after 600 s or on
    manual override. Speed is modulated by localisation health and video lag.
    """
    loops = []                      # timestamps of completed loops
    drone.speed = 0.1
    maxVideoDelay = 0.0
    maxControlGap = 0.0
    loc = StripsLocalisation()
    remainingFastStrips = 0         # strips left to fly at full speed
    desiredSpeed = TRANSITION_SPEED
    updateFailedCount = 0
    try:
        drone.wait(1.0)
        drone.setVideoChannel( front=False )  # use the bottom camera
        drone.takeoff()
        poseHistory = []
        startTime = drone.time
        while drone.time < startTime + 1.0:
            drone.update("AT*PCMD=%i,0,0,0,0,0\r") # drone.hover(1.0)
            poseHistory.append( (drone.time, (drone.coord[0], drone.coord[1], drone.heading), (drone.angleFB, drone.angleLR)) )
        magnetoOnStart = drone.magneto[:3]
        print "NAVI-ON"
        pathType = PATH_TURN_LEFT
        virtualRefCircle = None
        startTime = drone.time
        sx,sy,sz,sa = 0,0,0,0
        lastUpdate = None
        while drone.time < startTime + 600.0:
            # P-control of height and forward speed toward the targets.
            sz = max( -0.2, min( 0.2, desiredHeight - drone.coord[2] ))
            sx = max( 0, min( drone.speed, desiredSpeed - drone.vx ))
            if drone.lastImageResult:
                lastUpdate = drone.time
                assert len( drone.lastImageResult ) == 2 and len( drone.lastImageResult[0] ) == 2, drone.lastImageResult
                (frameNumber, timestamp), rects = drone.lastImageResult
                viewlog.dumpCamera( "tmp_%04d.jpg" % (frameNumber/15,), 0 )
                # keep history small
                videoTime = correctTimePeriod( timestamp/1000., ref=drone.time )
                videoDelay = drone.time - videoTime
                if videoDelay > 1.0:
                    print "!DANGER! - video delay", videoDelay
                maxVideoDelay = max( videoDelay, maxVideoDelay )
                # Drop pose history up to the pose matching the video frame.
                toDel = 0
                for oldTime, oldPose, oldAngles in poseHistory:
                    toDel += 1
                    if oldTime >= videoTime:
                        break
                poseHistory = poseHistory[:toDel]
                tiltCompensation = Pose(desiredHeight*oldAngles[0], desiredHeight*oldAngles[1], 0) # TODO real height?
                print "FRAME", frameNumber/15, "[%.1f %.1f]" % (math.degrees(oldAngles[0]), math.degrees(oldAngles[1])),
                loc.updateFrame( Pose( *oldPose ).add(tiltCompensation), allStripPoses( rects, highResolution=drone.videoHighResolution ) )
                if loc.pathType != pathType:
                    print "TRANS", pathType, "->", loc.pathType
                    if pathType == PATH_TURN_LEFT and loc.pathType == PATH_STRAIGHT:
                        # Left-turn -> straight transition marks a lap boundary.
                        if len(loops) > 0:
                            print "Loop %d, time %d" % (len(loops), drone.time-loops[-1])
                        print "-----------------------------------------------"
                        loops.append( drone.time )
                        if drone.magneto[:3] == magnetoOnStart:
                            print "!!!!!!!! COMPASS FAILURE !!!!!!!!"
                    pathType = loc.pathType
                    print "----"
                    remainingFastStrips = NUM_FAST_STRIPS
                if loc.crossing:
                    print "X", True, remainingFastStrips
                else:
                    print pathType, loc.pathUpdated, remainingFastStrips
                    if not loc.pathUpdated:
                        updateFailedCount += 1
                        if updateFailedCount > 1:
                            print "UPDATE FAILED", updateFailedCount
                    else:
                        updateFailedCount = 0
                # Speed policy: fast while confident, slow on failures/transitions.
                if remainingFastStrips > 0:
                    remainingFastStrips -= 1
                    desiredSpeed = MAX_ALLOWED_SPEED
                    if not loc.pathUpdated and not loc.crossing:
                        desiredSpeed = STRIPS_FAILURE_SPEED
                else:
                    desiredSpeed = TRANSITION_SPEED
                if videoDelay > MAX_ALLOWED_VIDEO_DELAY:
                    desiredSpeed = 0.0
                if drone.battery < 10:
                    print "BATTERY LOW!", drone.battery
                # height debugging
                #print "HEIGHT\t%d\t%d\t%.2f\t%d\t%d\t%d\t%d\t%d\t%d" % tuple([max([0]+[w for ((x,y),(w,h),a) in rects])] + list(drone.altitudeData[:4]) + list(drone.pressure) )
                for sp in allStripPoses( rects, highResolution=drone.videoHighResolution ):
                    sPose = Pose( *oldPose ).add(tiltCompensation).add( sp )
                    viewlog.dumpBeacon( sPose.coord(), index=3 )
                    viewlog.dumpObstacles( [[(sPose.x-0.15*math.cos(sPose.heading), sPose.y-0.15*math.sin(sPose.heading)),
                                             (sPose.x+0.15*math.cos(sPose.heading), sPose.y+0.15*math.sin(sPose.heading))]] )
            refCircle,refLine = loc.getRefCircleLine( Pose(drone.coord[0], drone.coord[1], drone.heading) )
            if refCircle == None and refLine == None and virtualRefCircle != None:
                refCircle = virtualRefCircle
            # error definition ... if you substract that you get desired position or angle
            # error is taken from the path point of view, x-path direction, y-positive left, angle-anticlockwise
            errY, errA = 0.0, 0.0
            assert refCircle == None or refLine == None # they cannot be both active at the same time
            if refCircle:
                if pathType == PATH_TURN_LEFT:
                    errY = refCircle[1] - math.hypot( drone.coord[0]-refCircle[0][0], drone.coord[1]-refCircle[0][1] )
                    errA = normalizeAnglePIPI( - math.atan2( refCircle[0][1] - drone.coord[1], refCircle[0][0] - drone.coord[0] )
                                               - math.radians(-90) + drone.heading )
                if pathType == PATH_TURN_RIGHT:
                    errY = math.hypot( drone.coord[0]-refCircle[0][0], drone.coord[1]-refCircle[0][1] ) - refCircle[1]
                    errA = normalizeAnglePIPI( - math.atan2( refCircle[0][1] - drone.coord[1], refCircle[0][0] - drone.coord[0] )
                                               - math.radians(90) + drone.heading )
            if refLine:
                errY = refLine.signedDistance( drone.coord )
                errA = normalizeAnglePIPI( drone.heading - refLine.angle )
            # get the height first
            if drone.coord[2] < desiredHeight - 0.1 and drone.time-startTime < 5.0:
                sx = 0.0
            if refCircle == None and refLine == None and virtualRefCircle == None:
                sx = 0.0 # wait for Z-up
                if drone.coord[2] > desiredHeight - 0.1:
                    print "USING VIRTUAL LEFT TURN CIRCLE!"
                    circCenter = Pose( drone.coord[0], drone.coord[1], drone.heading ).add( Pose(0.0, REF_CIRCLE_RADIUS, 0 )).coord()
                    viewlog.dumpBeacon( circCenter, index=0 )
                    virtualRefCircle = circCenter, REF_CIRCLE_RADIUS
            # error correction
            # the goal is to have errY and errA zero in 1 second -> errY defines desired speed at given distance from path
            sy = max( -0.2, min( 0.2, -errY-drone.vy ))/2.0
            # there is no drone.va (i.e. derivative of heading) available at the moment ...
            sa = max( -0.1, min( 0.1, -errA/2.0 ))*1.35*(desiredSpeed/0.4) # originally set for 0.4=OK
            # print "%0.2f\t%d\t%0.2f\t%0.2f\t%0.2f" % (errY, int(math.degrees(errA)), drone.vy, sy, sa)
            prevTime = drone.time
            drone.moveXYZA( sx, sy, sz, sa )
            maxControlGap = max( drone.time - prevTime, maxControlGap )
            poseHistory.append( (drone.time, (drone.coord[0], drone.coord[1], drone.heading), (drone.angleFB, drone.angleLR)) )
        print "NAVI-OFF", drone.time - startTime
        drone.hover(0.5)
        drone.land()
        drone.setVideoChannel( front=True )
    except ManualControlException, e:
        print "ManualControlException"
        if drone.ctrlState == 3: # CTRL_FLYING=3 ... i.e. stop the current motion
            drone.hover(0.1)
        drone.land()
    drone.wait(1.0)
    drone.stopVideo()
    print "MaxVideoDelay", maxVideoDelay
    print "MaxControlGap", maxControlGap
    print "Loops", len(loops)-1, [int(now-prev) for prev,now in zip(loops[:-1],loops[1:])]
    print "Battery", drone.battery
if __name__ == "__main__":
    import launcher
    # Hand over to the shared launcher: argv, drone class, competition task.
    launcher.launch( sys.argv, AirRaceDrone, competeAirRace )
| robotika/heidi | airrace_drone.py | Python | mit | 10,239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
# Last modified Wed Jul 8 22:50:28 2009 on violator
# update count: 133
#
# subdms - A document management system based on subversion.
# Copyright (C) 2009 Albert Thuswaldner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subdms import database
db = database.sqlitedb()

# Dump each table of the subdms database to stdout, one section per table.
for heading, fetch in (
        ("Document list:", db.getalldocs),
        ("Template list:", db.getalltmpls),
        ("Project list:", db.dumpallprojs),
        ("Revision list:", db.getallrev),
):
    print(heading)
    for record in fetch():
        print(record)
| thuswa/subdms | tools/dbdump.py | Python | gpl-3.0 | 1,211 |
from django.conf.urls import url
from . import views
# URL routes for the polls app.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^form$', views.form_test),
    # BUG FIX: anchored with '^' like the patterns above; without it these
    # regexes could also match as a suffix inside longer, unrelated paths.
    url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
    url(r'^(?P<pk>\d+)/results$', views.results, name='results'),
]
| usa-mimi/tutorial | tutorial/polls/urls.py | Python | mit | 275 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.account.backend import AccountBroker, DATADIR
from swift.common import db_replicator
class AccountReplicator(db_replicator.Replicator):
    # Concrete db_replicator.Replicator for account databases: the generic
    # replicator is parameterized purely by these class attributes.
    server_type = 'account'        # identifies the server type to the base class
    brokerclass = AccountBroker    # broker used to open each account DB
    datadir = DATADIR              # on-disk subdirectory holding account DBs
    default_port = 6002            # default account-server port
| mjzmjz/swift | swift/account/replicator.py | Python | apache-2.0 | 850 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import re
from twisted.cred.checkers import FilePasswordDB
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm
from twisted.cred.portal import Portal
from twisted.internet import defer
from twisted.web.error import Error
from twisted.web.guard import BasicCredentialFactory
from twisted.web.guard import DigestCredentialFactory
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import IResource
from zope.interface import implementer
from buildbot.util import bytes2NativeString
from buildbot.util import config
from buildbot.www import resource
class AuthRootResource(resource.Resource):
    """Root of /auth: serves the dynamically generated login/logout pages."""

    def getChild(self, path, request):
        # return dynamically generated resources
        auth = self.master.www.auth
        if path == b'login':
            return auth.getLoginResource()
        if path == b'logout':
            return auth.getLogoutResource()
        return resource.Resource.getChild(self, path, request)
class AuthBase(config.ConfiguredMixin):
    """Base class for www authentication schemes.

    Subclasses implement getLoginResource() (and optionally
    maybeAutoLogin()); this base wires in the user-info provider and the
    shared logout resource.
    """

    def __init__(self, userInfoProvider=None):
        # Default provider simply reports the username as the email.
        if userInfoProvider is None:
            userInfoProvider = UserInfoProviderBase()
        self.userInfoProvider = userInfoProvider

    def reconfigAuth(self, master, new_config):
        # Called on (re)configuration; remember the master for later use.
        self.master = master

    def maybeAutoLogin(self, request):
        # Hook for schemes that can authenticate from the request alone
        # (e.g. RemoteUserAuth); the default does nothing.
        return defer.succeed(None)

    def getLoginResource(self):
        # Subclasses must provide the resource handling /auth/login.
        raise Error(501, "not implemented")

    def getLogoutResource(self):
        return LogoutResource(self.master)

    @defer.inlineCallbacks
    def updateUserInfo(self, request):
        # Refresh session.user_info from the provider and persist the session.
        session = request.getSession()
        if self.userInfoProvider is not None:
            infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])
            session.user_info.update(infos)
            session.updateSession(request)

    def getConfigDict(self):
        # Minimal config representation: just the scheme's class name.
        return {'name': type(self).__name__}
class UserInfoProviderBase(config.ConfiguredMixin):
    """Default user-info provider: reports the username as the email."""
    name = "noinfo"

    def getUserInfo(self, username):
        # Returns a Deferred, like real providers do.
        return defer.succeed({'email': username})
class LoginResource(resource.Resource):
    """Resource handling GET /auth/login; subclasses do the actual login."""

    def render_GET(self, request):
        return self.asyncRenderHelper(request, self.renderLogin)

    @defer.inlineCallbacks
    def renderLogin(self, request):
        # Abstract: concrete subclasses perform the scheme-specific login.
        raise NotImplementedError
class NoAuth(AuthBase):
    # Authentication disabled: inherits all default behaviour from AuthBase.
    pass
class RemoteUserAuth(AuthBase):
    """Trust authentication already performed by a reverse proxy.

    The frontend is expected to set an HTTP header (default REMOTE_USER)
    matching `headerRegex`; its named groups (username, realm) become the
    session's user_info.
    """
    # Header name set by the frontend proxy.
    header = "REMOTE_USER"
    # Expected "user@realm" shape of the header value.
    headerRegex = re.compile(r"(?P<username>[^ @]+)@(?P<realm>[^ @]+)")

    def __init__(self, header=None, headerRegex=None, **kwargs):
        AuthBase.__init__(self, **kwargs)
        if header is not None:
            self.header = header
        if headerRegex is not None:
            self.headerRegex = re.compile(headerRegex)

    @defer.inlineCallbacks
    def maybeAutoLogin(self, request):
        header = request.getHeader(self.header)
        if header is None:
            raise Error(403, "missing http header %s. Check your reverse proxy config!" % (
                self.header))
        res = self.headerRegex.match(header)
        if res is None:
            raise Error(
                403, 'http header does not match regex! "%s" not matching %s' %
                (header, self.headerRegex.pattern))
        session = request.getSession()
        # Only (re)populate the session when the identity actually changed.
        if session.user_info != dict(res.groupdict()):
            session.user_info = dict(res.groupdict())
            yield self.updateUserInfo(request)
@implementer(IRealm)
class AuthRealm(object):
    """twisted.cred realm: maps an authenticated avatarId to the resource
    that finalizes the session login."""

    def __init__(self, master, auth):
        self.auth = auth
        self.master = master

    def requestAvatar(self, avatarId, mind, *interfaces):
        if IResource in interfaces:
            return (IResource,
                    PreAuthenticatedLoginResource(self.master, avatarId),
                    lambda: None)  # no per-avatar logout action needed
        raise NotImplementedError()
class TwistedICredAuthBase(AuthBase):
    """Auth scheme backed by twisted.cred credential factories and checkers."""

    def __init__(self, credentialFactories, checkers, **kwargs):
        AuthBase.__init__(self, **kwargs)
        self.credentialFactories = credentialFactories
        self.checkers = checkers

    def getLoginResource(self):
        # Guard the login resource with HTTP auth (basic/digest challenges).
        return HTTPAuthSessionWrapper(
            Portal(AuthRealm(self.master, self), self.checkers),
            self.credentialFactories)
class HTPasswdAuth(TwistedICredAuthBase):
    """HTTP basic/digest auth checked against an htpasswd-style file."""

    def __init__(self, passwdFile, **kwargs):
        TwistedICredAuthBase.__init__(
            self,
            [DigestCredentialFactory(b"md5", b"buildbot"),
             BasicCredentialFactory(b"buildbot")],
            [FilePasswordDB(passwdFile)],
            **kwargs)
class UserPasswordAuth(TwistedICredAuthBase):
    """HTTP basic/digest auth against an in-memory user/password mapping.

    `users` is a dict or list of (username, password) pairs from the
    master configuration.
    """

    def __init__(self, users, **kwargs):
        TwistedICredAuthBase.__init__(
            self,
            [DigestCredentialFactory(b"md5", b"buildbot"),
             BasicCredentialFactory(b"buildbot")],
            [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],
            **kwargs)
def _redirect(master, request):
    # Redirect back to the in-app page named by ?redirect=..., default "/".
    # NOTE(review): request.args keys/values are bytes under py3 twisted --
    # confirm the str "redirect" lookup still works there.
    url = request.args.get("redirect", ["/"])[0]
    return resource.Redirect(master.config.buildbotURL + "#" + url)
class PreAuthenticatedLoginResource(LoginResource):
    # a LoginResource which is already authenticated via a
    # HTTPAuthSessionWrapper
    def __init__(self, master, username):
        LoginResource.__init__(self, master)
        self.username = username

    @defer.inlineCallbacks
    def renderLogin(self, request):
        # Record the verified username in the session, enrich it with
        # provider info, then bounce the browser back to the app.
        session = request.getSession()
        session.user_info = dict(username=bytes2NativeString(self.username))
        yield self.master.www.auth.updateUserInfo(request)
        raise _redirect(self.master, request)
class LogoutResource(resource.Resource):
    """Resource handling /auth/logout: expires the session and redirects."""

    def render_GET(self, request):
        session = request.getSession()
        session.expire()
        session.updateSession(request)
        # Issue the HTTP redirect ourselves; body stays empty.
        request.redirect(_redirect(self.master, request).url)
        return b''
| Lekensteyn/buildbot | master/buildbot/www/auth.py | Python | gpl-2.0 | 6,728 |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
    # ar, ma: lag-polynomial coefficients (1d); n: default fft length (nobs).
    #duplicates now that are subclassing ArmaProcess
    super(ArmaFft, self).__init__(ar, ma)
    self.ar = np.asarray(ar)
    self.ma = np.asarray(ma)
    self.nobs = n
    #could make the polynomials into cached attributes
    self.arpoly = np.polynomial.Polynomial(ar)
    self.mapoly = np.polynomial.Polynomial(ma)
    self.nar = len(ar)  #1d only currently
    self.nma = len(ma)
    # Roots only exist for polynomials of degree >= 1; empty arrays otherwise.
    if self.nar > 1:
        self.arroots = self.arpoly.roots()
    else:
        self.arroots = np.array([])
    if self.nma > 1:
        self.maroots = self.mapoly.roots()
    else:
        self.maroots = np.array([])
def padarr(self, arr, maxlag, atend=True):
    '''pad a 1d array with zeros so its length becomes maxlag

    Helper (does not use self) for extending lag-polynomial coefficient
    arrays; always returns a new array.

    Parameters
    ----------
    arr : array_like, 1d
        array to be padded with zeros
    maxlag : int
        length of the padded result
    atend : boolean
        If True (default), zeros are appended after the data, otherwise
        they are prepended.

    Returns
    -------
    arrp : ndarray
        zero-padded copy of arr
    '''
    filler = np.zeros(maxlag - len(arr))
    pieces = (arr, filler) if atend else (filler, arr)
    return np.concatenate(pieces)
def pad(self, maxlag):
    '''zero-pad the AR and MA polynomials to a common length

    Parameters
    ----------
    maxlag : int
        new length of both lag-polynomials

    Returns
    -------
    ar : ndarray
        zero-extended AR polynomial coefficients
    ma : ndarray
        zero-extended MA polynomial coefficients
    '''
    return (self.padarr(self.ar, maxlag, atend=True),
            self.padarr(self.ma, maxlag, atend=True))
def fftar(self, n=None):
    '''Fourier transform of the AR polynomial, zero-padded at the end to n

    Parameters
    ----------
    n : int or None
        fft length; defaults to the length of the AR polynomial

    Returns
    -------
    fftar : ndarray
        fft of the zero-padded AR polynomial
    '''
    size = len(self.ar) if n is None else n
    return fft.fft(self.padarr(self.ar, size))
def fftma(self, n=None):
    '''Fourier transform of the MA polynomial, zero-padded at the end to n

    Parameters
    ----------
    n : int or None
        fft length; defaults to the length of the MA polynomial.
        (Given a default for symmetry with fftar; callers passing n
        explicitly are unaffected.)

    Returns
    -------
    fftma : ndarray
        fft of the zero-padded MA polynomial
    '''
    if n is None:
        # BUG FIX: the original fell back to len(self.ar) -- a copy-paste
        # from fftar; the MA polynomial's own length is the intended default.
        n = len(self.ma)
    return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty  # not while still debugging things
def fftarma(self, n=None):
    '''Fourier transform of the ARMA polynomial, zero-padded at the end to n

    Computed as the ratio fft(ma) / fft(ar), i.e. the frequency response
    of the ARMA filter.

    Parameters
    ----------
    n : int or None
        fft length; defaults to self.nobs

    Returns
    -------
    fftarma : ndarray
        fft of the zero-padded ARMA polynomial
    '''
    size = self.nobs if n is None else n
    return self.fftma(size) / self.fftar(size)
def spd(self, npos):
    '''raw spectral density evaluated on a 2*npos fft frequency grid

    npos is the number of points in the positive spectrum; the returned
    arrays have twice that length. Differs from the other spd methods in
    how the grid is constructed.
    '''
    freqs = fft.fftfreq(2 * npos) * 2 * np.pi
    hw = self.fftarma(2 * npos)  # normalization not verified (see class notes)
    power = (hw * hw.conj()).real * 0.5 / np.pi
    return power, freqs
def spdshift(self, n):
    '''power spectral density using fftshift-ed, zero-padded polynomials

    currently returns the two-sided spectrum on the fft frequency grid;
    use the first half for the one-sided density
    '''
    numerator = fft.fft(fft.fftshift(self.padarr(self.ma, n)))
    denominator = fft.fft(fft.fftshift(self.padarr(self.ar, n)))
    hw = numerator / denominator
    freqs = fft.fftfreq(n) * 2 * np.pi
    return (hw * hw.conj()).real, freqs
def spddirect(self, n):
    '''power spectral density from a direct fft of the polynomials padded to n

    currently returns the two-sided spectrum on the fft frequency grid;
    use the first half for the one-sided density
    '''
    hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
    freqs = fft.fftfreq(n) * 2 * np.pi
    return np.abs(hw) ** 2 * 0.5 / np.pi, freqs
def _spddirect2(self, n):
    '''experimental two-sided variant using symmetrized polynomials;
    author-flagged as "this looks bad, maybe with an fftshift" --
    kept for reference, do not rely on it
    '''
    #size = s1+s2-1
    hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
          / fft.fft(np.r_[self.ar[::-1],self.ar], n))
    return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
    '''spectral density at frequencies w computed from polynomial roots

    Delegates to spdroots_ with this process' AR and MA roots.
    '''
    return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
    '''spectral density for frequency using polynomial roots

    builds two arrays (number of roots, number of frequencies)

    Parameters
    ----------
    arroots : ndarray
        roots of ar (denominator) lag-polynomial
    maroots : ndarray
        roots of ma (numerator) lag-polynomial
    w : array_like
        frequencies for which spd is calculated

    Notes
    -----
    this should go into a function
    '''
    w = np.atleast_2d(w).T
    cosw = np.cos(w)
    #Greene 5th edt. p626, section 20.2.7.a.
    # Work with inverse roots, as in the textbook formula.
    maroots = 1./maroots
    arroots = 1./arroots
    num = 1 + maroots**2 - 2* maroots * cosw
    den = 1 + arroots**2 - 2* arroots * cosw
    #print 'num.shape, den.shape', num.shape, den.shape
    # Product over the roots axis; broadcasting pairs every root with every w.
    hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
    return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
    '''spectral density from a truncated MA(nma) representation of the process

    References
    ----------
    Cochrane, section 8.3.3
    '''
    ma_rep = np.polynomial.Polynomial(self.arma2ma(nma))
    response = ma_rep(np.exp(1j * w))
    density = np.real_if_close(response * response.conj() * 0.5 / np.pi)
    return density, w
def filter(self, x):
    '''
    filter a timeseries with the ARMA filter

    padding with zero is missing, in example I needed the padding to get
    initial conditions identical to direct filter

    Initial filtered observations differ from filter2 and signal.lfilter, but
    at end they are the same.

    See Also
    --------
    tsa.filters.fftconvolve

    '''
    n = x.shape[0]
    # NOTE(review): self.fftarma is a *method* here (the OneTimeProperty
    # decorator above it is commented out), so this int-vs-method comparison
    # is always False and the cached branch is never taken -- confirm intent.
    if n == self.fftarma:
        fftarma = self.fftarma
    else:
        fftarma = self.fftma(n) / self.fftar(n)
    # Multiply in the frequency domain, then transform back.
    tmpfft = fftarma * fft.fft(x)
    return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
    '''filter a time series using fftconvolve3 with ARMA filter

    padding of x currently works only if x is 1d; in example it produces
    same observations at beginning as lfilter even without padding.

    TODO: this returns 1 additional observation at the end
    '''
    from statsmodels.tsa.filters import fftconvolve3
    if pad == 'auto':
        #just guessing how much padding
        target_len = x.shape[0] + 2 * (self.nma + self.nar)
        x = self.padarr(x, target_len, atend=False)
    elif pad:
        x = self.padarr(x, x.shape[0] + int(pad), atend=False)
    return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
    '''spectral density from an autocovariance function by direct summation

    not really a method -- does not use any instance state; kept just for
    comparison, not efficient for large n or long acf.
    this is also similarly use in tsa.stattools.periodogram with window

    Parameters
    ----------
    acovf : array_like
        autocovariances starting at lag 0
    nfreq : int
        number of frequencies on [0, pi] when `w` is not given
    w : ndarray, optional
        frequencies; assumes a column vector (n, 1) so it broadcasts
        against the lag index -- TODO confirm for user-supplied input
    '''
    if w is None:
        w = np.linspace(0, np.pi, nfreq)[:, None]
    lags = np.arange(1, len(acovf))
    cosine_terms = (acovf[1:] * np.cos(w * lags)).sum(1)
    return 0.5 / np.pi * (acovf[0] + 2 * cosine_terms)
def invpowerspd(self, n):
    '''autocovariance recovered from the spectral density via inverse FFT

    scaling is correct, but n needs to be large for numerical accuracy;
    maybe padding with zero in fft would be faster.
    Without the final slice this returns the 2-sided autocovariance with
    fftshift ordering.

    >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
    array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
            0.045   ,  0.0225  ,  0.01125 ,  0.005625])
    >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
    array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
            0.045   ,  0.0225  ,  0.01125 ,  0.005625])
    '''
    hw = self.fftarma(n)
    power = hw * hw.conj()  # |H(w)|^2
    acovf_twosided = fft.ifft(power)
    return np.real_if_close(acovf_twosided, tol=200)[:n]
def spdmapoly(self, w, twosided=False):
    '''spectral density of the MA part only, evaluated with LagPolynomial

    ma only, need division for ar, use LagPolynomial

    Parameters
    ----------
    w : array_like or None
        frequencies; if None, 100 equally spaced points on [0, pi] are used
    twosided : bool
        currently unused; kept for interface compatibility

    Returns
    -------
    ndarray
        0.5/pi * mapoly(exp(1j*w))
    '''
    if w is None:
        # BUG FIX: previously referenced an undefined name ``nfreq``,
        # raising NameError whenever w was None; default to 100 points.
        w = np.linspace(0, np.pi, 100)
    return 0.5 / np.pi * self.mapoly(np.exp(w * 1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
    '''four-panel summary plot: sample path, ACF, spectral density, PACF

    Parameters
    ----------
    fig : matplotlib figure, optional
        figure to draw into; a new one is created if None
    nobs : int
        number of observations of the simulated sample path
    nacf : int
        number of (partial) autocorrelation lags
    nfreq : int
        number of frequencies for the spectral density

    Returns
    -------
    fig : matplotlib figure
    '''
    # BUG FIX: the sample size was hard-coded to 100, silently ignoring
    # the ``nobs`` parameter.
    rvs = self.generate_sample(size=nobs, burnin=500)
    acf = self.acf(nacf)[:nacf]  # TODO: check return length
    pacf = self.pacf(nacf)
    w = np.linspace(0, np.pi, nfreq)
    spdr, wr = self.spdroots(w)

    if fig is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()
    ax = fig.add_subplot(2, 2, 1)
    ax.plot(rvs)
    ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(acf)
    # BUG FIX: format spec was '%rs' (repr plus a stray literal 's');
    # use '%s' to match the other panel titles.
    ax.set_title('Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(wr, spdr)
    ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(pacf)
    ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))

    return fig
def spdar1(ar, w):
    '''spectral density of an AR(1) process at frequencies `w`

    `ar` is either a scalar coefficient rho, or a lag-polynomial array
    [1, -rho] from which rho is extracted.
    '''
    rho = ar if np.ndim(ar) == 0 else -ar[1]
    denom = 1 + rho * rho - 2 * rho * np.cos(w)
    return 0.5 / np.pi / denom
if __name__ == '__main__':
    # Ad-hoc demo / comparison script for the spectral-density methods above.
    def maxabs(x,y):
        # maximum absolute elementwise difference, used to compare methods
        return np.max(np.abs(x-y))

    nobs = 200  #10000
    ar = [1, 0.0]
    ma = [1, 0.0]

    # --- demo 1: AR <-> MA representation roundtrip via lfilter and FFT ---
    ar2 = np.zeros(nobs)
    ar2[:2] = [1, -0.9]

    uni = np.zeros(nobs)
    uni[0]=1.
    #arrep = signal.lfilter(ma, ar, ar2)
    #marep = signal.lfilter([1],arrep, uni)
    # same faster:
    arcomb = np.convolve(ar, ar2, mode='same')
    marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
    print(marep[:10])
    mafr = fft.fft(marep)

    rvs = np.random.normal(size=nobs)
    datafr = fft.fft(rvs)
    y = fft.ifft(mafr*datafr)
    print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))

    arrep = signal.lfilter([1],marep, uni)
    print(arrep[:20])  # roundtrip to ar
    arfr = fft.fft(arrep)
    yfr = fft.fft(y)
    x = fft.ifft(arfr*yfr).real  #imag part is e-15
    # the next two are equal, roundtrip works
    print(x[:5])
    print(rvs[:5])
    print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))

    # ARMA filter using fft with ratio of fft of ma/ar lag polynomial
    # seems much faster than using lfilter
    #padding, note arcomb is already full length
    arcombp = np.zeros(nobs)
    arcombp[:len(arcomb)] = arcomb
    map_ = np.zeros(nobs)  #rename: map was shadowing builtin
    map_[:len(ma)] = ma
    ar0fr = fft.fft(arcombp)
    ma0fr = fft.fft(map_)
    y2 = fft.ifft(ma0fr/ar0fr*datafr)
    #the next two are (almost) equal in real part, almost zero but different in imag
    print(y2[:10])
    print(y[:10])
    print(maxabs(y, y2))  # from chfdiscrete
    #1.1282071239631782e-014

    # --- demo 2: compare the various spectral-density estimators visually ---
    ar = [1, -0.4]
    ma = [1, 0.2]

    arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)

    nfreq = nobs
    w = np.linspace(0, np.pi, nfreq)
    w2 = np.linspace(0, 2*np.pi, nfreq)

    import matplotlib.pyplot as plt
    plt.close('all')

    plt.figure()
    spd1, w1 = arma1.spd(2**10)
    print(spd1.shape)
    _ = plt.plot(spd1)
    plt.title('spd fft complex')

    plt.figure()
    spd2, w2 = arma1.spdshift(2**10)
    print(spd2.shape)
    _ = plt.plot(w2, spd2)
    plt.title('spd fft shift')

    plt.figure()
    spd3, w3 = arma1.spddirect(2**10)
    print(spd3.shape)
    _ = plt.plot(w3, spd3)
    plt.title('spd fft direct')

    plt.figure()
    spd3b = arma1._spddirect2(2**10)
    print(spd3b.shape)
    _ = plt.plot(spd3b)
    plt.title('spd fft direct mirrored')

    plt.figure()
    spdr, wr = arma1.spdroots(w)
    print(spdr.shape)
    plt.plot(w, spdr)
    plt.title('spd from roots')

    plt.figure()
    spdar1_ = spdar1(arma1.ar, w)
    print(spdar1_.shape)
    _ = plt.plot(w, spdar1_)
    plt.title('spd ar1')

    plt.figure()
    wper, spdper = arma1.periodogram(nfreq)
    print(spdper.shape)
    _ = plt.plot(w, spdper)
    plt.title('periodogram')

    startup = 1000
    rvs = arma1.generate_sample(startup+10000)[startup:]
    import matplotlib.mlab as mlb
    plt.figure()
    # NOTE(review): psd is computed on ``x`` from the roundtrip demo above,
    # not on the freshly generated ``rvs`` -- possibly unintended; confirm.
    sdm, wm = mlb.psd(x)
    print('sdm.shape', sdm.shape)
    sdm = sdm.ravel()
    plt.plot(wm, sdm)
    plt.title('matplotlib')

    from nitime.algorithms import LD_AR_est
    #yule_AR_est(s, order, Nfreqs)
    wnt, spdnt = LD_AR_est(rvs, 10, 512)
    plt.figure()
    print('spdnt.shape', spdnt.shape)
    _ = plt.plot(spdnt.ravel())
    print(spdnt[:10])
    plt.title('nitime')

    fig = plt.figure()
    arma1.plot4(fig)

    #plt.show()
| rgommers/statsmodels | statsmodels/sandbox/tsa/fftarma.py | Python | bsd-3-clause | 16,687 |
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand off to the CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sonetworks.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # only used to probe whether Django is installed
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so re-raise the original error.
        raise
    # Dispatch to the subcommand named on the command line (runserver, ...).
    execute_from_command_line(sys.argv)
| semitki/semitki | api/manage.py | Python | mit | 808 |
import numpy as np
from lightdock.constants import DEFAULT_LIGHTDOCK_PREFIX, DEFAULT_ELLIPSOID_DATA_EXTENSION, NUMPY_FILE_SAVE_EXTENSION
from lightdock.gso.searchspace.ofunction import ObjectiveFunction
class ScoringFunction(ObjectiveFunction):
    """Scoring Functions interface"""

    def __init__(self, weight=1.0):
        # weight is always stored as a float
        self.weight = float(weight)

    def __call__(self, receptor, receptor_coordinates, ligand, ligand_coordinates):
        """Calculates the value of the scoring function.

        The GSO algorithm depends on a positive value for calculating the luciferin.
        If more negative means better in the scoring function, the sign must be changed.
        """
        raise NotImplementedError()

    @staticmethod
    def restraints_satisfied(restraints, interface):
        """Calculates the percentage of satisfied restraints"""
        if not restraints:
            return 0.0
        # a restraint is satisfied when any of its contacts appears in the interface
        satisfied = sum(1 for contacts in restraints.values()
                        if set(contacts) & interface)
        return float(satisfied) / len(restraints)
class ModelAdapter(object):
    """Adapts a given Complex object as a DockingModel suitable for this
    ScoringFunction object.
    """

    def __init__(self, receptor, ligand, receptor_restraints=None, ligand_restraints=None):
        self.receptor_model = self._get_docking_model(receptor, receptor_restraints)
        self.ligand_model = self._get_docking_model(ligand, ligand_restraints)

    def _get_docking_model(self, protein, restraints):
        """Complex -> DockingModel interface"""
        raise NotImplementedError()

    @staticmethod
    def load_reference_points(molecule):
        """Load reference points if exist"""
        try:
            file_name = "%s%s%s" % (
                DEFAULT_LIGHTDOCK_PREFIX % molecule.structure_file_names[0],
                DEFAULT_ELLIPSOID_DATA_EXTENSION,
                NUMPY_FILE_SAVE_EXTENSION,
            )
            return np.load(file_name)
        except (IOError, ValueError):
            # missing or unreadable ellipsoid data file -> no reference points
            return None
# Two variables are needed to dynamically load the scoring functions from command line
# (presumably each concrete scoring module rebinds these to its own classes --
# confirm against the scoring-function loader).
DefinedScoringFunction = None
DefinedModelAdapter = None
| brianjimenez/lightdock | lightdock/scoring/functions.py | Python | gpl-3.0 | 2,421 |
#-*- coding: cp936 -*-
import math
from PIL import Image
def spherize(image):
    '''
    Apply a spherical ("funny mirror" / fisheye-like) warp to an image.

    Each destination pixel is mapped back toward the image centre using a
    pseudo-radius that grows quadratically with distance, which magnifies
    the middle of the picture like a convex mirror.

    @param image: instance of PIL Image
    @return: new RGBA Image of the same size
    '''
    width, height = image.size
    # floor division keeps the midpoints integral on Python 2 and 3 alike
    # (the original relied on Python 2 integer division)
    mid_x = width // 2
    mid_y = height // 2  # modified
    max_mid_xy = max(mid_x, mid_y)
    if image.mode != "RGBA":
        image = image.convert("RGBA")
    pix = image.load()
    dst_image = Image.new("RGBA", (width, height))
    dst_pix = dst_image.load()
    # BUG FIX: ``xrange`` does not exist on Python 3; ``range`` behaves the
    # same here (Python 2 just materializes the list).
    for w in range(width):
        for h in range(height):
            offset_x = w - mid_x
            offset_y = h - mid_y
            # polar angle of the pixel relative to the centre
            radian = math.atan2(offset_y, offset_x)
            # pseudo-radius: r**2 / max_mid_xy, not the true distance; the
            # quadratic growth produces the spherical distortion.  Floor
            # division preserves the original Python 2 integer semantics.
            radius = (offset_x ** 2 + offset_y ** 2) // max_mid_xy
            x = int(radius * math.cos(radian)) + mid_x
            y = int(radius * math.sin(radian)) + mid_y
            # clamp source coordinates inside the image bounds
            x = min(max(x, 0), width - 1)
            y = min(max(y, 0), height - 1)
            dst_pix[w, h] = pix[x, y]
    return dst_image
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# quickly: quickly project handler
#
# Copyright (C) 2009 Didier Roche
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# UPDATE VERSION WHEN NEEDED (it updates all versions needed to be updated)
VERSION = '12.08.1'
import glob
import os
import sys
import subprocess
try:
import DistUtilsExtra.auto
except ImportError:
print >> sys.stderr, 'To build quickly you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert DistUtilsExtra.auto.__version__ >= '2.18', 'needs DistUtilsExtra.auto >= 2.18'
def update_config(values = {}):
    """Rewrite quickly/quicklyconfig.py in place, substituting variables.

    ``values`` maps variable names to their new (already repr-formatted)
    values.  Returns a dict with the previous values of the substituted
    variables so the caller can restore them later.

    NOTE(review): the mutable default ``values={}`` is shared between
    calls; harmless here because it is never mutated.
    """
    oldvalues = {}
    try:
        fin = file('quickly/quicklyconfig.py', 'r')
        fout = file(fin.name + '.new', 'w')
        for line in fin:
            fields = line.split(' = ')  # Separate variable from value
            if fields[0] in values:
                # remember the old value, then emit the replacement line
                oldvalues[fields[0]] = fields[1].strip()
                line = "%s = %s\n" % (fields[0], values[fields[0]])
            fout.write(line)
        fout.flush()
        fout.close()
        fin.close()
        # swap the rewritten file into place
        os.rename(fout.name, fin.name)
    except (OSError, IOError), e:
        print ("ERROR: Can't find quickly/quicklyconfig.py")
        sys.exit(1)
    return oldvalues
def update_tutorial(tutorial_layouts):
    """Regenerate translated tutorial XML files with xml2po.

    ``tutorial_layouts`` is a list of (directory, base_file_name) pairs.
    For each pair the .pot template is refreshed from the master XML, then
    every <lang>.po under the po/ subdirectory is merged into a localized
    ``<name>-<lang>.xml`` next to the master file.
    """
    for tutorial_layout in tutorial_layouts:
        tutorial_dir = tutorial_layout[0]
        file_name = tutorial_layout[1]
        po_dir= "%s/po" % tutorial_dir
        # update .pot
        update_cmd = ['xml2po', '-e', '-o', '%s/%s.pot' % (po_dir, file_name),
                      '%s/%s.xml' % (tutorial_dir, file_name)]
        subprocess.call(update_cmd)
        # update lang
        for po_file in glob.glob("%s/*.po" % po_dir):
            lang = os.path.basename(po_file[:-3])  # strip the '.po' suffix
            update_cmd = ['xml2po', '-p', '%s/%s.po' % (po_dir, lang), '-o',
                          '%s/%s-%s.xml' % (tutorial_dir, file_name, lang),
                          '%s/%s.xml' % (tutorial_dir, file_name)]
            subprocess.call(update_cmd)
class InstallAndUpdateDataDirectory(DistUtilsExtra.auto.install_auto):
    """``install`` command that temporarily bakes the final data directory
    and version into quicklyconfig.py for the duration of the install,
    then restores the previous values afterwards."""
    def run(self):
        values = {'__quickly_data_directory__': "'%s'" % (self.prefix + '/share/quickly/'),
                  '__version__': "'%s'" % self.distribution.get_version()}
        previous_values = update_config(values)
        # regenerate translated tutorial XML before the data files are installed
        update_tutorial([("data/templates/ubuntu-application/help",
                          'tutorial')])
        DistUtilsExtra.auto.install_auto.run(self)
        update_config(previous_values)
# Main packaging entry point.  DistUtilsExtra.auto discovers most files
# automatically; only the extras (desktop files) are listed explicitly.
DistUtilsExtra.auto.setup(name='quickly',
      version="%s" % VERSION,
      description='build new Ubuntu apps quickly',
      long_description='Quickly enables for prospective programmer a way to easily build new ' \
                       'apps for Ubuntu based on templates and other systems for helping them ' \
                       'write their code in a guided manner. This also includes packaging and ' \
                       'deploying code.',
      url='https://launchpad.net/quickly',
      license="GPL v3",
      author='Quickly Developer Team',
      author_email='[email protected]',
      data_files=[('share/quickly/templates/ubuntu-application/project_root', glob.glob('data/templates/ubuntu-application/project_root/project_name.desktop.in')),
                  ('share/quickly/templates/ubuntu-flash-game/project_root', glob.glob('data/templates/ubuntu-flash-game/project_root/project_name.desktop.in'))],
      cmdclass={'install': InstallAndUpdateDataDirectory})
| didrocks/quickly | setup.py | Python | gpl-3.0 | 4,148 |
from openmm_systems.test_systems import (
LennardJonesPair,
LysozymeImplicit,
)
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
from wepy.runners.openmm import gen_sim_state
import time
def create_sim():
    """Build an OpenMM Simulation of the implicit-solvent lysozyme test
    system with a Langevin integrator on the CPU platform, with its state
    initialized from the test system's positions."""
    test_sys = LysozymeImplicit()
    integrator = omm.LangevinIntegrator(
        300.0 * unit.kelvin,
        1 / unit.picosecond,
        0.002 * unit.picoseconds,
    )
    init_state = gen_sim_state(test_sys.positions, test_sys.system, integrator)
    cpu_platform = omm.Platform.getPlatformByName('CPU')
    simulation = omma.Simulation(
        test_sys.topology,
        test_sys.system,
        integrator,
        platform=cpu_platform,
    )
    simulation.context.setState(init_state)
    return simulation
def run_sim(sim, steps):
    """Advance ``sim`` by ``steps`` integrator steps and return it."""
    sim.integrator.step(steps)
    return sim
def main():
    """Create the simulations sequentially, run each for a fixed number of
    steps, and print per-simulation wall-clock timings."""
    num_sims = 2
    steps = 5000

    simulations = [create_sim() for _ in range(num_sims)]

    for i, sim in enumerate(simulations):
        start = time.time()
        run_sim(sim, steps)
        end = time.time()
        print(f"Sim {i} took: {end - start}")
# Time the whole sequential run (this jig compares against parallel variants).
start = time.time()
main()
end = time.time()
print(f"Took {end - start} seconds")
| ADicksonLab/wepy | jigs/trio_mapper/source/sync_openmm.py | Python | mit | 1,288 |
import argparse
import os
import random
import scipy.stats as stats
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import grad
from data.finetune import CreateDataLoader
from models.standard import *
# Command-line configuration for the fine-tuning run (data, model sizes,
# optimizer hyperparameters, checkpoint/resume paths, loss weights).
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, help='path to colored dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=512, help='the height / width of the input image to network')
parser.add_argument('--cut', type=int, default=1, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--optim', action='store_true', help='load optimizer\'s checkpoint')
parser.add_argument('--outf', default='', help='folder to output images and model checkpoints')
parser.add_argument('--optf', default='', help='folder to optimizer checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=1234')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default=None, help='tensorboard env')
parser.add_argument('--advW', type=float, default=0.0001, help='adversarial weight, default=0.0001')
parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
parser.add_argument('--drift', type=float, default=0.001, help='wasserstein drift weight')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

gen_iterations = opt.geni
try:
    os.makedirs(opt.outf)
except OSError:
    # output directory already exists
    pass

# random seed setup # !!!!!
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end

# tensorboard logging
writer = SummaryWriter(log_dir=opt.env, comment='this is great')

dataloader = CreateDataLoader(opt)

# generator / discriminator, optionally resumed from checkpoints
netG = torch.nn.DataParallel(NetG(ngf=opt.ngf))
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

netD = torch.nn.DataParallel(NetD(ndf=opt.ndf))
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

# feature network used for the content (perceptual) loss; frozen
netF = torch.nn.DataParallel(NetF())
print(netF)
for param in netF.parameters():
    param.requires_grad = False

# sketch feature extractor
netI = torch.nn.DataParallel(NetI())
print(netI)

criterion_MSE = nn.MSELoss()

# constant grad-output tensors for D/G backward passes
one = torch.FloatTensor([1])
mone = one * -1

half_batch = opt.batchSize // 2

# buffers for a fixed visualization batch (filled by the disabled branch below)
fixed_sketch = torch.FloatTensor()
fixed_hint = torch.FloatTensor()
fixed_sketch_feat = torch.FloatTensor()

if opt.cuda:
    netD = netD.cuda()
    netG = netG.cuda()
    netF = netF.cuda()
    netI = netI.cuda().eval()
    fixed_sketch, fixed_hint, fixed_sketch_feat = fixed_sketch.cuda(), fixed_hint.cuda(), fixed_sketch_feat.cuda()
    criterion_MSE = criterion_MSE.cuda()
    one, mone = one.cuda(), mone.cuda()

# setup optimizer (optionally resumed, with the learning rate overridden)
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
if opt.optim:
    optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.optf))
    optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.optf))
    for param_group in optimizerG.param_groups:
        param_group['lr'] = opt.lrG
    for param_group in optimizerD.param_groups:
        param_group['lr'] = opt.lrD
def calc_gradient_penalty(netD, real_data, fake_data, sketch_feat):
    """WGAN-GP gradient penalty on random interpolates of real/fake batches.

    Samples one uniform mixing coefficient per batch element, evaluates the
    critic on the interpolated images (conditioned on the sketch features),
    and penalizes deviations of the per-sample gradient norm from 1, scaled
    by ``opt.gpW``.
    """
    alpha = torch.rand(opt.batchSize, 1, 1, 1)
    alpha = alpha.cuda() if opt.cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if opt.cuda:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates, Variable(sketch_feat))

    gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                     grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(
                         disc_interpolates.size()),
                     create_graph=True, retain_graph=True, only_inputs=True)[0]

    # BUG FIX (was a TODO in the original): flatten each sample's gradient
    # before taking the norm, so the penalty constrains the norm of the full
    # per-sample gradient as in the WGAN-GP paper (Gulrajani et al., 2017).
    # Previously norm(2, dim=1) only reduced over the channel dimension.
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
    return gradient_penalty
def mask_gen():
    """Build the per-batch hint mask: the first half of the batch gets very
    sparse random masks (threshold drawn from the truncated normal ``X``),
    the second half gets all-zero masks (no color hints)."""
    half = opt.batchSize // 2
    sparse = [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float()
              for _ in range(half)]
    empty = [torch.zeros(1, 1, maskS, maskS).float() for _ in range(half)]
    return torch.cat(sparse + empty, 0).cuda()
flag = 1
lower, upper = 0, 1
mu, sigma = 1, 0.005
maskS = opt.imageSize // 4
# truncated normal used to sample the (very high) mask sparsity threshold
X = stats.truncnorm(
    (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)

for p in netG.parameters():
    p.requires_grad = False  # to avoid computation

for epoch in range(opt.niter):
    data_iter = iter(CreateDataLoader(opt))
    i = 0
    while i < len(dataloader) - 16 // opt.batchSize:
        ############################
        # (1) Update D network
        ###########################
        for p in netD.parameters():  # reset requires_grad
            p.requires_grad = True  # they are set to False below in netG update
        for p in netG.parameters():
            p.requires_grad = False  # to avoid computation ft_params

        # train the discriminator Diters times
        Diters = opt.Diters
        j = 0
        while j < Diters and i < len(dataloader):  #- 16 // opt.batchSize:
            j += 1
            netD.zero_grad()

            data = data_iter.next()
            real_cim, real_vim, real_sim = data
            i += 1
            ###############################
            if opt.cuda:
                real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()

            # hint = masked color image concatenated with the mask itself
            mask = mask_gen()
            hint = torch.cat((real_vim * mask, mask), 1)

            # train with fake (generator and feature nets run without grad)
            with torch.no_grad():
                feat_sim = netI(Variable(real_sim)).data
                fake_cim = netG(Variable(real_sim),
                                Variable(hint),
                                Variable(feat_sim)
                                ).data

            errD_fake = netD(Variable(fake_cim), Variable(feat_sim))
            errD_fake = errD_fake.mean(0).view(1)
            errD_fake.backward(one, retain_graph=True)  # backward on score on fake

            errD_real = netD(Variable(real_cim), Variable(feat_sim))
            errD_real = errD_real.mean(0).view(1)
            errD = errD_real - errD_fake  # wasserstein distance estimate (logged)

            errD_realer = -1 * errD_real + errD_real.pow(2) * opt.drift
            # additional penalty term to keep the scores from drifting too far from zero
            errD_realer.backward(one, retain_graph=True)  # backward on score on real

            gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim, feat_sim)
            gradient_penalty.backward()

            optimizerD.step()

        ############################
        # (2) Update G network
        ############################
        if i < len(dataloader) - 16 // opt.batchSize:
            # NOTE(review): disabled branch (``if 0``); it appears to set up
            # a fixed visualization batch (fixed_sketch/hint/feat) exactly
            # once -- confirm whether it should be re-enabled via ``flag``.
            if 0:  #:flag: # fix samples
                data = zip(*[data_iter.next() for _ in range(16 // opt.batchSize)])
                real_cim, real_vim, real_sim = [torch.cat(dat, 0) for dat in data]
                i += 1

                if opt.cuda:
                    real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()

                mask1 = torch.cat(
                    [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(8)],
                    0).cuda()
                mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(8)],
                                  0).cuda()
                mask = torch.cat([mask1, mask2], 0)
                hint = torch.cat((real_vim * mask, mask), 1)

                with torch.no_grad():
                    feat_sim = netI(Variable(real_sim)).data

                writer.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))
                writer.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))
                writer.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))

                vutils.save_image(real_cim.mul(0.5).add(0.5),
                                  '%s/color_samples' % opt.outf + '.png')
                vutils.save_image(real_sim.mul(0.5).add(0.5),
                                  '%s/blur_samples' % opt.outf + '.png')
                fixed_sketch.resize_as_(real_sim).copy_(real_sim)
                fixed_hint.resize_as_(hint).copy_(hint)
                fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)

                flag -= 1

            # freeze D, unfreeze G for the generator step
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation
            for p in netG.parameters():
                p.requires_grad = True
            netG.zero_grad()

            data = data_iter.next()
            real_cim, real_vim, real_sim = data
            i += 1

            if opt.cuda:
                real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()

            mask = mask_gen()
            hint = torch.cat((real_vim * mask, mask), 1)

            with torch.no_grad():
                feat_sim = netI(Variable(real_sim)).data

            fake = netG(Variable(real_sim),
                        Variable(hint),
                        Variable(feat_sim))

            # adversarial term: push critic score up (mone reverses sign)
            errd = netD(fake, Variable(feat_sim))
            errG = errd.mean() * opt.advW
            errG.backward(mone, retain_graph=True)

            # perceptual content loss against the real colored image
            feat1 = netF(fake)
            with torch.no_grad():
                feat2 = netF(Variable(real_cim))

            contentLoss = criterion_MSE(feat1, feat2)
            contentLoss.backward()

            optimizerG.step()

            ############################
            # (3) Report & 100 Batch checkpoint
            ############################
            writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
            writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)
            writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)
            writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)
            writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)
            writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)
            print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
                  % (epoch, opt.niter, i, len(dataloader), gen_iterations,
                     errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))

            # NOTE(review): ``x % 500`` can never equal 666, so this image
            # dump is dead code -- presumably disabled on purpose; confirm.
            if gen_iterations % 500 == 666:
                with torch.no_grad():
                    fake = netG(Variable(fixed_sketch),
                                Variable(fixed_hint),
                                Variable(fixed_sketch_feat))

                writer.add_image('colored imgs', vutils.make_grid(fake.data.mul(0.5).add(0.5), nrow=4),
                                 gen_iterations)

            gen_iterations += 1

    # do checkpointing (once per epoch)
    if opt.cut == 0:
        torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
        torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
        torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
        torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
    elif gen_iterations % 100 == 0:  #(epoch + opt.epoi) % opt.cut == 0:
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, gen_iterations))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, gen_iterations))
        torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
        torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
| orashi/PaintsPytorch | train_ft.py | Python | mit | 13,323 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The Horizomer Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
#
# functions relevant to phylogenetic tree operations
#
from skbio import TreeNode
from math import isclose
from skbio.tree import MissingNodeError
def _extract_support(node):
    """Split a node label into its support value and remaining name.

    Parameters
    ----------
    skbio.TreeNode
        node from which the support value is extracted

    Returns
    -------
    tuple of
        int, float or None
            The support value extracted from the node label
        str or None
            The node label with the support value stripped
    """
    if not node.name:
        return None, None
    # everything before the first colon is the candidate support value
    head, _, tail = node.name.partition(':')
    support = None
    for cast in (int, float):
        try:
            support = cast(head)
            break
        except ValueError:
            continue
    if support is None:
        # nothing numeric to strip; the whole label is the name
        return None, node.name
    return support, tail or None
def _node_label(node):
    """Generate a node label in the format of "support:name" if both exist,
    or "support" or "name" if either exists.

    Parameters
    ----------
    skbio.TreeNode
        node containing support value or name

    Returns
    -------
    str
        Generated node label
    """
    pieces = [] if node.support is None else [str(node.support)]
    if node.name:  # guards against a name of NoneType
        pieces.append(node.name)
    return ':'.join(pieces)
def assign_supports(tree):
    """Extract support values from internal node labels of a tree.

    Notes
    -----
    A "support value" measures the confidence or frequency of the incoming
    branch (the branch from parent to self) of an internal node in a tree.
    Roots and tips do not have support values. To extract a support value
    from a node label, this method reads from left and stops at the first
    ":" (if any), and attempts to convert it to a number.

    For examples: "(a,b)1.0", "(a,b)1.0:2.5", and "(a,b)'1.0:species_A'".
    In these cases the support values are all 1.0.

    For examples: "(a,b):1.0" and "(a,b)species_A". In these cases there
    are no support values.

    If a support value is successfully extracted, it will be stripped from
    the node label and assigned to the `support` property.

    IMPORTANT: mathematically, "support value" is a property of a branch,
    not a node. Because of historical reasons, support values are usually
    attached to nodes in a typical tree file [1].

    [1] Czech, Lucas, Jaime Huerta-Cepas, and Alexandros Stamatakis. "A
        Critical Review on the Use of Support Values in Tree Viewers and
        Bioinformatics Toolkits." Molecular biology and evolution 34.6
        (2017): 1535-1542.

    Examples
    --------
    >>> from skbio import TreeNode
    >>> newick = "((a,b)95,(c,d):1.1,(e,f)'80:speciesA':1.0);"
    >>> tree = TreeNode.read([newick])
    >>> assign_supports(tree)
    >>> tree.lca(['a', 'b']).support
    95
    >>> tree.lca(['c', 'd']).support is None
    True
    >>> tree.lca(['e', 'f']).support
    80
    >>> tree.lca(['e', 'f']).name
    'speciesA'
    """
    for node in tree.traverse():
        # roots and tips never carry a support value
        node.support = None
        if not node.is_root() and not node.is_tip():
            node.support, node.name = _extract_support(node)
def walk_copy(node, src):
    """Directionally and recursively copy a tree node and its neighbors.

    Parameters
    ----------
    node : skbio.TreeNode
        node and its neighbors to be copied
    src : skbio.TreeNode
        an upstream node determining the direction of walking (src -> node)

    Returns
    -------
    skbio.TreeNode
        copied node and its neighbors

    Notes
    -----
    After manipulation, `src` will become the parent of `node`, and all other
    neighbors of `node` will become children of it.

    Unlike scikit-bio's `unrooted_copy` function, this function has special
    treatment at root: For an unrooted tree, its "root" will be retained as a
    regular node; for a rooted tree, its root will be deleted, and all basal
    nodes will become immediate children of the basal node where the source is
    located.

    The function determines whether a tree is rooted or unrooted in such way:
    rooted: root has two children; unrooted: root has one or more than two
    children.

    Logic (pseudocode):
    if node is root:
        if tree is rooted:
            raise error
        else:
            if src in node.children:
                append node.other_child
            else:
                raise error
    elif node is basal (i.e., child of root):
        if tree is rooted:
            if src in node.siblings:
                append node.children
            elif src in node.children:
                append node.sibling and node.other_children
            else:
                raise error
        else:
            if src is node.parent (i.e., root):
                append node.children
            elif src in node.children:
                append node.parent and node.other_children
            else:
                raise error
    else: (i.e., node is derived)
        if src is node.parent:
            append node.children
        elif src in node.children:
            append node.parent and node.other_children
        else:
            raise error

    See Also
    --------
    root_above

    Raises
    ------
    ValueError
        if the input node is already a root or if the input node and input
        source node are not neighbors
    """
    parent = node.parent
    children = node.children

    # position of node
    pos = ('root' if node.is_root() else 'basal' if parent.is_root()
           else 'derived')

    # whether tree is rooted (only defined for root/basal positions)
    root = node if pos == 'root' else node.parent if pos == 'basal' else None
    rooted = None if pos == 'derived' else (
        True if len(root.children) == 2 else False)

    if rooted:
        if pos == 'root':
            raise ValueError('Cannot walk from root of a rooted tree.')
        elif pos == 'basal':
            # the other basal node; needed for the 'bottom'/'top' moves
            sibling = [x for x in node.siblings()][0]

    # direction of walking relative to node (see pseudocode in docstring)
    move = (('bottom' if src is sibling else 'top' if src in children
             else 'n/a') if rooted and pos == 'basal'
            else ('down' if src is parent else 'up' if src in children
                  else 'n/a'))
    if move == 'n/a':
        raise ValueError('Source and node are not neighbors.')

    # create a new node
    res = TreeNode(node.name)

    # determine length of the new node; 'bottom' merges the two basal
    # branches of a rooted tree into one
    res.length = (node.length if move == 'down'
                  else src.length + node.length if move == 'bottom'
                  else src.length)  # up or top

    # determine support of the new node (support travels with the branch)
    res.support = (node.support if move in ('down', 'bottom')
                   else src.support)

    # append children except for src (if applies)
    res.extend([walk_copy(c, node) for c in children if c is not src])

    # append parent if walking up (except at root)
    if move == 'up' and pos != 'root':
        res.append(walk_copy(parent, node))

    # append sibling if walking from one basal node to another
    if move == 'top':
        res.append(walk_copy(sibling, node))

    return res
def root_above(node, name=None):
    """Re-root a tree between a give node and its parent.
    Parameters
    ----------
    node : skbio.TreeNode
        node above which the new root will be placed
    name : str, optional
        name of the new root
    Returns
    -------
    skbio.TreeNode
        resulting rooted tree
    Notes
    -----
    Unlike scikit-bio's `root_at` function which actually generates an
    unrooted tree, this function generates a rooted tree (the root of
    which has exactly two children).
    """
    # copy the two subtrees hanging on either side of the target branch
    left = walk_copy(node, node.parent)
    right = walk_copy(node.parent, node)
    # place the root at the midpoint of the original branch
    half = node.length / 2
    left.length = half
    right.length = half
    rooted = TreeNode(name, children=[left, right])
    rooted.support = None
    return rooted
def _exact_compare(tree1, tree2):
"""Simultaneously compares the name, length, and support of each node from
two trees.
Parameters
----------
tree1: skbio.TreeNode
first tree to compare
tree2: skbio.TreeNode
second tree to compare
Returns
-------
bool
`True` if name, length, and support of each corresponding node are same
`False` otherwise
"""
attrs = ['name', 'length', 'support']
for n1, n2 in zip(tree1.postorder(), tree2.postorder()):
for attr in attrs:
if getattr(n1, attr, None) != getattr(n2, attr, None):
return False
return True
def _compare_length(node1, node2):
"""Private function for compare_branch_lengths. Determines if lengths of the
two nodes are same.
Parameters
----------
node1: skbio.TreeNode
first node to compare
node2: skbio.TreeNode
second node to compare
Returns
-------
bool
`True` if lengths of the two input nodes are None, same, or close
`False` otherwise
See Also
--------
compare_branch_length
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(a:1,b:1)c;"])
>>> print(_compare_length(tree.find('a'), tree.find('b')))
True
>>> print(_compare_length(tree.find('a'), tree.find('c')))
False
"""
if node1.length is None and node2.length is None:
return True
elif (node1.length is None) ^ (node2.length is None):
return False
elif isclose(node1.length, node2.length) is False:
return False
return True
def compare_branch_lengths(tree1, tree2):
    """Returns `True` if each corresponding node in 2 trees has same length.
    Parameters
    ----------
    tree1: skbio.TreeNode
        first tree to compare
    tree2: skbio.TreeNode
        second tree to compare
    Returns
    -------
    bool
        `True` if two input trees have same topologies and branch lengths
        `False` otherwise
    Notes
    -----
    This method compares two unordered trees to check if the two given trees
    have same topology and same branch lengths. The topology check is done by
    postorder traversals of the first tree while `find()` the respective nodes
    in tree 2. Topology of tree2 is determined by storing anchoring nodes in
    stack temporarily.
    See Also
    --------
    compare_topology
    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree1 = TreeNode.read(['((a:1, c:1):2, b:1);'])
    >>> tree2 = TreeNode.read(['((a:1, c:1):2, b:1);'])
    >>> print(compare_branch_lengths(tree1, tree2))
    True
    >>> tree3 = TreeNode.read(['((a:1, c:1):2, b);'])
    >>> print(compare_branch_lengths(tree3, tree1))
    False
    """
    stack = []  # stack to store nodes in tree2
    for count, node in enumerate(tree1.postorder(include_self=False)):
        if node.is_tip():
            # tips are matched across the two trees by name
            try:
                cur = tree2.find(node.name)
            except MissingNodeError:
                return False
        else:
            # internal nodes must match the anchoring node on top of the stack
            # NOTE(review): stack[-1] raises IndexError when the stack is
            # empty; presumably postorder always yields a tip (which pushes a
            # parent) before any internal node -- confirm.
            if node.id == stack[-1].id:
                cur = stack.pop()
            else:
                return False
        if _compare_length(node, cur) is False:
            return False
        # tag both parents with the traversal count so they can be paired up
        # when they are visited later in the traversal
        if node.parent.id is None and cur.parent.id is None:
            cur.parent.id = node.parent.id = str(count)
        elif (node.parent.id is not None) ^ (cur.parent.id is not None):
            return False
        if cur.parent not in stack:
            stack.append(cur.parent)
    return True
def order_nodes(tree, increase=True):
    """Rotate internal nodes of a tree so that child nodes are ordered by the
    number of descendants.
    Parameters
    ----------
    tree : skbio.TreeNode
        tree to order
    increase : bool, optional
        order nodes in increasing (True) or decreasing (False) order
    Returns
    -------
    skbio.TreeNode
        resulting ordered tree
    See Also
    --------
    is_ordered
    """
    res = tree.copy()
    # annotate every node with its descendant tip count ('n'); postorder
    # guarantees children are counted before their parent
    for nd in res.postorder():
        nd.n = 1 if nd.is_tip() else sum(ch.n for ch in nd.children)
    # re-attach each internal node's children sorted by tip count
    for nd in res.postorder():
        if nd.is_tip():
            continue
        old_children = nd.children
        nd.children = []
        for child in sorted(old_children, key=lambda c: c.n, reverse=increase):
            nd.append(child)
    # strip the temporary counter attribute
    for nd in res.postorder():
        delattr(nd, 'n')
    return res
def is_ordered(tree, increase=True):
    """Returns `True` if the tree is ordered.
    Parameters
    ----------
    tree : skbio.TreeNode
        tree to check ordering
    increase : bool, optional
        check if nodes in increasing (True) or decreasing (False) order
    Returns
    -------
    bool
        `True` if the tree is ordered
    See Also
    --------
    order_nodes
    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(['((a,b)c,d)e;'])
    >>> print(is_ordered(tree))
    True
    """
    # work on a copy so the temporary 'n' attribute never touches the input
    tcopy = tree.copy()
    # annotate each node with its number of descendant tips
    for node in tcopy.postorder():
        if node.is_tip():
            node.n = 1
        else:
            node.n = sum(x.n for x in node.children)
    p = tcopy.root()
    prev = p.n
    # NOTE(review): 'p' starts at the root, whose siblings() is empty, so the
    # first level-order nodes always take the 'else' branch and reset 'p';
    # confirm this traversal actually checks the intended sibling ordering.
    for node in tcopy.levelorder():
        s = [x for x in p.siblings()]
        if node in s:
            cur = node.n
            # a violation is a non-monotonic tip count in the chosen direction
            if prev < cur if increase else prev > cur:
                return False
            prev = cur
        else:
            p = node
            prev = p.n
    return True
def cladistic(tree, taxa):
    """Determines the cladistic property of the given taxon set.
    Parameters
    ----------
    tree : skbio.TreeNode
        tree for taxa comparison
    taxa : iterable of str
        taxon names
    Returns
    -------
    str
        'uni' if input taxon is a single tip in given tree
        'mono' if input taxa are monophyletic in given tree
        'poly' if input taxa are polyphyletic in given tree
    Notes
    -----
    Paraphyly, which is programmably indistinguishable from polyphyly, returns
    poly here.
    Raises
    ------
    ValueError
        if one or more taxon names are not present in the tree
    """
    taxon_set = set(taxa)
    # collect the tips whose names are in the query set
    matched = [tip for tip in tree.tips() if tip.name in taxon_set]
    n = len(taxon_set)
    if len(matched) < n:
        raise ValueError('Taxa not found in the tree.')
    if n == 1:
        return 'uni'
    # monophyletic iff the LCA's clade contains exactly the query taxa
    if len(tree.lca(matched).subset()) == n:
        return 'mono'
    return 'poly'
def support(node):
    """Get support value of a node.
    Parameters
    ----------
    node : skbio.TreeNode
        node to get support value of
    Returns
    -------
    float or None
        support value of the node, or None if not available
    Notes
    -----
    A "support value" is defined as the numeric form of a whole node label
    without ":", or the part preceding the first ":" in the node label.
    For example, "(a,b)1.0", "(a,b)1.0:2.5" and "(a,b)'1.0:species_A'" all
    carry a support value of 1.0, whereas "(a,b):1.0" and "(a,b)species_A"
    carry none.
    """
    # keep attribute access inside the try: a missing/None name raises
    # AttributeError, a non-numeric label raises ValueError -- both mean
    # "no support value"
    try:
        head = node.name.split(':', 1)[0]
        return float(head)
    except (ValueError, AttributeError):
        return None
def unpack(node):
    """Unpack an internal node of a tree.
    Parameters
    ----------
    node : skbio.TreeNode
        node to unpack
    Notes
    -----
    This function sequentially: 1) elongates child nodes by branch length of
    self (omit if there is no branch length), 2) removes self from parent node,
    and 3) grafts child nodes to parent node.
    Raises
    ------
    ValueError
        if input node is root
    """
    if node.is_root():
        raise ValueError('Cannot unpack root.')
    parent = node.parent
    blen = node.length or 0.0
    # push the unpacked node's branch length down onto each child;
    # a total of 0.0 collapses back to None (no branch length)
    for child in node.children:
        child.length = ((child.length or 0.0) + blen) or None
    # detach self, then graft the children onto the former parent
    parent.remove(node)
    parent.extend(node.children)
def has_duplicates(tree):
    """Test whether there are duplicated taxa (tip names) in a tree.
    Parameters
    ----------
    tree : skbio.TreeNode
        tree for check for duplicates
    Returns
    -------
    bool
        whether there are duplicates
    Raises
    ------
    ValueError
        if taxon is empty
    """
    names = [tip.name for tip in tree.tips()]
    # an unnamed tip makes duplicate detection meaningless
    if None in names or '' in names:
        raise ValueError('Empty taxon name(s) found.')
    return len(names) > len(set(names))
def compare_topology(tree1, tree2):
    """Test whether the topologies of two trees with all nodes assigned
    unique IDs are identical.
    Parameters
    ----------
    tree1 : skbio.TreeNode
        first tree in comparison
    tree2 : skbio.TreeNode
        second tree in comparison
    Returns
    -------
    bool
        whether the topologies are identical
    Notes
    -----
    Given all nodes (internal nodes or tips) have unique IDs, it suffices to
    check that the node-name -> parent-name mappings agree. Child order and
    branch lengths are ignored.
    """
    def _parent_map(tree):
        # map every non-root node's name to its parent's name
        return {node.name: node.parent.name
                for node in tree.traverse() if not node.is_root()}
    return _parent_map(tree1) == _parent_map(tree2)
def intersect_trees(tree1, tree2):
    """Shrink two trees to contain only overlapping taxa.
    Parameters
    ----------
    tree1 : skbio.TreeNode
        first tree to intersect
    tree2 : skbio.TreeNode
        second tree to intersect
    Returns
    -------
    tuple of two TreeNodes
        resulting trees containing only overlapping taxa
    """
    # duplicated taxa would make the intersection ambiguous
    for t in (tree1, tree2):
        if has_duplicates(t):
            raise ValueError('Either tree has duplicated taxa.')
    shared = ({tip.name for tip in tree1.tips()}
              & {tip.name for tip in tree2.tips()})
    if not shared:
        raise ValueError('Trees have no overlapping taxa.')
    return (tree1.shear(shared), tree2.shear(shared))
def unpack_by_func(tree, func):
    """Unpack internal nodes that meet certain criteria.
    Parameters
    ----------
    tree : skbio.TreeNode
        tree to search for nodes to unpack
    func : function
        a function that accepts a TreeNode and returns `True` or `False`,
        where `True` indicates the node is to be unpacked
    Returns
    -------
    skbio.TreeNode
        resulting tree with nodes meeting criteria unpacked
    """
    tcopy = tree.copy()
    # collect targets first: unpacking while traversing would mutate the
    # structure under the iterator
    targets = [nd for nd in tcopy.non_tips() if func(nd)]
    for nd in targets:
        unpack(nd)
    return tcopy
def read_taxdump(nodes_fp, names_fp=None):
    """Read NCBI taxdump.
    Parameters
    ----------
    nodes_fp : str
        file path to NCBI nodes.dmp
    names_fp : str, optional
        file path to NCBI names.dmp
    Returns
    -------
    dict of dict
        taxid : {
            'parent' : str
                parent taxid
            'rank' : str
                taxonomic rank
            'name' : str
                taxon name, empty if names_fp is None
            'children' : set of str
                child taxids
        }
    """
    def _fields(line):
        # taxdump rows look like "<val>\t|\t<val>\t|\t..."; drop the "\t|"
        # separators, then split on the remaining tabs
        return line.rstrip('\r\n').replace('\t|', '').split('\t')

    taxdump = {}
    # nodes.dmp: taxid | parent taxid | rank | more info...
    with open(nodes_fp, 'r') as f:
        for line in f:
            fields = _fields(line)
            taxdump[fields[0]] = {'parent': fields[1], 'rank': fields[2],
                                  'name': '', 'children': set()}
    # names.dmp: taxid | name | unique name | name class |
    if names_fp is not None:
        with open(names_fp, 'r') as f:
            for line in f:
                fields = _fields(line)
                if fields[3] == 'scientific name':
                    taxdump[fields[0]]['name'] = fields[1]
    # wire up children; the root's taxid equals its own parent and is skipped
    for tid, entry in taxdump.items():
        pid = entry['parent']
        if tid != pid:
            taxdump[pid]['children'].add(tid)
    return taxdump
def build_taxdump_tree(taxdump):
    """Build NCBI taxdump tree.
    Parameters
    ----------
    taxdump : dict of dict
        attributes of each taxid, see read_taxdump
    Returns
    -------
    skbio.TreeNode
        a tree representing taxdump
    """
    # NCBI's root taxid is '1'
    tree = TreeNode('1')
    # attach child nodes with an explicit stack (equivalent to the recursive
    # walk: each popped node gets one TreeNode per child taxid)
    stack = [tree]
    while stack:
        node = stack.pop()
        for cid in taxdump[node.name]['children']:
            child = TreeNode(cid)
            node.extend([child])
            stack.append(child)
    return tree
| biocore/WGS-HGT | horizomer/utils/tree.py | Python | bsd-3-clause | 24,210 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
sys.path += ['../..']
import gencerts
def add_excluded_name_constraints(cert, num_dns, num_ip, num_dirnames, num_uri):
  """Adds 'excluded' name constraints of each requested type to |cert|.

  Excluded entries use the 'x' prefix (x<N>.test, 11.x.x.x, "x<N>) so they
  never match the permitted/SAN values, which use the 't' prefix."""
  cert.get_extensions().set_property('nameConstraints', '@nameConstraints_info')
  constraints = cert.config.get_section('nameConstraints_info')
  for n in range(1, num_dns + 1):
    constraints.set_property('excluded;DNS.%i' % n, 'x%i.test' % (n - 1))
  for i in range(num_ip):
    # spread the index across the last three octets of an 11.a.b.c address
    hi, c = divmod(i, 256)
    a, b = divmod(hi, 256)
    constraints.set_property('excluded;IP.%i' % (i + 1),
                             '11.%i.%i.%i/255.255.255.255' % (a, b, c))
  for i in range(num_dirnames):
    section_name = 'nameConstraints_dirname_x%i' % (i + 1)
    cert.config.get_section(section_name).set_property('commonName', '"x%i' % i)
    constraints.set_property('excluded;dirName.%i' % (i + 1), section_name)
  for i in range(num_uri):
    constraints.set_property('excluded;URI.%i' % (i + 1), 'http://xest/%i' % i)
def add_permitted_name_constraints(
    cert, num_dns, num_ip, num_dirnames, num_uri):
  """Adds 'permitted' name constraints of each requested type to |cert|.

  Permitted entries use the 't' prefix (t<N>.test, 10.x.x.x, "t<N>) so they
  match the SAN values generated by add_sans."""
  cert.get_extensions().set_property('nameConstraints', '@nameConstraints_info')
  constraints = cert.config.get_section('nameConstraints_info')
  for n in range(1, num_dns + 1):
    constraints.set_property('permitted;DNS.%i' % n, 't%i.test' % (n - 1))
  for i in range(num_ip):
    # spread the index across the last three octets of a 10.a.b.c address
    hi, c = divmod(i, 256)
    a, b = divmod(hi, 256)
    constraints.set_property('permitted;IP.%i' % (i + 1),
                             '10.%i.%i.%i/255.255.255.255' % (a, b, c))
  for i in range(num_dirnames):
    section_name = 'nameConstraints_dirname_p%i' % (i + 1)
    cert.config.get_section(section_name).set_property('commonName', '"t%i' % i)
    constraints.set_property('permitted;dirName.%i' % (i + 1), section_name)
  for i in range(num_uri):
    constraints.set_property('permitted;URI.%i' % (i + 1),
                             'http://test/%i' % i)
def add_sans(cert, num_dns, num_ip, num_dirnames, num_uri):
  """Adds a subjectAltName extension to |cert| with the requested counts.

  The generated names use the 't' prefix, matching the values produced by
  add_permitted_name_constraints."""
  cert.get_extensions().set_property('subjectAltName', '@san_info')
  sans = cert.config.get_section('san_info')
  for n in range(1, num_dns + 1):
    sans.set_property('DNS.%i' % n, 't%i.test' % (n - 1))
  for i in range(num_ip):
    # spread the index across the last three octets of a 10.a.b.c address
    hi, c = divmod(i, 256)
    a, b = divmod(hi, 256)
    sans.set_property('IP.%i' % (i + 1), '10.%i.%i.%i' % (a, b, c))
  for i in range(num_dirnames):
    section_name = 'san_dirname%i' % (i + 1)
    cert.config.get_section(section_name).set_property('commonName', '"t%i' % i)
    sans.set_property('dirName.%i' % (i + 1), section_name)
  for i in range(num_uri):
    sans.set_property('URI.%i' % (i + 1), 'http://test/%i' % i)
# Self-signed root certificate.
root = gencerts.create_self_signed_root_certificate('Root')
# Use the same keys for all the chains. Fewer key files to check in, and also
# gives stability against re-ordering of the calls to |make_chain|.
intermediate_key = gencerts.get_or_generate_rsa_key(
    2048, gencerts.create_key_path('Intermediate'))
# The end-entity ('t0') key is likewise shared by every generated chain.
target_key = gencerts.get_or_generate_rsa_key(
    2048, gencerts.create_key_path('t0'))
def make_chain(name, doc, excluded, permitted, sans):
  """Writes '<name>.pem' containing root -> intermediate -> target, where the
  intermediate carries the given name constraints and the target the given
  SANs. |doc| is the human-readable description embedded in the output."""
  # Intermediate: carries the excluded/permitted name constraints.
  ica = gencerts.create_intermediate_certificate('Intermediate', root)
  ica.set_key(intermediate_key)
  add_excluded_name_constraints(ica, **excluded)
  add_permitted_name_constraints(ica, **permitted)
  # Target: carries the subjectAltName entries.
  leaf = gencerts.create_end_entity_certificate('t0', ica)
  leaf.set_key(target_key)
  add_sans(leaf, **sans)
  gencerts.write_chain(doc, [leaf, ica, root], '%s.pem' % name)
# Each call below emits one test chain PEM. The second argument is the
# human-readable description embedded in the generated file.
make_chain(
    'ok-all-types',
    "A chain containing a large number of name constraints and names,\n"
    "but below the limit.",
    excluded=dict(num_dns=418, num_ip=418, num_dirnames=418, num_uri=1025),
    permitted=dict(num_dns=418, num_ip=418, num_dirnames=418, num_uri=1025),
    sans=dict(num_dns=418, num_ip=418, num_dirnames=417, num_uri=1025))
make_chain(
    'toomany-all-types',
    "A chain containing a large number of different types of name\n"
    "constraints and names, above the limit.",
    excluded=dict(num_dns=419, num_ip=419, num_dirnames=419, num_uri=0),
    permitted=dict(num_dns=419, num_ip=419, num_dirnames=419, num_uri=0),
    sans=dict(num_dns=419, num_ip=419, num_dirnames=418, num_uri=0))
# Per-type 'excluded' overflow cases.
make_chain(
    'toomany-dns-excluded',
    "A chain containing a large number of excluded DNS name\n"
    "constraints and DNS names, above the limit.",
    excluded=dict(num_dns=1025, num_ip=0, num_dirnames=0, num_uri=0),
    permitted=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    sans=dict(num_dns=1024, num_ip=0, num_dirnames=0, num_uri=0))
make_chain(
    'toomany-ips-excluded',
    "A chain containing a large number of excluded IP name\n"
    "constraints and IP names, above the limit.",
    excluded=dict(num_dns=0, num_ip=1025, num_dirnames=0, num_uri=0),
    permitted=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    sans=dict(num_dns=0, num_ip=1024, num_dirnames=0, num_uri=0))
make_chain(
    'toomany-dirnames-excluded',
    "A chain containing a large number of excluded directory name\n"
    "constraints and directory names, above the limit.",
    excluded=dict(num_dns=0, num_ip=0, num_dirnames=1025, num_uri=0),
    permitted=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    sans=dict(num_dns=0, num_ip=0, num_dirnames=1024, num_uri=0))
# Per-type 'permitted' overflow cases.
make_chain(
    'toomany-dns-permitted',
    "A chain containing a large number of permitted DNS name\n"
    "constraints and DNS names, above the limit.",
    excluded=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    permitted=dict(num_dns=1025, num_ip=0, num_dirnames=0, num_uri=0),
    sans=dict(num_dns=1024, num_ip=0, num_dirnames=0, num_uri=0))
make_chain(
    'toomany-ips-permitted',
    "A chain containing a large number of permitted IP name\n"
    "constraints and IP names, above the limit.",
    excluded=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    permitted=dict(num_dns=0, num_ip=1025, num_dirnames=0, num_uri=0),
    sans=dict(num_dns=0, num_ip=1024, num_dirnames=0, num_uri=0))
make_chain(
    'toomany-dirnames-permitted',
    "A chain containing a large number of permitted directory name\n"
    "constraints and directory names, above the limit.",
    excluded=dict(num_dns=0, num_ip=0, num_dirnames=0, num_uri=0),
    permitted=dict(num_dns=0, num_ip=0, num_dirnames=1025, num_uri=0),
    sans=dict(num_dns=0, num_ip=0, num_dirnames=1024, num_uri=0))
# Cases where constraint and name types differ, so no limit is hit.
make_chain(
    'ok-different-types-dns',
    "A chain containing a large number of name constraints and names,\n"
    "but of different types, thus not triggering the limit.",
    excluded=dict(num_dns=0, num_ip=1025, num_dirnames=1025, num_uri=1025),
    permitted=dict(num_dns=0, num_ip=1025, num_dirnames=1025, num_uri=1025),
    sans=dict(num_dns=1025, num_ip=0, num_dirnames=0, num_uri=0))
make_chain(
    'ok-different-types-ips',
    "A chain containing a large number of name constraints and names,\n"
    "but of different types, thus not triggering the limit.",
    excluded=dict(num_dns=1025, num_ip=0, num_dirnames=1025, num_uri=1025),
    permitted=dict(num_dns=1025, num_ip=0, num_dirnames=1025, num_uri=1025),
    sans=dict(num_dns=0, num_ip=1025, num_dirnames=0, num_uri=0))
make_chain(
    'ok-different-types-dirnames',
    "A chain containing a large number of name constraints and names,\n"
    "but of different types, thus not triggering the limit.",
    excluded=dict(num_dns=1025, num_ip=1025, num_dirnames=0, num_uri=1025),
    permitted=dict(num_dns=1025, num_ip=1025, num_dirnames=0, num_uri=1025),
    sans=dict(num_dns=0, num_ip=0, num_dirnames=1025, num_uri=0))
| nwjs/chromium.src | net/data/verify_certificate_chain_unittest/many-names/generate-chains.py | Python | bsd-3-clause | 7,944 |
import json
import pytest
import responses
from twitch.client import TwitchClient
from twitch.constants import BASE_URL
from twitch.exceptions import TwitchAttributeException
from twitch.resources import Channel, Community, Follow, Subscription, Team, User, Video
# Minimal API response fixtures: each carries only the fields the
# assertions below inspect.
example_user = {
    "_id": "44322889",
    "name": "dallas",
}
example_channel = {
    "_id": 44322889,
    "name": "dallas",
}
# A follow record embeds a user fixture.
example_follower = {
    "created_at": "2016-09-16T20:37:39Z",
    "notifications": False,
    "user": example_user,
}
example_team = {
    "_id": 10,
    "name": "staff",
}
# A subscription record embeds a user fixture.
example_subscription = {
    "_id": "67123294ed8305ce3a8ef09649d2237c5a300590",
    "created_at": "2014-05-19T23:38:53Z",
    "user": example_user,
}
example_video = {
    "_id": "v106400740",
    "description": "Protect your chat with AutoMod!",
    "fps": {"1080p": 23.9767661758746},
}
example_community = {
    "_id": "e9f17055-810f-4736-ba40-fba4ac541caa",
    "name": "DallasTesterCommunity",
}
@responses.activate
def test_get():
    """GET /channel returns the authenticated user's channel."""
    responses.add(
        responses.GET,
        "{}channel".format(BASE_URL),
        body=json.dumps(example_channel),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    channel = client.channels.get()
    assert len(responses.calls) == 1
    assert isinstance(channel, Channel)
    assert channel.id == example_channel["_id"]
    assert channel.name == example_channel["name"]
@responses.activate
def test_get_by_id():
    """GET /channels/<id> returns the requested channel."""
    channel_id = example_channel["_id"]
    responses.add(
        responses.GET,
        "{}channels/{}".format(BASE_URL, channel_id),
        body=json.dumps(example_channel),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    channel = client.channels.get_by_id(channel_id)
    assert len(responses.calls) == 1
    assert isinstance(channel, Channel)
    assert channel.id == channel_id
    assert channel.name == example_channel["name"]
@responses.activate
def test_update():
    """PUT /channels/<id> wraps the updated fields under a 'channel' key."""
    channel_id = example_channel["_id"]
    responses.add(
        responses.PUT,
        "{}channels/{}".format(BASE_URL, channel_id),
        body=json.dumps(example_channel),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    status = "Spongebob Squarepants"
    channel = client.channels.update(channel_id, status=status)
    assert len(responses.calls) == 1
    # the request body must nest the fields under a 'channel' key
    expected_body = json.dumps({"channel": {"status": status}}).encode("utf-8")
    assert responses.calls[0].request.body == expected_body
    assert isinstance(channel, Channel)
    assert channel.id == channel_id
    assert channel.name == example_channel["name"]
@responses.activate
def test_get_editors():
    """GET /channels/<id>/editors returns the channel's editors as Users."""
    channel_id = example_channel["_id"]
    response = {"users": [example_user]}
    responses.add(
        responses.GET,
        "{}channels/{}/editors".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    users = client.channels.get_editors(channel_id)
    assert len(responses.calls) == 1
    assert len(users) == 1
    user = users[0]
    assert isinstance(user, User)
    assert user.id == example_user["_id"]
    assert user.name == example_user["name"]
@responses.activate
def test_get_followers():
    """GET /channels/<id>/follows returns Follow objects embedding Users."""
    channel_id = example_channel["_id"]
    response = {
        "_cursor": "1481675542963907000",
        "_total": 41,
        "follows": [example_follower],
    }
    responses.add(
        responses.GET,
        "{}channels/{}/follows".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    followers = client.channels.get_followers(channel_id)
    assert len(responses.calls) == 1
    assert len(followers) == 1
    follow = followers[0]
    assert isinstance(follow, Follow)
    assert follow.notifications == example_follower["notifications"]
    assert isinstance(follow.user, User)
    assert follow.user.id == example_user["_id"]
    assert follow.user.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101), ("direction", "abcd")])
def test_get_followers_raises_if_wrong_params_are_passed_in(param, value):
    """Out-of-range limit or unknown direction raises before any request."""
    client = TwitchClient("client id")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.channels.get_followers("1234", **kwargs)
@responses.activate
def test_get_teams():
    """GET /channels/<id>/teams returns the channel's Team objects."""
    channel_id = example_channel["_id"]
    response = {"teams": [example_team]}
    responses.add(
        responses.GET,
        "{}channels/{}/teams".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    teams = client.channels.get_teams(channel_id)
    assert len(responses.calls) == 1
    assert len(teams) == 1
    team = teams[0]
    assert isinstance(team, Team)
    assert team.id == example_team["_id"]
    assert team.name == example_team["name"]
@responses.activate
def test_get_subscribers():
    """GET /channels/<id>/subscriptions returns Subscriptions with Users."""
    channel_id = example_channel["_id"]
    response = {"_total": 1, "subscriptions": [example_subscription]}
    responses.add(
        responses.GET,
        "{}channels/{}/subscriptions".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    subscribers = client.channels.get_subscribers(channel_id)
    assert len(responses.calls) == 1
    assert len(subscribers) == 1
    subscribe = subscribers[0]
    assert isinstance(subscribe, Subscription)
    assert subscribe.id == example_subscription["_id"]
    assert isinstance(subscribe.user, User)
    assert subscribe.user.id == example_user["_id"]
    assert subscribe.user.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101), ("direction", "abcd")])
def test_get_subscribers_raises_if_wrong_params_are_passed_in(param, value):
    """Out-of-range limit or unknown direction raises before any request."""
    client = TwitchClient("client id", "oauth token")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.channels.get_subscribers("1234", **kwargs)
@responses.activate
def test_check_subscription_by_user():
    """GET /channels/<id>/subscriptions/<uid> returns one Subscription."""
    channel_id = example_channel["_id"]
    user_id = example_user["_id"]
    responses.add(
        responses.GET,
        "{}channels/{}/subscriptions/{}".format(BASE_URL, channel_id, user_id),
        body=json.dumps(example_subscription),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    subscribe = client.channels.check_subscription_by_user(channel_id, user_id)
    assert len(responses.calls) == 1
    assert isinstance(subscribe, Subscription)
    assert subscribe.id == example_subscription["_id"]
    assert isinstance(subscribe.user, User)
    assert subscribe.user.id == example_user["_id"]
    assert subscribe.user.name == example_user["name"]
@responses.activate
def test_get_videos():
    """GET /channels/<id>/videos returns the channel's Video objects."""
    channel_id = example_channel["_id"]
    response = {"videos": [example_video]}
    responses.add(
        responses.GET,
        "{}channels/{}/videos".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    videos = client.channels.get_videos(channel_id)
    assert len(responses.calls) == 1
    assert len(videos) == 1
    # the original asserted isinstance(videos[0], Video) twice; once suffices
    video = videos[0]
    assert isinstance(video, Video)
    assert video.id == example_video["_id"]
    assert video.description == example_video["description"]
    assert video.fps["1080p"] == example_video["fps"]["1080p"]
@responses.activate
@pytest.mark.parametrize(
    "param,value", [("limit", 101), ("broadcast_type", "abcd"), ("sort", "abcd")]
)
def test_get_videos_raises_if_wrong_params_are_passed_in(param, value):
    """Invalid limit/broadcast_type/sort raises before any request."""
    client = TwitchClient("client id", "oauth token")
    kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        client.channels.get_videos("1234", **kwargs)
@responses.activate
def test_start_commercial():
    """POST /channels/<id>/commercial returns the raw commercial dict."""
    channel_id = example_channel["_id"]
    response = {"duration": 30, "message": "", "retryafter": 480}
    responses.add(
        responses.POST,
        "{}channels/{}/commercial".format(BASE_URL, channel_id),
        body=json.dumps(response),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    commercial = client.channels.start_commercial(channel_id)
    assert len(responses.calls) == 1
    assert isinstance(commercial, dict)
    assert commercial["duration"] == response["duration"]
@responses.activate
def test_reset_stream_key():
    """DELETE /channels/<id>/stream_key returns the refreshed Channel."""
    channel_id = example_channel["_id"]
    responses.add(
        responses.DELETE,
        "{}channels/{}/stream_key".format(BASE_URL, channel_id),
        body=json.dumps(example_channel),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id", "oauth token")
    channel = client.channels.reset_stream_key(channel_id)
    assert len(responses.calls) == 1
    assert isinstance(channel, Channel)
    assert channel.id == example_channel["_id"]
    assert channel.name == example_channel["name"]
@responses.activate
def test_get_community():
    """GET /channels/<id>/community returns the channel's Community."""
    channel_id = example_channel["_id"]
    responses.add(
        responses.GET,
        "{}channels/{}/community".format(BASE_URL, channel_id),
        body=json.dumps(example_community),
        status=200,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    community = client.channels.get_community(channel_id)
    assert len(responses.calls) == 1
    assert isinstance(community, Community)
    assert community.id == example_community["_id"]
    assert community.name == example_community["name"]
@responses.activate
def test_set_community():
    """PUT /channels/<id>/community/<cid> succeeds with 204 and no body."""
    channel_id = example_channel["_id"]
    community_id = example_community["_id"]
    responses.add(
        responses.PUT,
        "{}channels/{}/community/{}".format(BASE_URL, channel_id, community_id),
        status=204,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    client.channels.set_community(channel_id, community_id)
    assert len(responses.calls) == 1
@responses.activate
def test_delete_from_community():
    """DELETE /channels/<id>/community succeeds with a single request."""
    channel_id = example_channel["_id"]
    responses.add(
        responses.DELETE,
        "{}channels/{}/community".format(BASE_URL, channel_id),
        body=json.dumps(example_community),
        status=204,
        content_type="application/json",
    )
    client = TwitchClient("client id")
    client.channels.delete_from_community(channel_id)
    assert len(responses.calls) == 1
| tsifrer/python-twitch-client | tests/api/test_channels.py | Python | mit | 11,170 |
#!/usr/bin/env python
# coding: utf-8
import datetime
import locale
import os
import sys
import re
import random
import struct
import traceback
import argparse
import subprocess as sp
import unicodedata
from os.path import dirname, join
from doge import wow
ROOT = join(dirname(__file__), 'static')
DEFAULT_DOGE = 'doge.txt'
class Doge(object):
def __init__(self, tty, ns):
self.tty = tty
self.ns = ns
self.doge_path = join(ROOT, ns.doge_path or DEFAULT_DOGE)
if ns.frequency:
# such frequency based
self.words = \
wow.FrequencyBasedDogeDeque(*wow.WORD_LIST, step=ns.step)
else:
self.words = wow.DogeDeque(*wow.WORD_LIST)
    def setup(self):
        """Build the screen buffer: blank filler lines, then Shibe art at the
        bottom, then overlay doge text. Exits if the terminal is too narrow.
        """
        # Setup seasonal data
        self.setup_seasonal()
        if self.tty.pretty:
            # stdout is a tty, load Shibe and calculate how wide he is
            doge = self.load_doge()
            # 15 columns of margin are reserved for the text overlay;
            # clean_len is presumably an ANSI/width-aware length helper
            # defined elsewhere in this module -- confirm
            max_doge = max(map(clean_len, doge)) + 15
        else:
            # stdout is being piped and we should not load Shibe
            doge = []
            max_doge = 15
        if self.tty.width < max_doge:
            # Shibe won't fit, so abort.
            sys.stderr.write('wow, such small terminal\n')
            sys.stderr.write('no doge under {0} column\n'.format(max_doge))
            sys.exit(1)
        # Check for prompt height so that we can fill the screen minus how high
        # the prompt will be when done.
        prompt = os.environ.get('PS1', '').split('\n')
        line_count = len(prompt) + 1
        # Create a list filled with empty lines and Shibe at the bottom.
        fill = range(self.tty.height - len(doge) - line_count)
        self.lines = ['\n' for x in fill]
        self.lines += doge
        # Try to fetch data fed thru stdin
        had_stdin = self.get_stdin_data()
        # Get some system data, but only if there was nothing in stdin
        if not had_stdin:
            self.get_real_data()
        # Apply the text around Shibe
        self.apply_text()
def setup_seasonal(self):
"""
Check if there's some seasonal holiday going on, setup appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence.
"""
# If we've specified a season, just run that one
if self.ns.season:
return self.load_season(self.ns.season)
# If we've specified another doge or no doge at all, it does not make
# sense to use seasons.
if self.ns.doge_path is not None and not self.ns.no_shibe:
return
now = datetime.datetime.now()
for season, data in wow.SEASONS.items():
start, end = data['dates']
start_dt = datetime.datetime(now.year, start[0], start[1])
# Be sane if the holiday season spans over New Year's day.
end_dt = datetime.datetime(
now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
if start_dt <= now <= end_dt:
# Wow, much holiday!
return self.load_season(season)
def load_season(self, season_key):
if season_key == 'none':
return
season = wow.SEASONS[season_key]
self.doge_path = join(ROOT, season['pic'])
self.words.extend(season['words'])
def apply_text(self):
"""
Apply text around doge
"""
# Calculate a random sampling of lines that are to have text applied
# onto them. Return value is a sorted list of line index integers.
linelen = len(self.lines)
affected = sorted(random.sample(range(linelen), int(linelen / 3.5)))
for i, target in enumerate(affected, start=1):
line = self.lines[target]
line = re.sub('\n', ' ', line)
word = self.words.get()
# If first or last line, or a random selection, use standalone wow.
if i == 1 or i == len(affected) or random.choice(range(20)) == 0:
word = 'wow'
# Generate a new DogeMessage, possibly based on a word.
self.lines[target] = DogeMessage(self, line, word).generate()
def load_doge(self):
"""
Return pretty ASCII Shibe.
wow
"""
if self.ns.no_shibe:
return ['']
with open(self.doge_path) as f:
if sys.version_info < (3, 0):
if locale.getpreferredencoding() == 'UTF-8':
doge_lines = [l.decode('utf-8') for l in f.xreadlines()]
else:
# encode to printable characters, leaving a space in place
# of untranslatable characters, resulting in a slightly
# blockier doge on non-UTF8 terminals
doge_lines = [
l.decode('utf-8')
.encode(locale.getpreferredencoding(), 'replace')
.replace('?', ' ')
for l in f.xreadlines()
]
else:
doge_lines = [l for l in f.readlines()]
return doge_lines
def get_real_data(self):
"""
Grab actual data from the system
"""
ret = []
username = os.environ.get('USER')
if username:
ret.append(username)
editor = os.environ.get('EDITOR')
if editor:
editor = editor.split('/')[-1]
ret.append(editor)
# OS, hostname and... architechture (because lel)
if hasattr(os, 'uname'):
uname = os.uname()
ret.append(uname[0])
ret.append(uname[1])
ret.append(uname[4])
# Grab actual files from $HOME.
files = os.listdir(os.environ.get('HOME'))
if files:
ret.append(random.choice(files))
# Grab some processes
ret += self.get_processes()[:2]
# Prepare the returned data. First, lowercase it.
# If there is unicode data being returned from any of the above
# Python 2 needs to decode the UTF bytes to not crash. See issue #45.
func = str.lower
if sys.version_info < (3,):
func = lambda x: str.lower(x).decode('utf-8')
self.words.extend(map(func, ret))
def filter_words(self, words, stopwords, min_length):
return [word for word in words if
len(word) >= min_length and word not in stopwords]
def get_stdin_data(self):
"""
Get words from stdin.
"""
if self.tty.in_is_tty:
# No pipez found
return False
if sys.version_info < (3, 0):
stdin_lines = (l.decode('utf-8') for l in sys.stdin.xreadlines())
else:
stdin_lines = (l for l in sys.stdin.readlines())
rx_word = re.compile("\w+", re.UNICODE)
# If we have stdin data, we should remove everything else!
self.words.clear()
word_list = [match.group(0)
for line in stdin_lines
for match in rx_word.finditer(line.lower())]
if self.ns.filter_stopwords:
word_list = self.filter_words(
word_list, stopwords=wow.STOPWORDS,
min_length=self.ns.min_length)
self.words.extend(word_list)
return True
def get_processes(self):
"""
Grab a shuffled list of all currently running process names
"""
procs = set()
try:
# POSIX ps, so it should work in most environments where doge would
p = sp.Popen(['ps', '-A', '-o', 'comm='], stdout=sp.PIPE)
output, error = p.communicate()
if sys.version_info > (3, 0):
output = output.decode('utf-8')
for comm in output.split('\n'):
name = comm.split('/')[-1]
# Filter short and weird ones
if name and len(name) >= 2 and ':' not in name:
procs.add(name)
finally:
# Either it executed properly or no ps was found.
proc_list = list(procs)
random.shuffle(proc_list)
return proc_list
def print_doge(self):
for line in self.lines:
if sys.version_info < (3, 0):
line = line.encode('utf8')
sys.stdout.write(line)
sys.stdout.flush()
class DogeMessage(object):
    """
    A randomly placed and randomly colored message
    """
    def __init__(self, doge, occupied, word):
        # doge: owning Doge instance (only used to reach its tty).
        # occupied: text already on this row (a Shibe slice or spaces).
        # word: the word to decorate and place.
        self.doge = doge
        self.tty = doge.tty
        self.occupied = occupied
        self.word = word
    def generate(self):
        """Return the finished output row: occupied text + spaced, colored message."""
        if self.word == 'wow':
            # Standalone wow. Don't apply any prefixes or suffixes.
            msg = self.word
        else:
            # Add a prefix.
            msg = u'{0} {1}'.format(wow.PREFIXES.get(), self.word)
            # Seldomly add a suffix as well.
            if random.choice(range(15)) == 0:
                msg += u' {0}'.format(wow.SUFFIXES.get())
        # Calculate the maximum possible spacer
        interval = self.tty.width - onscreen_len(msg)
        interval -= clean_len(self.occupied)
        if interval < 1:
            # The interval is too low, so the message can not be shown without
            # spilling over to the subsequent line, borking the setup.
            # Return the doge slice that was in this row if there was one,
            # and a line break, effectively disabling the row.
            return self.occupied + "\n"
        # Apply spacing
        msg = u'{0}{1}'.format(' ' * random.choice(range(interval)), msg)
        if self.tty.pretty:
            # Apply pretty ANSI color coding.
            msg = u'\x1b[1m\x1b[38;5;{0}m{1}\x1b[39m\x1b[0m'.format(
                wow.COLORS.get(), msg
            )
        # Line ends are pretty cool guys, add one of those.
        return u'{0}{1}\n'.format(self.occupied, msg)
class TTYHandler(object):
    """Detect terminal properties: size, tty-ness of stdin/stdout, colors."""
    def setup(self):
        """Populate height, width, in_is_tty, out_is_tty and pretty."""
        # Terminal size is reported as (rows, columns).
        self.height, self.width = self.get_tty_size()
        self.in_is_tty = sys.stdin.isatty()
        self.out_is_tty = sys.stdout.isatty()
        # Only colorize when stdout is a real terminal...
        self.pretty = self.out_is_tty
        if sys.platform == 'win32' and os.getenv('TERM') == 'xterm':
            # ...or when running under an xterm-ish emulator on Windows
            # (e.g. mintty), where isatty() may be False but ANSI works.
            self.pretty = True
    def _tty_size_windows(self, handle):
        """Return (rows, columns) of the Windows console, or None on failure.

        handle: a GetStdHandle constant (-10 stdin, -11 stdout, -12 stderr).
        """
        try:
            from ctypes import windll, create_string_buffer
            h = windll.kernel32.GetStdHandle(handle)
            buf = create_string_buffer(22)
            if windll.kernel32.GetConsoleScreenBufferInfo(h, buf):
                left, top, right, bottom = struct.unpack('4H', buf.raw[10:18])
                # BUG FIX: this previously returned (columns, rows), the
                # opposite order of _tty_size_linux() and of the (25, 80)
                # fallback; setup() unpacks (height, width), so the Windows
                # values ended up swapped.
                return bottom - top + 1, right - left + 1
        except Exception:
            # Best-effort probe: any failure means "size unknown".
            pass
    def _tty_size_linux(self, fd):
        """Return (rows, columns) via the TIOCGWINSZ ioctl, or None."""
        try:
            import fcntl
            import termios
            return struct.unpack(
                'hh',
                fcntl.ioctl(
                    fd, termios.TIOCGWINSZ, struct.pack('hh', 0, 0)
                )
            )
        except Exception:
            return
    def get_tty_size(self):
        """
        Get the current terminal size without using a subprocess
        http://stackoverflow.com/questions/566746

        Tries stdin, stdout and stderr in turn and falls back to the
        classic 25x80 if none of them is attached to a terminal.
        Returns (rows, columns).
        """
        if sys.platform == 'win32':
            # stdin, stdout, stderr = -10, -11, -12
            ret = self._tty_size_windows(-10)
            ret = ret or self._tty_size_windows(-11)
            ret = ret or self._tty_size_windows(-12)
        else:
            # stdin, stdout, stderr = 0, 1, 2
            ret = self._tty_size_linux(0)
            ret = ret or self._tty_size_linux(1)
            ret = ret or self._tty_size_linux(2)
        return ret or (25, 80)
def clean_len(s):
    """Return the visible length of *s* with ANSI color escapes removed."""
    return len(re.sub(r'\x1b\[[0-9;]*m', '', s))
def onscreen_len(s):
    """Return the on-screen width of *s*: East Asian wide characters
    occupy two terminal cells, everything else one."""
    if sys.version_info < (3, 0) and isinstance(s, str):
        # Python 2 byte string: assume one cell per byte.
        return len(s)
    return sum(
        2 if unicodedata.east_asian_width(ch) == 'W' else 1
        for ch in s
    )
def setup_arguments():
    """Build and return the argument parser for the doge CLI."""
    parser = argparse.ArgumentParser('doge')
    # (flags, options) pairs, in the order they should appear in --help.
    specs = (
        (('--shibe',),
         {'help': 'wow shibe file', 'dest': 'doge_path',
          'choices': os.listdir(ROOT)}),
        (('--no-shibe',),
         {'action': "store_true", 'help': "wow no doge show :("}),
        (('--season',),
         {'help': 'wow shibe season congrate',
          'choices': sorted(wow.SEASONS.keys()) + ['none']}),
        (('-f', '--frequency'),
         {'help': 'such frequency based', 'action': 'store_true'}),
        # step between ranks in FrequencyBasedDogeDeque
        (('--step',),
         {'help': 'beautiful step', 'type': int, 'default': 2}),
        # minimum length of a word
        (('--min_length',),
         {'help': 'pretty minimum', 'type': int, 'default': 1}),
        (('-s', '--filter_stopwords'),
         {'help': 'many words lol', 'action': 'store_true'}),
        (('-mh', '--max-height'),
         {'help': 'such max height', 'type': int}),
        (('-mw', '--max-width'),
         {'help': 'such max width', 'type': int}),
    )
    for flags, options in specs:
        parser.add_argument(*flags, **options)
    return parser
def main():
    """CLI entry point for doge.

    Returns None on success, or a small integer exit status (1-3) when a
    unicode/locale problem prevented printing Shibe.
    """
    tty = TTYHandler()
    tty.setup()
    parser = setup_arguments()
    ns = parser.parse_args()
    # Let the user cap the detected terminal size.
    if ns.max_height:
        tty.height = ns.max_height
    if ns.max_width:
        tty.width = ns.max_width
    try:
        shibe = Doge(tty, ns)
        shibe.setup()
        shibe.print_doge()
    except (UnicodeEncodeError, UnicodeDecodeError):
        # Some kind of unicode error happened. This is usually because the
        # users system does not have a proper locale set up. Try to be helpful
        # and figure out what could have gone wrong.
        traceback.print_exc()
        print()
        lang = os.environ.get('LANG')
        if not lang:
            print('wow error: broken $LANG, so fail')
            return 3
        if not lang.endswith('UTF-8'):
            print(
                "wow error: locale '{0}' is not UTF-8. ".format(lang) +
                "doge needs UTF-8 to print Shibe. Please set your system to "
                "use a UTF-8 locale."
            )
            return 2
        print(
            "wow error: Unknown unicode error. Please report at "
            "https://github.com/thiderman/doge/issues and include output from "
            "/usr/bin/locale"
        )
        return 1
# wow very main
if __name__ == "__main__":
    # Propagate main()'s return value as the exit status (None -> 0).
    sys.exit(main())
| thiderman/doge | doge/core.py | Python | mit | 15,188 |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os.path
import sys
def generate_include_tag(resource_path):
    """Return the HTML tag (with trailing newline) that includes the given
    .js or .css resource by its base file name.

    Raises ValueError for any other extension.
    """
    (dir_name, file_name) = os.path.split(resource_path)
    if (file_name.endswith('.js')):
        return ' <script type="text/javascript" src="%s"></script>\n' % file_name
    elif (file_name.endswith('.css')):
        return ' <link rel="stylesheet" type="text/css" href="%s">\n' % file_name
    # Previously this branch was `assert resource_path`, which is stripped
    # under `python -O` and then silently returned None (crashing later in
    # devtools_file.write). Fail loudly and immediately instead.
    raise ValueError('Unsupported resource type: %s' % resource_path)
def write_devtools_html(inspector_file, devtools_file, debug):
    """Copy inspector_file into devtools_file line by line.

    In non-debug mode, individual <script>/<link> lines are dropped and
    replaced with single concatenated inspector.js/inspector.css includes
    just before </head>. buildSystemOnly.js is injected right after <head>
    in both modes.
    """
    for line in inspector_file:
        if not debug and ('<script ' in line or '<link ' in line):
            # Release build: per-file includes are replaced below.
            continue
        if not debug and '</head>' in line:
            devtools_file.write(generate_include_tag("inspector.css"))
            devtools_file.write(generate_include_tag("inspector.js"))
        devtools_file.write(line)
        if '<head>' in line:
            devtools_file.write(generate_include_tag("buildSystemOnly.js"))
def main(argv):
    """Generate devtools.html from inspector.html.

    argv: [script_name, inspector_html_path, devtools_html_path, debug_flag]
    where debug_flag '0' selects the release (concatenated) layout.
    Returns 1 on bad usage, None on success.
    """
    if len(argv) < 4:
        print('usage: %s inspector_html devtools_html debug' % argv[0])
        return 1
    # The first argument is ignored. We put 'webkit.gyp' in the inputs list
    # for this script, so every time the list of script gets changed, our html
    # file is rebuilt.
    inspector_html_name = argv[1]
    devtools_html_name = argv[2]
    debug = argv[3] != '0'
    # Context managers close both files even if the rewrite raises
    # (previously they were closed manually with no try/finally).
    with open(inspector_html_name, 'r') as inspector_html:
        with open(devtools_html_name, 'w') as devtools_html:
            write_devtools_html(inspector_html, devtools_html, debug)
    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(devtools_html_name)
        os.utime(output_dir_name, None)
if __name__ == '__main__':
    # Pass raw argv through so main() can validate usage itself.
    sys.exit(main(sys.argv))
| espadrine/opera | chromium/src/third_party/WebKit/Source/devtools/scripts/generate_devtools_html.py | Python | bsd-3-clause | 3,443 |
"""
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20130727
version: 1.2.0
Compatible with Python versions 2.4-3.x
"""
__version__ = "1.2.0"
import array
import os
from struct import pack, unpack, calcsize, error
import sys
import tempfile
import time
#
# Constants for shape types
# (numeric codes as defined by the ESRI Shapefile technical description)
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
# True when running under Python 3; drives the bytes/str helpers below.
PYTHON3 = sys.version_info[0] == 3
if PYTHON3:
    # Python 3 has no xrange; alias it so iterRecords() works unchanged.
    xrange = range
def b(v):
    """Coerce *v* to a byte string on Python 3; pass through on Python 2."""
    if not PYTHON3:
        # For python 2 assume str passed in and return str.
        return v
    if isinstance(v, str):
        # For python 3 encode str to bytes.
        return v.encode('utf-8')
    if isinstance(v, bytes):
        # Already bytes.
        return v
    # Error.
    raise Exception('Unknown input type')
def u(v):
    """Coerce *v* to a text string on Python 3; pass through on Python 2."""
    if not PYTHON3:
        # For python 2 assume str passed in and return str.
        return v
    if isinstance(v, bytes):
        # For python 3 decode bytes to str.
        return v.decode('utf-8')
    if isinstance(v, str):
        # Already str.
        return v
    # Error.
    raise Exception('Unknown input type')
def is_string(v):
    """Return True if *v* is a string type for the running Python version."""
    # basestring only exists (and is only evaluated) on Python 2.
    text_type = str if PYTHON3 else basestring
    return isinstance(v, text_type)
class _Array(array.array):
    """array.array subclass whose repr shows a plain Python list.

    Used to unpack the different shapefile header parts."""
    def __repr__(self):
        return repr(self.tolist())
def signed_area(coords):
    """Return the signed area enclosed by a ring using the linear time
    algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
    indicates a counter-clockwise oriented ring.
    """
    xs, ys = (list(axis) for axis in zip(*coords))
    # Wrap around by repeating the second vertex at the end.
    xs.append(xs[1])
    ys.append(ys[1])
    total = 0.0
    for i in range(1, len(coords)):
        total += xs[i] * (ys[i + 1] - ys[i - 1])
    return total / 2.0
class _Shape:
    def __init__(self, shapeType=None):
        """Stores the geometry of the different shape types
        specified in the Shapefile spec. Shape types are
        usually point, polyline, or polygons. Every shape type
        except the "Null" type contains points at some level for
        example verticies in a polygon. If a shape type has
        multiple shapes containing points within a single
        geometry record then those shapes are called parts. Parts
        are designated by their starting index in geometry record's
        list of shapes."""
        self.shapeType = shapeType
        self.points = []
    @property
    def __geo_interface__(self):
        # Translate this shape into a GeoJSON-style mapping per the Python
        # __geo_interface__ protocol. NOTE(review): shape types without a
        # branch below (e.g. MULTIPATCH) fall through and return None.
        if self.shapeType in [POINT, POINTM, POINTZ]:
            return {
                'type': 'Point',
                'coordinates': tuple(self.points[0])
            }
        elif self.shapeType in [MULTIPOINT, MULTIPOINTM, MULTIPOINTZ]:
            return {
                'type': 'MultiPoint',
                'coordinates': tuple([tuple(p) for p in self.points])
            }
        elif self.shapeType in [POLYLINE, POLYLINEM, POLYLINEZ]:
            if len(self.parts) == 1:
                return {
                    'type': 'LineString',
                    'coordinates': tuple([tuple(p) for p in self.points])
                }
            else:
                # Slice self.points into per-part tuples. Each part value is
                # the starting point index of that part; the for/else clause
                # appends the final part (from the last start to the end).
                ps = None
                coordinates = []
                for part in self.parts:
                    if ps == None:
                        ps = part
                        continue
                    else:
                        coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
                        ps = part
                else:
                    coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
                return {
                    'type': 'MultiLineString',
                    'coordinates': tuple(coordinates)
                }
        elif self.shapeType in [POLYGON, POLYGONM, POLYGONZ]:
            if len(self.parts) == 1:
                return {
                    'type': 'Polygon',
                    'coordinates': (tuple([tuple(p) for p in self.points]),)
                }
            else:
                # Same part-slicing scheme as MultiLineString above.
                ps = None
                coordinates = []
                for part in self.parts:
                    if ps == None:
                        ps = part
                        continue
                    else:
                        coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
                        ps = part
                else:
                    coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
                # Group rings into polygons: a clockwise ring (negative
                # signed area) starts a new polygon; counter-clockwise
                # rings are treated as holes of the current polygon.
                polys = []
                poly = [coordinates[0]]
                for coord in coordinates[1:]:
                    if signed_area(coord) < 0:
                        polys.append(poly)
                        poly = [coord]
                    else:
                        poly.append(coord)
                polys.append(poly)
                if len(polys) == 1:
                    return {
                        'type': 'Polygon',
                        'coordinates': tuple(polys[0])
                    }
                elif len(polys) > 1:
                    return {
                        'type': 'MultiPolygon',
                        'coordinates': polys
                    }
class _ShapeRecord:
    """Pairs one geometry (shape) with its dbf attribute record."""
    def __init__(self, shape=None, record=None):
        self.shape, self.record = shape, record
class ShapefileException(Exception):
    """Raised for shapefile-specific problems (missing files, bad headers)."""
class Reader:
    """Reads the three files of a shapefile as a unit or
    separately. If one of the three files (.shp, .shx,
    .dbf) is missing no exception is thrown until you try
    to call a method that depends on that particular file.
    The .shx index file is used if available for efficiency
    but is not required to read the geometry from the .shp
    file. The "shapefile" argument in the constructor is the
    name of the file you want to open.
    You can instantiate a Reader without specifying a shapefile
    and then specify one later with the load() method.
    Only the shapefile headers are read upon loading. Content
    within each file is only accessed when required and as
    efficiently as possible. Shapefiles are usually not large
    but they can be.
    """
    def __init__(self, *args, **kwargs):
        """Accept either a positional path/basename or explicit shp/shx/dbf
        file-like objects as keyword arguments."""
        self.shp = None
        self.shx = None
        self.dbf = None
        self.shapeName = "Not specified"
        self._offsets = []
        self.shpLength = None
        self.numRecords = None
        self.fields = []
        self.__dbfHdrLength = 0
        # See if a shapefile name was passed as an argument
        if len(args) > 0:
            if is_string(args[0]):
                self.load(args[0])
                return
        if "shp" in kwargs.keys():
            if hasattr(kwargs["shp"], "read"):
                self.shp = kwargs["shp"]
                if hasattr(self.shp, "seek"):
                    self.shp.seek(0)
        if "shx" in kwargs.keys():
            if hasattr(kwargs["shx"], "read"):
                self.shx = kwargs["shx"]
                if hasattr(self.shx, "seek"):
                    self.shx.seek(0)
        if "dbf" in kwargs.keys():
            if hasattr(kwargs["dbf"], "read"):
                self.dbf = kwargs["dbf"]
                if hasattr(self.dbf, "seek"):
                    self.dbf.seek(0)
        if self.shp or self.dbf:
            self.load()
        else:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
    def load(self, shapefile=None):
        """Opens a shapefile from a filename or file-like
        object. Normally this method would be called by the
        constructor with the file object or file name as an
        argument."""
        if shapefile:
            (shapeName, ext) = os.path.splitext(shapefile)
            self.shapeName = shapeName
            try:
                self.shp = open("%s.shp" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.shp" % shapeName)
            try:
                self.shx = open("%s.shx" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.shx" % shapeName)
            try:
                self.dbf = open("%s.dbf" % shapeName, "rb")
            except IOError:
                raise ShapefileException("Unable to open %s.dbf" % shapeName)
        # Only the headers are parsed eagerly; record content is lazy.
        if self.shp:
            self.__shpHeader()
        if self.dbf:
            self.__dbfHeader()
    def __getFileObj(self, f):
        """Checks to see if the requested shapefile file object is
        available. If not a ShapefileException is raised."""
        if not f:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
        if self.shp and self.shpLength is None:
            self.load()
        if self.dbf and len(self.fields) == 0:
            self.load()
        return f
    def __restrictIndex(self, i):
        """Provides list-like handling of a record index with a clearer
        error message if the index is out of bounds."""
        if self.numRecords:
            rmax = self.numRecords - 1
            if abs(i) > rmax:
                raise IndexError("Shape or Record index out of range.")
            # Normalize negative indices to their positive equivalents.
            if i < 0: i = range(self.numRecords)[i]
        return i
    def __shpHeader(self):
        """Reads the header information from a .shp or .shx file."""
        if not self.shp:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
        shp = self.shp
        # File length (16-bit word * 2 = bytes)
        shp.seek(24)
        self.shpLength = unpack(">i", shp.read(4))[0] * 2
        # Shape type
        shp.seek(32)
        self.shapeType= unpack("<i", shp.read(4))[0]
        # The shapefile's bounding box (lower left, upper right)
        self.bbox = _Array('d', unpack("<4d", shp.read(32)))
        # Elevation
        self.elevation = _Array('d', unpack("<2d", shp.read(16)))
        # Measure
        self.measure = _Array('d', unpack("<2d", shp.read(16)))
    def __shape(self):
        """Returns the header info and geometry for a single shape.

        Assumes the shp file position is at the start of a record header.
        """
        f = self.__getFileObj(self.shp)
        record = _Shape()
        nParts = nPoints = zmin = zmax = mmin = mmax = None
        # Record headers are big-endian; record content is little-endian.
        (recNum, recLength) = unpack(">2i", f.read(8))
        # Determine the start of the next record
        next = f.tell() + (2 * recLength)
        shapeType = unpack("<i", f.read(4))[0]
        record.shapeType = shapeType
        # For Null shapes create an empty points list for consistency
        if shapeType == 0:
            record.points = []
        # All shape types capable of having a bounding box
        elif shapeType in (3,5,8,13,15,18,23,25,28,31):
            record.bbox = _Array('d', unpack("<4d", f.read(32)))
        # Shape types with parts
        if shapeType in (3,5,13,15,23,25,31):
            nParts = unpack("<i", f.read(4))[0]
        # Shape types with points
        if shapeType in (3,5,8,13,15,23,25,31):
            nPoints = unpack("<i", f.read(4))[0]
        # Read parts
        if nParts:
            record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
            # Read part types for Multipatch - 31
            if shapeType == 31:
                record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
        # Read points - produces a list of [x,y] values
        if nPoints:
            record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
            # Read z extremes and values
            if shapeType in (13,15,18,31):
                (zmin, zmax) = unpack("<2d", f.read(16))
                record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
            # Read m extremes and values if header m values do not equal 0.0
            if shapeType in (13,15,18,23,25,28,31) and not 0.0 in self.measure:
                (mmin, mmax) = unpack("<2d", f.read(16))
                # Measure values less than -10e38 are nodata values according to the spec
                record.m = []
                for m in _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
                    if m > -10e38:
                        record.m.append(m)
                    else:
                        record.m.append(None)
        # Read a single point
        if shapeType in (1,11,21):
            record.points = [_Array('d', unpack("<2d", f.read(16)))]
        # Read a single Z value
        if shapeType == 11:
            record.z = unpack("<d", f.read(8))
        # Read a single M value
        if shapeType in (11,21):
            record.m = unpack("<d", f.read(8))
        # Seek to the end of this record as defined by the record header because
        # the shapefile spec doesn't require the actual content to meet the header
        # definition. Probably allowed for lazy feature deletion.
        f.seek(next)
        return record
    def __shapeIndex(self, i=None):
        """Returns the offset in a .shp file for a shape based on information
        in the .shx index file."""
        shx = self.shx
        if not shx:
            return None
        if not self._offsets:
            # File length (16-bit word * 2 = bytes) - header length
            shx.seek(24)
            shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
            numRecords = shxRecordLength // 8
            # Jump to the first record.
            shx.seek(100)
            for r in range(numRecords):
                # Offsets are 16-bit words just like the file length
                self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
                # Skip the 4-byte record-length field of each index entry.
                shx.seek(shx.tell() + 4)
        if not i == None:
            return self._offsets[i]
    def shape(self, i=0):
        """Returns a shape object for a shape in the the geometry
        record file."""
        shp = self.__getFileObj(self.shp)
        i = self.__restrictIndex(i)
        offset = self.__shapeIndex(i)
        if not offset:
            # Shx index not available so iterate the full list.
            for j,k in enumerate(self.iterShapes()):
                if j == i:
                    return k
        shp.seek(offset)
        return self.__shape()
    def shapes(self):
        """Returns all shapes in a shapefile."""
        shp = self.__getFileObj(self.shp)
        # Found shapefiles which report incorrect
        # shp file length in the header. Can't trust
        # that so we seek to the end of the file
        # and figure it out.
        shp.seek(0,2)
        self.shpLength = shp.tell()
        shp.seek(100)
        shapes = []
        while shp.tell() < self.shpLength:
            shapes.append(self.__shape())
        return shapes
    def iterShapes(self):
        """Serves up shapes in a shapefile as an iterator. Useful
        for handling large shapefiles."""
        shp = self.__getFileObj(self.shp)
        shp.seek(0,2)
        self.shpLength = shp.tell()
        shp.seek(100)
        while shp.tell() < self.shpLength:
            yield self.__shape()
    def __dbfHeaderLength(self):
        """Retrieves the header length of a dbf file header."""
        if not self.__dbfHdrLength:
            if not self.dbf:
                raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
            dbf = self.dbf
            # Also populates numRecords as a side effect.
            (self.numRecords, self.__dbfHdrLength) = \
                    unpack("<xxxxLH22x", dbf.read(32))
        return self.__dbfHdrLength
    def __dbfHeader(self):
        """Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
        if not self.dbf:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
        dbf = self.dbf
        headerLength = self.__dbfHeaderLength()
        numFields = (headerLength - 33) // 32
        for field in range(numFields):
            fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
            name = 0
            idx = 0
            # Field names are NUL-padded to 11 bytes; trim at the first NUL.
            if b("\x00") in fieldDesc[name]:
                idx = fieldDesc[name].index(b("\x00"))
            else:
                idx = len(fieldDesc[name]) - 1
            fieldDesc[name] = fieldDesc[name][:idx]
            fieldDesc[name] = u(fieldDesc[name])
            fieldDesc[name] = fieldDesc[name].lstrip()
            fieldDesc[1] = u(fieldDesc[1])
            self.fields.append(fieldDesc)
        terminator = dbf.read(1)
        assert terminator == b("\r")
        self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
    def __recordFmt(self):
        """Calculates the size of a .shp geometry record."""
        if not self.numRecords:
            self.__dbfHeader()
        fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
        fmtSize = calcsize(fmt)
        return (fmt, fmtSize)
    def __record(self):
        """Reads and returns a dbf record row as a list of values."""
        f = self.__getFileObj(self.dbf)
        recFmt = self.__recordFmt()
        recordContents = unpack(recFmt[0], f.read(recFmt[1]))
        if recordContents[0] != b(' '):
            # deleted record
            return None
        record = []
        # NOTE(review): field types were decoded to str in __dbfHeader
        # (fieldDesc[1] = u(...)), so on Python 3 the b('D') and b('L')
        # comparisons below never match and date/logical fields fall through
        # to the generic text branch — verify against upstream pyshp fixes.
        for (name, typ, size, deci), value in zip(self.fields,
                recordContents):
            if name == 'DeletionFlag':
                continue
            elif not value.strip():
                record.append(value)
                continue
            elif typ == "N":
                value = value.replace(b('\0'), b('')).strip()
                if value == b(''):
                    value = 0
                elif deci:
                    value = float(value)
                else:
                    value = int(value)
            elif typ == b('D'):
                try:
                    y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                    value = [y, m, d]
                except:
                    value = value.strip()
            elif typ == b('L'):
                value = (value in b('YyTt') and b('T')) or \
                        (value in b('NnFf') and b('F')) or b('?')
            else:
                value = u(value)
                value = value.strip()
            record.append(value)
        return record
    def record(self, i=0):
        """Returns a specific dbf record based on the supplied index."""
        f = self.__getFileObj(self.dbf)
        if not self.numRecords:
            self.__dbfHeader()
        i = self.__restrictIndex(i)
        recSize = self.__recordFmt()[1]
        f.seek(0)
        f.seek(self.__dbfHeaderLength() + (i * recSize))
        return self.__record()
    def records(self):
        """Returns all records in a dbf file."""
        if not self.numRecords:
            self.__dbfHeader()
        records = []
        f = self.__getFileObj(self.dbf)
        f.seek(self.__dbfHeaderLength())
        for i in range(self.numRecords):
            r = self.__record()
            # Deleted records come back as None and are skipped.
            if r:
                records.append(r)
        return records
    def iterRecords(self):
        """Serves up records in a dbf file as an iterator.
        Useful for large shapefiles or dbf files."""
        if not self.numRecords:
            self.__dbfHeader()
        f = self.__getFileObj(self.dbf)
        f.seek(self.__dbfHeaderLength())
        for i in xrange(self.numRecords):
            r = self.__record()
            if r:
                yield r
    def shapeRecord(self, i=0):
        """Returns a combination geometry and attribute record for the
        supplied record index."""
        i = self.__restrictIndex(i)
        return _ShapeRecord(shape=self.shape(i), record=self.record(i))
    def shapeRecords(self):
        """Returns a list of combination geometry/attribute records for
        all records in a shapefile."""
        # NOTE(review): this local is unused.
        shapeRecords = []
        return [_ShapeRecord(shape=rec[0], record=rec[1]) \
                for rec in zip(self.shapes(), self.records())]
class Writer:
"""Provides write support for ESRI Shapefiles."""
    def __init__(self, shapeType=None):
        """Create an empty writer, optionally fixing the shape type code."""
        self._shapes = []
        self.fields = []
        self.records = []
        self.shapeType = shapeType
        # Output targets (file paths or file-like objects), set by save*().
        self.shp = None
        self.shx = None
        self.dbf = None
        # Geometry record offsets and lengths for writing shx file.
        self._offsets = []
        self._lengths = []
        # Use deletion flags in dbf? Default is false (0).
        self.deletionFlag = 0
    def __getFileObj(self, f):
        """Safety handler to verify file-like objects"""
        if not f:
            raise ShapefileException("No file-like object available.")
        elif hasattr(f, "write"):
            return f
        else:
            # f is a path: create missing parent directories, then open the
            # target for binary writing.
            pth = os.path.split(f)[0]
            if pth and not os.path.exists(pth):
                os.makedirs(pth)
            return open(f, "wb")
    def __shpFileLength(self):
        """Calculates the file length of the shp file.

        Returns the length in 16-bit words, as required by the shp header.
        """
        # Start with header length
        size = 100
        # Calculate size of all shapes
        for s in self._shapes:
            # Add in record header and shape type fields
            size += 12
            # nParts and nPoints do not apply to all shapes
            #if self.shapeType not in (0,1):
            # nParts = len(s.parts)
            # nPoints = len(s.points)
            if hasattr(s,'parts'):
                nParts = len(s.parts)
            if hasattr(s,'points'):
                nPoints = len(s.points)
            # NOTE(review): nParts/nPoints stay unbound if a shape lacks the
            # corresponding attribute but a size branch below needs it; this
            # relies on every stored shape matching self.shapeType.
            # All shape types capable of having a bounding box
            if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
                size += 32
            # Shape types with parts
            if self.shapeType in (3,5,13,15,23,25,31):
                # Parts count
                size += 4
                # Parts index array
                size += nParts * 4
            # Shape types with points
            if self.shapeType in (3,5,8,13,15,23,25,31):
                # Points count
                size += 4
                # Points array
                size += 16 * nPoints
            # Calc size of part types for Multipatch (31)
            if self.shapeType == 31:
                size += nParts * 4
            # Calc z extremes and values
            if self.shapeType in (13,15,18,31):
                # z extremes
                size += 16
                # z array
                size += 8 * nPoints
            # Calc m extremes and values
            if self.shapeType in (23,25,31):
                # m extremes
                size += 16
                # m array
                size += 8 * nPoints
            # Calc a single point
            if self.shapeType in (1,11,21):
                size += 16
            # Calc a single Z value
            if self.shapeType == 11:
                size += 8
            # Calc a single M value
            if self.shapeType in (11,21):
                size += 8
        # Calculate size as 16-bit words
        size //= 2
        return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
    def bbox(self):
        """Returns the current bounding box for the shapefile which is
        the lower-left and upper-right corners. It does not contain the
        elevation or measure extremes."""
        # Delegates to the private helper over all stored shapes.
        return self.__bbox(self._shapes)
    def zbox(self):
        """Returns the current z extremes for the shapefile as [zmin, zmax]."""
        return self.__zbox(self._shapes)
    def mbox(self):
        """Returns the current m extremes for the shapefile as [mmin, mmax]."""
        return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
    """Writes the specified header type to the specified file-like object.
    Several of the shapefile formats are so similar that a single generic
    method to read or write them is warranted.

    Per the format strings used below, the file code and length are
    written big-endian (">"), while version, shape type, and the extent
    doubles are little-endian ("<").
    """
    f = self.__getFileObj(fileObj)
    f.seek(0)
    # File code, Unused bytes
    f.write(pack(">6i", 9994,0,0,0,0,0))
    # File length (Bytes / 2 = 16-bit words)
    if headerType == 'shp':
        f.write(pack(">i", self.__shpFileLength()))
    elif headerType == 'shx':
        # shx length: 100-byte header + one 8-byte index entry per shape.
        f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
    # Version, Shape type
    f.write(pack("<2i", 1000, self.shapeType))
    # The shapefile's bounding box (lower left, upper right)
    if self.shapeType != 0:
        try:
            f.write(pack("<4d", *self.bbox()))
        except error:
            raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
    else:
        # Null-shape files get a zeroed extent.
        f.write(pack("<4d", 0,0,0,0))
    # Elevation
    z = self.zbox()
    # Measure
    m = self.mbox()
    try:
        f.write(pack("<4d", z[0], z[1], m[0], m[1]))
    except error:
        raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
    """Writes the dbf header and field descriptors."""
    f = self.__getFileObj(self.dbf)
    f.seek(0)
    version = 3
    year, month, day = time.localtime()[:3]
    # dbf stores the year as an offset from 1900 in a single byte.
    year -= 1900
    # Remove deletion flag placeholder from fields. Filtering in place
    # (slice assignment keeps the same list object) fixes the old
    # remove-while-iterating loop, which skipped the element following
    # each removal.
    self.fields[:] = [field for field in self.fields
                      if not field[0].startswith("Deletion")]
    numRecs = len(self.records)
    numFields = len(self.fields)
    # 32 bytes per field descriptor + 32-byte header + 1 terminator byte.
    headerLength = numFields * 32 + 33
    # +1 for the per-record deletion-flag byte.
    recordLength = sum([int(field[2]) for field in self.fields]) + 1
    header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
                  headerLength, recordLength)
    f.write(header)
    # Field descriptors: 11-byte NUL-padded name, type code, size, decimals.
    for field in self.fields:
        name, fieldType, size, decimal = field
        name = b(name)
        name = name.replace(b(' '), b('_'))
        name = name.ljust(11).replace(b(' '), b('\x00'))
        fieldType = b(fieldType)
        size = int(size)
        fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
        f.write(fld)
    # Terminator
    f.write(b('\r'))
def __shpRecords(self):
    """Write the shp records.

    For each shape: write an 8-byte record header (big-endian record
    number + a length placeholder), then the little-endian geometry body,
    then seek back and patch the real content length. Offsets and lengths
    are accumulated for the companion shx index.
    """
    f = self.__getFileObj(self.shp)
    # Records begin immediately after the 100-byte main file header.
    f.seek(100)
    recNum = 1
    for s in self._shapes:
        self._offsets.append(f.tell())
        # Record number, Content length place holder (patched below once
        # the body has been written and its true length is known).
        f.write(pack(">2i", recNum, 0))
        # NOTE(review): recNum is bumped before the body is written, so
        # the error messages below report one higher than the record
        # header just emitted; preserved for compatibility.
        recNum += 1
        start = f.tell()
        # Shape Type -- non-Multipatch records are coerced to the
        # file-level shape type.
        if self.shapeType != 31:
            s.shapeType = self.shapeType
        f.write(pack("<i", s.shapeType))
        # All shape types capable of having a bounding box
        if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
            try:
                f.write(pack("<4d", *self.__bbox([s])))
            except error:
                # Typo fixed in this message: "Falied" -> "Failed".
                raise ShapefileException("Failed to write bounding box for record %s. Expected floats." % recNum)
        # Shape types with parts
        if s.shapeType in (3,5,13,15,23,25,31):
            # Number of parts
            f.write(pack("<i", len(s.parts)))
        # Shape types with multiple points per record
        if s.shapeType in (3,5,8,13,15,23,25,31):
            # Number of points
            f.write(pack("<i", len(s.points)))
        # Write part indexes
        if s.shapeType in (3,5,13,15,23,25,31):
            for p in s.parts:
                f.write(pack("<i", p))
        # Part types for Multipatch (31)
        if s.shapeType == 31:
            for pt in s.partTypes:
                f.write(pack("<i", pt))
        # Write points for multiple-point records
        if s.shapeType in (3,5,8,13,15,23,25,31):
            try:
                [f.write(pack("<2d", *p[:2])) for p in s.points]
            except error:
                raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
        # Write z extremes and values
        if s.shapeType in (13,15,18,31):
            try:
                f.write(pack("<2d", *self.__zbox([s])))
            except error:
                raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
            try:
                if hasattr(s,"z"):
                    # An explicit z array takes precedence over per-point z.
                    f.write(pack("<%sd" % len(s.z), *s.z))
                else:
                    [f.write(pack("<d", p[2])) for p in s.points]
            except error:
                raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
        # Write m extremes and values
        if s.shapeType in (13,15,18,23,25,28,31):
            try:
                if hasattr(s,"m"):
                    f.write(pack("<%sd" % len(s.m), *s.m))
                else:
                    f.write(pack("<2d", *self.__mbox([s])))
            except error:
                raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
            try:
                [f.write(pack("<d", p[3])) for p in s.points]
            except error:
                raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
        # Write a single point
        if s.shapeType in (1,11,21):
            try:
                f.write(pack("<2d", s.points[0][0], s.points[0][1]))
            except error:
                raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
        # Write a single Z value
        if s.shapeType == 11:
            if hasattr(s, "z"):
                try:
                    if not s.z:
                        s.z = (0,)
                    f.write(pack("<d", s.z[0]))
                except error:
                    raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
            else:
                try:
                    # Pad the point with a default z of 0 if it has none.
                    if len(s.points[0])<3:
                        s.points[0].append(0)
                    f.write(pack("<d", s.points[0][2]))
                except error:
                    raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
        # Write a single M value
        if s.shapeType in (11,21):
            if hasattr(s, "m"):
                try:
                    if not s.m:
                        s.m = (0,)
                    f.write(pack("<1d", s.m[0]))
                except error:
                    raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
            else:
                try:
                    # Pad the point with a default m of 0 if it has none.
                    if len(s.points[0])<4:
                        s.points[0].append(0)
                    f.write(pack("<1d", s.points[0][3]))
                except error:
                    raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
        # Finalize record length as 16-bit words
        finish = f.tell()
        length = (finish - start) // 2
        self._lengths.append(length)
        # start - 4 bytes is the content length field
        f.seek(start-4)
        f.write(pack(">i", length))
        f.seek(finish)
def __shxRecords(self):
    """Writes the shx records (one 8-byte index entry per shape)."""
    f = self.__getFileObj(self.shx)
    f.seek(100)
    # Each entry: big-endian record offset (in 16-bit words) + length,
    # both captured earlier while writing the shp records.
    for i, _ in enumerate(self._shapes):
        f.write(pack(">i", self._offsets[i] // 2))
        f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
    """Writes the dbf records.

    Each record starts with a one-byte deletion flag (a space = live),
    followed by every field value padded to its declared width.
    """
    f = self.__getFileObj(self.dbf)
    for record in self.records:
        if not self.fields[0][0].startswith("Deletion"):
            f.write(b(' ')) # deletion flag
        for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
            fieldType = fieldType.upper()
            size = int(size)
            # fieldType is already uppercased above; the old code called
            # .upper() a second time in this comparison.
            if fieldType == "N":
                # Numbers are right-justified within the field width.
                value = str(value).rjust(size)
            elif fieldType == 'L':
                # Logical: a single character (e.g. first letter of
                # True/False, uppercased).
                value = str(value)[0].upper()
            else:
                # Character data: truncated and left-justified.
                value = str(value)[:size].ljust(size)
            # NOTE(review): plain assert -- stripped under -O, and an 'L'
            # field with size != 1 would trip it; kept as in the original.
            assert len(value) == size
            value = b(value)
            f.write(value)
def null(self):
    """Creates a null shape."""
    # A null record participates in record numbering but has no geometry.
    self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
    """Creates a point shape."""
    # Points are always stored as [x, y, z, m]; the z/m slots are simply
    # not written for shape types without those dimensions.
    pointShape = _Shape(self.shapeType)
    pointShape.points.append([x, y, z, m])
    self._shapes.append(pointShape)
def line(self, parts=[], shapeType=POLYLINE):
    """Creates a line shape. This method is just a convenience method
    which wraps 'poly()'.
    """
    # parts is a list of point lists; an empty partTypes list is passed
    # since part types only apply to Multipatch shapes.
    self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=None):
    """Creates a shape that has multiple collections of points (parts)
    including lines, polygons, and even multipoint shapes. If no shape type
    is specified it defaults to 'polygon'. If no part types are specified
    (which they normally won't be) then all parts default to the shape type.
    """
    if partTypes is None:
        # The old signature used a mutable default (partTypes=[]); the
        # Multipatch branch below appends to it, so part types leaked
        # across successive calls. A fresh list per call fixes that while
        # remaining backward-compatible for callers that pass a list.
        partTypes = []
    polyShape = _Shape(shapeType)
    polyShape.parts = []
    polyShape.points = []
    # Make sure polygons are closed
    if shapeType in (5,15,25,31):
        for part in parts:
            # NOTE: mutates the caller's part list when closing the ring.
            if part[0] != part[-1]:
                part.append(part[0])
    for part in parts:
        # Record where this part starts in the flat point list.
        polyShape.parts.append(len(polyShape.points))
        for point in part:
            # Ensure point is list
            if not isinstance(point, list):
                point = list(point)
            # Make sure point has z and m values
            while len(point) < 4:
                point.append(0)
            polyShape.points.append(point)
    if polyShape.shapeType == 31:
        if not partTypes:
            # Default every part's type to the shape type.
            for part in parts:
                partTypes.append(polyShape.shapeType)
        polyShape.partTypes = partTypes
    self._shapes.append(polyShape)
def field(self, name, fieldType="C", size="50", decimal=0):
    """Adds a dbf field descriptor to the shapefile."""
    # Descriptors are (name, type-code, size, decimal-count) tuples;
    # they are consumed later when the dbf header is written.
    descriptor = (name, fieldType, size, decimal)
    self.fields.append(descriptor)
def record(self, *recordList, **recordDict):
    """Creates a dbf attribute record. You can submit either a sequence of
    field values or keyword arguments of field names and values. Before
    adding records you must add fields for the record values using the
    fields() method. If the record values exceed the number of fields the
    extra ones won't be added. In the case of using keyword arguments to specify
    field/value pairs only fields matching the already registered fields
    will be added."""
    record = []
    fieldCount = len(self.fields)
    # Compensate for deletion flag
    if self.fields[0][0].startswith("Deletion"):
        fieldCount -= 1
    if recordList:
        # One value per field; extras are dropped. Indexing (rather than
        # slicing) preserves the historical IndexError when too few
        # values are supplied. The old code built this with a list
        # comprehension used purely for its append side effects.
        record = [recordList[i] for i in range(fieldCount)]
    elif recordDict:
        for field in self.fields:
            if field[0] in recordDict:
                val = recordDict[field[0]]
                if val is None:
                    # None is stored as an empty string in dbf terms.
                    record.append("")
                else:
                    record.append(val)
    if record:
        self.records.append(record)
def shape(self, i):
    """Return the shape stored at index *i*."""
    return self._shapes[i]
def shapes(self):
    """Return the current list of shapes."""
    # Returns the live internal list, not a copy.
    return self._shapes
def saveShp(self, target):
    """Save an shp file."""
    if not hasattr(target, "write"):
        # target is a path, not a file-like object; force the extension.
        target = os.path.splitext(target)[0] + '.shp'
    if not self.shapeType:
        # Infer the file-level shape type from the first shape added.
        self.shapeType = self._shapes[0].shapeType
    self.shp = self.__getFileObj(target)
    self.__shapefileHeader(self.shp, headerType='shp')
    self.__shpRecords()
def saveShx(self, target):
    """Save an shx file."""
    if not hasattr(target, "write"):
        # target is a path, not a file-like object; force the extension.
        target = os.path.splitext(target)[0] + '.shx'
    if not self.shapeType:
        # Infer the file-level shape type from the first shape added.
        self.shapeType = self._shapes[0].shapeType
    self.shx = self.__getFileObj(target)
    self.__shapefileHeader(self.shx, headerType='shx')
    self.__shxRecords()
def saveDbf(self, target):
    """Save a dbf file."""
    if not hasattr(target, "write"):
        # target is a path, not a file-like object; force the extension.
        target = os.path.splitext(target)[0] + '.dbf'
    self.dbf = self.__getFileObj(target)
    self.__dbfHeader()
    self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
    """Save the shapefile data to three files or
    three file-like objects. SHP and DBF files can also
    be written exclusively using saveShp, saveShx, and saveDbf respectively.
    If target is specified but not shp,shx, or dbf then the target path and
    file name are used. If no options or specified, a unique base file name
    is generated to save the files and the base file name is returned as a
    string.
    """
    # Create a unique file name if one is not defined
    if shp:
        self.saveShp(shp)
    if shx:
        self.saveShx(shx)
    if dbf:
        self.saveDbf(dbf)
    # NOTE: this elif chains off "if dbf" above -- the all-three fallback
    # only runs when none of shp/shx/dbf were given.
    elif not shp and not shx and not dbf:
        generated = False
        if not target:
            # NOTE(review): only NamedTemporaryFile's .name is used and
            # the handle itself is discarded; on platforms where the file
            # is deleted on close this is a name-reuse race -- confirm.
            temp = tempfile.NamedTemporaryFile(prefix="shapefile_",dir=os.getcwd())
            target = temp.name
            generated = True
        self.saveShp(target)
        self.shp.close()
        self.saveShx(target)
        self.shx.close()
        self.saveDbf(target)
        self.dbf.close()
        if generated:
            return target
class Editor(Writer):
    """A Writer pre-loaded from an existing shapefile (if one is given),
    so shapes, fields, and records can be modified and saved back out."""

    def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
        # When autoBalance is truthy, every point() call re-syncs the
        # shape and record counts via balance().
        self.autoBalance = autoBalance
        if not shapefile:
            Writer.__init__(self, shapeType)
        elif is_string(shapefile):
            base = os.path.splitext(shapefile)[0]
            if os.path.isfile("%s.shp" % base):
                # Load the existing data through a Reader.
                r = Reader(base)
                Writer.__init__(self, r.shapeType)
                self._shapes = r.shapes()
                self.fields = r.fields
                self.records = r.records()

    def select(self, expr):
        """Select one or more shapes (to be implemented)"""
        # TODO: Implement expressions to select shapes.
        pass

    def delete(self, shape=None, part=None, point=None):
        """Deletes the specified part of any shape by specifying a shape
        number, part number, or point number.

        NOTE(review): the truthiness tests below treat index 0 like None,
        so the first shape/part/point cannot be addressed; preserved for
        backward compatibility.
        """
        # shape, part, point
        if shape and part and point:
            del self._shapes[shape][part][point]
        # shape, part
        elif shape and part and not point:
            del self._shapes[shape][part]
        # shape
        elif shape and not part and not point:
            del self._shapes[shape]
        # point
        elif not shape and not part and point:
            for s in self._shapes:
                if s.shapeType == 1:
                    del self._shapes[point]
                else:
                    for part in s.parts:
                        del s[part][point]
        # part, point
        elif not shape and part and point:
            for s in self._shapes:
                del s[part][point]
        # part
        elif not shape and part and not point:
            for s in self._shapes:
                del s[part]

    def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
        """Creates/updates a point shape. The arguments allows
        you to update a specific point by shape, part, point of any
        shape type.

        NOTE(review): like delete(), index 0 fails the truthiness tests,
        and the trailing addr/else branch always runs regardless of which
        addressing branch matched above; preserved as-is.
        """
        # shape, part, point
        if shape and part and point:
            # Grow the nested structure on demand before updating.
            try: self._shapes[shape]
            except IndexError: self._shapes.append([])
            try: self._shapes[shape][part]
            except IndexError: self._shapes[shape].append([])
            try: self._shapes[shape][part][point]
            except IndexError: self._shapes[shape][part].append([])
            p = self._shapes[shape][part][point]
            if x: p[0] = x
            if y: p[1] = y
            if z: p[2] = z
            if m: p[3] = m
            self._shapes[shape][part][point] = p
        # shape, part
        elif shape and part and not point:
            try: self._shapes[shape]
            except IndexError: self._shapes.append([])
            try: self._shapes[shape][part]
            except IndexError: self._shapes[shape].append([])
            points = self._shapes[shape][part]
            for i in range(len(points)):
                p = points[i]
                if x: p[0] = x
                if y: p[1] = y
                if z: p[2] = z
                if m: p[3] = m
                self._shapes[shape][part][i] = p
        # shape
        elif shape and not part and not point:
            try: self._shapes[shape]
            except IndexError: self._shapes.append([])
        # point
        # part
        if addr:
            shape, part, point = addr
            self._shapes[shape][part][point] = [x, y, z, m]
        else:
            Writer.point(self, x, y, z, m)
        if self.autoBalance:
            self.balance()

    def validate(self):
        """An optional method to try and validate the shapefile
        as much as possible before writing it (not implemented)."""
        #TODO: Implement validation method
        pass

    def balance(self):
        """Adds a corresponding empty attribute or null geometry record depending
        on which type of record was created to make sure all three files
        are in sync."""
        if len(self.records) > len(self._shapes):
            self.null()
        elif len(self.records) < len(self._shapes):
            self.record()

    def __fieldNorm(self, fieldName):
        """Normalizes a dbf field name to fit within the spec and the
        expectations of certain ESRI software."""
        if len(fieldName) > 11: fieldName = fieldName[:11]
        fieldName = fieldName.upper()
        # Fix: the old code discarded str.replace()'s return value (strings
        # are immutable) and fell off the end returning None; assign the
        # result and return the normalized name.
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
    # Run the doctest suite embedded in README.txt. NORMALIZE_WHITESPACE
    # makes the output comparisons tolerant of formatting differences.
    import doctest
    doctest.NORMALIZE_WHITESPACE = 1
    doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
    """
    Doctests are contained in the file 'README.txt'. This library was originally developed
    using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
    testing libraries but for now unit testing is done using what's available in
    2.3.
    """
    # Running the module directly executes the README doctest suite.
    test()
| slub/vk2-georeference | georeference/utils/process/shapefile.py | Python | gpl-3.0 | 44,672 |
from django.utils.encoding import iri_to_uri
from urlparse import urljoin
def utility(request):
    """
    Expose handy absolute-URI values on every template context:
    the site root and the fully-qualified current URL.
    """
    scheme = 'https' if request.is_secure() else 'http'
    root = "%s://%s" % (scheme, request.get_host())
    current = urljoin(root, request.get_full_path())
    return {
        'site_base_uri': iri_to_uri(root),
        'absolute_uri': iri_to_uri(current),
    }
| iivvoo/two.ol | two/ol/context_processors.py | Python | bsd-2-clause | 419 |
"""
:codeauthor: Thomas Jackson <[email protected]>
"""
import logging
import threading
import pytest
import salt.channel.client
import salt.channel.server
import salt.config
import salt.exceptions
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.utils.platform
import salt.utils.process
from salt.ext.tornado.testing import AsyncTestCase
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.unit import skipIf
from tests.unit.transport.mixins import run_loop_in_thread
# Module-wide marks: these transport tests carry skip marks for macOS
# (darwin) and FreeBSD.
pytestmark = [
    pytest.mark.skip_on_darwin,
    pytest.mark.skip_on_freebsd,
]

# Module-level logger.
log = logging.getLogger(__name__)
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubServerTest(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    """
    Tests around the publish system
    """

    @classmethod
    def setUpClass(cls):
        # Pick free localhost ports so concurrent runs don't collide.
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        # Master configured for the TCP transport with auto-accepted keys.
        cls.master_config = cls.get_temp_config(
            "master",
            **{
                "transport": "tcp",
                "auto_accept": True,
                "ret_port": ret_port,
                "publish_port": publish_port,
                "tcp_master_pub_port": tcp_master_pub_port,
                "tcp_master_pull_port": tcp_master_pull_port,
                "tcp_master_publish_pull": tcp_master_publish_pull,
                "tcp_master_workers": tcp_master_workers,
            }
        )
        cls.minion_config = cls.get_temp_config(
            "minion",
            **{
                "transport": "tcp",
                "master_ip": "127.0.0.1",
                "auth_timeout": 1,
                "master_port": ret_port,
                "master_uri": "tcp://127.0.0.1:{}".format(ret_port),
            }
        )
        cls.process_manager = salt.utils.process.ProcessManager(
            name="ReqServer_ProcessManager"
        )
        cls.server_channel = salt.channel.server.PubServerChannel.factory(
            cls.master_config
        )
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.channel.server.ReqServerChannel.factory(
            cls.master_config
        )
        cls.req_server_channel.pre_fork(cls.process_manager)
        # Run the request server's IO loop in a background thread; the
        # stop event lets tearDownClass shut it down cleanly.
        cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
        cls.stop = threading.Event()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(
            target=run_loop_in_thread,
            args=(
                cls.io_loop,
                cls.stop,
            ),
        )
        cls.server_thread.start()

    @classmethod
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        return payload, {"fun": "send_clear"}

    @classmethod
    def tearDownClass(cls):
        # Tear down in reverse dependency order, then stop the IO loop
        # thread and the forked workers.
        cls.req_server_channel.close()
        cls.server_channel.close()
        cls.stop.set()
        cls.server_thread.join()
        cls.process_manager.terminate()
        del cls.req_server_channel

    def setUp(self):
        super().setUp()
        # Snapshot the IOLoop's handlers so tearDown can detect leaked FDs.
        self._start_handlers = dict(self.io_loop._handlers)

    def tearDown(self):
        super().tearDown()
        failures = []
        for k, v in self.io_loop._handlers.items():
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if failures:
            raise Exception("FDs still attached to the IOLoop: {}".format(failures))
        # NOTE(review): nothing in this class ever assigns ``self.channel``
        # (test_basic creates ``pub_channel`` and deletes it itself), so
        # this del looks like it would raise AttributeError -- likely one
        # reason the class is skipped; confirm before re-enabling.
        del self.channel
        del self._start_handlers

    def test_basic(self):
        self.pub = None

        def handle_pub(ret):
            self.pub = ret
            self.stop()  # pylint: disable=not-callable

        # NOTE(review): uses ``self.minion_opts`` although setUpClass stores
        # the minion options as ``minion_config`` -- presumably supplied by
        # AdaptedConfigurationTestCaseMixin; confirm.
        self.pub_channel = salt.channel.client.AsyncPubChannel.factory(
            self.minion_opts, io_loop=self.io_loop
        )
        connect_future = self.pub_channel.connect()
        connect_future.add_done_callback(
            lambda f: self.stop()  # pylint: disable=not-callable
        )
        self.wait()
        connect_future.result()
        self.pub_channel.on_recv(handle_pub)
        load = {
            "fun": "f",
            "arg": "a",
            "tgt": "t",
            "jid": "j",
            "ret": "r",
            "tgt_type": "glob",
        }
        self.server_channel.publish(load)
        self.wait()
        self.assertEqual(self.pub["load"], load)
        self.pub_channel.on_recv(None)
        self.server_channel.publish(load)
        # With the receive callback removed, nothing should arrive; the
        # wait is expected to time out.
        with self.assertRaises(self.failureException):
            self.wait(timeout=0.5)
        # close our pub_channel, to pass our FD checks
        self.pub_channel.close()
        del self.pub_channel
| saltstack/salt | tests/unit/transport/test_tcp.py | Python | apache-2.0 | 5,132 |
import websocket
import json
import logging
import coloredlogs
import sys
import ssl
from getopt import gnu_getopt, GetoptError
from mouserver_ext import grab_window, Window
import random
import string
import time
class Mouserver:
    """Bridges a websocket session to a local X11 window: interaction
    events received over the socket are replayed as mouse input on the
    window. (Python 2 module: uses string.letters / xrange.)"""

    def __init__(self, ws_url, session, window):
        self.ws_url = ws_url
        self.session = session
        self.window = window
        self.log = logging.getLogger('mouserver')
        self.ws_log = logging.getLogger('websocket')
        # Random identity announced to the server for this process.
        self.uid = ''.join(random.choice(string.letters) for i in xrange(20))
        self.name = 'MouServer'
        self.log.info("Websocket URL: %s", self.ws_url)
        self.log.info("Session ID: %s", self.session)
        window_name = self.window.get_name()
        w, h = self.window.get_size()
        self.log.info("Window: %s (%dx%d)", window_name, w, h)
        # Dispatch table: message 'type' string -> handler method.
        self.method_table = {}
        self.register('mouseMove', self.mouse_move)
        self.register('mouseDown', self.mouse_down)
        self.register('mouseUp', self.mouse_up)
        self.wsapp = websocket.WebSocketApp(
            ws_url,
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close,
            on_open=self.on_open)

    def run_forever(self):
        # Certificate verification is deliberately disabled (CERT_NONE),
        # e.g. for self-signed server certificates.
        self.wsapp.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE})

    def on_message(self, ws, message):
        # Decode the JSON payload and dispatch on its 'type' field.
        try:
            msg = json.loads(message)
        except ValueError:
            self.log.warning("Received non-JSON data")
            return
        if 'type' not in msg:
            self.log.warning("Received data with no command")
            return
        msg_type = msg['type']
        method = self.method_table.get(msg_type, None)
        if method is not None:
            method(msg)
        else:
            self.log.warning("Received unknown msg type: %s", msg_type)

    def on_error(self, ws, error):
        self.ws_log.error(error)

    def on_close(self, ws):
        # Raising here unwinds run_forever() so the caller can reconnect.
        self.ws_log.error("Connection closed")
        raise MouserverConnectionClosedError("Connection closed")

    def on_open(self, ws):
        # Announce ourselves, then join the configured session.
        self.ws_log.info("Connection established")
        self.ws_log.info("Joining session: %s", self.session)
        ws.send(json.dumps({
            'type': 'announce',
            'srcID': self.uid,
            'userName': self.name,
            'activeMouseOnly': True
        }))
        ws.send(json.dumps({
            'type': 'joinSession',
            'srcID': self.uid,
            'sessionID': self.session
        }))

    def register(self, msg_type, method):
        """Map a websocket message type to its handler method."""
        self.method_table[msg_type] = method

    def mouse_move(self, msg):
        x = float(msg['x'])
        y = float(msg['y'])
        self.log.debug("mouse_move (%f, %f)", x, y)
        self.window.mouse_move_ratio(x, y)

    def mouse_down(self, msg):
        x = float(msg['x'])
        y = float(msg['y'])
        # javascript (and the websockets) use 0, 1, 2 for the mouse buttons,
        # but libxdo uses 1, 2, 3
        button = int(msg['button']) + 1
        # Fix: pass the values as separate lazy-format arguments. The old
        # code handed logging a single tuple for three %-placeholders,
        # which breaks the record's formatting (compare mouse_move).
        self.log.debug("mouse_down (%f, %f, %d)", x, y, button)
        self.window.mouse_move_ratio(x, y)
        self.window.mouse_down(button)

    def mouse_up(self, msg):
        x = float(msg['x'])
        y = float(msg['y'])
        # javascript (and the websockets) use 0, 1, 2 for the mouse buttons,
        # but libxdo uses 1, 2, 3
        button = int(msg['button']) + 1
        # Fix: same tuple-vs-args logging bug as mouse_down.
        self.log.debug("mouse_up (%f, %f, %d)", x, y, button)
        self.window.mouse_move_ratio(x, y)
        self.window.mouse_up(button)
class MouserverConnectionClosedError(Exception):
    """Raised by the on_close websocket callback when the connection to
    the server drops, so run_forever() unwinds and can be restarted."""
    pass
def print_usage():
    # Command-line help. This module is Python 2 (print statements, xrange,
    # long); it will not run unmodified under Python 3.
    print "usage: %s -u <websocket_url> -s <session_id> [-w <window_id>]" % sys.argv[0]
    print ""
    print " --url, -u <websocket_url>"
    print " specifies the websocket URL to which the program should"
    print " connect to receive user interaction events (required)"
    print " --session, -s <session_id>"
    print " specifies the string that uniquely identifies this session"
    print " (required)"
    print " --window, -w <window_id>"
    print " specifies the X11 window ID of the window with which to interact."
    print " If this is not specified, you will be prompted to select a window"
    print " by clicking on it at startup."
    print ""
    print " --verbose, -v"
    print " outputs lots of protocol information"
    print " --help, -h"
    print " displays this usage information."
def main():
    # Parse options, resolve the target window, then run the websocket
    # client in a reconnect loop. URL and session ID are mandatory.
    loglevel = logging.INFO
    url = None
    session = None
    window = None
    short_opts = "hvu:s:w:"
    long_opts = [
        'help',
        'verbose',
        'url=',
        'session=',
        'window=',
    ]
    try:
        opts, args = gnu_getopt(sys.argv[1:], short_opts, long_opts)
    except GetoptError as err:
        print str(err)
        print_usage()
        sys.exit(2)
    for o, a in opts:
        if o in ('-h', '--help'):
            print_usage()
            sys.exit(0)
        elif o in ('-v', '--verbose'):
            loglevel = logging.DEBUG
        elif o in ('-u', '--url'):
            url = a
        elif o in ('-s', '--session'):
            session = a
        elif o in ('-w', '--window'):
            # X11 window IDs may exceed the int range on Python 2.
            window = long(a)
        else:
            print "Unknown option: %s" % o
            print_usage()
            sys.exit(2)
    if url is None:
        print "Must specify server URL (-u)"
        sys.exit(1)
    if session is None:
        print "Must specify session ID (-s)"
        sys.exit(1)
    if window is None:
        # Interactive fallback: let the user click the target window.
        print "Please select a window by clicking on it."
        window = grab_window()
    else:
        window = Window(window)
    log = logging.getLogger("main")
    coloredlogs.install(level=loglevel)
    while True:
        # NOTE(review): on_close() raises MouserverConnectionClosedError
        # inside a websocket callback; whether that propagates out of
        # run_forever() (reaching the sleep below) depends on the
        # websocket-client version -- confirm the reconnect loop actually
        # loops. Also note the warning is emitted *after* the 5 s sleep.
        server = Mouserver(url, session, window)
        server.run_forever()
        time.sleep(5.0)
        log.warning("Restarting after 5 seconds due to dropped connection")
# Script entry point.
if __name__ == '__main__':
    main()
| ssfrr/advenshare | mouserver/mouserver/server.py | Python | mit | 6,082 |
from datetime import timedelta
from six import text_type
from rest_framework.fields import DurationField
from .utils import parse_duration, duration_decimal
class DurationField(DurationField):
    """DRF DurationField that also accepts the app's shorthand formats
    (parsed by ``parse_duration``) and serializes to a decimal number of
    hours via ``duration_decimal``. Deliberately shadows the imported
    rest_framework ``DurationField`` base class name."""

    def to_internal_value(self, value):
        # Already a timedelta (e.g. from a model instance): pass through.
        if isinstance(value, timedelta):
            return value
        parsed = parse_duration(text_type(value))
        if parsed is not None:
            return parsed
        # Fix: the old code had ``else: return None`` here, which made
        # this fail() call unreachable -- invalid input silently became
        # None instead of raising a validation error.
        self.fail("invalid", format="[[HH]:MM] or [[HH][.MM]]")

    def to_representation(self, value):
        # Serialize the timedelta as a decimal via the project helper.
        return duration_decimal(value)
| cdubz/timestrap | core/fields.py | Python | bsd-2-clause | 593 |
from api.serializers import LocationSerializer
from events.models import Location
from rest_framework import viewsets, serializers, permissions
from api.permissions import IsEventOrganizer
class LocationSet(viewsets.ModelViewSet):
    # CRUD endpoint over Location objects; access requires either Django
    # admin status or the event-organizer permission.
    serializer_class = LocationSerializer
    permission_classes = [permissions.IsAdminUser | IsEventOrganizer]

    def get_queryset(self):
        # All locations are exposed; no per-user filtering.
        return Location.objects.all()
def test_location_set(superuser1_api_request):
    """An admin request to the location list returns an empty list when
    no Location exists."""
    from rest_framework.reverse import reverse

    response = superuser1_api_request.get(reverse("frontend_locations-list"))
    assert response.json() == []
def test_location_set1(superuser1_api_request, location_1):
    """A single stored Location is serialized with all of its fields."""
    from rest_framework.reverse import reverse

    response = superuser1_api_request.get(reverse("frontend_locations-list"))
    expected = {
        "name": "location_name",
        "place": "here",
        "region": "Prague",
        "gps_latitude": 52.15151,
        "gps_longitude": 35.11515,
        "id": location_1.pk,
    }
    assert response.json() == [expected]
def test_location_set_anon(anon_api_request):
    """Unauthenticated requests are rejected with DRF's (Czech-localized)
    missing-credentials detail message."""
    from rest_framework.reverse import reverse

    response = anon_api_request.get(reverse("frontend_locations-list"))
    assert response.json() == {"detail": "Nebyly zadány přihlašovací údaje."}
| auto-mat/klub | apps/api/frontend/locations_unit.py | Python | gpl-3.0 | 1,357 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
    """Parse *yaml* as an ASDF file, check the stored quantity matches,
    then write it back out and verify the re-read copy still matches."""
    buff = helpers.yaml_to_asdf(yaml)
    with asdf.AsdfFile.open(buff) as ff:
        assert (ff.tree['quantity'] == quantity).all()
        # Serialize back to an in-memory buffer while the file is open.
        buff2 = io.BytesIO()
        ff.write_to(buff2)
    buff2.seek(0)
    with asdf.AsdfFile.open(buff2) as ff:
        assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
    """Round-trip a scalar Quantity through its ASDF YAML tag."""
    value = 2.71828
    unit = units.kpc
    yaml = """
quantity: !unit/quantity-1.1.0
  value: {}
  unit: {}
""".format(value, unit)
    expected = units.Quantity(value, unit=unit)
    roundtrip_quantity(yaml, expected)
def test_value_array(tmpdir):
    """Round-trip a one-element array Quantity stored as an ndarray tag."""
    value = [3.14159]
    unit = units.kg
    yaml = """
quantity: !unit/quantity-1.1.0
  value: !core/ndarray-1.0.0 {}
  unit: {}
""".format(value, unit)
    expected = units.Quantity(value, unit=unit)
    roundtrip_quantity(yaml, expected)
def test_value_multiarray(tmpdir):
    """Round-trip a ten-element array Quantity stored as an ndarray tag."""
    value = [x*2.3081 for x in range(10)]
    unit = units.ampere
    yaml = """
quantity: !unit/quantity-1.1.0
  value: !core/ndarray-1.0.0 {}
  unit: {}
""".format(value, unit)
    expected = units.Quantity(value, unit=unit)
    roundtrip_quantity(yaml, expected)
def test_value_ndarray(tmpdir):
    """Round-trip a 2-D float64 Quantity stored as an explicit ndarray."""
    from numpy import array, float64
    value = [[1,2,3],[4,5,6]]
    unit = units.km
    yaml = """
quantity: !unit/quantity-1.1.0
  value: !core/ndarray-1.0.0
    datatype: float64
    data:
      {}
  unit: {}
""".format(value, unit)
    expected = units.Quantity(array(value, float64), unit=unit)
    roundtrip_quantity(yaml, expected)
| funbaker/astropy | astropy/io/misc/asdf/tags/unit/tests/test_quantity.py | Python | bsd-3-clause | 1,879 |
# -*- coding: utf-8 -*-
"""
Managers of ``critica.apps.polls`` application.
"""
from django.db import models
class PollPublishedManager(models.Manager):
    """
    Manager that exposes only polls flagged as published.
    """
    def get_query_set(self):
        """
        Default QuerySet, restricted to published polls.
        """
        qs = super(PollPublishedManager, self).get_query_set()
        return qs.filter(is_published=True)
| brunobord/critica | apps/polls/managers.py | Python | gpl-3.0 | 384 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.