code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.apps import AppConfig
class KolibriContentConfig(AppConfig):
    """Django app configuration for Kolibri's core content app."""

    name = 'kolibri.core.content'
    label = 'content'
    verbose_name = 'Kolibri Content'

    def ready(self):
        # Deferred (function-level) import so the sqlalchemy bridge is only
        # loaded once the Django app registry is fully populated.
        from kolibri.core.content.utils.sqlalchemybridge import prepare_bases
        prepare_bases()
| DXCanas/kolibri | kolibri/core/content/apps.py | Python | mit | 410 |
"""
.. module: lemur.destinations.views
:platform: Unix
:synopsis: This module contains all of the accounts view code.
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from flask import Blueprint
from flask.ext.restful import Api, reqparse, fields
from lemur.destinations import service
from lemur.auth.service import AuthenticatedResource
from lemur.auth.permissions import admin_permission
from lemur.common.utils import paginated_parser, marshal_items
# Blueprint/API wiring for every /destinations endpoint in this module.
mod = Blueprint('destinations', __name__)
api = Api(mod)

# Marshalling schema: maps internal model attribute names to the camelCase
# field names exposed in API responses (used via @marshal_items below).
FIELDS = {
    'description': fields.String,
    'destinationOptions': fields.Raw(attribute='options'),
    'pluginName': fields.String(attribute='plugin_name'),
    'label': fields.String,
    'id': fields.Integer,
}
class DestinationsList(AuthenticatedResource):
    """Defines the 'destinations' endpoint (list and create)."""

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(DestinationsList, self).__init__()

    @marshal_items(FIELDS)
    def get(self):
        """
        .. http:get:: /destinations

           The current account list

           :query sortBy: field to sort on
           :query sortDir: acs or desc
           :query page: int. default is 1
           :query filter: key value pair. format is k=v;
           :query limit: limit number. default is 10
           :reqheader Authorization: OAuth token to authenticate
           :statuscode 200: no error
        """
        # Standard pagination/sorting/filtering arguments shared across list
        # endpoints; copied so this parser can be extended without side effects.
        return service.render(paginated_parser.copy().parse_args())

    @admin_permission.require(http_exception=403)
    @marshal_items(FIELDS)
    def post(self):
        """
        .. http:post:: /destinations

           Creates a new account

           :arg label: human readable account label
           :arg description: some description about the account
           :reqheader Authorization: OAuth token to authenticate
           :statuscode 200: no error
        """
        # Declare the accepted JSON body fields, then parse in one pass.
        for arg_name, opts in (
                ('label', {'type': str, 'location': 'json', 'required': True}),
                ('plugin', {'type': dict, 'location': 'json', 'required': True}),
                ('description', {'type': str, 'location': 'json'})):
            self.reqparse.add_argument(arg_name, **opts)
        args = self.reqparse.parse_args()
        return service.create(
            args['label'],
            args['plugin']['slug'],
            args['plugin']['pluginOptions'],
            args['description'],
        )
class Destinations(AuthenticatedResource):
    """Single-destination endpoint: fetch, update, or delete by id."""

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(Destinations, self).__init__()

    @marshal_items(FIELDS)
    def get(self, destination_id):
        """
        .. http:get:: /destinations/1

           Get a specific account

           :reqheader Authorization: OAuth token to authenticate
           :statuscode 200: no error
        """
        return service.get(destination_id)

    @admin_permission.require(http_exception=403)
    @marshal_items(FIELDS)
    def put(self, destination_id):
        """
        .. http:put:: /destinations/1

           Updates an account

           :arg accountNumber: aws account number
           :arg label: human readable account label
           :arg description: some description about the account
           :reqheader Authorization: OAuth token to authenticate
           :statuscode 200: no error
        """
        # Same body schema as DestinationsList.post.
        for arg_name, opts in (
                ('label', {'type': str, 'location': 'json', 'required': True}),
                ('plugin', {'type': dict, 'location': 'json', 'required': True}),
                ('description', {'type': str, 'location': 'json'})):
            self.reqparse.add_argument(arg_name, **opts)
        args = self.reqparse.parse_args()
        return service.update(
            destination_id,
            args['label'],
            args['plugin']['pluginOptions'],
            args['description'],
        )

    @admin_permission.require(http_exception=403)
    def delete(self, destination_id):
        """Remove a destination; admins only."""
        service.delete(destination_id)
        return {'result': True}
class CertificateDestinations(AuthenticatedResource):
    """Defines the 'certificates/<int:certificate_id>/destinations' endpoint."""

    def __init__(self):
        super(CertificateDestinations, self).__init__()

    @marshal_items(FIELDS)
    def get(self, certificate_id):
        """
        .. http:get:: /certificates/1/destinations

           The current account list for a given certificates

           :query sortBy: field to sort on
           :query sortDir: acs or desc
           :query page: int. default is 1
           :query filter: key value pair. format is k=v;
           :query limit: limit number. default is 10
           :reqheader Authorization: OAuth token to authenticate
           :statuscode 200: no error
        """
        # Reuse the generic list renderer, scoped to one certificate.
        args = paginated_parser.copy().parse_args()
        args['certificate_id'] = certificate_id
        return service.render(args)
class DestinationsStats(AuthenticatedResource):
    """Defines the destinations stats endpoint."""

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(DestinationsStats, self).__init__()

    def get(self):
        """Return destination statistics for the requested ``metric``."""
        self.reqparse.add_argument('metric', type=str, location='args')
        args = self.reqparse.parse_args()
        items = service.stats(**args)
        return {'items': items, 'total': len(items)}
# URL routing: bind each resource class to its endpoint on the blueprint.
api.add_resource(DestinationsList, '/destinations', endpoint='destinations')
api.add_resource(Destinations, '/destinations/<int:destination_id>', endpoint='destination')
api.add_resource(CertificateDestinations, '/certificates/<int:certificate_id>/destinations',
                 endpoint='certificateDestinations')
api.add_resource(DestinationsStats, '/destinations/stats', endpoint='destinationStats')
| rhoml/lemur | lemur/destinations/views.py | Python | apache-2.0 | 12,247 |
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
import logging
from . import models
logger = logging.getLogger("project")
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile_handler(sender, instance, created, **kwargs):
    # post_save handler: attach a Profile to newly created user rows.
    if not created:
        return
    # Create the profile object, only if it is newly created
    # NOTE(review): despite the comment above, a Profile is only created when
    # the new user is a superuser — confirm regular users get their Profile
    # created elsewhere, or whether this condition should be inverted.
    if instance.is_superuser:
        profile = models.Profile(user=instance)
        profile.save()
        logger.info('New user profile for {} created'.format(instance))
| furthz/colegio | src/profiles/signals.py | Python | mit | 581 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Ideal observer model with same experimental setup as convergence simulation.
Generate objects with ten positions out of a four-by-four grid that have
features randomly selected from a pool of unique features.
"""
import collections
import json
import random
import numpy as np
def generateObjects(numObjects, numFeatures):
    """Build ``numObjects`` random 4x4 objects.

    Each object is a (4, 4) int32 grid with exactly ten cells holding a
    feature index in ``[0, numFeatures)`` and the remaining six cells set
    to -1 (empty).  Seeding with ``numObjects`` keeps the set deterministic
    for a given object count.
    """
    np.random.seed(numObjects)
    objects = {}
    for objID in range(numObjects):
        cells = np.full(16, -1, dtype=np.int32)
        cells[:10] = np.random.randint(numFeatures, size=10, dtype=np.int32)
        np.random.shuffle(cells)
        objects[objID] = cells.reshape((4, 4))
    return objects
def getStartingSpots(objects):
    """Map each feature to every (objectID, (x, y)) location where it occurs.

    Empty cells (value -1) are skipped; the result is the inverted index
    used by runTrial to seed the candidate-object set.
    """
    startingSpots = collections.defaultdict(list)
    for objID, grid in objects.items():
        for x in range(4):
            for y in range(4):
                feature = grid[x, y]
                if feature == -1:
                    continue
                startingSpots[feature].append((objID, (x, y)))
    return startingSpots
def runTrial(objects, startingSpots, numFeatures):
    # For each object, simulate an ideal observer that touches the object's
    # filled positions in random order, pruning the candidate set after every
    # sensation until only the target remains.  Returns a map from
    # number-of-sensations -> count of objects identified in that many steps;
    # the None key counts objects still ambiguous after all ten sensations.
    numObjects = len(objects)
    results = collections.defaultdict(int)
    for targetID in xrange(numObjects):
        #random.seed(targetID)
        targetObject = objects[targetID]
        possibleObjects = None
        possiblePositions = []
        # Collect the ten non-empty positions of the target object.
        for x in xrange(4):
            for y in xrange(4):
                if targetObject[x][y] != -1:
                    possiblePositions.append((x, y))
        # Visit the positions in a random order.
        idx = range(10)
        #print idx
        random.shuffle(idx)
        #print idx
        possiblePositions = [possiblePositions[i] for i in idx]
        #print possiblePositions
        steps = 0
        for x, y in possiblePositions:
            feat = targetObject[x, y]
            #print x, y, feat
            steps += 1
            curPos = (x, y)
            if possibleObjects is None:
                # First sensation: every location with this feature is a
                # candidate (objectID plus hypothesized position on it).
                possibleObjects = startingSpots[feat]
            else:
                # Apply the same relative movement to each hypothesis and
                # keep only those whose predicted cell shows this feature.
                changeX = x - prevPos[0]
                changeY = y - prevPos[1]
                newPossibleObjects = []
                for objectID, coords in possibleObjects:
                    newX = coords[0] + changeX
                    newY = coords[1] + changeY
                    # Discard hypotheses that would move off the 4x4 grid.
                    if (newX < 0 or newX >= objects[objectID].shape[0] or
                            newY < 0 or newY >= objects[objectID].shape[1]):
                        continue
                    expectedFeat = objects[objectID][newX, newY]
                    if expectedFeat == feat:
                        newPossibleObjects.append((objectID, (newX, newY)))
                possibleObjects = newPossibleObjects
            possibleObjectIDs = set([pair[0] for pair in possibleObjects])
            # Unique hypothesis left: it must be the target.
            if len(possibleObjects) == 1:
                assert list(possibleObjectIDs)[0] == targetID
                results[steps] += 1
                break
            prevPos = curPos
            # The true object must always survive the pruning.
            assert len(possibleObjects) > 0
        #if len(possibleObjectIDs) > 1:
        if len(possibleObjects) > 1:
            # Still ambiguous after all sensations.
            results[None] += 1
    return results
def runSim(numObjects, numFeatures, numTrials):
    # Run numTrials ideal-observer trials and aggregate/report the results.
    # Map from # sensations to list of number of objects per trial
    results = collections.defaultdict(list)
    for _ in xrange(numTrials):
        objects = generateObjects(numObjects, numFeatures)
        # Built map from a feature to all possible positions
        startingSpots = getStartingSpots(objects)
        trialResults = runTrial(objects, startingSpots, numFeatures)
        for steps, count in trialResults.iteritems():
            results[steps].append(count)
    results = dict(results)
    print results
    total = sum([sum(l) for l in results.values()])
    # NOTE(review): if any trial left objects unresolved, results has a None
    # key and k*v below raises TypeError — confirm that case cannot occur for
    # the parameters used here.
    average = float(sum([sum([k*v for v in l]) for k, l in results.iteritems()])) / float(total)
    print "average:", average
    # Persist raw counts for plotting; assumes results/ directory exists.
    with open("results/ideal.json", "w") as f:
        json.dump(results, f)
if __name__ == "__main__":
    # 100 objects, 10 unique features, averaged over 10 trials.
    runSim(100, 10, 10)
| neuroidss/nupic.research | projects/union_path_integration/ideal_sim.py | Python | agpl-3.0 | 4,475 |
# -*- coding: utf-8 -*-
#
# PHP Curl Class documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# -- General configuration ----------------------------------------------------

# Sphinx extension modules enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]

# Template directory, source file suffix, and root toctree document.
templates_path = ['ntemplates']
source_suffix = '.rst'
master_doc = 'index'

# Project identity.
project = u'PHP Curl Class'
copyright = u'2015, Zach Borboa'
author = u'Zach Borboa'

# The short X.Y version and the full release string (kept in sync).
version = '4.8.1'
release = '4.8.1'

# Language autodetection, no excluded sources, default highlighting style.
language = None
exclude_patterns = []
pygments_style = 'sphinx'

# todo/todoList directives produce no output.
todo_include_todos = False

# -- Options for HTML output --------------------------------------------------

html_theme = 'default'
html_static_path = ['nstatic']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'PHPCurlClassdoc'

# -- Options for LaTeX output -------------------------------------------------

# All LaTeX knobs (papersize, pointsize, preamble, figure_align) use defaults.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'PHPCurlClass.tex', u'PHP Curl Class Documentation',
     u'Zach Borboa', 'manual'),
]

# -- Options for manual page output -------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'phpcurlclass', u'PHP Curl Class Documentation',
     [author], 1)
]

# -- Options for Texinfo output -----------------------------------------------

# (source start file, target name, title, author, dir entry, description,
# category).
texinfo_documents = [
    (master_doc, 'PHPCurlClass', u'PHP Curl Class Documentation',
     author, 'PHPCurlClass', '',
     'Miscellaneous'),
]
| elliottpost/lsm-front-end | vendor/php-curl-class/php-curl-class/docs/source/conf.py | Python | apache-2.0 | 9,312 |
import socket
# Per-machine data and output paths; the hostname selects which developer's
# directory layout is in effect.
if socket.gethostname() == 'Faramir':  # for CNN_B
    data_root = '/home/tencia/Documents/data/heart/'
    data_kaggle = data_root + 'kaggle'
    data_sunnybrook = data_root + 'sunnybrook'
    local_root = '/home/tencia/Dropbox/heart/diagnose-heart/'
    data_manual = local_root + 'manual_data'
    data_intermediate = local_root + 'data_intermediate'
    output_dir = local_root + 'CNN_A/output/'
    tencia_output_dir = local_root + 'CNN_B/output/'
else:  # for CNN_A
    data_root = "/media/qiliu/share/heart/"
    data_sunnybrook = data_root + '/sunnybrook'
    data_kaggle = data_root
    local_dir = '/home/qiliu/Documents/Coding/heart/diagnose-heart/'
    manual_data_root = local_dir + 'manual_data'
    data_aug_contours = manual_data_root + '/manual_contours'
    intermediate_dir = local_dir + 'CNN_A'
    params_dir = intermediate_dir + '/params/'
    output_dir = intermediate_dir + '/output/'
    tencia_output_dir = local_dir + 'CNN_B/output'
| woshialex/diagnose-heart | SETTINGS.py | Python | mit | 978 |
str_no_string = 0
str_empty_string = 1
str_yes = 2
str_no = 3
str_blank_string = 4
str_error_string = 5
str_s0 = 6
str_blank_s1 = 7
str_reg1 = 8
str_s50_comma_s51 = 9
str_s50_and_s51 = 10
str_s52_comma_s51 = 11
str_s52_and_s51 = 12
str_msg_battle_won = 13
str_charge = 14
str_color = 15
str_hold_fire = 16
str_blunt_hold_fire = 17
str_finished = 18
str_delivered_damage = 19
str_archery_target_hit = 20
str_cant_use_inventory_now = 21
str_give_up_fight = 22
str_battle_won = 23
str_battle_lost = 24
str_kingdom_1_adjective = 25
str_kingdom_2_adjective = 26
str_kingdom_3_adjective = 27
str_kingdom_4_adjective = 28
str_kingdom_5_adjective = 29
str_kingdom_6_adjective = 30
str_credits_1 = 31
str_credits_2 = 32
str_credits_3 = 33
str_credits_4 = 34
str_credits_5 = 35
str_credits_6 = 36
str_credits_7 = 37
str_credits_8 = 38
str_credits_9 = 39
str_credits_10 = 40
str_credits_11 = 41
str_credits_12 = 42
str_credits_13 = 43
str_credits_14 = 44
str_credits_15 = 45
str_mp_ambush = 46
str_mp_ambush_fog = 47
str_mp_arabian_harbour = 48
str_mp_arabian_harbour_night = 49
str_mp_arabian_village = 50
str_mp_arabian_village_morning = 51
str_mp_arabian_village_conq = 52
str_mp_arabian_village_conq_morning = 53
str_mp_ardennes = 54
str_mp_ardennes_morning = 55
str_mp_avignon = 56
str_mp_avignon_morning = 57
str_mp_bavarian_river = 58
str_mp_bavarian_river_cloudy = 59
str_mp_beach = 60
str_mp_beach_morning = 61
str_mp_borodino = 62
str_mp_borodino_morn = 63
str_mp_champs_elysees = 64
str_mp_champs_elysees_rain = 65
str_mp_charge_to_the_rhine = 66
str_mp_charge_to_the_rhine_cloudy = 67
str_mp_citadelle_napoleon = 68
str_mp_citadelle_napoleon_morning = 69
str_mp_columbia_hill_farm = 70
str_mp_columbia_farm_morning = 71
str_mp_countryside = 72
str_mp_countryside = 73
str_mp_dust = 74
str_mp_dust_morning = 75
str_mp_european_city_summer = 76
str_mp_european_city_winter = 77
str_mp_floodplain = 78
str_mp_floodplain_storm = 79
str_mp_forest_pallisade = 80
str_mp_forest_pallisade_fog = 81
str_mp_fort_al_hafya = 82
str_mp_fort_al_hafya_night = 83
str_mp_fort_bashir = 84
str_mp_fort_bashir_morning = 85
str_mp_fort_beaver = 86
str_mp_fort_beaver_morning = 87
str_mp_fort_boyd = 88
str_mp_fort_boyd_raining = 89
str_mp_fort_brochet = 90
str_mp_fort_brochet_raining = 91
str_mp_fort_de_chartres = 92
str_mp_fort_de_chartres_raining = 93
str_mp_fort_fleetwood = 94
str_mp_fort_fleetwood_storm = 95
str_mp_fort_george = 96
str_mp_fort_george_raining = 97
str_mp_fort_hohenfels = 98
str_mp_fort_hohenfels_night = 99
str_mp_fort_lyon = 100
str_mp_fort_lyon_night = 101
str_mp_fort_mackinaw = 102
str_mp_fort_mackinaw_raining = 103
str_mp_fort_nylas = 104
str_mp_fort_nylas_raining = 105
str_mp_fort_refleax = 106
str_mp_fort_refleax_night = 107
str_mp_fort_vincey = 108
str_mp_fort_vincey_storm = 109
str_mp_french_farm = 110
str_mp_french_farm_storm = 111
str_mp_german_village = 112
str_mp_german_village_rain = 113
str_mp_hougoumont = 114
str_mp_hougoumont_night = 115
str_mp_hungarian_plains = 116
str_mp_hungarian_plains_cloud = 117
str_mp_theisland = 118
str_mp_la_haye_sainte = 119
str_mp_la_haye_sainte_night = 120
str_mp_landshut = 121
str_mp_landshut_night = 122
str_mp_minden = 123
str_mp_minden_night = 124
str_mp_naval = 125
str_mp_oaksfield_day = 126
str_mp_oaksfield_storm = 127
str_mp_outlaws_den = 128
str_mp_outlaws_den_night = 129
str_mp_quatre_bras = 130
str_mp_quatre_bras_night = 131
str_mp_river_crossing = 132
str_mp_river_crossing_morning = 133
str_mp_roxburgh = 134
str_mp_roxburgh_raining = 135
str_mp_russian_river_day = 136
str_mp_russian_river_cloudy = 137
str_mp_russian_village = 138
str_mp_russian_village_fog = 139
str_mp_russian_village_conq = 140
str_mp_russian_village_conq_night = 141
str_mp_saints_isle = 142
str_mp_saints_isle_rain = 143
str_mp_schemmerbach = 144
str_mp_schemmerbach_storm = 145
str_mp_siege_of_toulon = 146
str_mp_siege_of_toulon_night = 147
str_mp_sjotofta = 148
str_mp_sjotofta_night = 149
str_mp_slovenian_village = 150
str_mp_slovenian_village_raining = 151
str_mp_spanish_farm = 152
str_mp_spanish_farm_rain = 153
str_mp_spanish_mountain_pass = 154
str_mp_spanish_mountain_pass_evening = 155
str_mp_spanish_village = 156
str_mp_spanish_village_evening = 157
str_mp_strangefields = 158
str_mp_strangefields_storm = 159
str_mp_swamp = 160
str_mp_venice = 161
str_mp_venice_morning = 162
str_mp_walloon_farm = 163
str_mp_walloon_farm_night = 164
str_mp_testing_map = 165
str_random_multi_plain_medium = 166
str_random_multi_plain_large = 167
str_random_multi_plain_medium_rain = 168
str_random_multi_plain_large_rain = 169
str_random_multi_steppe_medium = 170
str_random_multi_steppe_large = 171
str_random_multi_steppe_forest_medium = 172
str_random_multi_steppe_forest_large = 173
str_random_multi_snow_medium = 174
str_random_multi_snow_medium_snow = 175
str_random_multi_snow_large = 176
str_random_multi_snow_large_snow = 177
str_random_multi_snow_forest_medium = 178
str_random_multi_snow_forest_medium_snow = 179
str_random_multi_snow_forest_large = 180
str_random_multi_snow_forest_large_snow = 181
str_random_multi_desert_medium = 182
str_random_multi_desert_large = 183
str_random_multi_desert_forest_medium = 184
str_random_multi_desert_forest_large = 185
str_random_multi_forest_medium = 186
str_random_multi_forest_medium_rain = 187
str_random_multi_forest_large = 188
str_random_multi_forest_large_rain = 189
str_mp_custom_map_1 = 190
str_mp_custom_map_2 = 191
str_mp_custom_map_3 = 192
str_mp_custom_map_4 = 193
str_mp_custom_map_5 = 194
str_mp_custom_map_6 = 195
str_mp_custom_map_7 = 196
str_mp_custom_map_8 = 197
str_mp_custom_map_9 = 198
str_mp_custom_map_10 = 199
str_mp_custom_map_11 = 200
str_mp_custom_map_12 = 201
str_mp_custom_map_13 = 202
str_mp_custom_map_14 = 203
str_mp_custom_map_15 = 204
str_mp_custom_map_16 = 205
str_mp_custom_map_17 = 206
str_mp_custom_map_18 = 207
str_mp_custom_map_19 = 208
str_mp_custom_map_20 = 209
str_multi_scene_end = 210
str_multi_game_type_1 = 211
str_multi_game_type_2 = 212
str_multi_game_type_3 = 213
str_multi_game_type_5 = 214
str_multi_game_type_6 = 215
str_multi_game_type_7 = 216
str_multi_game_type_8 = 217
str_multi_game_type_9 = 218
str_multi_game_type_11 = 219
str_multi_game_types_end = 220
str_multi_game_type_10 = 221
str_poll_kick_player_s1_by_s0 = 222
str_poll_ban_player_s1_by_s0 = 223
str_poll_change_map_to_s1_by_s0 = 224
str_poll_change_map_to_s1_and_factions_to_s2_and_s3_by_s0 = 225
str_poll_change_number_of_bots_to_reg0_and_reg1_by_s0 = 226
str_poll_kick_player = 227
str_poll_ban_player = 228
str_poll_change_map = 229
str_poll_change_map_with_faction = 230
str_poll_change_number_of_bots = 231
str_poll_time_left = 232
str_poll_result_yes = 233
str_poll_result_no = 234
str_server_name = 235
str_game_password = 236
str_map = 237
str_game_type = 238
str_max_number_of_players = 239
str_number_of_bots_in_team_reg1 = 240
str_team_reg1_faction = 241
str_enable_valve_anti_cheat = 242
str_allow_friendly_fire = 243
str_allow_melee_friendly_fire = 244
str_friendly_fire_damage_self_ratio = 245
str_friendly_fire_damage_friend_ratio = 246
str_spectator_camera = 247
str_control_block_direction = 248
str_map_time_limit = 249
str_round_time_limit = 250
str_players_take_control_of_a_bot_after_death = 251
str_team_points_limit = 252
str_point_gained_from_flags = 253
str_point_gained_from_capturing_flag = 254
str_respawn_period = 255
str_add_to_official_game_servers_list = 256
str_combat_speed = 257
str_combat_speed_0 = 258
str_combat_speed_1 = 259
str_combat_speed_2 = 260
str_combat_speed_3 = 261
str_combat_speed_4 = 262
str_off = 263
str_on = 264
str_defender_spawn_count_limit = 265
str_unlimited = 266
str_automatic = 267
str_by_mouse_movement = 268
str_free = 269
str_stick_to_any_player = 270
str_stick_to_team_members = 271
str_stick_to_team_members_view = 272
str_make_factions_voteable = 273
str_make_kick_voteable = 274
str_make_ban_voteable = 275
str_bots_upper_limit_for_votes = 276
str_make_maps_voteable = 277
str_valid_vote_ratio = 278
str_auto_team_balance_limit = 279
str_welcome_message = 280
str_initial_gold_multiplier = 281
str_battle_earnings_multiplier = 282
str_round_earnings_multiplier = 283
str_allow_player_banners = 284
str_force_default_armor = 285
str_reg0 = 286
str_s0_reg0 = 287
str_s0_s1 = 288
str_reg0_dd_reg1reg2 = 289
str_s0_dd_reg0 = 290
str_respawning_in_reg0_seconds = 291
str_no_more_respawns_remained_this_round = 292
str_reg0_respawns_remained = 293
str_this_is_your_last_respawn = 294
str_wait_next_round = 295
str_yes_wo_dot = 296
str_no_wo_dot = 297
str_s1_returned_flag = 298
str_s1_auto_returned_flag = 299
str_s1_captured_flag = 300
str_s1_taken_flag = 301
str_s1_neutralized_flag_reg0 = 302
str_s1_captured_flag_reg0 = 303
str_s1_pulling_flag_reg0 = 304
str_s1_defended_castle = 305
str_s1_captured_castle = 306
str_auto_team_balance_in_20_seconds = 307
str_auto_team_balance_next_round = 308
str_auto_team_balance_done = 309
str_s1_won_round = 310
str_round_draw = 311
str_round_draw_no_one_remained = 312
str_reset_to_default = 313
str_done = 314
str_player_name = 315
str_kills = 316
str_deaths = 317
str_ping = 318
str_dead = 319
str_reg0_dead = 320
str_bots_reg0_agents = 321
str_bot_1_agent = 322
str_score = 323
str_score_reg0 = 324
str_flags_reg0 = 325
str_reg0_players = 326
str_reg0_player = 327
str_reg0_alive = 328
str_reg0_player_only = 329
str_reg0_players_only = 330
str_reg0_spectator = 331
str_reg0_spectators = 332
str_open_gate = 333
str_close_gate = 334
str_open_door = 335
str_close_door = 336
str_raise_ladder = 337
str_drop_ladder = 338
str_back = 339
str_start_map = 340
str_choose_an_option = 341
str_choose_a_poll_type = 342
str_choose_faction = 343
str_choose_a_faction = 344
str_choose_troop = 345
str_choose_a_troop = 346
str_choose_items = 347
str_choose_an_item = 348
str_options = 349
str_redefine_keys = 350
str_submit_a_poll = 351
str_show_game_rules = 352
str_administrator_panel = 353
str_kick_player = 354
str_ban_player = 355
str_mute_player = 356
str_unmute_player = 357
str_quit = 358
str_poll_for_changing_the_map = 359
str_poll_for_changing_the_map_and_factions = 360
str_poll_for_changing_number_of_bots = 361
str_poll_for_kicking_a_player = 362
str_poll_for_banning_a_player = 363
str_choose_a_player = 364
str_choose_a_map = 365
str_choose_a_faction_for_team_reg0 = 366
str_choose_number_of_bots_for_team_reg0 = 367
str_spectator = 368
str_spectators = 369
str_score = 370
str_command = 371
str_profile_banner_selection_text = 372
str_use_default_banner = 373
str_player_name_s1 = 374
str_space = 375
str_us_ = 376
str_allies_ = 377
str_enemies_ = 378
str_routed = 379
str_team_reg0_bot_count_is_reg1 = 380
str_input_is_not_correct_for_the_command_type_help_for_more_information = 381
str_maximum_seconds_for_round_is_reg0 = 382
str_respawn_period_is_reg0_seconds = 383
str_bots_upper_limit_for_votes_is_reg0 = 384
str_map_is_voteable = 385
str_map_is_not_voteable = 386
str_factions_are_voteable = 387
str_factions_are_not_voteable = 388
str_players_respawn_as_bot = 389
str_players_do_not_respawn_as_bot = 390
str_kicking_a_player_is_voteable = 391
str_kicking_a_player_is_not_voteable = 392
str_banning_a_player_is_voteable = 393
str_banning_a_player_is_not_voteable = 394
str_player_banners_are_allowed = 395
str_player_banners_are_not_allowed = 396
str_default_armor_is_forced = 397
str_default_armor_is_not_forced = 398
str_percentage_of_yes_votes_required_for_a_poll_to_get_accepted_is_reg0 = 399
str_auto_team_balance_threshold_is_reg0 = 400
str_starting_gold_ratio_is_reg0 = 401
str_combat_gold_bonus_ratio_is_reg0 = 402
str_round_gold_bonus_ratio_is_reg0 = 403
str_point_gained_from_flags_is_reg0 = 404
str_point_gained_from_capturing_flag_is_reg0 = 405
str_map_time_limit_is_reg0 = 406
str_team_points_limit_is_reg0 = 407
str_defender_spawn_count_limit_is_s1 = 408
str_system_error = 409
str_routed = 410
str_s42 = 411
str_s14 = 412
str_s1_reg1 = 413
str_s1_reg2 = 414
str_s1_reg3 = 415
str_s1_reg4 = 416
str_s1_reg5 = 417
str_s1_reg6 = 418
str_s1_reg7 = 419
str_s1_reg8 = 420
str_s1_reg9 = 421
str_reg13 = 422
str_reg14 = 423
str_reg15 = 424
str_reg16 = 425
str_reg17 = 426
str_reg18 = 427
str_reg19 = 428
str_reg20 = 429
str_reg21 = 430
str_s40 = 431
str_s44 = 432
str_s41 = 433
str_s15 = 434
str_s2_s3 = 435
str_s1_s2 = 436
str_s15 = 437
str_s13 = 438
str_s12 = 439
str_s12 = 440
str_you = 441
str_we = 442
str_quick_battle_french_farm = 443
str_quick_battle_landshut = 444
str_quick_battle_river_crossing = 445
str_quick_battle_spanish_village = 446
str_quick_battle_strangefields = 447
str_quick_battle_scene_1 = 448
str_quick_battle_scene_2 = 449
str_quick_battle_scene_3 = 450
str_quick_battle_scene_4 = 451
str_quick_battle_scene_6 = 452
str_map_basic = 453
str_game_type_basic = 454
str_battle = 455
str_character = 456
str_player = 457
str_enemy = 458
str_faction = 459
str_start = 460
str_custom_battle = 461
str_plus = 462
str_minus = 463
str_server_name_s0 = 464
str_map_name_s0 = 465
str_game_type_s0 = 466
str_remaining_time_s0reg0_s1reg1 = 467
str_a_duel_request_is_sent_to_s0 = 468
str_s0_offers_a_duel_with_you = 469
str_your_duel_with_s0_is_cancelled = 470
str_a_duel_between_you_and_s0_will_start_in_3_seconds = 471
str_you_have_lost_a_duel = 472
str_you_have_won_a_duel = 473
str_server_s0 = 474
str_disallow_ranged_weapons = 475
str_ranged_weapons_are_disallowed = 476
str_ranged_weapons_are_allowed = 477
str_duel_starts_in_reg0_seconds = 478
str_true = 479
str_false = 480
str_teamkilled_s1_s2 = 481
str_kick_server_kills_first_s1_reg5 = 482
str_kick_server_kills_second_s1_reg5 = 483
str_ban_server_kills_s1_reg5 = 484
str_warning_message_first_reg5 = 485
str_warning_message_second_reg5 = 486
str_kick_to_message_first = 487
str_kick_to_message_second = 488
str_ban_to_message = 489
str_auto_kick_message_s1 = 490
str_auto_ban_message_s1 = 491
str_server_hq_base_retake_s1 = 492
str_server_hq_base_attack_s1 = 493
str_player_left_server_s1_reg13 = 494
str_push_cannon = 495
str_fire_cannon = 496
str_aim_cannon = 497
str_unlimber_cannon = 498
str_limber_cannon = 499
str_load_cartridge = 500
str_load_bomb = 501
str_load_rocket = 502
str_reload_cannon = 503
str_pick_up_round = 504
str_pick_up_shell = 505
str_pick_up_canister = 506
str_pick_up_bomb = 507
str_pick_up_rocket = 508
str_play_piano = 509
str_play_organ = 510
str_take_a_shit = 511
str_play_bell = 512
str_take_ship_control = 513
str_cannot_use_piano = 514
str_cannot_use_organ = 515
str_cannot_use_piano_angle = 516
str_cannot_use_organ_angle = 517
str_cannot_use_toilet = 518
str_piano_in_use = 519
str_organ_in_use = 520
str_toilet_in_use = 521
str_cannot_use_cannon = 522
str_cannot_use_rocket = 523
str_cannon_not_loaded = 524
str_cannon_already_has_ball = 525
str_cannon_already_loaded = 526
str_cannot_carry_more_cannon_ammo = 527
str_cannon_cannot_load_type = 528
str_need_to_have_a_lighter = 529
str_need_to_have_a_ramrod = 530
str_need_to_have_a_ball = 531
str_need_to_have_a_horse = 532
str_horse_already_has_cannon = 533
str_already_to_many_barricades = 534
str_already_to_many_ammobox = 535
str_cannon_is_already_in_use = 536
str_already_to_many_players_class_s21 = 537
str_already_to_many_players_rank_mus = 538
str_already_to_many_players_rank_srg = 539
str_already_to_many_players_rank_off = 540
str_chk_class_limits = 541
str_chk_class_limits_player_count = 542
str_limit_grenadier = 543
str_limit_skirmisher = 544
str_limit_rifle = 545
str_limit_cavalry = 546
str_limit_lancer = 547
str_limit_hussar = 548
str_limit_dragoon = 549
str_limit_cuirassier = 550
str_limit_heavycav = 551
str_limit_artillery = 552
str_limit_rocket = 553
str_limit_sapper = 554
str_limit_musician = 555
str_limit_sergeant = 556
str_limit_officer = 557
str_limit_general = 558
str_build_points_team_1 = 559
str_build_points_team_2 = 560
str_allow_multiple_firearms = 561
str_enable_bonuses = 562
str_bonus_strength = 563
str_bonus_range = 564
str_num_bots_per_squad = 565
str_scale_squad_size = 566
str_max_num_bots = 567
str_chance_of_falling_off_horse = 568
str_damage_from_horse_dying = 569
str_admin_start_map_s0_s1_s2_s5_s6 = 570
str_admin_set_max_num_players_s0_reg1 = 571
str_admin_set_num_bots_in_team_s0_s1_reg1 = 572
str_admin_set_friendly_fire_s0_s9 = 573
str_admin_set_melee_friendly_fire_s0_s9 = 574
str_admin_set_friendly_fire_damage_self_ratio_s0_reg1 = 575
str_admin_set_friendly_fire_damage_friend_ratio_s0_reg1 = 576
str_admin_set_ghost_mode_s0_s1 = 577
str_admin_set_control_block_dir_s0_s1 = 578
str_admin_set_combat_speed_s0_s1 = 579
str_admin_set_respawn_count_s0_s1 = 580
str_admin_set_add_to_servers_list_s0_s9 = 581
str_admin_set_respawn_period_s0_reg1 = 582
str_admin_set_game_max_minutes_s0_reg1 = 583
str_admin_set_round_max_seconds_s0_reg1 = 584
str_admin_set_player_respawn_as_bot_s0_s9 = 585
str_admin_set_game_max_points_s0_reg1 = 586
str_admin_set_point_gained_from_flags_s0_reg1 = 587
str_admin_set_point_gained_from_capturing_flag_s0_reg1 = 588
str_admin_set_initial_gold_multiplier_s0_reg1 = 589
str_admin_set_battle_earnings_multiplier_s0_reg1 = 590
str_admin_set_round_earnings_multiplier_s0_reg1 = 591
str_admin_set_server_name_s1_s0 = 592
str_admin_set_game_password_s1_s0 = 593
str_admin_set_welcome_message_s1 = 594
str_admin_set_welcome_message_s1_s0 = 595
str_admin_set_valid_vote_ratio_s0_reg1 = 596
str_admin_set_auto_team_balance_limit_s0_s1 = 597
str_admin_set_num_bots_voteable_s0_reg1 = 598
str_admin_set_factions_voteable_s0_s9 = 599
str_admin_set_maps_voteable_s0_s9 = 600
str_admin_set_kick_voteable_s0_s9 = 601
str_admin_set_ban_voteable_s0_s9 = 602
str_admin_set_allow_player_banners_s0_s9 = 603
str_admin_set_force_default_armor_s0_s9 = 604
str_admin_set_disallow_ranged_weapons_s0_s9 = 605
str_admin_set_mod_variable_auto_kick_s0_s9 = 606
str_admin_set_mod_variable_max_teamkills_before_kick_s0_reg1 = 607
str_admin_set_mod_variable_auto_horse_s0_s9 = 608
str_admin_set_mod_variable_auto_swap_s0_s9 = 609
str_admin_set_use_class_limits_s0_s9 = 610
str_admin_set_class_limit_player_count_s0_reg1 = 611
str_admin_set_limit_grenadier_s0_reg1 = 612
str_admin_set_limit_skirmisher_s0_reg1 = 613
str_admin_set_limit_rifle_s0_reg1 = 614
str_admin_set_limit_cavalry_s0_reg1 = 615
str_admin_set_limit_lancer_s0_reg1 = 616
str_admin_set_limit_hussar_s0_reg1 = 617
str_admin_set_limit_dragoon_s0_reg1 = 618
str_admin_set_limit_cuirassier_s0_reg1 = 619
str_admin_set_limit_heavycav_s0_reg1 = 620
str_admin_set_limit_artillery_s0_reg1 = 621
str_admin_set_limit_rocket_s0_reg1 = 622
str_admin_set_limit_sapper_s0_reg1 = 623
str_admin_set_limit_musician_s0_reg1 = 624
str_admin_set_limit_sergeant_s0_reg1 = 625
str_admin_set_limit_officer_s0_reg1 = 626
str_admin_set_limit_general_s0_reg1 = 627
str_admin_set_build_points_team_1_s0_reg1 = 628
str_admin_set_build_points_team_2_s0_reg1 = 629
str_admin_set_squad_size_s0_reg1 = 630
str_admin_set_scale_squad_size_s0_s9 = 631
str_admin_set_max_num_bots_s0_reg1 = 632
str_admin_set_allow_multiple_firearms_s0_s9 = 633
str_admin_set_enable_bonuses_s0_s9 = 634
str_admin_set_bonus_strength_s0_reg1 = 635
str_admin_set_bonus_range_s0_reg1 = 636
str_admin_set_fall_off_horse_s0_reg1 = 637
str_admin_set_horse_dying_s0_reg1 = 638
str_mute_all = 639
str_unmute_all = 640
str_slay_player = 641
str_slay_player_s2_s3 = 642
str_slay_all = 643
str_slay_all_s2 = 644
str_freeze_player = 645
str_freeze_player_s2_s3 = 646
str_freeze_all = 647
str_freeze_all_s2 = 648
str_swap_player = 649
str_swap_player_s2_s3 = 650
str_swap_all = 651
str_swap_all_s2 = 652
str_forceautobalance_all = 653
str_forceautobalance_all_s2 = 654
str_spec_player = 655
str_spec_player_s2_s3 = 656
str_spec_all = 657
str_spec_all_s2 = 658
str_kick_player_s2_s3 = 659
str_ban_player_s2_s3 = 660
str_ban_player_temp = 661
str_ban_player_temp_s2_s3 = 662
str_ban_hammer_s2_s3 = 663
str_admin_cheats = 664
str_choose_a_cheat_type = 665
str_cheat_spawn_hammer = 666
str_cheat_spawn_hammer_s2 = 667
str_cheat_spawn_hammer_2_s2 = 668
str_cheat_spawn_shotgun = 669
str_cheat_spawn_shotgun_s2 = 670
str_cheat_spawn_rocketlauncher = 671
str_cheat_spawn_rocketlauncher_s2 = 672
str_cheat_spawn_grenade = 673
str_cheat_spawn_grenade_s2 = 674
str_cheat_spawn_grenade_2_s2 = 675
str_cheat_spawn_horse = 676
str_cheat_spawn_horse_s2 = 677
str_cheat_beacon_player = 678
str_cheat_beacon_player_s2_s3 = 679
str_cheat_heal_player = 680
str_cheat_heal_player_s2_s3 = 681
str_cheat_heal_all = 682
str_cheat_heal_all_s2 = 683
str_cheat_ammo_player = 684
str_cheat_ammo_player_s2_s3 = 685
str_cheat_ammo_all = 686
str_cheat_ammo_all_s2 = 687
str_cheat_tele_to_player = 688
str_cheat_tele_to_player_s2_s3 = 689
str_cheat_tele_bring_player = 690
str_cheat_tele_bring_player_s2_s3 = 691
str_cheat_tele_wall = 692
str_cheat_tele_wall_s2 = 693
str_admin_chat = 694
str_admin_chat_intern = 695
str_admin_chat_s1_s0 = 696
str_inter_admin_chat_s1_s0 = 697
str_chk_auto_kick = 698
str_num_max_teamkills_before_kick = 699
str_chk_auto_horse = 700
str_chk_auto_swap = 701
str_reset_map = 702
str_reset_map_s2 = 703
str_console_command = 704
str_player_kicked_cheating_s2 = 705
str_next_page = 706
str_auto_assign = 707
str_begin = 708
str_game_rules = 709
str_britain_name = 710
str_france_name = 711
str_prussia_name = 712
str_russia_name = 713
str_austria_name = 714
str_infantry = 715
str_cavalry = 716
str_specialists = 717
str_ranker = 718
str_equipment = 719
str_random = 720
str_howitzer = 721
str_cannon = 722
str_all_fire_now = 723
str_left_fire_now = 724
str_middle_fire_now = 725
str_right_fire_now = 726
str_fire_at_my_command = 727
str_use_melee_weapons = 728
str_use_ranged_weapons = 729
str_melee_weapons = 730
str_ranged_weapons = 731
str_formation = 732
str_very_tight = 733
str_tight = 734
str_loose = 735
str_very_loose = 736
str_form_1_row = 737
str_form_reg0_rows = 738
str_confirm_quit_mission = 739
str_no_troop = 740
str_morning = 741
str_noon = 742
str_evening = 743
str_night = 744
str_timeofday = 745
str_fog_none = 746
str_fog_light = 747
str_fog_medium = 748
str_fog_thick = 749
str_fog_amount = 750
str_rain_amount = 751
str_mm_stakes_construct = 752
str_mm_stakes2_construct = 753
str_sandbags_construct = 754
str_chevaux_de_frise_tri_construct = 755
str_gabion_construct = 756
str_fence_construct = 757
str_plank_construct = 758
str_earthwork1_construct = 759
str_explosives_construct = 760
str_reg6_build_points = 761
str_reg7_build_points_cost = 762
str_repair_prop = 763
str_destructible_object = 764
str_build_prop = 765
str_dig_prop = 766
str_undig_prop = 767
str_construct_deconstruct = 768
str_ignite = 769
str_invalid_flag_selection = 770
str_invalid_prop_select = 771
str_invalid_prop_place = 772
str_sail_brit = 773
str_sail_fren = 774
str_select_track = 775
str_music_calls = 776
str_bagpipe_extras = 777
str_play_together = 778
str_drum_britain_1 = 779
str_drum_britain_2 = 780
str_drum_britain_3 = 781
str_drum_britain_4 = 782
str_drum_britain_5 = 783
str_drum_france_1 = 784
str_drum_france_2 = 785
str_drum_france_3 = 786
str_drum_france_4 = 787
str_drum_france_5 = 788
str_drum_prussia_1 = 789
str_drum_prussia_3 = 790
str_drum_prussia_4 = 791
str_drum_prussia_5 = 792
str_drum_prussia_6 = 793
str_drum_russia_1 = 794
str_drum_russia_2 = 795
str_drum_russia_3 = 796
str_drum_russia_4 = 797
str_drum_russia_5 = 798
str_drum_austria_1 = 799
str_drum_austria_2 = 800
str_drum_austria_3 = 801
str_drum_austria_4 = 802
str_drum_austria_5 = 803
str_drum_highland_1 = 804
str_drum_highland_2 = 805
str_drum_signal_1 = 806
str_drum_signal_2 = 807
str_drum_signal_3 = 808
str_fife_britain_1 = 809
str_fife_britain_2 = 810
str_fife_britain_3 = 811
str_fife_britain_4 = 812
str_fife_britain_5 = 813
str_fife_france_1 = 814
str_fife_france_2 = 815
str_fife_france_3 = 816
str_fife_france_4 = 817
str_fife_france_5 = 818
str_fife_prussia_1 = 819
str_fife_prussia_2 = 820
str_fife_prussia_3 = 821
str_fife_prussia_4 = 822
str_fife_prussia_5 = 823
str_fife_russia_1 = 824
str_fife_russia_2 = 825
str_fife_russia_3 = 826
str_fife_russia_4 = 827
str_fife_russia_5 = 828
str_fife_austria_1 = 829
str_fife_austria_2 = 830
str_fife_austria_3 = 831
str_fife_austria_4 = 832
str_fife_austria_5 = 833
str_bugle_britain_1 = 834
str_bugle_britain_2 = 835
str_bugle_france_1 = 836
str_bugle_france_2 = 837
str_bugle_prussia_1 = 838
str_bugle_prussia_2 = 839
str_bugle_prussia_3 = 840
str_bugle_russia_1 = 841
str_bugle_russia_2 = 842
str_bugle_russia_3 = 843
str_bugle_austria_1 = 844
str_bugle_austria_2 = 845
str_bugle_signal_1 = 846
str_bugle_signal_2 = 847
str_bugle_signal_3 = 848
str_bugle_signal_4 = 849
str_bugle_signal_5 = 850
str_bagpipes_britain_1 = 851
str_bagpipes_britain_2 = 852
str_bagpipes_extra_1 = 853
str_bagpipes_extra_2 = 854
str_bagpipes_extra_3 = 855
str_bagpipes_extra_4 = 856
str_bagpipes_extra_5 = 857
str_bagpipes_extra_6 = 858
str_bagpipes_extra_7 = 859
str_bagpipes_extra_8 = 860
str_bagpipes_extra_9 = 861
str_piano_tune_1 = 862
str_piano_tune_2 = 863
str_piano_tune_3 = 864
str_piano_tune_4 = 865
str_piano_tune_5 = 866
str_piano_tune_6 = 867
str_piano_tune_7 = 868
str_piano_tune_8 = 869
str_organ_tune_1 = 870
str_organ_tune_2 = 871
str_organ_tune_3 = 872
str_organ_tune_4 = 873
str_organ_tune_5 = 874
str_organ_tune_6 = 875
str_organ_tune_7 = 876
str_organ_tune_8 = 877
str_flag_reg3 = 878
str_mp_arabian_harbour_flag_1 = 879
str_mp_arabian_harbour_flag_2 = 880
str_mp_arabian_harbour_flag_3 = 881
str_mp_arabian_harbour_flag_4 = 882
str_mp_arabian_harbour_flag_5 = 883
str_mp_arabian_village_flag_1 = 884
str_mp_arabian_village_flag_2 = 885
str_mp_arabian_village_flag_3 = 886
str_mp_ardennes_flag_1 = 887
str_mp_ardennes_flag_2 = 888
str_mp_ardennes_flag_3 = 889
str_mp_ardennes_flag_4 = 890
str_mp_ardennes_flag_5 = 891
str_mp_ardennes_flag_6 = 892
str_mp_ardennes_flag_7 = 893
str_mp_avignon_flag_1 = 894
str_mp_avignon_flag_2 = 895
str_mp_avignon_flag_3 = 896
str_mp_avignon_flag_4 = 897
str_mp_borodino_flag_1 = 898
str_mp_borodino_flag_2 = 899
str_mp_borodino_flag_3 = 900
str_mp_borodino_flag_4 = 901
str_mp_borodino_flag_5 = 902
str_mp_borodino_flag_6 = 903
str_mp_borodino_flag_7 = 904
str_mp_columbia_hill_farm_flag_1 = 905
str_mp_columbia_hill_farm_flag_2 = 906
str_mp_columbia_hill_farm_flag_3 = 907
str_mp_columbia_hill_farm_flag_4 = 908
str_mp_european_city_flag_1 = 909
str_mp_european_city_flag_2 = 910
str_mp_european_city_flag_3 = 911
str_mp_european_city_flag_4 = 912
str_mp_french_farm_flag_1 = 913
str_mp_french_farm_flag_2 = 914
str_mp_french_farm_flag_3 = 915
str_mp_hungarian_plains_flag_1 = 916
str_mp_hungarian_plains_flag_2 = 917
str_mp_hungarian_plains_flag_3 = 918
str_mp_hungarian_plains_flag_4 = 919
str_mp_hungarian_plains_flag_5 = 920
str_mp_hungarian_plains_flag_6 = 921
str_mp_landshut_flag_1 = 922
str_mp_landshut_flag_2 = 923
str_mp_landshut_flag_3 = 924
str_mp_landshut_flag_4 = 925
str_mp_landshut_flag_5 = 926
str_mp_landshut_flag_6 = 927
str_mp_landshut_flag_7 = 928
str_mp_russian_village_flag_1 = 929
str_mp_russian_village_flag_2 = 930
str_mp_russian_village_flag_3 = 931
str_mp_minden_flag_1 = 932
str_mp_minden_flag_2 = 933
str_mp_minden_flag_3 = 934
str_mp_minden_flag_4 = 935
str_mp_minden_flag_5 = 936
str_mp_minden_flag_6 = 937
str_mp_minden_flag_7 = 938
str_mp_minden_flag_8 = 939
str_mp_minden_flag_9 = 940
str_mp_oaksfield_flag_1 = 941
str_mp_oaksfield_flag_2 = 942
str_mp_oaksfield_flag_3 = 943
str_mp_oaksfield_flag_4 = 944
str_mp_oaksfield_flag_5 = 945
str_mp_quatre_bras_flag_1 = 946
str_mp_quatre_bras_flag_2 = 947
str_mp_quatre_bras_flag_3 = 948
str_mp_quatre_bras_flag_4 = 949
str_mp_quatre_bras_flag_5 = 950
str_mp_river_crossing_flag_1 = 951
str_mp_river_crossing_flag_2 = 952
str_mp_river_crossing_flag_3 = 953
str_mp_roxburgh_flag_1 = 954
str_mp_roxburgh_flag_2 = 955
str_mp_roxburgh_flag_3 = 956
str_mp_roxburgh_flag_4 = 957
str_mp_roxburgh_flag_5 = 958
str_mp_roxburgh_flag_6 = 959
str_mp_roxburgh_flag_7 = 960
str_mp_schemmerbach_flag_1 = 961
str_mp_schemmerbach_flag_2 = 962
str_mp_schemmerbach_flag_3 = 963
str_mp_schemmerbach_flag_4 = 964
str_mp_slovenian_village_flag_1 = 965
str_mp_slovenian_village_flag_2 = 966
str_mp_slovenian_village_flag_3 = 967
str_mp_slovenian_village_flag_4 = 968
str_mp_slovenian_village_flag_5 = 969
str_mp_champs_elysees_flag_1 = 970
str_mp_champs_elysees_flag_2 = 971
str_mp_champs_elysees_flag_3 = 972
str_mp_champs_elysees_flag_4 = 973
str_mp_champs_elysees_flag_5 = 974
str_mp_champs_elysees_flag_6 = 975
str_mp_champs_elysees_flag_7 = 976
str_mp_fort_vincey_flag_1 = 977
str_mp_fort_vincey_flag_2 = 978
str_mp_fort_vincey_flag_3 = 979
str_mp_fort_vincey_flag_4 = 980
str_mp_fort_vincey_flag_5 = 981
str_mp_swamp_flag_1 = 982
str_mp_swamp_flag_2 = 983
str_mp_swamp_flag_3 = 984
str_mp_swamp_flag_4 = 985
str_mp_swamp_flag_5 = 986
str_mp_swamp_flag_6 = 987
str_mp_swamp_flag_7 = 988
str_mp_walloon_farm_flag_1 = 989
str_mp_walloon_farm_flag_2 = 990
str_mp_walloon_farm_flag_3 = 991
str_mp_walloon_farm_flag_4 = 992
str_mp_walloon_farm_flag_5 = 993
str_mp_walloon_farm_flag_6 = 994
str_scene_making_welcome_message = 995
str_tutorial_info_1 = 996
str_tutorial_info_2 = 997
str_tutorial_info_3 = 998
str_tutorial_info_4 = 999
str_tutorial_info_5 = 1000
str_tutorial_info_6 = 1001
str_tutorial_info_7 = 1002
str_tutorial_info_8 = 1003
str_tutorial_info_9 = 1004
str_tutorial_info_10 = 1005
str_tutorial_info_11 = 1006
str_tutorial_info_12 = 1007
str_tutorial_info_13 = 1008
str_tutorial_info_14 = 1009
str_tutorial_info_15 = 1010
str_tutorial_info_16 = 1011
str_tutorial_info_17 = 1012
str_tutorial_info_18 = 1013
str_tutorial_info_19 = 1014
str_tutorial_info_20 = 1015
str_tutorial_info_21 = 1016
str_tutorial_info_22 = 1017
str_tutorial_info_23 = 1018
str_tutorial_info_24 = 1019
str_tutorial_info_25 = 1020
str_tutorial_info_26 = 1021
str_vienna_1 = 1022
str_austerlitz_1_1 = 1023
str_dresden_1_1 = 1024
str_dresden_1_2 = 1025
str_dresden_2_1 = 1026
str_dresden_2_2 = 1027
str_cutscene_vienna_1 = 1028
str_cutscene_vienna_2 = 1029
str_cutscene_vienna_3 = 1030
str_cutscene_vienna_4 = 1031
str_cutscene_vienna_5 = 1032
str_cutscene_vienna_6 = 1033
str_cutscene_austerlitz_1_1 = 1034
str_cutscene_austerlitz_1_2 = 1035
str_cutscene_austerlitz_1_3 = 1036
str_cutscene_austerlitz_1_4 = 1037
str_cutscene_austerlitz_1_5 = 1038
str_cutscene_austerlitz_1_6 = 1039
str_cutscene_austerlitz_1_7 = 1040
str_cutscene_austerlitz_1_8 = 1041
str_cutscene_austerlitz_1_9 = 1042
str_cutscene_dresden_1_1 = 1043
str_cutscene_dresden_1_2 = 1044
str_cutscene_dresden_1_3 = 1045
str_cutscene_dresden_1_4 = 1046
str_cutscene_dresden_1_5 = 1047
str_cutscene_dresden_1_6 = 1048
str_cutscene_dresden_2_1 = 1049
str_cutscene_dresden_2_2 = 1050
str_cutscene_dresden_2_3 = 1051
str_cutscene_dresden_2_4 = 1052
str_cutscene_dresden_2_5 = 1053
str_cutscene_dresden_2_6 = 1054
str_mission_briefing_1 = 1055
str_mission_briefing_2 = 1056
str_mission_briefing_3 = 1057
str_mission_briefing_4 = 1058
str_mission_briefings_end = 1059
| CatalansMB/War1714 | src/ID_strings.py | Python | gpl-2.0 | 30,810 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import logging
import os
import sys
try:
from magic import from_file as magic_from_file
except ImportError:
magic_from_file = None
from six.moves import SimpleHTTPServer as srvmod
from six.moves import socketserver
class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
    """Static-file handler that retries the requested path with a series of
    suffixes, so clean URLs such as ``/about`` resolve to the generated
    files ``/about.html`` or ``/about/index.html``.
    """

    # Candidate suffixes, tried in order; '' means "use the path as-is".
    SUFFIXES = ['', '.html', '/index.html']

    def do_GET(self):
        # Remember the path exactly as requested; self.path is rewritten
        # in the loop below.  This must be (re)assigned on every request:
        # the previous hasattr() guard kept the first request's path for
        # the lifetime of a kept-alive connection, so later requests on
        # the same connection served the wrong resource.
        self.original_path = self.path
        # Try to detect file by applying various suffixes
        for suffix in self.SUFFIXES:
            self.path = self.original_path + suffix
            path = self.translate_path(self.path)
            if os.path.exists(path):
                srvmod.SimpleHTTPRequestHandler.do_GET(self)
                # Lazy %-args, consistent with the other logging calls.
                logging.info("Found `%s`.", self.path)
                break
            logging.info("Tried to find `%s`, but it doesn't exist.",
                         self.path)
        else:
            # Fallback if there were no matches
            logging.warning("Unable to find `%s` or variations.",
                            self.original_path)

    def guess_type(self, path):
        """Guess the MIME type for *path*, consulting python-magic (when
        installed) if the default guess is only the generic octet-stream.
        """
        mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)
        # If the default guess is too generic, try the python-magic library
        if mimetype == 'application/octet-stream' and magic_from_file:
            mimetype = magic_from_file(path, mime=True)
        return mimetype
if __name__ == '__main__':
    # Usage: python server.py [port [address]]
    # Explicit conditional expressions replace the old `cond and x or y`
    # idiom, which silently fell back to 8000 whenever int(sys.argv[1])
    # evaluated falsy (e.g. an explicit port of 0).
    PORT = int(sys.argv[1]) if len(sys.argv) in (2, 3) else 8000
    SERVER = sys.argv[2] if len(sys.argv) == 3 else ""

    # Allow quick restarts without waiting out sockets in TIME_WAIT.
    socketserver.TCPServer.allow_reuse_address = True
    try:
        httpd = socketserver.TCPServer(
            (SERVER, PORT), ComplexHTTPRequestHandler)
    except OSError as e:
        logging.error("Could not listen on port %s, server %s.", PORT, SERVER)
        # OSError has no `exitcode` attribute, so this normally exits 1.
        sys.exit(getattr(e, 'exitcode', 1))

    logging.info("Serving at port %s, server %s.", PORT, SERVER)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        logging.info("Shutting down server.")
        httpd.socket.close()
| jimperio/pelican | pelican/server.py | Python | agpl-3.0 | 2,272 |
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must run after the 0003 migration of the `base` app that it amends.
    dependencies = [
        ('base', '0003_auto_20160821_1919'),
    ]
    operations = [
        # Re-declare ContactRole.role with its full choice list.  Stored
        # values are bytestrings, labels are human-readable descriptions
        # (they appear to follow ISO 19115 CI_RoleCode -- verify against
        # the model definition).
        migrations.AlterField(
            model_name='contactrole',
            name='role',
            field=models.CharField(
                help_text='function performed by the responsible party',
                max_length=255,
                choices=[
                    (b'author', 'party who authored the resource'),
                    (b'processor', 'party who has processed the data in a manner such that the resource has been modified'),
                    (b'publisher', 'party who published the resource'),
                    (b'custodian', 'party that accepts accountability and responsibility for the data and ensures appropriate care and maintenance of the resource'),
                    (b'pointOfContact', 'party who can be contacted for acquiring knowledge about or acquisition of the resource'),
                    (b'distributor', 'party who distributes the resource'),
                    (b'user', 'party who uses the resource'),
                    (b'resourceProvider', 'party that supplies the resource'),
                    (b'originator', 'party who created the resource'),
                    (b'owner', 'party that owns the resource'),
                    (b'principalInvestigator', 'key party responsible for gathering information and conducting research')]),
        ),
        # Re-declare ResourceBase.category as an optional (blank/null)
        # foreign key to TopicCategory.
        migrations.AlterField(
            model_name='resourcebase',
            name='category',
            field=models.ForeignKey(
                blank=True,
                to='base.TopicCategory',
                help_text='high-level geographic data thematic classification to assist in the grouping and search of available geographic data sets.',
                null=True),
        ),
    ]
| terranodo/geonode | geonode/base/migrations/0004_auto_20160824_0245.py | Python | gpl-3.0 | 1,977 |
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
logger = logging.getLogger('django.contrib.gis')
def load_geos():
    """
    Locate the GEOS-C shared library, load it with ctypes, and declare the
    prototypes for the reentrant init/finish routines.  Returns the CDLL
    handle; raises ImportError when no library can be found.
    """
    # A custom library path may be supplied via Django settings; any
    # failure to read it simply means "no custom path configured".
    try:
        from django.conf import settings
        lib_path = settings.GEOS_LIBRARY_PATH
    except (AttributeError, EnvironmentError,
            ImportError, ImproperlyConfigured):
        lib_path = None

    # When no explicit path is given, pick the per-platform candidate
    # library names to probe with ctypes.util.find_library().
    if lib_path:
        lib_names = None
    elif os.name == 'nt':
        lib_names = ['geos_c', 'libgeos_c-1']   # Windows NT libraries
    elif os.name == 'posix':
        lib_names = ['geos_c', 'GEOS']          # *NIX libraries
    else:
        raise ImportError('Unsupported OS "%s"' % os.name)

    # find_library() handles platform extensions (.so/.so.1/.dylib) for us,
    # which beats enumerating every name/extension combination by hand.
    if lib_names:
        for candidate in lib_names:
            lib_path = find_library(candidate)
            if lib_path is not None:
                break

    if lib_path is None:
        raise ImportError(
            'Could not find the GEOS library (tried "%s"). '
            'Try setting GEOS_LIBRARY_PATH in your settings.' %
            '", "'.join(lib_names)
        )

    # CDLL works for both *NIX and Windows since GEOS exposes a C API.
    _lgeos = CDLL(lib_path)
    # Declare the reentrant context-handle routines; they are only invoked
    # later, once attached to a context (see geos/prototypes/threadsafe.py).
    _lgeos.initGEOS_r.restype = CONTEXT_PTR
    _lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
    return _lgeos
# ctypes callback definitions for the GEOS notice/error handlers.
# They mimic the GEOS message-handler signature:
#   typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)


def notice_h(fmt, lst):
    """Route a GEOS notice message to the Django GIS logger as a warning."""
    fmt = fmt.decode()
    lst = lst.decode()
    # GEOS may pass a bare message without printf-style arguments, in
    # which case interpolation raises TypeError and the format string
    # itself is the message.
    try:
        message = fmt % lst
    except TypeError:
        message = fmt
    logger.warning('GEOS_NOTICE: %s\n', message)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)


def error_h(fmt, lst):
    """Route a GEOS error message to the Django GIS logger."""
    fmt = fmt.decode()
    lst = lst.decode()
    # As with notices, fall back to the raw format string when GEOS
    # supplies no interpolation arguments.
    try:
        message = fmt % lst
    except TypeError:
        message = fmt
    logger.error('GEOS_ERROR: %s\n', message)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
    """Opaque ctypes stand-in for the GEOS geometry struct; only ever used
    through pointers (see GEOM_PTR below)."""
    pass
class GEOSPrepGeom_t(Structure):
    """Opaque ctypes stand-in for the GEOS prepared-geometry struct."""
    pass
class GEOSCoordSeq_t(Structure):
    """Opaque ctypes stand-in for the GEOS coordinate-sequence struct."""
    pass
class GEOSContextHandle_t(Structure):
    """Opaque ctypes stand-in for a GEOS context handle (reentrant API)."""
    pass
# Pointers to opaque GEOS geometry structures -- these pointer types are
# what actually appear in the GEOS C function signatures used below.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
    """Return a fresh ctypes array of `n` GEOSGeom_t opaque pointers.

    Used by the GEOSGeom_createPolygon and GEOSGeom_createCollection
    routines, which take an array of geometry pointers.
    """
    arr_type = GEOM_PTR * n
    return arr_type()
# Lazily-loaded handle to the GEOS-C library: the actual load_geos() call is
# deferred until first attribute access, so importing this module never
# touches ctypes or the native library.
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory(object):
    """
    Lazily resolve a GEOS C function by name.

    The underlying ctypes function is looked up on the first call only,
    so merely instantiating a factory never touches the GEOS library.
    """
    # Class-level defaults; each may be overridden per instance via kwargs.
    argtypes = None
    restype = None
    errcheck = None

    def __init__(self, func_name, *args, **kwargs):
        self.func_name = func_name
        # Pop the ctypes customizations out of kwargs, falling back to
        # the class-level defaults declared above.
        for attr in ('restype', 'errcheck', 'argtypes'):
            setattr(self, attr, kwargs.pop(attr, getattr(self, attr)))
        # Whatever remains is forwarded verbatim to get_func().
        self.args = args
        self.kwargs = kwargs
        self.func = None  # resolved lazily on first __call__

    def __call__(self, *args, **kwargs):
        # Resolve the ctypes function exactly once, then delegate to it.
        if self.func is None:
            self.func = self.get_func(*self.args, **self.kwargs)
        return self.func(*args, **kwargs)

    def get_func(self, *args, **kwargs):
        # Imported here (not at module level) to avoid a circular import.
        from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
        func = GEOSFunc(self.func_name)
        func.argtypes = self.argtypes or []
        func.restype = self.restype
        if self.errcheck:
            func.errcheck = self.errcheck
        return func
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms
# (the default c_int restype would truncate the returned pointer on 64-bit).
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
# Named groups feed geos_version_info() below; the trailing ' r\d+' allows
# an optional SVN-revision suffix.
version_regex = re.compile(
    r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
    r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
    """
    Return a dictionary of version metadata parsed from the GEOS version
    string: the version number (plus its major/minor/subminor parts), the
    release-candidate number if any, and the C API version.

    Raises GEOSException when the version string cannot be parsed.
    """
    ver = geos_version().decode()
    match = version_regex.match(ver)
    if match is None:
        raise GEOSException('Could not parse version info string "%s"' % ver)
    keys = ('version', 'release_candidate', 'capi_version',
            'major', 'minor', 'subminor')
    return {key: match.group(key) for key in keys}
| KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/contrib/gis/geos/libgeos.py | Python | gpl-3.0 | 6,216 |
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one import per line (was "import sys, os").
import os
import sys
#import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, parent)
from circleclient import circleclient
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'circleclient'
copyright = u'2015, Jakub Jarosz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = circleclient.__version__
# The full version, including alpha/beta/rc tags.
release = circleclient.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'circleclientdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'circleclient.tex', u'circleclient Documentation',
   u'Jakub Jarosz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'circleclient', u'circleclient Documentation',
     [u'Jakub Jarosz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'circleclient', u'circleclient Documentation',
   u'Jakub Jarosz', 'circleclient', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| qba73/circleclient | docs/conf.py | Python | mit | 8,265 |
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import MalmoPython
# Sanity test: a freshly-constructed AgentHost, before any mission has been
# started, must report a completely empty world state.
agent_host = MalmoPython.AgentHost()
agent_host.setVideoPolicy( MalmoPython.VideoPolicy.LATEST_FRAME_ONLY )
agent_host.setRewardsPolicy( MalmoPython.RewardsPolicy.SUM_REWARDS )
agent_host.setObservationsPolicy( MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY )
world_state = agent_host.getWorldState()
# Nothing has run yet, so every counter and collection must be empty.
assert not world_state.has_mission_begun, 'World state says mission has already begun.'
assert not world_state.is_mission_running, 'World state says mission is already running.'
assert world_state.number_of_observations_since_last_state == 0, 'World state says observations already received.'
assert world_state.number_of_rewards_since_last_state == 0, 'World state says rewards already received.'
assert world_state.number_of_video_frames_since_last_state == 0, 'World state says video frames already received.'
assert len( world_state.observations ) == 0, 'World state has observations stored.'
assert len( world_state.rewards ) == 0, 'World state has rewards stored.'
assert len( world_state.video_frames ) == 0, 'World state has video frames stored.'
# Python 2 print statement — this script targets the Python 2 Malmo bindings.
print agent_host.getUsage()
| tnarik/malmo | Malmo/test/PythonTests/test_agent_host.py | Python | mit | 2,418 |
#!/usr/bin/env python
__version__ = '1.4.0'  # Package version string.
| terjekv/zabbix-cli | zabbix_cli/version.py | Python | gpl-3.0 | 44 |
from pages.page import Page
from selenium.webdriver.common.by import By
from selenium import webdriver
class InternalPage(Page):
    """Page object for the application's internal (post-login) page.

    All locators use ``driver.find_element(By.CSS_SELECTOR, ...)``: the
    ``find_element_by_css_selector`` shorthand was deprecated and removed
    in Selenium 4, and ``is_this_page`` already used the ``By`` form, so
    this makes the class internally consistent as well.
    """

    @property
    def logout_button(self):
        """Navigation link whose href ends with '?logout'."""
        return self.driver.find_element(By.CSS_SELECTOR, "nav a[href $= '?logout']")

    @property
    def user_management_link(self):
        """Navigation link whose href ends with '?go=users'."""
        return self.driver.find_element(By.CSS_SELECTOR, "nav a[href $= '?go=users']")

    @property
    def user_profile_link(self):
        """Navigation link whose href ends with '?go=profile'."""
        return self.driver.find_element(By.CSS_SELECTOR, "nav a[href $= '?go=profile']")

    @property
    def add_movie_link(self):
        """Navigation link whose href ends with '?go=add'."""
        return self.driver.find_element(By.CSS_SELECTOR, "nav a[href $= '?go=add']")

    @property
    def is_this_page(self):
        """True when the navigation bar element is visible."""
        return self.is_element_visible((By.CSS_SELECTOR, "nav"))
| sargm/selenium-py-traning-barancev | php4dvd/pages/internal_page.py | Python | apache-2.0 | 770 |
# 448. Find All Numbers Disappeared in an Array QuestionEditorial Solution My Submissions
# Total Accepted: 114
# Total Submissions: 215
# Difficulty: Medium
# Contributors: yuhaowang001
# Given an array of integers where 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.
#
# Find all the elements of [1, n] inclusive that do not appear in this array.
#
# Could you do it without extra space and in O(n) runtime? You may assume the returned list does not count as extra space.
#
# Example:
#
# Input:
# [4,3,2,7,8,2,3,1]
#
# Output:
# [5,6]
# Subscribe to see which companies asked this question
| shawncaojob/LC | QUESTIONS/448_find_all_numbers_disappered_in_an_array_G.py | Python | gpl-3.0 | 643 |
# there is no specific lieklihood code for this experiment, because it
# falls in the category of CMB experiments described in the "newdat"
# format. The class below inherits the properties of a general class
# "Likelihood_newdat", which knows how to deal with all experiments in
# "newdat" format.
from montepython.likelihood_class import Likelihood_newdat
class boomerang(Likelihood_newdat):
    """Boomerang CMB likelihood.

    No experiment-specific code is needed: the data ship in the generic
    "newdat" format, so all behavior is inherited from Likelihood_newdat.
    """
    pass
| baudren/montepython_public | montepython/likelihoods/boomerang/__init__.py | Python | mit | 406 |
from django.db import models
from .base import MessageAbstractModel
class SMS(MessageAbstractModel):
    """An SMS message built on MessageAbstractModel."""

    # Message identifier — presumably assigned by the SMS gateway/backend;
    # TODO confirm against SendSMSTask.
    cmid = models.TextField()

    class Meta:
        verbose_name = 'SMS'
        verbose_name_plural = verbose_name

    def send_message(self, run_async=True):
        """Dispatch this SMS through SendSMSTask.

        The flag was renamed from ``async`` to ``run_async`` because
        ``async`` became a reserved keyword in Python 3.7, which made the
        original signature a SyntaxError on modern interpreters.

        When ``run_async`` is true the Celery-style ``delay()`` is used;
        otherwise the task runs synchronously via ``run()``.
        """
        from communications.tasks.sms import SendSMSTask
        task = SendSMSTask()
        if run_async:
            return task.delay(self.id)
        return task.run(self.id)
| dobestan/fastblog | fastblog/communications/models/sms.py | Python | mit | 442 |
from _pydevd_bundle.pydevd_constants import USE_LIB_COPY, izip
# Compatibility import chains: each try-block below prefers the bundled
# "lib copy" module when USE_LIB_COPY is set, then the Python 2 module name,
# and finally the Python 3 name, so the same debugger code runs on either
# interpreter.  Do not reorder — the fallback order is significant.
try:
    try:
        if USE_LIB_COPY:
            from _pydev_imps._pydev_saved_modules import xmlrpclib
        else:
            import xmlrpclib
    except ImportError:
        import xmlrpc.client as xmlrpclib
except ImportError:
    from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
try:
    try:
        if USE_LIB_COPY:
            from _pydev_imps._pydev_saved_modules import _pydev_SimpleXMLRPCServer
            from _pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
        else:
            from SimpleXMLRPCServer import SimpleXMLRPCServer
    except ImportError:
        from xmlrpc.server import SimpleXMLRPCServer
except ImportError:
    from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
# execfile is a Py2 builtin; a pure-Python replacement is bundled for Py3.
try:
    execfile=execfile #Not in Py3k
except NameError:
    from _pydev_imps._pydev_execfile import execfile
try:
    if USE_LIB_COPY:
        from _pydev_imps._pydev_saved_modules import _queue
    else:
        import Queue as _queue
except:
    import queue as _queue #@UnresolvedImport
# Two Exec variants exist because exec syntax differs between interpreters;
# fall back to the second if the first fails to import.
try:
    from _pydevd_bundle.pydevd_exec import Exec
except:
    from _pydevd_bundle.pydevd_exec2 import Exec
try:
    from urllib import quote, quote_plus, unquote_plus
except:
    from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
| SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/_pydev_bundle/pydev_imports.py | Python | bsd-3-clause | 1,505 |
"""The tests for Device tracker device conditions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import STATE_HOME
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded entity registry."""
    return mock_registry(hass)
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Records every call made to the mocked 'test.automation' service.
    return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a device_tracker."""
    # Register a config entry and a device, then attach one tracker entity.
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    # Both home-state conditions must be offered for the tracker entity.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": "is_not_home",
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        },
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": "is_home",
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        },
    ]
    conditions = await async_get_device_automations(
        hass, DeviceAutomationType.CONDITION, device_entry.id
    )
    assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
    """Test for turn_on and turn_off conditions."""
    # Start with the tracker at home so only the is_home automation can fire.
    hass.states.async_set("device_tracker.entity", STATE_HOME)
    # Two automations: one gated on is_home, one on is_not_home.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event1"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "device_tracker.entity",
                            "type": "is_home",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "device_tracker.entity",
                            "type": "is_not_home",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_not_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
            ]
        },
    )
    # While home, firing both events may only run the is_home automation.
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "is_home - event - test_event1"
    # Once away, only the is_not_home automation may run.
    hass.states.async_set("device_tracker.entity", "school")
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "is_not_home - event - test_event2"
| rohitranjan1991/home-assistant | tests/components/device_tracker/test_device_condition.py | Python | mit | 4,518 |
"""SCons.Tool.install
Tool-specific initialization for the install tool.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/install.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import re
import shutil
import stat
import SCons.Action
import SCons.Tool
import SCons.Util
#
# We keep track of *all* installed files.
_INSTALLED_FILES = []  # every target node emitted by an install builder this run
_UNIQUE_INSTALLED_FILES = None  # de-duplicated view; reset to None whenever _INSTALLED_FILES changes
class CopytreeError(EnvironmentError):
    """Aggregate copy failure; args[0] is a list of (src, dst, reason) tuples."""
    pass
# This is a patched version of shutil.copytree from python 2.5. It
# doesn't fail if the dir exists, which regular copytree does
# (annoyingly). Note the XXX comment in the docstring.
def scons_copytree(src, dst, symlinks=False):
    """Recursively copy a directory tree using copy2().
    The destination directory must not already exist.
    If exception(s) occur, an CopytreeError is raised with a list of reasons.
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.
    XXX Consider this example code rather than the ultimate tool.
    """
    names = os.listdir(src)
    # [email protected] fix: check for dir before making dirs.
    if not os.path.exists(dst):
        os.makedirs(dst)
    errors = []
    # Copy entry by entry, collecting failures instead of aborting on the
    # first one; all failures are reported together at the end.
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                scons_copytree(srcname, dstname, symlinks)
            else:
                shutil.copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            errors.append((srcname, dstname, str(why)))
        # catch the CopytreeError from the recursive copytree so that we can
        # continue with other files
        except CopytreeError, err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except SCons.Util.WinError:
        # can't copy file access times on Windows
        pass
    except OSError, why:
        errors.extend((src, dst, str(why)))
    if errors:
        raise CopytreeError, errors
#
# Functions doing the actual work of the Install Builder.
#
def copyFunc(dest, source, env):
    """Install a source file or directory into a destination by copying,
    (including copying permission/mode bits)."""
    if not os.path.isdir(source):
        # Plain file: copy contents + metadata, then make sure it is writable.
        shutil.copy2(source, dest)
        mode = stat.S_IMODE(os.stat(source)[stat.ST_MODE])
        os.chmod(dest, mode | stat.S_IWRITE)
        return 0
    # Directory: the destination (if present) must also be a directory.
    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise SCons.Errors.UserError("cannot overwrite non-directory `%s' with a directory `%s'" % (str(dest), str(source)))
    else:
        parent = os.path.split(dest)[0]
        if not os.path.exists(parent):
            os.makedirs(parent)
    scons_copytree(source, dest)
    return 0
#
# Functions doing the actual work of the InstallVersionedLib Builder.
#
def copyFuncVersionedLib(dest, source, env):
    """Install a versioned library into a destination by copying,
    (including copying permission/mode bits) and then creating
    required symlinks."""
    if os.path.isdir(source):
        raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source) )
    # Remove any pre-existing file or link at the destination first.
    try:
        os.remove(dest)
    except:
        pass
    shutil.copy2(source, dest)
    mode = stat.S_IMODE(os.stat(source)[stat.ST_MODE])
    os.chmod(dest, mode | stat.S_IWRITE)
    installShlibLinks(dest, source, env)
    return 0
def listShlibLinksToInstall(dest, source, env):
    """Return [(link_node, target_node), ...] for the shared-library
    symlinks that must be recreated next to *dest* when installing
    the *source* nodes."""
    links = []
    sources = env.arg2nodes(source)
    install_dir = env.fs.File(dest).get_dir()
    for src in sources:
        attrs = getattr(src, 'attributes', None)
        for link, linktgt in getattr(attrs, 'shliblinks', None) or ():
            link_name = os.path.basename(link.get_path())
            tgt_name = os.path.basename(linktgt.get_path())
            links.append((env.fs.File(link_name, install_dir),
                          env.fs.File(tgt_name, install_dir)))
    return links
def installShlibLinks(dest, source, env):
    """If we are installing a versioned shared library create the required links."""
    # Debug-only tracing; flip to True when diagnosing symlink installation.
    Verbose = False
    symlinks = listShlibLinksToInstall(dest, source, env)
    if Verbose:
        print 'installShlibLinks: symlinks=%r' % SCons.Tool.StringizeLibSymlinks(symlinks)
    if symlinks:
        SCons.Tool.CreateLibSymlinks(env, symlinks)
    return
def installFunc(target, source, env):
    """Install each source node into the matching target node using the
    callable stored in the INSTALL construction variable.  Returns 0 on
    success, 1 as soon as one install call reports failure."""
    try:
        install = env['INSTALL']
    except KeyError:
        raise SCons.Errors.UserError('Missing INSTALL construction variable.')
    assert len(target)==len(source), \
           "Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
    for tgt, src in zip(target, source):
        if install(tgt.get_path(), src.get_path(), env):
            return 1
    return 0
def installFuncVersionedLib(target, source, env):
    """Install each versioned-library source into the matching target using
    the callable stored in the INSTALLVERSIONEDLIB construction variable.
    Targets carrying a 'shlibname' attribute are installed under that name
    inside the target's directory."""
    try:
        install = env['INSTALLVERSIONEDLIB']
    except KeyError:
        raise SCons.Errors.UserError('Missing INSTALLVERSIONEDLIB construction variable.')
    assert len(target)==len(source), \
           "Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
    for tgt, src in zip(target, source):
        if hasattr(tgt.attributes, 'shlibname'):
            tpath = os.path.join(tgt.get_dir(), tgt.attributes.shlibname)
        else:
            tpath = tgt.get_path()
        if install(tpath, src.get_path(), env):
            return 1
    return 0
def stringFunc(target, source, env):
    """Build the progress message for an install action, honoring the
    INSTALLSTR construction variable when it is set."""
    installstr = env.get('INSTALLSTR')
    if installstr:
        return env.subst_target_source(installstr, 0, target, source)
    dst = str(target[0])
    src = str(source[0])
    kind = 'directory' if os.path.isdir(src) else 'file'
    return 'Install %s: "%s" as "%s"' % (kind, src, dst)
#
# Emitter functions
#
def add_targets_to_INSTALLED_FILES(target, source, env):
    """Emitter that records every install target in the module-global
    _INSTALLED_FILES list, so a single scons run can enumerate all of
    the files it installed."""
    global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES
    _INSTALLED_FILES += target
    # Invalidate the de-duplicated view; it is recomputed elsewhere.
    _UNIQUE_INSTALLED_FILES = None
    return (target, source)
def add_versioned_targets_to_INSTALLED_FILES(target, source, env):
    """ An emitter that adds all target files to the list stored in the
    _INSTALLED_FILES global variable. This way all installed files of one
    scons call will be collected.
    """
    global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES
    # Debug-only tracing; flip to True when diagnosing symlink emission.
    Verbose = False
    _INSTALLED_FILES.extend(target)
    if Verbose:
        print "add_versioned_targets_to_INSTALLED_FILES: target=%r" % map(str, target)
    # Also emit the versioned-library symlinks alongside the first target.
    symlinks = listShlibLinksToInstall(target[0], source, env)
    if symlinks:
        SCons.Tool.EmitLibSymlinks(env, symlinks, target[0])
    _UNIQUE_INSTALLED_FILES = None
    return (target, source)
class DESTDIR_factory(object):
    """Node factory that roots every requested file/dir under the sandbox
    directory supplied to the constructor (used for --install-sandbox)."""

    def __init__(self, env, dir):
        self.env = env
        self.dir = env.arg2nodes(dir, env.fs.Dir)[0]

    def Entry(self, name):
        """Return *name* as an Entry node relative to the sandbox root."""
        return self.dir.Entry(SCons.Util.make_path_relative(name))

    def Dir(self, name):
        """Return *name* as a Dir node relative to the sandbox root."""
        return self.dir.Dir(SCons.Util.make_path_relative(name))
#
# The Builder Definition
#
install_action = SCons.Action.Action(installFunc, stringFunc)
installas_action = SCons.Action.Action(installFunc, stringFunc)
installVerLib_action = SCons.Action.Action(installFuncVersionedLib, stringFunc)
# Created lazily (see generate()) so the --install-sandbox option can be
# consulted before the builder's target factory is chosen.
BaseInstallBuilder = None
def _install_dir_nodes(env, dir):
    """Resolve *dir* to a list of directory nodes, honoring the
    --install-sandbox option.  Raises UserError if *dir* is a file.
    Shared by InstallBuilderWrapper and InstallVersionedBuilderWrapper,
    which previously duplicated this logic verbatim."""
    import SCons.Script
    install_sandbox = SCons.Script.GetOption('install_sandbox')
    if install_sandbox:
        target_factory = DESTDIR_factory(env, install_sandbox)
    else:
        target_factory = env.fs
    try:
        return env.arg2nodes(dir, target_factory.Dir)
    except TypeError:
        raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir))

def _install_sources_into(builder, env, dnodes, source, kw):
    """Run *builder* once for every (directory node, source node) pair and
    return the accumulated list of targets."""
    sources = env.arg2nodes(source, env.fs.Entry)
    tgt = []
    for dnode in dnodes:
        for src in sources:
            # Prepend './' so the lookup doesn't interpret an initial
            # '#' on the file name portion as meaning the Node should
            # be relative to the top-level SConstruct directory.
            target = env.fs.Entry('.'+os.sep+src.name, dnode)
            tgt.extend(builder(env, target, src, **kw))
    return tgt

def InstallBuilderWrapper(env, target=None, source=None, dir=None, **kw):
    """Install() pseudo-builder: install *source* into *dir* (or *target*,
    exactly one of which may be given)."""
    if target and dir:
        import SCons.Errors
        raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.")
    if not dir:
        dir = target
    return _install_sources_into(BaseInstallBuilder, env,
                                 _install_dir_nodes(env, dir), source, kw)

def InstallAsBuilderWrapper(env, target=None, source=None, **kw):
    """InstallAs() pseudo-builder: pair-wise source -> target install."""
    result = []
    for src, tgt in map(lambda x, y: (x, y), source, target):
        result.extend(BaseInstallBuilder(env, tgt, src, **kw))
    return result

# Created lazily (see generate()), like BaseInstallBuilder above.
BaseVersionedInstallBuilder = None

def InstallVersionedBuilderWrapper(env, target=None, source=None, dir=None, **kw):
    """InstallVersionedLib() pseudo-builder: like Install(), but uses the
    versioned builder so library symlinks are recreated as well."""
    if target and dir:
        import SCons.Errors
        raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.")
    if not dir:
        dir = target
    return _install_sources_into(BaseVersionedInstallBuilder, env,
                                 _install_dir_nodes(env, dir), source, kw)
# One-shot guard so the --install-sandbox option is only registered once,
# even if generate() is called for several environments.
added = None
def generate(env):
    """Attach the internal install builders and the default INSTALL /
    INSTALLVERSIONEDLIB copy functions to the construction environment."""
    from SCons.Script import AddOption, GetOption
    global added
    if not added:
        added = 1
        AddOption('--install-sandbox',
                  dest='install_sandbox',
                  type="string",
                  action="store",
                  help='A directory under which all installed files will be placed.')
    global BaseInstallBuilder
    if BaseInstallBuilder is None:
        # Built on first use so the sandbox option value is available.
        install_sandbox = GetOption('install_sandbox')
        if install_sandbox:
            target_factory = DESTDIR_factory(env, install_sandbox)
        else:
            target_factory = env.fs
        BaseInstallBuilder = SCons.Builder.Builder(
            action = install_action,
            target_factory = target_factory.Entry,
            source_factory = env.fs.Entry,
            multi = 1,
            emitter = [ add_targets_to_INSTALLED_FILES, ],
            source_scanner = SCons.Scanner.Base( {}, name = 'Install', recursive = False ),
            name = 'InstallBuilder')
    global BaseVersionedInstallBuilder
    if BaseVersionedInstallBuilder is None:
        install_sandbox = GetOption('install_sandbox')
        if install_sandbox:
            target_factory = DESTDIR_factory(env, install_sandbox)
        else:
            target_factory = env.fs
        BaseVersionedInstallBuilder = SCons.Builder.Builder(
            action = installVerLib_action,
            target_factory = target_factory.Entry,
            source_factory = env.fs.Entry,
            multi = 1,
            emitter = [ add_versioned_targets_to_INSTALLED_FILES, ],
            name = 'InstallVersionedBuilder')
    env['BUILDERS']['_InternalInstall'] = InstallBuilderWrapper
    env['BUILDERS']['_InternalInstallAs'] = InstallAsBuilderWrapper
    env['BUILDERS']['_InternalInstallVersionedLib'] = InstallVersionedBuilderWrapper
    # We'd like to initialize this doing something like the following,
    # but there isn't yet support for a ${SOURCE.type} expansion that
    # will print "file" or "directory" depending on what's being
    # installed. For now we punt by not initializing it, and letting
    # the stringFunc() that we put in the action fall back to the
    # hand-crafted default string if it's not set.
    #
    #try:
    #    env['INSTALLSTR']
    #except KeyError:
    #    env['INSTALLSTR'] = 'Install ${SOURCE.type}: "$SOURCES" as "$TARGETS"'
    try:
        env['INSTALL']
    except KeyError:
        env['INSTALL'] = copyFunc
    try:
        env['INSTALLVERSIONEDLIB']
    except KeyError:
        env['INSTALLVERSIONEDLIB'] = copyFuncVersionedLib
def exists(env):
    """Tool availability check: the Install tool is always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| xiaohaidao007/pandoraBox-SDK-mt7620 | staging_dir/host/lib/scons-2.5.0/SCons/Tool/install.py | Python | gpl-2.0 | 15,735 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class IrModel(osv.osv):
    # Extends ir.model with helpers to discover models reachable through
    # relational fields (old OpenERP / Python 2 ORM API).
    _inherit = 'ir.model'
    def _get_first_level_relations(self, cr, uid, ids, context):
        """Return ids of models directly referenced by relational fields
        (many2one / one2many / many2many) of the models in ``ids``."""
        field_obj = self.pool.get('ir.model.fields')
        field_ids = field_obj.search(cr, uid, [
            ('ttype', 'in', ('many2one', 'one2many', 'many2many')),
            ('model_id', 'in', ids),
        ], context=context)
        if field_ids:
            # NOTE(review): read() is deliberately(?) called with context=None
            # instead of the caller's context -- verify this is intentional.
            models = [field['relation'] for field in field_obj.read(cr, uid, field_ids, ['relation'], context=None)]
            return self.search(cr, uid, [('model', 'in', models)], context=context)
        return []
    def get_relations(self, cr, uid, ids, level=1, context=None):
        """
        Return models linked to models given in params
        If you don't want limit the relations level, indicate level = -1
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        relation_ids, model_ids = list(ids), list(ids)
        # Breadth-first expansion: each pass collects the next level of related
        # models, stopping when nothing new is found or the level budget hits 0
        # (a negative level never reaches 0, hence "unlimited" via level=-1).
        while model_ids and level:
            model_ids = self._get_first_level_relations(cr, uid, model_ids, context)
            model_ids = list(set(model_ids) - set(relation_ids))
            relation_ids.extend(model_ids)
            level -= 1
        # The starting models themselves are excluded from the result.
        return list(set(relation_ids) - set(ids))
IrModel()
class IrModelAccess(osv.osv):
    """Extends ir.model.access with a helper that builds a readable rule name."""
    _inherit = 'ir.model.access'
    def get_name(self, cr, uid, model_id, group_id=False):
        """Return the access-rule name '<model> <group-name-lowercased|all>'."""
        model_name = self.pool.get('ir.model').read(cr, uid, model_id, ['model'])['model']
        # Mirrors the `group_id and X or 'all'` idiom: fall back to 'all' when
        # no group is given, or when the fetched group name is empty.
        group_name = 'all'
        if group_id:
            lowered = self.pool.get('res.groups').read(cr, uid, group_id, ['name'])['name'].lower()
            if lowered:
                group_name = lowered
        return '%s %s' % (model_name, group_name)
IrModelAccess()
class ResGroup(osv.osv):
    # Extends res.groups: auto-complete read-only access rules for related
    # models, and keep profile-linked users synchronized on membership changes.
    _inherit = 'res.groups'
    def button_complete_access_controls(self, cr, uid, ids, context=None):
        """Create access rules for the first level relation models of access rule models not only in readonly"""
        context = context or {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        access_obj = self.pool.get('ir.model.access')
        for group in self.browse(cr, uid, ids, context):
            # Only models the group can modify need their relations readable.
            model_ids = [access_rule.model_id.id for access_rule in group.model_access
                         if access_rule.perm_write or access_rule.perm_create or access_rule.perm_unlink]
            # context['relations_level'] bounds how deep related models are
            # collected (defaults to direct relations only).
            relation_model_ids = self.pool.get('ir.model').get_relations(cr, uid, model_ids, context.get('relations_level', 1), context)
            for relation_model_id in relation_model_ids:
                # Read-only rule: related records become visible, not editable.
                access_obj.create(cr, uid, {
                    'name': access_obj.get_name(cr, uid, relation_model_id, group.id),
                    'model_id': relation_model_id,
                    'group_id': group.id,
                    'perm_read': True,
                    'perm_write': False,
                    'perm_create': False,
                    'perm_unlink': False,
                }, context)
        return True
    def _update_users(self, cr, uid, vals, context=None):
        # vals['users'] holds relational command tuples (OpenERP write()
        # convention): code 6 replaces membership with the id list in item[2],
        # code 4 links the single id in item[1]. Other codes are ignored here.
        if vals.get('users'):
            user_profile_ids = []
            user_obj = self.pool.get('res.users')
            for item in vals['users']:
                user_ids = []
                if item[0] == 6:
                    user_ids = item[2]
                elif item[0] == 4:
                    user_ids = [item[1]]
                # For each affected user, collect the profile user to refresh:
                # the user itself if it IS a profile, else its linked profile.
                for user in user_obj.read(cr, uid, user_ids, ['user_profile', 'user_profile_id'], context, '_classic_write'):
                    if user['user_profile']:
                        user_profile_ids.append(user['id'])
                    else:
                        user_profile_ids.append(user['user_profile_id'])
            if user_profile_ids:
                # Empty-vals write triggers the profile propagation machinery.
                user_obj.write(cr, uid, list(set(user_profile_ids)), {}, context) # Update users linked to profiles
    def write(self, cr, uid, ids, vals, context=None):
        # Propagate membership changes to profile users before saving.
        self._update_users(cr, uid, vals, context)
        return super(ResGroup, self).write(cr, uid, ids, vals, context)
ResGroup()
| 3dfxsoftware/cbss-addons | smile_access_control/res_group.py | Python | gpl-2.0 | 4,958 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USER and provide wmake rules for special purpose
# 'USER and 'USERMPI' mpi implementations.
# The choice of 'USER' vs 'USERMPI' may change in the future.
#
# Changes
# 2017-03-28 Mark Olesen <[email protected]>
# - avoid installing intermediate targets.
# - reworked to mirror the openfoam-com package.
# If changes are needed here, consider if they need applying there too.
#
# Known issues
# - Combining +parmgridgen with +float32 probably won't work.
#
##############################################################################
import glob
import re
import shutil
import os
from spack import *
from spack.pkg.builtin.openfoam_com import OpenfoamArch
from spack.pkg.builtin.openfoam_com import add_extra_files
from spack.pkg.builtin.openfoam_com import write_environ
from spack.pkg.builtin.openfoam_com import rewrite_environ_files
class FoamExtend(Package):
    """The Extend Project is a fork of the OpenFOAM opensource library
    for Computational Fluid Dynamics (CFD).
    This offering is not approved or endorsed by OpenCFD Ltd,
    producer and distributor of the OpenFOAM software via www.openfoam.com,
    and owner of the OPENFOAM trademark.
    """
    homepage = "http://www.extend-project.de/"
    # Each release lives in its own git repository on SourceForge.
    version('4.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-4.0')
    version('3.2', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.2')
    version('3.1', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.1')
    version('3.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.0')
    # variant('int64', default=False,
    #         description='Compile with 64-bit label')
    variant('float32', default=False,
            description='Compile with 32-bit scalar (single-precision)')
    variant('paraview', default=False,
            description='Build paraview plugins (eg, paraFoam)')
    variant('scotch', default=True,
            description='With scotch for decomposition')
    variant('ptscotch', default=True,
            description='With ptscotch for decomposition')
    variant('metis', default=True,
            description='With metis for decomposition')
    variant('parmetis', default=True,
            description='With parmetis for decomposition')
    variant('parmgridgen', default=True,
            description='With parmgridgen support')
    variant('source', default=True,
            description='Install library/application sources and tutorials')
    provides('openfoam')
    depends_on('mpi')
    depends_on('python')
    depends_on('zlib')
    depends_on('flex', type='build')
    depends_on('cmake', type='build')
    depends_on('scotch~metis', when='~ptscotch+scotch')
    depends_on('scotch~metis+mpi', when='+ptscotch')
    depends_on('metis@5:', when='+metis')
    depends_on('parmetis', when='+parmetis')
    # mgridgen is statically linked
    depends_on('parmgridgen', when='+parmgridgen', type='build')
    depends_on('paraview@:5.0.1', when='+paraview')
    # General patches
    common = ['spack-Allwmake', 'README-spack']
    assets = []
    # Some user config settings
    config = {
        'label-size': False,  # <- No int32/int64 support
        'mplib': 'USERMPI',   # USER | USERMPI
    }
    # The openfoam architecture, compiler information etc
    _foam_arch = None
    # Content for etc/prefs.{csh,sh}
    etc_prefs = {}
    # Content for etc/config.{csh,sh}/ files
    etc_config = {}
    phases = ['configure', 'build', 'install']
    build_script = './spack-Allwmake'  # <- Added by patch() method.
    #
    # - End of definitions / setup -
    #
    def setup_environment(self, spack_env, run_env):
        # Runtime environment mirrors a sourced etc/bashrc.
        # NOTE(review): the trailing comma below makes this line a one-tuple
        # expression; harmless, but probably unintended.
        run_env.set('FOAM_INST_DIR', os.path.dirname(self.projectdir)),
        run_env.set('FOAM_PROJECT_DIR', self.projectdir)
        run_env.set('WM_PROJECT_DIR', self.projectdir)
        for d in ['wmake', self.archbin]:  # bin already added automatically
            run_env.prepend_path('PATH', join_path(self.projectdir, d))
    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        """Provide location of the OpenFOAM project.
        This is identical to the WM_PROJECT_DIR value, but we avoid that
        variable since it would mask the normal OpenFOAM cleanup of
        previous versions.
        """
        spack_env.set('FOAM_PROJECT_DIR', self.projectdir)
    @property
    def projectdir(self):
        """Absolute location of project directory: WM_PROJECT_DIR/"""
        return self.prefix  # <- install directly under prefix
    @property
    def foam_arch(self):
        # Lazily construct the OpenFOAM arch descriptor from the spec + config.
        if not self._foam_arch:
            self._foam_arch = OpenfoamArch(self.spec, **self.config)
        return self._foam_arch
    @property
    def archbin(self):
        """Relative location of architecture-specific executables"""
        return join_path('applications', 'bin', self.foam_arch)
    @property
    def archlib(self):
        """Relative location of architecture-specific libraries"""
        return join_path('lib', self.foam_arch)
    def patch(self):
        """Adjust OpenFOAM build for spack.
           Where needed, apply filter as an alternative to normal patching."""
        add_extra_files(self, self.common, self.assets)
        # Adjust ParMGridGen - this is still a mess
        files = [
            'src/dbns/Make/options',
            'src/fvAgglomerationMethods/MGridGenGamgAgglomeration/Make/options'  # noqa: E501
        ]
        for f in files:
            filter_file(r'-lMGridGen', r'-lmgrid', f, backup=False)
        # Adjust for flex version check
        files = [
            'src/thermophysicalModels/reactionThermo/chemistryReaders/chemkinReader/chemkinLexer.L',  # noqa: E501
            'src/surfMesh/surfaceFormats/stl/STLsurfaceFormatASCII.L',  # noqa: E501
            'src/meshTools/triSurface/triSurface/interfaces/STL/readSTLASCII.L',  # noqa: E501
            'applications/utilities/preProcessing/fluentDataToFoam/fluentDataToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/gambitToFoam/gambitToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluent3DMeshToFoam/fluent3DMeshToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/ansysToFoam/ansysToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluentMeshToFoam/fluentMeshToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluent3DMeshToElmer/fluent3DMeshToElmer.L'  # noqa: E501
        ]
        for f in files:
            filter_file(
                r'#if YY_FLEX_SUBMINOR_VERSION < 34',
                r'#if YY_FLEX_MAJOR_VERSION <= 2 && YY_FLEX_MINOR_VERSION <= 5 && YY_FLEX_SUBMINOR_VERSION < 34',  # noqa: E501
                f, backup=False)
    def configure(self, spec, prefix):
        """Make adjustments to the OpenFOAM configuration files in their various
        locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
        don't properly fit get placed in the etc/prefs.sh file (similiarly for
        csh).
        """
        # Content for etc/prefs.{csh,sh}
        self.etc_prefs = {
            '000': {  # Sort first
                'compilerInstall': 'System',
            },
            '001': {},
            'cmake': {
                'CMAKE_DIR': spec['cmake'].prefix,
                'CMAKE_BIN_DIR': spec['cmake'].prefix.bin,
            },
            'python': {
                'PYTHON_DIR': spec['python'].home,
                'PYTHON_BIN_DIR': spec['python'].home.bin,
            },
            'flex': {
                'FLEX_SYSTEM': 1,
                'FLEX_DIR': spec['flex'].prefix,
            },
            'bison': {
                'BISON_SYSTEM': 1,
                # NOTE(review): BISON_DIR points at the *flex* prefix and bison
                # is not a declared dependency -- verify this is intentional.
                'BISON_DIR': spec['flex'].prefix,
            },
            'zlib': {
                'ZLIB_SYSTEM': 1,
                'ZLIB_DIR': spec['zlib'].prefix,
            },
        }
        # Adjust configuration via prefs - sort second
        self.etc_prefs['001'].update(self.foam_arch.foam_dict())
        if '+scotch' in spec or '+ptscotch' in spec:
            pkg = spec['scotch'].prefix
            self.etc_prefs['scotch'] = {
                'SCOTCH_SYSTEM': 1,
                'SCOTCH_DIR': pkg,
                'SCOTCH_BIN_DIR': pkg.bin,
                'SCOTCH_LIB_DIR': pkg.lib,
                'SCOTCH_INCLUDE_DIR': pkg.include,
            }
        if '+metis' in spec:
            pkg = spec['metis'].prefix
            self.etc_prefs['metis'] = {
                'METIS_SYSTEM': 1,
                'METIS_DIR': pkg,
                'METIS_BIN_DIR': pkg.bin,
                'METIS_LIB_DIR': pkg.lib,
                'METIS_INCLUDE_DIR': pkg.include,
            }
        if '+parmetis' in spec:
            pkg = spec['parmetis'].prefix
            self.etc_prefs['parametis'] = {
                'PARMETIS_SYSTEM': 1,
                'PARMETIS_DIR': pkg,
                'PARMETIS_BIN_DIR': pkg.bin,
                'PARMETIS_LIB_DIR': pkg.lib,
                'PARMETIS_INCLUDE_DIR': pkg.include,
            }
        if '+parmgridgen' in spec:
            pkg = spec['parmgridgen'].prefix
            self.etc_prefs['parmgridgen'] = {
                'PARMGRIDGEN_SYSTEM': 1,
                'PARMGRIDGEN_DIR': pkg,
                'PARMGRIDGEN_BIN_DIR': pkg.bin,
                'PARMGRIDGEN_LIB_DIR': pkg.lib,
                'PARMGRIDGEN_INCLUDE_DIR': pkg.include,
            }
        if '+paraview' in self.spec:
            self.etc_prefs['paraview'] = {
                'PARAVIEW_SYSTEM': 1,
                'PARAVIEW_DIR': spec['paraview'].prefix,
                'PARAVIEW_BIN_DIR': spec['paraview'].prefix.bin,
            }
            self.etc_prefs['qt'] = {
                'QT_SYSTEM': 1,
                'QT_DIR': spec['qt'].prefix,
                'QT_BIN_DIR': spec['qt'].prefix.bin,
            }
        # Write prefs files according to the configuration.
        # Only need prefs.sh for building, but install both for end-users
        write_environ(
            self.etc_prefs,
            posix=join_path('etc', 'prefs.sh'),
            cshell=join_path('etc', 'prefs.csh'))
    def build(self, spec, prefix):
        """Build using the OpenFOAM Allwmake script, with a wrapper to source
        its environment first.
        Only build if the compiler is known to be supported.
        """
        self.foam_arch.has_rule(self.stage.source_path)
        self.foam_arch.create_rules(self.stage.source_path, self)
        args = []
        if self.parallel:  # Build in parallel? - pass via the environment
            os.environ['WM_NCOMPPROCS'] = str(make_jobs)
        builder = Executable(self.build_script)
        builder(*args)
    def install(self, spec, prefix):
        """Install under the projectdir"""
        opts = str(self.foam_arch)
        # Fairly ugly since intermediate targets are scattered inside sources
        appdir = 'applications'
        projdir = os.path.basename(self.projectdir)
        mkdirp(self.projectdir, join_path(self.projectdir, appdir))
        # Filtering: bashrc, cshrc
        edits = {
            'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
            'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
        }
        # All top-level files, except spack build info and possibly Allwmake
        if '+source' in spec:
            ignored = re.compile(r'^spack-.*')
        else:
            ignored = re.compile(r'^(Allclean|Allwmake|spack-).*')
        files = [
            f for f in glob.glob("*")
            if os.path.isfile(f) and not ignored.search(f)
        ]
        for f in files:
            install(f, self.projectdir)
        # Install directories. install applications/bin directly
        # Install 'etc' before 'bin' (for symlinks)
        for d in ['etc', 'bin', 'wmake', 'lib', join_path(appdir, 'bin')]:
            install_tree(
                d,
                join_path(self.projectdir, d),
                symlinks=True)
        if '+source' in spec:
            subitem = join_path(appdir, 'Allwmake')
            install(subitem, join_path(self.projectdir, subitem))
            ignored = [opts]  # Ignore intermediate targets
            for d in ['src', 'tutorials']:
                install_tree(
                    d,
                    join_path(self.projectdir, d),
                    ignore=shutil.ignore_patterns(*ignored),
                    symlinks=True)
            for d in ['solvers', 'utilities']:
                install_tree(
                    join_path(appdir, d),
                    join_path(self.projectdir, appdir, d),
                    ignore=shutil.ignore_patterns(*ignored),
                    symlinks=True)
        etc_dir = join_path(self.projectdir, 'etc')
        rewrite_environ_files(  # Adjust etc/bashrc and etc/cshrc
            edits,
            posix=join_path(etc_dir, 'bashrc'),
            cshell=join_path(etc_dir, 'cshrc'))
        self.install_links()
    def install_links(self):
        """Add symlinks into bin/, lib/ (eg, for other applications)"""
        # Make build log visible - it contains OpenFOAM-specific information
        with working_dir(self.projectdir):
            os.symlink(
                join_path('.spack', 'build.out'),
                join_path('log.' + str(self.foam_arch)))
# -----------------------------------------------------------------------------
| skosukhin/spack | var/spack/repos/builtin/packages/foam-extend/package.py | Python | lgpl-2.1 | 15,255 |
import warnings
from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence
from typing import cast
from collections import defaultdict
from pathlib import Path
import srsly
from .pipe import Pipe
from ..training import Example
from ..language import Language
from ..errors import Errors, Warnings
from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry
from ..tokens import Doc, Span
from ..matcher import Matcher, PhraseMatcher
from ..scorer import get_ner_prf
# Separator spliced between an entity label and its id when both are stored in
# a single internal pattern key (e.g. "ORG||acme").
DEFAULT_ENT_ID_SEP = "||"
# Shape of one ruler pattern entry: {"label": str, "pattern": str or list of
# token dicts, "id": optional str}.
PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
@Language.factory(
    "entity_ruler",
    assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
    default_config={
        "phrase_matcher_attr": None,
        "validate": False,
        "overwrite_ents": False,
        "ent_id_sep": DEFAULT_ENT_ID_SEP,
        "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
    },
    default_score_weights={
        "ents_f": 1.0,
        "ents_p": 0.0,
        "ents_r": 0.0,
        "ents_per_type": None,
    },
)
def make_entity_ruler(
    nlp: Language,
    name: str,
    phrase_matcher_attr: Optional[Union[int, str]],
    validate: bool,
    overwrite_ents: bool,
    ent_id_sep: str,
    scorer: Optional[Callable],
):
    """Factory registered as "entity_ruler": construct an EntityRuler
    component from the pipeline config."""
    return EntityRuler(
        nlp,
        name,
        phrase_matcher_attr=phrase_matcher_attr,
        validate=validate,
        overwrite_ents=overwrite_ents,
        ent_id_sep=ent_id_sep,
        scorer=scorer,
    )
def entity_ruler_score(examples, **kwargs):
    """Default scorer: NER precision/recall/F over the given examples."""
    return get_ner_prf(examples)
@registry.scorers("spacy.entity_ruler_scorer.v1")
def make_entity_ruler_scorer():
    """Registry hook returning the default entity-ruler scoring function."""
    return entity_ruler_score
class EntityRuler(Pipe):
    """The EntityRuler lets you add spans to the `Doc.ents` using token-based
    rules or exact phrase matches. It can be combined with the statistical
    `EntityRecognizer` to boost accuracy, or used on its own to implement a
    purely rule-based entity recognition system. After initialization, the
    component is typically added to the pipeline using `nlp.add_pipe`.
    DOCS: https://spacy.io/api/entityruler
    USAGE: https://spacy.io/usage/rule-based-matching#entityruler
    """
    def __init__(
        self,
        nlp: Language,
        name: str = "entity_ruler",
        *,
        phrase_matcher_attr: Optional[Union[int, str]] = None,
        validate: bool = False,
        overwrite_ents: bool = False,
        ent_id_sep: str = DEFAULT_ENT_ID_SEP,
        patterns: Optional[List[PatternType]] = None,
        scorer: Optional[Callable] = entity_ruler_score,
    ) -> None:
        """Initialize the entity ruler. If patterns are supplied here, they
        need to be a list of dictionaries with a `"label"` and `"pattern"`
        key. A pattern can either be a token pattern (list) or a phrase pattern
        (string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.
        nlp (Language): The shared nlp object to pass the vocab to the matchers
            and process phrase patterns.
        name (str): Instance name of the current pipeline component. Typically
            passed in automatically from the factory when the component is
            added. Used to disable the current entity ruler while creating
            phrase patterns with the nlp object.
        phrase_matcher_attr (int / str): Token attribute to match on, passed
            to the internal PhraseMatcher as `attr`
        validate (bool): Whether patterns should be validated, passed to
            Matcher and PhraseMatcher as `validate`
        patterns (iterable): Optional patterns to load in.
        overwrite_ents (bool): If existing entities are present, e.g. entities
            added by the model, overwrite them by matches if necessary.
        ent_id_sep (str): Separator used internally for entity IDs.
        scorer (Optional[Callable]): The scoring method. Defaults to
            spacy.scorer.get_ner_prf.
        DOCS: https://spacy.io/api/entityruler#init
        """
        self.nlp = nlp
        self.name = name
        self.overwrite = overwrite_ents
        self.token_patterns = defaultdict(list)  # type: ignore
        self.phrase_patterns = defaultdict(list)  # type: ignore
        self._validate = validate
        self.matcher = Matcher(nlp.vocab, validate=validate)
        self.phrase_matcher_attr = phrase_matcher_attr
        self.phrase_matcher = PhraseMatcher(
            nlp.vocab, attr=self.phrase_matcher_attr, validate=validate
        )
        self.ent_id_sep = ent_id_sep
        self._ent_ids = defaultdict(tuple)  # type: ignore
        if patterns is not None:
            self.add_patterns(patterns)
        self.scorer = scorer
    def __len__(self) -> int:
        """The number of all patterns added to the entity ruler."""
        n_token_patterns = sum(len(p) for p in self.token_patterns.values())
        n_phrase_patterns = sum(len(p) for p in self.phrase_patterns.values())
        return n_token_patterns + n_phrase_patterns
    def __contains__(self, label: str) -> bool:
        """Whether a label is present in the patterns."""
        return label in self.token_patterns or label in self.phrase_patterns
    def __call__(self, doc: Doc) -> Doc:
        """Find matches in document and add them as entities.
        doc (Doc): The Doc object in the pipeline.
        RETURNS (Doc): The Doc with added entities, if available.
        DOCS: https://spacy.io/api/entityruler#call
        """
        error_handler = self.get_error_handler()
        try:
            matches = self.match(doc)
            self.set_annotations(doc, matches)
            return doc
        except Exception as e:
            return error_handler(self.name, self, [doc], e)
    def match(self, doc: Doc):
        """Run both matchers over the doc and return the deduplicated,
        longest-first sorted (match_id, start, end) triples."""
        self._require_patterns()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="\\[W036")
            matches = cast(
                List[Tuple[int, int, int]],
                list(self.matcher(doc)) + list(self.phrase_matcher(doc)),
            )
        # Drop zero-length matches and duplicates between the two matchers.
        final_matches = set(
            [(m_id, start, end) for m_id, start, end in matches if start != end]
        )
        # Longest span first; ties broken by earliest start (hence -start).
        get_sort_key = lambda m: (m[2] - m[1], -m[1])
        final_matches = sorted(final_matches, key=get_sort_key, reverse=True)
        return final_matches
    def set_annotations(self, doc, matches):
        """Modify the document in place"""
        entities = list(doc.ents)
        new_entities = []
        seen_tokens = set()
        for match_id, start, end in matches:
            if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
                continue
            # check for end - 1 here because boundaries are inclusive
            if start not in seen_tokens and end - 1 not in seen_tokens:
                if match_id in self._ent_ids:
                    label, ent_id = self._ent_ids[match_id]
                    span = Span(doc, start, end, label=label)
                    if ent_id:
                        for token in span:
                            token.ent_id_ = ent_id
                else:
                    span = Span(doc, start, end, label=match_id)
                new_entities.append(span)
                entities = [
                    e for e in entities if not (e.start < end and e.end > start)
                ]
                seen_tokens.update(range(start, end))
        doc.ents = entities + new_entities
    @property
    def labels(self) -> Tuple[str, ...]:
        """All labels present in the match patterns.
        RETURNS (set): The string labels.
        DOCS: https://spacy.io/api/entityruler#labels
        """
        keys = set(self.token_patterns.keys())
        keys.update(self.phrase_patterns.keys())
        all_labels = set()
        for l in keys:
            if self.ent_id_sep in l:
                label, _ = self._split_label(l)
                all_labels.add(label)
            else:
                all_labels.add(l)
        return tuple(sorted(all_labels))
    def initialize(
        self,
        get_examples: Callable[[], Iterable[Example]],
        *,
        nlp: Optional[Language] = None,
        patterns: Optional[Sequence[PatternType]] = None,
    ):
        """Initialize the pipe for training.
        get_examples (Callable[[], Iterable[Example]]): Function that
            returns a representative sample of gold-standard Example objects.
        nlp (Language): The current nlp object the component is part of.
        patterns Optional[Iterable[PatternType]]: The list of patterns.
        DOCS: https://spacy.io/api/entityruler#initialize
        """
        self.clear()
        if patterns:
            self.add_patterns(patterns)  # type: ignore[arg-type]
    @property
    def ent_ids(self) -> Tuple[Optional[str], ...]:
        """All entity ids present in the match patterns `id` properties
        RETURNS (set): The string entity ids.
        DOCS: https://spacy.io/api/entityruler#ent_ids
        """
        keys = set(self.token_patterns.keys())
        keys.update(self.phrase_patterns.keys())
        all_ent_ids = set()
        for l in keys:
            if self.ent_id_sep in l:
                _, ent_id = self._split_label(l)
                all_ent_ids.add(ent_id)
        return tuple(all_ent_ids)
    @property
    def patterns(self) -> List[PatternType]:
        """Get all patterns that were added to the entity ruler.
        RETURNS (list): The original patterns, one dictionary per pattern.
        DOCS: https://spacy.io/api/entityruler#patterns
        """
        all_patterns = []
        for label, patterns in self.token_patterns.items():
            for pattern in patterns:
                ent_label, ent_id = self._split_label(label)
                p = {"label": ent_label, "pattern": pattern}
                if ent_id:
                    p["id"] = ent_id
                all_patterns.append(p)
        for label, patterns in self.phrase_patterns.items():
            for pattern in patterns:
                ent_label, ent_id = self._split_label(label)
                p = {"label": ent_label, "pattern": pattern.text}
                if ent_id:
                    p["id"] = ent_id
                all_patterns.append(p)
        return all_patterns
    def add_patterns(self, patterns: List[PatternType]) -> None:
        """Add patterns to the entity ruler. A pattern can either be a token
        pattern (list of dicts) or a phrase pattern (string). For example:
        {'label': 'ORG', 'pattern': 'Apple'}
        {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
        patterns (list): The patterns to add.
        DOCS: https://spacy.io/api/entityruler#add_patterns
        """
        # disable the nlp components after this one in case they hadn't been initialized / deserialised yet
        try:
            current_index = -1
            for i, (name, pipe) in enumerate(self.nlp.pipeline):
                if self == pipe:
                    current_index = i
                    break
            subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]]
        except ValueError:  # defensive: kept from an older .index()-based lookup
            subsequent_pipes = []
        with self.nlp.select_pipes(disable=subsequent_pipes):
            token_patterns = []
            phrase_pattern_labels = []
            phrase_pattern_texts = []
            phrase_pattern_ids = []
            for entry in patterns:
                if isinstance(entry["pattern"], str):
                    phrase_pattern_labels.append(entry["label"])
                    phrase_pattern_texts.append(entry["pattern"])
                    phrase_pattern_ids.append(entry.get("id"))
                elif isinstance(entry["pattern"], list):
                    token_patterns.append(entry)
            phrase_patterns = []
            for label, pattern, ent_id in zip(
                phrase_pattern_labels,
                self.nlp.pipe(phrase_pattern_texts),
                phrase_pattern_ids,
            ):
                phrase_pattern = {"label": label, "pattern": pattern}
                if ent_id:
                    phrase_pattern["id"] = ent_id
                phrase_patterns.append(phrase_pattern)
            for entry in token_patterns + phrase_patterns:  # type: ignore[operator]
                label = entry["label"]
                if "id" in entry:
                    ent_label = label
                    label = self._create_label(label, entry["id"])
                    key = self.matcher._normalize_key(label)
                    self._ent_ids[key] = (ent_label, entry["id"])
                pattern = entry["pattern"]  # type: ignore
                if isinstance(pattern, Doc):
                    self.phrase_patterns[label].append(pattern)
                    self.phrase_matcher.add(label, [pattern])  # type: ignore
                elif isinstance(pattern, list):
                    self.token_patterns[label].append(pattern)
                    self.matcher.add(label, [pattern])
                else:
                    raise ValueError(Errors.E097.format(pattern=pattern))
    def clear(self) -> None:
        """Reset all patterns."""
        self.token_patterns = defaultdict(list)
        self.phrase_patterns = defaultdict(list)
        self._ent_ids = defaultdict(tuple)
        self.matcher = Matcher(self.nlp.vocab, validate=self._validate)
        self.phrase_matcher = PhraseMatcher(
            self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate
        )
    def remove(self, ent_id: str) -> None:
        """Remove a pattern by its ent_id if a pattern with this ent_id was added before
        ent_id (str): id of the pattern to be removed
        RETURNS: None
        DOCS: https://spacy.io/api/entityruler#remove
        """
        label_id_pairs = [
            (label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id
        ]
        if not label_id_pairs:
            raise ValueError(Errors.E1024.format(ent_id=ent_id))
        created_labels = [
            self._create_label(label, eid) for (label, eid) in label_id_pairs
        ]
        # remove the patterns from self.phrase_patterns
        self.phrase_patterns = defaultdict(
            list,
            {
                label: val
                for (label, val) in self.phrase_patterns.items()
                if label not in created_labels
            },
        )
        # remove the patterns from self.token_patterns
        self.token_patterns = defaultdict(
            list,
            {
                label: val
                for (label, val) in self.token_patterns.items()
                if label not in created_labels
            },
        )
        # remove the patterns from the matchers themselves
        for label in created_labels:
            if label in self.phrase_matcher:
                self.phrase_matcher.remove(label)
            else:
                self.matcher.remove(label)
    def _require_patterns(self) -> None:
        """Raise a warning if this component has no patterns defined."""
        if len(self) == 0:
            warnings.warn(Warnings.W036.format(name=self.name))
    def _split_label(self, label: str) -> Tuple[str, Optional[str]]:
        """Split Entity label into ent_label and ent_id if it contains self.ent_id_sep
        label (str): The value of label in a pattern entry
        RETURNS (tuple): ent_label, ent_id
        """
        if self.ent_id_sep in label:
            ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)
        else:
            ent_label = label
            ent_id = None  # type: ignore
        return ent_label, ent_id
    def _create_label(self, label: Any, ent_id: Any) -> str:
        """Join Entity label with ent_id if the pattern has an `id` attribute
        If ent_id is not a string, the label is returned as is.
        label (str): The label to set for ent.label_
        ent_id (str): The label
        RETURNS (str): The ent_label joined with configured `ent_id_sep`
        """
        if isinstance(ent_id, str):
            label = f"{label}{self.ent_id_sep}{ent_id}"
        return label
    def from_bytes(
        self, patterns_bytes: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityRuler":
        """Load the entity ruler from a bytestring.
        patterns_bytes (bytes): The bytestring to load.
        RETURNS (EntityRuler): The loaded entity ruler.
        DOCS: https://spacy.io/api/entityruler#from_bytes
        """
        cfg = srsly.msgpack_loads(patterns_bytes)
        self.clear()
        if isinstance(cfg, dict):
            self.add_patterns(cfg.get("patterns", cfg))
            self.overwrite = cfg.get("overwrite", False)
            self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None)
            self.phrase_matcher = PhraseMatcher(
                self.nlp.vocab, attr=self.phrase_matcher_attr
            )
            self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
        else:
            self.add_patterns(cfg)
        return self
    def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
        """Serialize the entity ruler patterns to a bytestring.
        RETURNS (bytes): The serialized patterns.
        DOCS: https://spacy.io/api/entityruler#to_bytes
        """
        serial = {
            "overwrite": self.overwrite,
            "ent_id_sep": self.ent_id_sep,
            "phrase_matcher_attr": self.phrase_matcher_attr,
            "patterns": self.patterns,
        }
        return srsly.msgpack_dumps(serial)
    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityRuler":
        """Load the entity ruler from a file. Expects a file containing
        newline-delimited JSON (JSONL) with one entry per line.
        path (str / Path): The JSONL file to load.
        RETURNS (EntityRuler): The loaded entity ruler.
        DOCS: https://spacy.io/api/entityruler#from_disk
        """
        path = ensure_path(path)
        self.clear()
        depr_patterns_path = path.with_suffix(".jsonl")
        if path.suffix == ".jsonl":  # user provides a jsonl
            # BUG FIX: `path.is_file` (no call) evaluated the bound method
            # object, which is always truthy, so the E1023 error below was
            # unreachable and missing files hit srsly with a raw OSError.
            if path.is_file():
                patterns = srsly.read_jsonl(path)
                self.add_patterns(patterns)
            else:
                raise ValueError(Errors.E1023.format(path=path))
        elif depr_patterns_path.is_file():
            patterns = srsly.read_jsonl(depr_patterns_path)
            self.add_patterns(patterns)
        elif path.is_dir():  # path is a valid directory
            cfg = {}
            deserializers_patterns = {
                "patterns": lambda p: self.add_patterns(
                    srsly.read_jsonl(p.with_suffix(".jsonl"))
                )
            }
            deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))}
            from_disk(path, deserializers_cfg, {})
            self.overwrite = cfg.get("overwrite", False)
            self.phrase_matcher_attr = cfg.get("phrase_matcher_attr")
            self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
            self.phrase_matcher = PhraseMatcher(
                self.nlp.vocab, attr=self.phrase_matcher_attr
            )
            from_disk(path, deserializers_patterns, {})
        else:  # path is not a valid directory or file
            raise ValueError(Errors.E146.format(path=path))
        return self
    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Save the entity ruler patterns to a directory. The patterns will be
        saved as newline-delimited JSON (JSONL).
        path (str / Path): The JSONL file to save.
        DOCS: https://spacy.io/api/entityruler#to_disk
        """
        path = ensure_path(path)
        cfg = {
            "overwrite": self.overwrite,
            "phrase_matcher_attr": self.phrase_matcher_attr,
            "ent_id_sep": self.ent_id_sep,
        }
        serializers = {
            "patterns": lambda p: srsly.write_jsonl(
                p.with_suffix(".jsonl"), self.patterns
            ),
            "cfg": lambda p: srsly.write_json(p, cfg),
        }
        if path.suffix == ".jsonl":  # user wants to save only JSONL
            srsly.write_jsonl(path, self.patterns)
        else:
            to_disk(path, serializers, {})
| honnibal/spaCy | spacy/pipeline/entityruler.py | Python | mit | 20,510 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at the project settings and hand the
    command line over to its management CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todo.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
if __name__ == '__main__':
    main()
| bugsnag/bugsnag-python | tests/fixtures/django4/manage.py | Python | mit | 660 |
#coding:utf-8
#第 0000 题:将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。
from PIL import Image, ImageDraw, ImageFont
__author__ = 'Hunter'
def picture_num(img, num, out_path="wechat_100.jpg", font_path="ahronbd.ttf",
                font_size=300, pos=(260, -50), color=(255, 0, 0)):
    """Draw a badge-style number onto *img* and save the result.

    Generalized from the original hard-coded version: output path, font,
    size, position and colour are now parameters with the old values as
    backward-compatible defaults.

    img       -- a PIL Image to draw on (modified in place)
    num       -- the text to draw, e.g. "100"
    out_path  -- file the annotated image is saved to
    font_path -- TrueType font file used for the number
    font_size -- point size of the number
    pos       -- (x, y) position of the text; may be partly off-canvas
    color     -- RGB fill colour of the number
    """
    im = ImageDraw.Draw(img)
    print(img.size)
    numFont = ImageFont.truetype(font_path, font_size)
    im.text(pos, num, fill=color, font=numFont)
    img.save(out_path)
    img.show()


if __name__ == '__main__':
    img = Image.open("wechat.jpg")
    picture_num(img, "100")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# -*- author:miko-*-
# python3抓取bing主页所有背景图片
import urllib.request
import urllib,re,sys,os
def get_bing_backphoto():
    """Download every background image referenced on the Bing home page.

    Images are saved into an ``img/`` directory, which is created if it
    does not exist. Exits the process with status -1 if Bing returns the
    literal string "null".
    """
    if not os.path.exists('img'):
        os.mkdir('img')
    # Pattern that pulls each image URL out of Bing's embedded JSON blob.
    reg = re.compile('"url":"(.*?)","urlbase"', re.S)
    for i in range(0, 24):
        url = 'https://cn.bing.com/?toHttps=1&redig=265CBC0D09884CB58695FFDF89ADE88E'
        #url = 'http://cn.bing.com/HPImageArchive.aspx?format=js&idx='+str(i)+'&n=1&nc=1361089515117&FORM=HYLH1'
        html = urllib.request.urlopen(url).read()
        # BUG FIX: urlopen().read() returns bytes, so the previous
        # comparison ``html == 'null'`` could never be true on Python 3.
        # Decode first, then compare.
        html = html.decode('utf-8')
        if html == 'null':
            print('open & read bing error!')
            sys.exit(-1)
        for imgurl in re.findall(reg, html):
            # The file name is everything after the final '/' of the URL.
            name = imgurl.rsplit('/', 1)[-1]
            savepath = 'img/' + name
            print(imgurl)
            urllib.request.urlretrieve(imgurl, savepath)
get_bing_backphoto() | sinomiko/project | IdeaProjects/crap/bingScrapy.py | Python | bsd-3-clause | 1,160 |
import base64
import xml.etree.ElementTree as ET
from . import log
class Response:
    """ Response objects for the DBGP module.
    Contains response data from a command made to the debugger.

    Wraps the raw XML payload returned by the debugger; the XML is
    parsed lazily on first access via as_xml().
    """
    # Default DBGP XML namespace; replaced with the namespace actually
    # present in the payload once the XML has been parsed (see
    # __determine_ns()).
    ns = '{urn:debugger_protocol_v1}'
    def __init__(self, response, cmd, cmd_args, api):
        self.response = response
        self.cmd = cmd
        self.cmd_args = cmd_args
        self.xml = None
        self.api = api
        # Fail fast: if the payload contains an <error> element, parse it
        # and raise the corresponding exception immediately.
        if "<error" in self.response:
            self.__parse_error()
    def __parse_error(self):
        """Parse an error message which has been returned
        in the response, then raise it as a DBGPError."""
        xml = self.as_xml()
        err_el = xml.find('%serror' % self.ns)
        if err_el is None:
            raise DBGPError("Could not parse error from return XML", 1)
        else:
            code = err_el.get("code")
            if code is None:
                raise ResponseError("Missing error code in response",
                                    self.response)
            elif int(code) == 4:
                # DBGP error code 4 = "unimplemented command".
                raise CmdNotImplementedError('Command not implemented')
            msg_el = err_el.find('%smessage' % self.ns)
            if msg_el is None:
                raise ResponseError("Missing error message in response",
                                    self.response)
            raise DBGPError(msg_el.text, code)
    def get_cmd(self):
        """Get the command that created this response."""
        return self.cmd
    def get_cmd_args(self):
        """Get the arguments to the command."""
        return self.cmd_args
    def as_string(self):
        """Return the full response as a string.

        There is a __str__ method, which will render the
        whole object as a string and should be used for
        displaying.
        """
        return self.response
    def as_xml(self):
        """Get the response as element tree XML.

        Returns an xml.etree.ElementTree.Element object.
        """
        # Parse lazily and cache; also derives the real namespace.
        if self.xml is None:
            self.xml = ET.fromstring(self.response)
            self.__determine_ns()
        return self.xml
    def __determine_ns(self):
        """Extract the XML namespace from the root tag (e.g. '{uri}tag')."""
        tag_repr = str(self.xml.tag)
        if tag_repr[0] != '{':
            raise DBGPError('Invalid or missing XML namespace', 1)
        else:
            ns_parts = tag_repr.split('}')
            self.ns = ns_parts[0] + '}'
    def __str__(self):
        return self.as_string()
class ContextNamesResponse(Response):
    """Response to the ``context_names`` command."""

    def names(self):
        """Return a mapping of context id (int) to context name."""
        return {int(node.get('id')): node.get('name')
                for node in list(self.as_xml())}
class TraceResponse(Response):
    """Response object returned by the trace command."""

    def __str__(self):
        xml = self.as_xml()
        return xml.get('trace')
class StatusResponse(Response):
    """Response object returned by the status command."""

    def __str__(self):
        xml = self.as_xml()
        return xml.get('status')
class StackGetResponse(Response):
    """Response object used by the stack_get command."""

    def get_stack(self):
        """Return the stack frames as a list of XML elements."""
        return [frame for frame in self.as_xml()]
class ContextGetResponse(Response):
    """Response object used by the context_get command.

    The raw property nodes are converted into ContextProperty objects,
    which are much easier to work with."""

    def __init__(self, response, cmd, cmd_args, api):
        Response.__init__(self, response, cmd, cmd_args, api)
        self.properties = []

    def get_context(self):
        """Build and return the flattened list of context properties."""
        for node in list(self.as_xml()):
            self.create_properties(ContextProperty(node))
        return self.properties

    def create_properties(self, prop):
        """Append *prop* and all of its descendants, depth-first."""
        self.properties.append(prop)
        for child in prop.children:
            self.create_properties(child)
class EvalResponse(ContextGetResponse):
    """Response object returned by the eval command."""

    def __init__(self, response, cmd, cmd_args, api):
        try:
            ContextGetResponse.__init__(self, response, cmd, cmd_args, api)
        except DBGPError as e:
            # DBGP error code 206 indicates the evaluated expression
            # itself was invalid.
            if int(e.args[1]) == 206:
                raise EvalError()
            else:
                raise e

    def get_context(self):
        code = self.get_code()
        for node in list(self.as_xml()):
            self.create_properties(EvalProperty(node, code, self.api.language))
        return self.properties

    def get_code(self):
        """Recover the evaluated source from the base64 command arguments."""
        encoded = self.get_cmd_args().split('-- ')[1]
        # Restore any stripped base64 '=' padding before decoding.
        remainder = len(encoded) % 4
        if remainder:
            encoded += '=' * (4 - remainder)
        return base64.b64decode(encoded.encode('utf-8')).decode('utf-8')
class BreakpointSetResponse(Response):
    """Response object returned by the breakpoint_set command."""

    def get_id(self):
        """Return the id assigned to the new breakpoint, as an int."""
        xml = self.as_xml()
        return int(xml.get('id'))

    def __str__(self):
        xml = self.as_xml()
        return xml.get('id')
class FeatureGetResponse(Response):
    """Response object specifically for the feature_get command."""

    def is_supported(self):
        """Whether the feature is supported or not (as 0/1)."""
        return int(self.as_xml().get('supported'))

    def __str__(self):
        if not self.is_supported():
            return "* Feature not supported *"
        return self.as_xml().text
class Api:
    """Api for DBGP commands.

    Uses a Connection object to read and write with the debugger,
    and builds commands and returns the results.
    """
    # Class-level defaults; both are rebound per instance/command below.
    conn = None
    transID = 0
    def __init__(self, connection):
        """Create a new Api using a Connection object.

        The Connection object specifies the debugger connection,
        and the Protocol provides a OO api to interacting
        with it.

        connection -- The Connection object to use
        """
        self.language = None
        self.protocol = None
        self.idekey = None
        self.startfile = None
        self.conn = connection
        if self.conn.isconnected() == 0:
            self.conn.open()
        # The debugger engine sends an <init> message as soon as the
        # connection is established; consume and parse it now.
        self.__parse_init_msg(self.conn.recv_msg())
    def __del__(self):
        # Best-effort cleanup of the underlying connection.
        self.conn.close()
    def __parse_init_msg(self, msg):
        """Parse the init message from the debugger"""
        xml = ET.fromstring(msg)
        self.language = xml.get("language")
        if self.language is None:
            raise ResponseError(
                "Invalid XML response from debugger",
                msg)
        self.language = self.language.lower()
        self.idekey = xml.get("idekey")
        self.version = xml.get("api_version")
        self.startfile = xml.get("fileuri")
    def send_cmd(self, cmd, args='', res_cls=Response):
        """Send a command to the debugger.

        This method automatically adds a unique transaction
        ID to the command which is required by the debugger.

        Returns a Response object, which contains the
        response message and command.

        cmd -- the command name, e.g. 'status'

        args -- arguments for the command, which is optional
                for certain commands (default '')
        """
        args = args.strip()
        send = cmd.strip()
        # Every DBGP command must carry a unique transaction id (-i).
        self.transID += 1
        send += ' -i ' + str(self.transID)
        if args:
            send += ' ' + args
        log.Log("Command: " + send, log.Logger.DEBUG)
        self.conn.send_msg(send)
        msg = self.conn.recv_msg()
        log.Log("Response: " + msg, log.Logger.DEBUG)
        return res_cls(msg, cmd, args, self)
    def status(self):
        """Get the debugger status.

        Returns a Response object.
        """
        return self.send_cmd('status', '', StatusResponse)
    def feature_get(self, name):
        """Get the value of a feature from the debugger.

        See the DBGP documentation for a list of features.

        Returns a FeatureGetResponse object.

        name -- name of the feature, e.g. encoding
        """
        return self.send_cmd('feature_get', '-n ' + str(name),
                             FeatureGetResponse)
    def feature_set(self, name, value):
        """Set the value of a debugger feature.

        See the DBGP documentation for a list of features.

        Returns a Response object.

        name -- name of the feature, e.g. encoding

        value -- new value for the feature
        """
        return self.send_cmd('feature_set', '-n {} -v {}'.format(name, value))
    def run(self):
        """Tell the debugger to start or resume
        execution."""
        return self.send_cmd('run', '', StatusResponse)
    def eval(self, code):
        """Evaluate a code snippet in the debugger's current context.

        code -- the source expression to evaluate (base64-encoded on
                the wire, as required by the DBGP protocol)
        """
        code_enc = base64.encodebytes(code.encode('utf-8'))
        args = '-- %s' % code_enc.decode('utf-8')
        """ The python engine incorrectly requires length.
        if self.language == 'python':
            args = ("-l %i " % len(code_enc) ) + args"""
        return self.send_cmd('eval', args, EvalResponse)
    def step_into(self):
        """Tell the debugger to step to the next
        statement.

        If there's a function call, the debugger engine
        will break on the first statement in the function.
        """
        return self.send_cmd('step_into', '', StatusResponse)
    def step_over(self):
        """Tell the debugger to step to the next
        statement.

        If there's a function call, the debugger engine
        will stop at the next statement after the function call.
        """
        return self.send_cmd('step_over', '', StatusResponse)
    def step_out(self):
        """Tell the debugger to step out of the statement.

        The debugger will step out of the current scope.
        """
        return self.send_cmd('step_out', '', StatusResponse)
    def stop(self):
        """Tell the debugger to stop execution.

        The script is terminated immediately."""
        return self.send_cmd('stop', '', StatusResponse)
    def stack_get(self):
        """Get the stack information.
        """
        return self.send_cmd('stack_get', '', StackGetResponse)
    def context_get(self, context=0, stack=0):
        """Get the context variables.
        """
        return self.send_cmd('context_get', '-c %i -d %i' % (int(context), int(stack)),
                             ContextGetResponse)
    def context_names(self):
        """Get the context types.
        """
        return self.send_cmd('context_names', '', ContextNamesResponse)
    def property_get(self, name):
        """Get a property.
        """
        # Backslashes and double quotes in the name must be escaped
        # before being embedded in the quoted -n argument.
        return self.send_cmd(
            'property_get',
            '-n "%s" -d 0' % name.replace("\\", "\\\\").replace("\"", "\\\""),
            ContextGetResponse
        )
    def detach(self):
        """Tell the debugger to detach itself from this
        client.

        The script is not terminated, but runs as normal
        from this point."""
        ret = self.send_cmd('detach', '', StatusResponse)
        self.conn.close()
        return ret
    def breakpoint_set(self, cmd_args):
        """Set a breakpoint.

        The breakpoint type is defined by the arguments, see the
        Breakpoint class for more detail."""
        return self.send_cmd('breakpoint_set', cmd_args, BreakpointSetResponse)
    def breakpoint_list(self):
        """List all breakpoints known to the debugger engine."""
        return self.send_cmd('breakpoint_list')
    def breakpoint_disable(self, id):
        """Disable (but keep) the breakpoint with the given id."""
        return self.send_cmd('breakpoint_update', '-d %i -s disabled' % id, Response)
    def breakpoint_enable(self, id):
        """Re-enable the breakpoint with the given id."""
        return self.send_cmd('breakpoint_update', '-d %i -s enabled' % id, Response)
    def breakpoint_remove(self, id):
        """Remove a breakpoint by ID.

        The ID is that returned in the response from breakpoint_set."""
        return self.send_cmd('breakpoint_remove', '-d %i' % id, Response)
class ContextProperty:
    """A single variable (property) from a debugger context.

    Wraps a DBGP <property> XML node: decodes base64-encoded values,
    determines the display name and type, and recursively builds child
    properties for composite values.
    """
    ns = '{urn:debugger_protocol_v1}'
    def __init__(self, node, parent=None, depth=0):
        self.parent = parent
        self.__determine_type(node)
        self._determine_displayname(node)
        self.encoding = node.get('encoding')
        self.depth = depth
        self.size = node.get('size')
        self.value = ""
        self.is_last_child = False
        self._determine_children(node)
        self.__determine_value(node)
        self.__init_children(node)
        if self.type == 'scalar':
            # Subtract 2 to exclude the backticks added around the value
            # by __determine_value().
            self.size = len(self.value) - 2
    def __determine_value(self, node):
        """Extract and decode this property's value from the XML node."""
        if self.has_children:
            # Composite values are rendered through their children.
            self.value = ""
            return
        self.value = self._get_enc_node_text(node, 'value')
        if self.value is None:
            if self.encoding == 'base64':
                if node.text is None:
                    self.value = ""
                else:
                    try:
                        self.value = base64.decodebytes(
                            node.text.encode("UTF-8")).decode("utf-8")
                    except UnicodeDecodeError:
                        # Not valid UTF-8 (binary data): keep raw base64.
                        self.value = node.text
            elif not self.is_uninitialized() and not self.has_children:
                self.value = node.text
        if self.value is None:
            self.value = ""
        # Number of newlines, used by callers for display layout.
        self.num_crs = self.value.count('\n')
        if self.type.lower() in ("string", "str", "scalar"):
            # Quote string-ish values in backticks, escaping embedded ones.
            self.value = '`%s`' % self.value.replace('`', '\\`')
    def __determine_type(self, node):
        """Prefer the class name over the generic type attribute."""
        type = node.get('classname')
        if type is None:
            type = node.get('type')
        if type is None:
            type = 'unknown'
        self.type = type
    def _determine_displayname(self, node):
        """Derive a human-readable name from the node's fullname."""
        display_name = node.get('fullname')
        if display_name is None:
            display_name = self._get_enc_node_text(node, 'fullname', "")
        if display_name == '::':
            display_name = self.type
        self.display_name = display_name
    def _get_enc_node_text(self, node, name, default=None):
        """Return the text of child element *name*, base64-decoding it
        if the element carries encoding="base64"; *default* if absent."""
        n = node.find('%s%s' % (self.ns, name))
        if n is not None and n.text is not None:
            if n.get('encoding') == 'base64':
                val = base64.decodebytes(n.text.encode("UTF-8")).decode(
                    "UTF-8")
            else:
                val = n.text
        else:
            val = None
        if val is None:
            return default
        return val
    def _determine_children(self, node):
        """Read the declared child count (numchildren or children attr)."""
        children = node.get('numchildren')
        if children is None:
            children = node.get('children')
        if children is None:
            children = 0
        else:
            children = int(children)
        self.num_declared_children = children
        self.has_children = children > 0
        self.children = []
    def __init_children(self, node):
        """Recursively build ContextProperty objects for child nodes."""
        if self.has_children:
            idx = 0
            tagname = '%sproperty' % self.ns
            children = list(node)
            if children is not None:
                for c in children:
                    if c.tag == tagname:
                        idx += 1
                        p = self._create_child(c, self, self.depth + 1)
                        self.children.append(p)
                        # Flag the final declared child for display purposes.
                        if idx == self.num_declared_children:
                            p.mark_as_last_child()
    def _create_child(self, node, parent, depth):
        """Factory hook; subclasses override to create their own type."""
        return ContextProperty(node, parent, depth)
    def mark_as_last_child(self):
        self.is_last_child = True
    def is_uninitialized(self):
        return self.type == 'uninitialized'
    def child_count(self):
        return len(self.children)
    def type_and_size(self):
        """Return 'type' or 'type [size]' where a size is known."""
        size = None
        if self.has_children:
            size = self.num_declared_children
        elif self.size is not None:
            size = self.size
        if size is None:
            return self.type
        return "%s [%s]" % (self.type, size)
class EvalProperty(ContextProperty):
    """Context property parsed from an ``eval`` response.

    Display names are reconstructed from the evaluated code so that
    nested properties render as valid expressions in the debuggee's
    language (PHP, Perl, or a generic dotted syntax).
    """

    def __init__(self, node, code, language, parent=None, depth=0):
        self.code = code
        self.language = language.lower()
        # The root property represents the evaluated expression itself.
        self.is_parent = parent is None
        ContextProperty.__init__(self, node, parent, depth)

    def _create_child(self, node, parent, depth):
        return EvalProperty(node, self.code, self.language, parent, depth)

    def _determine_displayname(self, node):
        """Build an expression-style display name for this property."""
        if self.is_parent:
            self.display_name = self.code
            return
        if self.language == 'php':
            if self.parent.type == 'array':
                # Numeric keys render as arr[0], string keys as arr['key'].
                if node.get('name').isdigit():
                    self.display_name = self.parent.display_name + \
                        "[%s]" % node.get('name')
                else:
                    self.display_name = self.parent.display_name + \
                        "['%s']" % node.get('name')
            else:
                self.display_name = self.parent.display_name + \
                    "->" + node.get('name')
        elif self.language == 'perl':
            self.display_name = node.get('fullname')
        else:
            # BUG FIX: the original read node.get('name'), defaulted it to
            # "?", then unconditionally overwrote the result with the
            # encoded lookup below — the first two statements were dead
            # code and have been removed.
            name = self._get_enc_node_text(node, 'name', '?')
            if self.parent.type == 'list':
                self.display_name = self.parent.display_name + name
            else:
                self.display_name = self.parent.display_name + \
                    "." + name
# Errors/Exceptions
class TimeoutError(Exception):
    """Raised when an operation times out.

    NOTE(review): this shadows the builtin ``TimeoutError`` within this
    module; presumably raised on connection timeouts — confirm at the
    call sites.
    """
    pass
class DBGPError(Exception):
    """Raised when the debugger returns an error message."""
    pass
class CmdNotImplementedError(Exception):
    """Raised when the debugger returns an error message."""
    pass
class EvalError(Exception):
    """Raised when some evaluated code is invalid."""
    pass
class ResponseError(Exception):
    """An error caused by an unexpected response from the
    debugger (e.g. invalid format XML)."""
    pass
class TraceError(Exception):
    """Raised when trace is out of domain."""
    pass
| paprykarz/vdebug | python3/vdebug/dbgp.py | Python | mit | 18,056 |
import sys,Skylake
SERVER_ADDRESS = (HOST, PORT) = '', 8888


def make_server(server_address):
    """Construct a SkylakeServer bound to *server_address*."""
    return Skylake.SkylakeServer(server_address)


if __name__ == '__main__':
    httpd = make_server(SERVER_ADDRESS)
    print('SkylakeWebServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
    httpd.serve_forever()
# -*- encoding: utf-8 -*-
"""
graph.py : tools for constructing and modifying graphs, i.e. sets of vertices
connected by edges.
@author: Andries Effting
Copyright (C) 2021 Andries Effting, Delmic
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
USA.
"""
import collections
import itertools
import sys
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Union
import numpy
if sys.version_info < (3, 7, 4):
    from collections import UserList as _UserList
    class UserList(_UserList):
        """
        Backport of bugfix for when using Python v3.7.3 or lower.
        For more information see: https://bugs.python.org/issue27639
        """
        def __getitem__(self, i):
            # Slicing must return an instance of the same class, not a
            # plain list; affected Python versions got this wrong.
            if isinstance(i, slice):
                return self.__class__(self.data[i])
            else:
                return self.data[i]
else:
    from collections import UserList
class GraphBase(UserList, metaclass=ABCMeta):
    """Abstract base class for graphs.

    The graph is stored as a list of per-vertex neighbor containers; the
    concrete container type is given by ``_item_type`` in subclasses.
    """
    _item_type = object
    def __init__(
        self,
        n_or_initlist: Optional[Union[int, Sequence[Any]]] = None,
        directed: bool = True,
    ) -> None:
        """
        Initializer for GraphBase.

        Parameters
        ----------
        n_or_initlist : int or sequence, optional
            If `None` (default) initializes a graph of zero order and zero size
            (i.e. no vertices and no edges). If int, initializes a graph of
            order `n_or_initlist` and zero size. Otherwise initialize the graph
            using the sequence `n_or_initlist`.
        directed : bool
            If `False` the graph is undirected and symmetry of the adjacency
            matrix is enforced when adding or removing edges. For undirected
            graphs this means that `j in graph[i]` is True if and only if
            `i in graph[j]`.
        """
        self._directed = directed
        if n_or_initlist is None:
            super().__init__()
        elif isinstance(n_or_initlist, int):
            # n empty neighbor containers, one per vertex.
            super().__init__((self._item_type() for _ in range(n_or_initlist)))
        elif isinstance(n_or_initlist, collections.abc.Sequence):
            super().__init__(map(self._item_type, n_or_initlist))
        else:
            raise ValueError(
                "Unsupported type '{}', expected int or sequence".format(
                    type(n_or_initlist).__name__
                )
            )
    @abstractmethod
    def add_edge(self, edge: Tuple[int, int]) -> None:
        pass
    @abstractmethod
    def remove_edge(self, edge: Tuple[int, int]) -> None:
        pass
    @abstractmethod
    def get_edge_weight(self, edge: Tuple[int, int]) -> float:
        pass
    def adjacency_matrix(self) -> numpy.ndarray:
        """
        Return the adjacency matrix of the graph.

        Returns
        -------
        matrix : ndarray
            The adjacency matrix.
        """
        n = len(self.data)
        matrix = numpy.zeros((n, n))
        for vertex, neighbors in enumerate(self.data):
            for neighbor in neighbors:
                matrix[vertex, neighbor] = self.get_edge_weight((vertex, neighbor))
        return matrix
    def iter_edges(self, directed: Optional[bool] = None) -> Iterator[Tuple[int, int]]:
        """
        Iterator over all the edges in the graph.

        Parameters
        ----------
        directed : bool, optional
            If `True` the edges `(j, i)` and `(i, j)` will be considered as
            being two separate edges. If `False`, only yield edges `(j, i)`
            with `j < i`. If not specified, uses `self.directed`.

        Yields
        ------
        edge : tuple `(j, i)`
            The edge connecting two vertices.
        """
        directed = self._directed if directed is None else directed
        for vertex, neighbors in enumerate(self.data):
            for neighbor in neighbors:
                # In the undirected case skip the mirrored duplicate.
                if not directed and vertex > neighbor:
                    continue
                yield (vertex, neighbor)
    def iter_triangles(self) -> Iterator[Tuple[int, int, int]]:
        """
        Iterator over all triangles (3-cycles) in the graph.

        Yields
        -------
        triangle : 3-tuple `(s, t, v)`
            The vertices that form a triangle, in increasing order.

        References
        ----------
        .. [1] Schank, T., & Wagner, D. (2005, May). Finding, counting and
            listing all triangles in large graphs, an experimental study. In
            International workshop on experimental and efficient algorithms
            (pp. 606-609). Springer, Berlin, Heidelberg.
        """
        n = len(self.data)
        degree = list(map(len, self.data))
        # Process vertices in order of decreasing degree ("forward"
        # algorithm of Schank & Wagner); `index` maps a vertex to its
        # rank in that ordering.
        vertices = numpy.argsort(degree)[::-1]
        index = numpy.argsort(vertices)
        visited: List[Set[int]] = [set() for _ in range(n)]
        for s in vertices:
            for t in self.data[s]:
                if index[s] < index[t]:
                    # Any vertex already seen by both endpoints closes a
                    # triangle.
                    for v in visited[s].intersection(visited[t]):
                        yield tuple(sorted((v, s, t)))
                    visited[t].add(s)
    def remove_triangles(self) -> None:
        """
        Removes all triangles (3-cycles) from the graph.

        Triangles are removed by deleting the least amount of edges from the
        graph. This is done using a greedy algorithm where edges that are
        contained in more than one triangle are removed first. If two edges are
        contained in the same amount of triangles, the edge that has the
        largest edge weight (distance) is removed first.
        """
        triangles = set(self.iter_triangles())
        if not triangles:
            return  # Quick return if possible
        # Bookkeeping: how many triangles each edge belongs to, and the
        # edge <-> triangle membership maps used to update counts as
        # edges are deleted.
        edge_counter: Dict[Tuple[int, int], int] = collections.Counter()
        edge_to_triangles_map = collections.defaultdict(set)
        triangle_to_edges_map = collections.defaultdict(set)
        for triangle in triangles:
            for edge in itertools.combinations(triangle, 2):
                edge_counter[edge] += 1
                edge_to_triangles_map[edge].add(triangle)
                triangle_to_edges_map[triangle].add(edge)
        # First consider all edges that are contained in at least two
        # triangles. Remove the edge that is contained in the largest number
        # of triangles and has the largest edge weight (distance).
        while True:
            count = max(edge_counter.values(), default=0)
            if count < 2:
                break
            edges = [edge for edge, n in edge_counter.items() if n == count]
            selected = max(edges, key=self.get_edge_weight)
            self.remove_edge(selected)
            # To prevent a RuntimeError loop over a copy of the set.
            for triangle in edge_to_triangles_map[selected].copy():
                for edge in triangle_to_edges_map[triangle]:
                    edge_counter[edge] -= 1
                    edge_to_triangles_map[edge].remove(triangle)
                del triangle_to_edges_map[triangle]
                triangles.remove(triangle)
        # For triangles that are isolated (i.e. those that do not contain an
        # edge contained in another triangle), remove the edge that has the
        # largest edge weight (distance).
        for triangle in triangles:
            edges = list(itertools.combinations(triangle, 2))
            selected = max(edges, key=self.get_edge_weight)
            self.remove_edge(selected)
class WeightedGraph(GraphBase):
    """
    Weighted graph represented as a list of dicts.

    Each list item is the neighbor-to-weight mapping of a single vertex:
    `graph[j]` is a dictionary whose keys are the neighbors of vertex `j`
    and whose values are the corresponding edge weights.
    """

    _item_type = dict

    def add_edge(self, edge: Tuple[int, int], weight: float = 1) -> None:
        vertex, neighbor = edge
        self.data[vertex][neighbor] = weight
        if not self._directed:
            # Undirected graphs store each edge in both directions.
            self.data[neighbor][vertex] = weight

    def remove_edge(self, edge: Tuple[int, int]) -> None:
        vertex, neighbor = edge
        del self.data[vertex][neighbor]
        if not self._directed:
            del self.data[neighbor][vertex]

    def get_edge_weight(self, edge: Tuple[int, int]) -> float:
        vertex, neighbor = edge
        return self.data[vertex][neighbor]
class UnweightedGraph(GraphBase):
    """
    Unweighted graph represented as a list of sets.

    Each list item is the neighbor set of a single vertex in the graph:
    `graph[j]` is the set of neighbors of vertex `j`.
    """

    _item_type = set

    def add_edge(self, edge: Tuple[int, int]) -> None:
        vertex, neighbor = edge
        self.data[vertex].add(neighbor)
        if not self._directed:
            # Undirected graphs store each edge in both directions.
            self.data[neighbor].add(vertex)

    def remove_edge(self, edge: Tuple[int, int]) -> None:
        vertex, neighbor = edge
        self.data[vertex].remove(neighbor)
        if not self._directed:
            self.data[neighbor].remove(vertex)

    def get_edge_weight(self, edge: Tuple[int, int]) -> float:
        # Every edge in an unweighted graph has unit weight.
        return 1
| delmic/odemis | src/odemis/util/graph.py | Python | gpl-2.0 | 9,789 |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from time import sleep, time
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import strict_discrete_set
class LakeShore331(Instrument):
    """ Represents the Lake Shore 331 Temperature Controller and provides
    a high-level interface for interacting with the instrument.

    .. code-block:: python

        controller = LakeShore331("GPIB::1")

        print(controller.setpoint_1)        # Print the current setpoint for loop 1
        controller.setpoint_1 = 50          # Change the setpoint to 50 K
        controller.heater_range = 'low'     # Change the heater range to Low
        controller.wait_for_temperature()   # Wait for the temperature to stabilize

        print(controller.temperature_A)     # Print the temperature at sensor A
    """

    temperature_A = Instrument.measurement(
        "KRDG? A",
        """ Reads the temperature of the sensor A in Kelvin. """
    )

    temperature_B = Instrument.measurement(
        "KRDG? B",
        """ Reads the temperature of the sensor B in Kelvin. """
    )

    setpoint_1 = Instrument.control(
        "SETP? 1", "SETP 1, %g",
        """ A floating point property that controls the setpoint temperature
        in Kelvin for Loop 1. """
    )

    setpoint_2 = Instrument.control(
        "SETP? 2", "SETP 2, %g",
        """ A floating point property that controls the setpoint temperature
        in Kelvin for Loop 2. """
    )

    heater_range = Instrument.control(
        "RANGE?", "RANGE %d",
        """ A string property that controls the heater range, which
        can take the values: off, low, medium, and high. These values
        correlate to 0, 0.5, 5 and 50 W respectively. """,
        validator=strict_discrete_set,
        values={'off':0, 'low':1, 'medium':2, 'high':3},
        map_values=True
    )

    def __init__(self, adapter, **kwargs):
        super(LakeShore331, self).__init__(
            adapter,
            "Lake Shore 331 Temperature Controller",
            **kwargs
        )

    def disable_heater(self):
        """ Turns the :attr:`~.heater_range` to :code:`off` to disable the heater. """
        self.heater_range = 'off'

    def wait_for_temperature(self, accuracy=0.1,
                             interval=0.1, sensor='A', setpoint=1, timeout=360,
                             should_stop=lambda: False):
        """ Blocks the program, waiting for the temperature to reach the setpoint
        within the accuracy (%), checking this each interval time in seconds.

        :param accuracy: An acceptable percentage deviation between the
                         setpoint and temperature
        :param interval: A time in seconds that controls the refresh rate
        :param sensor: The desired sensor to read, either A or B
        :param setpoint: The desired setpoint loop to read, either 1 or 2
        :param timeout: A timeout in seconds after which an exception is raised
        :param should_stop: A function that returns True if waiting should stop, by
                            default this always returns False
        """
        temperature_name = 'temperature_%s' % sensor
        setpoint_name = 'setpoint_%d' % setpoint
        # Only get the setpoint once, assuming it does not change
        setpoint_value = getattr(self, setpoint_name)
        def percent_difference(temperature):
            return abs(100*(temperature - setpoint_value)/setpoint_value)
        t = time()
        while percent_difference(getattr(self, temperature_name)) > accuracy:
            sleep(interval)
            if (time()-t) > timeout:
                # BUG FIX: the message previously formatted ``setpoint``
                # (the loop number, 1 or 2) instead of the actual setpoint
                # temperature in Kelvin.
                raise Exception((
                    "Timeout occurred after waiting %g seconds for "
                    "the LakeShore 331 temperature to reach %g K."
                ) % (timeout, setpoint_value))
            if should_stop():
                return
| ralph-group/pymeasure | pymeasure/instruments/lakeshore/lakeshore331.py | Python | mit | 5,077 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: creates the PaidTask model,
    # which references the project's swappable user model. Do not edit by
    # hand beyond what Django's migration framework expects.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PaidTask',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('task_type', models.PositiveSmallIntegerField(default=0, db_index=True, verbose_name='Type', choices=[(0, 'Translation'), (1, 'Review'), (2, 'Hourly Work'), (3, 'Correction')])),
                ('amount', models.FloatField(default=0, verbose_name='Amount')),
                ('rate', models.FloatField(default=0)),
                ('datetime', models.DateTimeField(verbose_name='Date', db_index=True)),
                ('description', models.TextField(null=True, verbose_name='Description')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| r-o-b-b-i-e/pootle | pootle/apps/reports/migrations/0001_initial.py | Python | gpl-3.0 | 1,181 |
import types
import operator
from collections import OrderedDict, Sequence
from magma.t import Flip, IntegerTypes
from magma.port import INPUT, OUTPUT, INOUT
from magma.bit import BitType, VCC, GND
from magma.array import ArrayType
from magma.tuple import TupleType
from magma.circuit import *
from magma.wire import wiredefaultclock
def hex(i):
    """Return the single uppercase hex digit ('0'-'9', 'A'-'F') for *i* in [0, 15].

    Note: intentionally shadows the ``hex`` builtin within this module.
    """
    return chr(ord('0') + i) if i < 10 else chr(ord('A') + i - 10)
def hstr(init, nbits):
    """Format *init* as a zero-padded uppercase hex literal for a bus of
    width ``1 << nbits`` bits.

    E.g. ``hstr(26, 3)`` -> ``"0x1A"`` (8-bit bus, two nibbles).

    BUG FIX: the original used Python 2 semantics (`bits/4` in range(),
    `init /= 16`, the `reduce` builtin) and crashed on Python 3. This
    version is equivalent on both and also handles nbits < 2 without
    raising.
    """
    nnibbles = (1 << nbits) // 4
    # Truncate the value to the bus width, as the original's repeated
    # mod-16 extraction effectively did.
    value = init % (1 << (4 * max(nnibbles, 1)))
    return "0x{:0{width}X}".format(value, width=max(nnibbles, 1))
def qualifiedname(t, instname):
    """Return the source-level name used to refer to value *t*.

    The power rails VCC/GND render as the literals '1' and '0'.
    """
    if t is VCC:
        return '1'
    if t is GND:
        return '0'
    #assert not t.anon()
    return t.name.qualifiedname(sep='.')
def find(circuit, defn):
    """Recursively collect into *defn* every definition used by *circuit*.

    Definitions are inserted post-order, so dependencies appear before
    the circuits that instantiate them. Returns *defn*.
    """
    if not isdefinition(circuit):
        return defn
    for inst in circuit.instances:
        find(type(inst), defn)
    defn.setdefault(circuit.__name__, circuit)
    return defn
def compileclocks(cls):
    """Wire the default clock to every instance inside circuit *cls*."""
    for inst in cls.instances:
        wiredefaultclock(cls, inst)
def compileinstance(self):
    """Emit the python statement that instantiates *self* with its kwargs."""
    rendered_args = []
    for key, val in self.kwargs.items():
        # Tuples encode (value, nbits) pairs and render as hex literals.
        rendered = hstr(val[0], val[1]) if isinstance(val, tuple) else str(val)
        rendered_args.append("%s=%s" % (key, rendered))
    return '%s = %s(%s)' % (str(self), str(type(self)), ', '.join(rendered_args))
def compilewire(input, instname):
    """Emit ``wire(...)`` statement(s) connecting *input* to its driver.

    Composite (array/tuple) values that are not driven as a single whole
    are recursively decomposed into one wire statement per element.
    Returns the generated source as a string.
    """
    output = input.value()
    if isinstance(output, ArrayType) or isinstance(output, TupleType):
        if not output.iswhole(output.ts):
            # Element-wise wiring for partially-driven composites.
            s = ''
            for i in range(len(input)):
                s += compilewire( input[i], instname )
            return s
    iname = qualifiedname( input, instname )
    oname = qualifiedname( output, instname )
    return 'wire(%s, %s)\n' % (oname, iname)
def compilecircuit(cls):
    """Emit the python source re-defining circuit class *cls*.

    The output consists of a DefineCircuit(...) header, one statement per
    instance, the wire statements for all instance and circuit inputs,
    and a closing EndCircuit() call.
    """
    instname = cls.__name__
    args = ['"%s"' % instname]
    for k, v in cls.interface.ports.items():
        assert v.isinput() or v.isoutput()
        args.append('"%s"'%k)
        # Ports are re-declared flipped — presumably to present the
        # external view of the interface; confirm against magma's Flip.
        args.append(str(Flip(type(v))))
    s = ", ".join(args)
    s = '%s = DefineCircuit(%s)\n' % (instname, s)
    # emit instances
    for instance in cls.instances:
        s += compileinstance(instance) + '\n'
    # emit wires from instances
    for instance in cls.instances:
        for input in instance.interface.inputs():
            s += compilewire( input, instname )
    for input in cls.interface.inputs():
        s += compilewire( input, instname )
    s += "EndCircuit()\n"
    return s
def compile(main):
    """Translate *main* and every circuit definition it depends on into
    source text (dependency definitions first)."""
    compileclocks(main)
    # Dependency-ordered mapping of circuit name -> circuit class.
    definitions = find(main, OrderedDict())
    return ''.join(compilecircuit(circuit) + '\n'
                   for circuit in definitions.values())
| bjmnbraun/icestick_fastio | thirdparty/magma/magma/ir.py | Python | mit | 2,839 |
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
This is a PermissionAccepter object used to get user approval of permissions via the command line.
"""
#external imports
import os
from collections import OrderedDict
#internal imports
from subuserlib.classes.permissionsAccepters.permissionsAccepter import PermissionsAccepter
from subuserlib.classes.userOwnedObject import UserOwnedObject
import subuserlib.permissions
import subuserlib.subprocessExtras
class AcceptPermissionsAtCLI(PermissionsAccepter,UserOwnedObject):
  """
  Command-line PermissionsAccepter: prints the requested permission
  changes for a subuser on stdout and asks the user how to proceed.
  """
  def __init__(self,user,alwaysAccept = False):
    # When alwaysAccept is True the prompt is skipped and the "accept"
    # option is taken automatically (useful for non-interactive runs).
    self.__alwaysAccept = alwaysAccept
    UserOwnedObject.__init__(self,user)
  def getAllwaysAccept(self):
    """
    Should the accepter accept the permissions/changes without actually prompting the users?
    """
    return self.__alwaysAccept
  def accept(self,subuser,newDefaults,oldDefaults,userApproved):
    """
    Show the permission diff for *subuser* and apply the user's choice.

    userApproved is None when the subuser has no approved permissions yet;
    then the full requested set is described and created on acceptance.
    Otherwise only the additions/changes and removals are shown.
    """
    # Fix: raw_input() only exists on Python 2, but the module header
    # promises Python 2/3 compatibility -- fall back to input() on Python 3.
    try:
      promptForInput = raw_input
    except NameError:
      promptForInput = input
    if userApproved is None:
      print(subuser.getName()+" would like to have the following permissions:")
      newDefaults.describe()
      createNewPermissions = True
    else:
      createNewPermissions = False
      (removedPermissions,additionsAndChanges) = subuserlib.permissions.comparePermissions(newDefaults = newDefaults, oldDefaults=oldDefaults, userApproved=userApproved)
      if additionsAndChanges == {} and removedPermissions == []:
        # Nothing changed since the last approval; no interaction needed.
        return
      if not additionsAndChanges == {}:
        print(subuser.getName()+" would like to add/change the following permissions:")
        for permission,value in additionsAndChanges.items():
          for line in subuserlib.permissions.permissionDescriptions[permission](value):
            print(" - "+line)
      if not removedPermissions == []:
        print(subuser.getName()+" no longer needs the following permissions:")
        for removedPermission in removedPermissions:
          for line in subuserlib.permissions.permissionDescriptions[removedPermission](oldDefaults[removedPermission]):
            print(" - "+line)
    options = OrderedDict([("A","Accept and apply changes")
                          ,("E","Apply changes and edit result")
                          ,("e","Ignore request and edit permissions by hand")])
    if createNewPermissions:
      # There is no existing permissions file to edit by hand yet.
      del options["e"]
    for option,description in options.items():
      print(option+" - "+description)
    if self.getAllwaysAccept():
      # Echo the auto-selected answer so transcripts stay readable.
      print("A")
      choice = "A"
    else:
      choice = None
      while not choice in options:
        choice = promptForInput("Please select an option:")
    if (choice == "A") or (choice == "E"):
      if createNewPermissions:
        subuser.createPermissions(newDefaults)
      else:
        subuser.getPermissions().applyChanges(removedPermissions,additionsAndChanges)
      subuser.getPermissions().save()
    if (choice == "E") or (choice == "e"):
      # Open the saved permissions file in the user's $EDITOR.
      subuserlib.subprocessExtras.call([os.environ["EDITOR"],subuser.getPermissions().getWritePath()])
| peter1000/subuser | logic/subuserlib/classes/permissionsAccepters/acceptPermissionsAtCLI.py | Python | lgpl-3.0 | 2,995 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import pygame
import pygame.gfxdraw
from hud import Button, Label, HudElement
from layers import LayersHandler
from nautili import colors
from renderer import Renderer
import settings
import wind
class Panel(object):
    """Base class for HUD panels: owns a transparent drawing surface, a
    renderer, and an ordered list of interactive child elements."""

    def __init__(self, game, offset, size):
        self.game = game
        self.offset = offset
        self.width, self.height = size
        # Per-panel alpha surface, blitted onto the game screen each frame.
        self.hud_surface = pygame.Surface(size, pygame.SRCALPHA).convert_alpha()
        self.hud = Renderer(self.hud_surface)
        self.rect = pygame.Rect(self.offset, size)
        self.objects = []

    def get_sprites(self):
        """Children as an ordered sprite group (draw order == add order)."""
        return pygame.sprite.OrderedUpdates(self.objects)

    def draw_sprites(self):
        group = self.get_sprites()
        group.update()
        group.draw(self.game.screen)

    def draw(self):
        """Render the HUD surface and children, then blit onto the screen."""
        self.hud.draw()
        self.draw_sprites()
        self.game.screen.blit(self.hud_surface, self.offset)

    def mouse_over(self, event_position):
        """Forward a hover event to every child element."""
        for element in self.objects:
            element.mouse_over(event_position)

    def check_click(self, event_position):
        """Dispatch a click to children when it lands inside this panel.

        Returns True if the click was inside the panel's rect, otherwise
        None (matching the original implicit return).
        """
        if not self.rect.collidepoint(event_position):
            return None
        for element in self.objects:
            element.check_click(event_position)
        return True
class RightPanel(Panel):
    """Side panel with the wind/shoot/end-turn controls and the
    selected-object info sub-panel."""

    def __init__(self, game, offset, size):
        Panel.__init__(self, game, offset, size)
        button_font = pygame.font.Font(None, 35)
        # Semi-transparent backdrop stretched to the panel size.
        self.background = pygame.transform.scale(pygame.image.load(os.path.join("./data/hud", "shade.png")), size)
        self.get_wind_button = Button(button_font, "Wind (Tab):", (10, 10),
                                      offset=offset,
                                      on_click=self.get_wind)
        label_font = pygame.font.Font(None, 35)
        self.wind_label = Label(label_font, colors.WHITE, "", (10, 40), offset=offset)
        self.shoot_button = Button(button_font, "Shoot (Shift)", (10, 80),
                                   offset=offset,
                                   on_click=self.shoot)
        self.shoot_label = Label(label_font, colors.WHITE, "", (10, 110), offset=offset)
        # Nested sub-panel; drawn manually in draw(), not part of objects.
        self.object_info_panel = ObjectInfo(game,
                                            (offset[0], offset[1] + 150),
                                            (self.width, self.height / 2))
        self.end_move_button = Button(button_font, "End turn (Enter)", (10, self.height - 30),
                                      offset=offset,
                                      on_click=self.end_move)
        self.objects.append(self.get_wind_button)
        self.objects.append(self.wind_label)
        self.objects.append(self.shoot_button)
        self.objects.append(self.shoot_label)
        self.objects.append(self.end_move_button)

    def draw(self):
        # Backdrop first, then buttons/labels, then the info sub-panel.
        self.game.screen.blit(self.background, self.offset)
        Panel.draw(self)
        self.object_info_panel.draw()

    def set_model(self, **kwargs):
        """Proxy to the object-info sub-panel (model picture + stats)."""
        self.object_info_panel.set_model(**kwargs)

    def get_wind(self):
        """Roll a new wind for this turn; disabled until the turn ends."""
        if not self.get_wind_button.enabled():
            return
        self.game.drop_selection()
        # One wind roll per turn: disable until end_move() re-enables.
        self.get_wind_button.disable()
        self.shoot_label.set_text("")
        self.game.wind_type = wind.get_random_wind()
        self.game.wind_direction = wind.get_random_direction()
        if self.game.wind_type == wind.WIND:
            self.wind_label.set_text("{}".format(wind.wind_direction_to_str(self.game.wind_direction)))
        else:
            self.wind_label.set_text("{}".format(wind.wind_type_to_str(self.game.wind_type)))
        if self.game.wind_type == wind.STORM:
            # Storms push every ship immediately.
            self.game.force_ships_move()

    def set_wind(self, wind_type, wind_direction):
        """Apply an externally supplied wind (e.g. from a saved/remote game)."""
        if wind_type is None:
            return
        self.get_wind_button.disable()
        self.shoot_label.set_text("")
        self.game.wind_type = wind_type
        self.game.wind_direction = wind_direction
        if self.game.wind_type == wind.WIND:
            self.wind_label.set_text("{}".format(wind.wind_direction_to_str(self.game.wind_direction)))
        else:
            self.wind_label.set_text("{}".format(wind.wind_type_to_str(self.game.wind_type)))

    def shoot(self):
        """Fire all of the current player's units that have targets.

        A single random roll (1-in-3 chance of 0 == miss) applies to every
        shooter this volley.
        """
        miss = random.randint(0, 2)
        if self.game.player == settings.PLAYER1:
            targets_to_shoot = self.game.yellow_ships + self.game.yellow_ports
        else:
            targets_to_shoot = self.game.green_ships + self.game.green_ports
        # NOTE(review): relies on Python 2 filter() returning a list -- the
        # truthiness test below would misbehave with a Py3 iterator; confirm.
        targets_to_shoot = filter(lambda s: s.has_targets(), targets_to_shoot)
        for target in targets_to_shoot:
            target.shoot(not miss)
        if targets_to_shoot:
            if miss:
                self.shoot_label.set_text("missed")
            else:
                self.shoot_label.set_text("hit!")
        self.game.all_sprites = self.game.layers_handler.get_all_sprites()
        self.game.update_player_models()

    def end_move(self):
        """Advance to the next player's turn and reset turn-scoped UI."""
        self.game.next_turn()
        self.get_wind_button.enable()
        self.wind_label.set_text("")
        self.shoot_label.set_text("")
class TopPanel(Panel):
    """Top status bar: whose turn it is plus per-side ship/port counts."""

    def __init__(self, game, offset, size):
        Panel.__init__(self, game, offset, size)
        label_header_font = pygame.font.Font(None, 40)
        label_font = pygame.font.Font(None, 25)
        # Turn banner colored to match the active player.
        self.turn_label = Label(label_header_font,
                                colors.YELLOW if self.game.player == settings.PLAYER1 else colors.GREEN,
                                "{} player turn".format(self.game.player.capitalize()), (10, 10))
        self.yellow_label = Label(label_font, colors.YELLOW, "Yellow", (self.width / 2 - 300, 15))
        self.yellow_counts = Label(label_font, colors.YELLOW, "ships: 0 ports: 0", (self.width / 2 - 240, 15))
        self.green_label = Label(label_font, colors.GREEN, "Green", (self.width / 2 + 90, 15))
        self.green_counts = Label(label_font, colors.GREEN, "ships: 0 ports: 0", (self.width / 2 + 150, 15))
        self.objects.append(self.turn_label)
        self.objects.append(self.yellow_label)
        self.objects.append(self.yellow_counts)
        self.objects.append(self.green_label)
        self.objects.append(self.green_counts)

    def update(self):
        """Refresh the ship/port counters from the current game state."""
        self.yellow_counts.set_text("ships: {} ports: {}".format(len(self.game.yellow_ships),
                                                                 len(self.game.yellow_ports)))
        self.green_counts.set_text("ships: {} ports: {}".format(len(self.game.green_ships),
                                                                len(self.game.green_ports)))
class MiniMap(Panel):
    """Scaled-down overview map with a draggable/clickable camera frame.

    World coordinates are divided by a uniform ``scale`` and shifted by
    ``map_offset`` so the whole map fits centered inside the panel.
    """

    def __init__(self, game, offset, size):
        Panel.__init__(self, game, offset, size)
        self.border = pygame.Rect((0, 0), (self.width, self.height))
        orig_map_width, orig_map_height = self.game.layers_handler.get_map_dimensions()
        orig_map_polygon = self.game.layers_handler.get_map_polygon()
        # Uniform scale chosen so the larger dimension just fits.
        self.scale = max(orig_map_width / float(self.width), orig_map_height / float(self.height))
        self.map_offset = ((self.width - orig_map_width / self.scale) / 2,
                           (self.height - orig_map_height / self.scale) / 2)
        # NOTE(review): relies on Python 2 map() returning a list (reused
        # every frame in draw()); a Py3 iterator would be exhausted -- confirm.
        self.sea_polygon = map(lambda p: (p[0] / self.scale + self.map_offset[0],
                                          p[1] / self.scale + self.map_offset[1]),
                               orig_map_polygon)
        self.cam_width = settings.MAIN_WIN_WIDTH / self.scale
        self.cam_height = settings.MAIN_WIN_HEIGHT / self.scale

    def draw_layer(self, layer, color):
        """Plot each tile of *layer* as a 1px dot in minimap coordinates."""
        for tile in LayersHandler.filter_not_none(LayersHandler.flatten(layer)):
            pygame.draw.circle(self.hud_surface, color,
                               map(lambda x, offs: int(x / self.scale + offs),
                                   self.game.layers_handler.isometric_to_orthogonal(*tile.coords()), self.map_offset),
                               1)

    def draw(self):
        self.hud.fill(colors.BLACK)
        self.hud.draw()
        # Draw sea
        pygame.gfxdraw.filled_polygon(self.hud_surface, self.sea_polygon, colors.LIGHT_BLUE)
        # Draw islands, rocks and etc.
        self.draw_layer(self.game.islands, colors.DARK_GREEN)
        self.draw_layer(self.game.rocks, colors.RED)
        self.draw_layer(self.game.yellow_ships, colors.YELLOW)
        self.draw_layer(self.game.yellow_ports, colors.DARK_YELLOW)
        self.draw_layer(self.game.yellow_royal_ports, colors.BROWN)
        self.draw_layer(self.game.neutral_ports, colors.BLACK)
        self.draw_layer(self.game.green_ships, colors.GREEN)
        self.draw_layer(self.game.green_ports, colors.YELLOW_GREEN)
        self.draw_layer(self.game.green_royal_ports, colors.LIGHT_LIGHT_BLUE)
        # Draw camera rectangle
        # Camera offset is negative in world space, hence the -scale division.
        camera_offset = map(lambda p, offs: p / -self.scale + offs, self.game.get_camera_offset(), self.map_offset)
        cam_rect = pygame.Rect(camera_offset, (self.cam_width, self.cam_height))
        pygame.draw.rect(self.hud_surface, colors.WHITE, cam_rect, 1)
        pygame.draw.rect(self.hud_surface, colors.WHITE, self.border, 1)
        # Draw additional stuff
        self.draw_sprites()
        self.game.screen.blit(self.hud_surface, self.offset)

    def check_click(self, event_position):
        """Recenter the main camera on the clicked minimap point."""
        if self.rect.collidepoint(event_position):
            current_camera_offset = map(lambda p, offs: p / -self.scale + offs,
                                        self.game.get_camera_offset(),
                                        self.map_offset)
            mouse_minimap_coordinates = map(lambda x, y: x - y, event_position, self.offset)
            # Center the camera frame on the click point.
            new_camera_offset = (mouse_minimap_coordinates[0] - self.cam_width / 2,
                                 mouse_minimap_coordinates[1] - self.cam_height / 2)
            self.game.move_camera(map(lambda x, y: (x - y) * self.scale, current_camera_offset, new_camera_offset))
            return True
class ObjectImage(HudElement):
    """HUD sprite showing a model's picture; effectively invisible when
    no image is set."""

    def __init__(self, pos, offset):
        HudElement.__init__(self, pos, offset)
        self.image = None
        self.rect = None
        self.reset()

    def reset(self):
        """Replace the picture with a 1pt empty-text surface (blank)."""
        placeholder = pygame.font.Font(None, 1).render("", True, colors.BLACK)
        self._install(placeholder.convert_alpha())

    def set_image(self, image):
        """Load *image* (a file path); a falsy value clears the picture."""
        if not image:
            self.reset()
        else:
            self._install(pygame.image.load(image).convert_alpha())

    def _install(self, surface):
        # Anchor the new surface at this element's configured position.
        self.image = surface
        self.rect = self.image.get_rect()
        self.rect.topleft = self.pos
class ObjectInfo(Panel):
    """Panel showing the picture and stats of the currently selected
    game object (ship or port)."""

    def __init__(self, game, offset, size):
        Panel.__init__(self, game, offset, size)
        self.border = pygame.Rect((0, 0), (self.width, self.height))
        self.image = ObjectImage((0, 0), offset)
        name_font = pygame.font.Font(None, 25)
        font = pygame.font.Font(None, 22)
        self.name_label = Label(name_font, colors.WHITE, "", (0, 220), offset=offset)
        self.fire_range_label = Label(font, colors.WHITE, "", (10, 240), offset=offset)
        self.max_move_label = Label(font, colors.WHITE, "", (10, 260), offset=offset)
        self.stille_move_label = Label(font, colors.WHITE, "", (10, 280), offset=offset)
        self.storm_move_label = Label(font, colors.WHITE, "", (10, 300), offset=offset)
        self.objects.append(self.image)
        self.objects.append(self.name_label)
        self.objects.append(self.fire_range_label)
        self.objects.append(self.max_move_label)
        self.objects.append(self.stille_move_label)
        self.objects.append(self.storm_move_label)

    def set_model(self, model="", properties=None):
        """Update the panel for a selected model.

        model: base name of a PNG in settings.MODELS_DIR; "" clears the image.
        properties: mapping with optional keys 'name', 'fire_range',
            'max_move', 'stille_move', 'storm_move' -- a missing key blanks
            its row.  (Fixed: the original mutable default ``properties={}``
            shared one dict across all calls.)
        """
        if properties is None:
            properties = {}
        if model:
            model = os.path.join(settings.MODELS_DIR, "{}.png".format(model))
        self.name_label.set_text(properties.get('name', ""))
        self.name_label.center(self.width)
        self._set_stat(self.fire_range_label, properties, 'fire_range', "Fire range: {}")
        self._set_stat(self.max_move_label, properties, 'max_move', "Max move: {}")
        self._set_stat(self.stille_move_label, properties, 'stille_move', "Stille move: {}")
        # NOTE(review): label text says "Storm range" for the 'storm_move'
        # key -- kept as-is since it is user-visible text.
        self._set_stat(self.storm_move_label, properties, 'storm_move', "Storm range: {}")
        self.image.set_image(model)

    @staticmethod
    def _set_stat(label, properties, key, template):
        # Show the formatted stat, or blank the row when the key is absent
        # (replaces four identical try/except KeyError blocks).
        if key in properties:
            label.set_text(template.format(properties[key]))
        else:
            label.set_text("")
| aikikode/nautili | nautili/panels.py | Python | gpl-3.0 | 12,872 |
#!/usr/bin/env python
'''
mouse_and_match.py [-i path | --input path: default ../data/]
Demonstrate using a mouse to interact with an image:
Read in the images in a directory one by one
Allow the user to select parts of an image with a mouse
When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.
SPACE for next image
ESC to exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
# built-in modules
import os
import sys
import glob
import argparse
from math import *
# Mouse-drag state shared with the onmouse callback:
drag_start = None  # (x, y) where the current drag began, or None when idle
sel = (0,0,0,0)  # current selection rectangle as (x1, y1, x2, y2)
def onmouse(event, x, y, flags, param):
    """Mouse callback: drag to select a patch, then template-match it.

    On button-down a drag starts; while dragging, the selection rectangle
    is drawn; on button-up the selected patch of the module-global ``gray``
    image is correlated with the whole image and shown in "result".
    """
    global drag_start, sel
    if event == cv2.EVENT_LBUTTONDOWN:
        drag_start = x, y
        sel = 0,0,0,0
    elif event == cv2.EVENT_LBUTTONUP:
        if sel[2] > sel[0] and sel[3] > sel[1]:
            patch = gray[sel[1]:sel[3],sel[0]:sel[2]]
            result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)
            # Cubing sharpens the correlation peaks before thresholding.
            result = np.abs(result)**3
            _val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
            result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)
            cv2.imshow("result", result8)
        drag_start = None
    elif drag_start:
        #print flags
        if flags & cv2.EVENT_FLAG_LBUTTON:
            # Normalize so (sel[0],sel[1]) is always the top-left corner.
            minpos = min(drag_start[0], x), min(drag_start[1], y)
            maxpos = max(drag_start[0], x), max(drag_start[1], y)
            sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
            img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
            cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
            cv2.imshow("gray", img)
        else:
            # NOTE(review): this else pairs with the LBUTTON-flag test above,
            # not with the drag_start check -- it fires on a move event whose
            # flags no longer report the left button held.
            print("selection is complete")
            drag_start = None
if __name__ == '__main__':
    print(__doc__)

    parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
    parser.add_argument("-i","--input", default='../data/', help="Input directory.")
    args = parser.parse_args()
    path = args.input

    cv2.namedWindow("gray",1)
    cv2.setMouseCallback("gray", onmouse)
    '''Loop through all the images in the directory'''
    for infile in glob.glob( os.path.join(path, '*.*') ):
        ext = os.path.splitext(infile)[1][1:] #get the filename extenstion
        if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
            print(infile)
            img=cv2.imread(infile,1)
            if img is None:
                continue
            # Reset the module-level drag/selection state for each image.
            sel = (0,0,0,0)
            drag_start = None
            gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.imshow("gray",gray)
            # SPACE (any key) advances to the next image; ESC (27) exits.
            if cv2.waitKey() == 27:
                break
    cv2.destroyAllWindows()
| zzjkf2009/Midterm_Astar | opencv/samples/python/mouse_and_match.py | Python | mit | 2,758 |
import pandas as pd
import numpy as np
import pickle
import tables
from sklearn.preprocessing import LabelEncoder
train_file = 'train_users_2'
test_file = 'test_users'
data_learn = pd.read_csv('../Data/%s.csv' % train_file, index_col='id')
data_test = pd.read_csv('../Data/%s.csv' % test_file, index_col='id')


def _dump_encoder(encoder, feature):
    """Persist the fitted LabelEncoder and a human-readable legend.

    Pickle files are opened in binary mode ('wb'): required on Python 3
    and harmless on Python 2 (the original text-mode 'w' corrupts pickles
    on Python 3, and the original file handles were never closed).
    """
    with open('../Encoding/LabelEncoder_%s.md' % feature, 'wb') as fh:
        pickle.dump(encoder, fh)
    # Writing down the meaning of all encodings
    with open('../Encoding/%s.txt' % feature, 'w') as fh:
        legend = '\n'.join('%s - %s' % (k, klass)
                           for k, klass in enumerate(encoder.classes_))
        fh.write(legend)


# Encoding the labeled features
to_encode = ['gender',
             'signup_method',
             'signup_flow',
             'language',
             'affiliate_channel',
             'affiliate_provider',
             'first_affiliate_tracked',
             'signup_app',
             'first_device_type',
             'first_browser',
             'country_destination',
             ]

for feature in to_encode:
    if feature == 'country_destination':  # does not exist for the test set
        le = LabelEncoder()
        le.fit(data_learn[feature])
    else:
        data_learn[feature] = data_learn[feature].fillna('undef')
        data_test[feature] = data_test[feature].fillna('undef')
        # Fit on the union of the classes seen in train and test.
        train_classes = pd.unique(data_learn[feature])
        test_classes = pd.unique(data_test[feature])
        classes = pd.Series(list(set(train_classes) | set(test_classes)))
        le = LabelEncoder()
        le.fit(classes)
    _dump_encoder(le, feature)

""" Encoders for the sessions variables. """
store = pd.HDFStore('../Data/sessions.h5')
sessions = store['sessions']
store.close()

to_encode = ['action',
             'action_type',
             'action_detail',
             'device_type',
             ]

for feature in to_encode:
    # Special case of device_type which has 'unknown' instead of NaN.
    # Use .loc instead of the original chained assignment, which is not
    # guaranteed to write through (pandas SettingWithCopy).
    if feature == 'device_type':
        sessions.loc[sessions[feature] == '-unknown-', feature] = 'undef'
    sessions[feature] = sessions[feature].fillna('undef')
    le = LabelEncoder()
    le.fit(pd.unique(sessions[feature]))
    _dump_encoder(le, feature)
| RomainSabathe/kaggle_airbnb2015 | Code/create_encoders.py | Python | mit | 2,686 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_equal
class UacommentTest(PivxTestFramework):
    """Functional test for the -uacomment startup option: appended comments,
    total-length cap, and rejection of unsafe characters."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("test multiple -uacomment")
        # Fixed-width slices pull the trailing "(...)" comment out of the
        # user-agent string, e.g. ".../(testnode0)/".
        test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
        assert_equal(test_uacomment, "(testnode0)")
        self.restart_node(0, ["-uacomment=foo"])
        foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
        assert_equal(foo_uacomment, "(testnode0; foo)")

        self.log.info("test -uacomment max length")
        self.stop_node(0)
        # 256 'a's pushes the full user-agent string past the 256-byte cap.
        expected = "exceeds maximum length (256). Reduce the number or size of uacomments."
        self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected)

        self.log.info("test -uacomment unsafe characters")
        for unsafe_char in ['/', ':', '(', ')']:
            expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters"
            self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected)
# Standard functional-test entry point.
if __name__ == '__main__':
    UacommentTest().main()
| Mrs-X/PIVX | test/functional/feature_uacomment.py | Python | mit | 1,480 |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
import re
from nova.compute import utils as compute_utils
from nova import exception
from nova import utils
from nova.virt import imagecache
from nova.virt import images
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import uuidutils
from compute_hyperv.i18n import _
import compute_hyperv.nova.conf
from compute_hyperv.nova import pathutils
LOG = logging.getLogger(__name__)
CONF = compute_hyperv.nova.conf.CONF
class ImageCache(imagecache.ImageCacheManager):
    """Hyper-V image cache manager.

    Fetches Glance images into a local base-VHD directory, resizes cached
    VHDs to match instance flavors, and ages out images no longer used by
    any running instance.  File-level locks guard concurrent fetch/resize/
    removal of the same cached file.
    """

    def __init__(self):
        super(ImageCache, self).__init__()
        self._pathutils = pathutils.PathUtils()
        self._vhdutils = utilsfactory.get_vhdutils()
        # Populated by update(): image ids referenced by running instances.
        self.used_images = []
        # Cached files whose names are not plain image UUIDs (e.g. resized
        # copies like "<uuid>_<size_gb>").
        self.unexplained_images = []
        # Cached files named exactly after an image UUID.
        self.originals = []

    def _get_root_vhd_size_gb(self, instance):
        """Root disk size in GB, honoring an in-progress resize's old flavor."""
        if instance.old_flavor:
            return instance.old_flavor.root_gb
        else:
            return instance.flavor.root_gb

    def _resize_and_cache_vhd(self, instance, vhd_path):
        """Cache a copy of *vhd_path* resized to the instance's flavor.

        Returns the resized copy's path, or None when the flavor size
        equals the image size.  Raises FlavorDiskSmallerThanImage when the
        flavor cannot hold the image.
        """
        vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize']
        root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
        root_vhd_size = root_vhd_size_gb * units.Gi
        # Account for VHD format overhead when comparing against flavor size.
        root_vhd_internal_size = (
            self._vhdutils.get_internal_vhd_size_by_file_size(
                vhd_path, root_vhd_size))

        if root_vhd_internal_size < vhd_size:
            raise exception.FlavorDiskSmallerThanImage(
                flavor_size=root_vhd_size, image_size=vhd_size)
        if root_vhd_internal_size > vhd_size:
            # Resized copies live next to the base image as "<uuid>_<gb>.vhd".
            path_parts = os.path.splitext(vhd_path)
            resized_vhd_path = '%s_%s%s' % (path_parts[0],
                                            root_vhd_size_gb,
                                            path_parts[1])

            lock_path = os.path.dirname(resized_vhd_path)
            lock_name = "%s-cache.lock" % os.path.basename(resized_vhd_path)

            @utils.synchronized(name=lock_name, external=True,
                                lock_path=lock_path)
            def copy_and_resize_vhd():
                # Another request may have produced the copy while we waited
                # on the lock; only do the work once.
                if not self._pathutils.exists(resized_vhd_path):
                    try:
                        LOG.debug("Copying VHD %(vhd_path)s to "
                                  "%(resized_vhd_path)s",
                                  {'vhd_path': vhd_path,
                                   'resized_vhd_path': resized_vhd_path})
                        self._pathutils.copyfile(vhd_path, resized_vhd_path)
                        LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
                                  "size %(root_vhd_size)s",
                                  {'resized_vhd_path': resized_vhd_path,
                                   'root_vhd_size': root_vhd_size})
                        self._vhdutils.resize_vhd(resized_vhd_path,
                                                  root_vhd_internal_size,
                                                  is_file_max_size=False)
                    except Exception:
                        # Don't leave a partial copy behind on failure.
                        with excutils.save_and_reraise_exception():
                            if self._pathutils.exists(resized_vhd_path):
                                self._pathutils.remove(resized_vhd_path)

            copy_and_resize_vhd()
            return resized_vhd_path

    def get_cached_image(self, context, instance, rescue_image_id=None):
        """Return the local path of the (possibly resized) cached image.

        When rescue_image_id is given, that image is cached instead of the
        instance's own image and is never resized.
        """
        image_id = rescue_image_id or instance.image_ref
        image_type = self.get_image_format(context, image_id, instance)
        trusted_certs = instance.trusted_certs
        image_path, already_exists = self.cache_image(
            context, image_id, image_type, trusted_certs)

        # Note: rescue images are not resized.
        is_vhd = image_path.split('.')[-1].lower() == 'vhd'
        if (CONF.use_cow_images and is_vhd and not rescue_image_id):
            # Resize the base VHD image as it's not possible to resize a
            # differencing VHD. This does not apply to VHDX images.
            resized_image_path = self._resize_and_cache_vhd(instance,
                                                            image_path)
            if resized_image_path:
                return resized_image_path

        if rescue_image_id:
            self._verify_rescue_image(instance, rescue_image_id, image_path)

        return image_path

    def fetch(self, context, image_id, path, trusted_certs=None):
        """Download an image from Glance, bounded by the disk-ops semaphore."""
        with compute_utils.disk_ops_semaphore:
            images.fetch(context, image_id, path, trusted_certs)

    def append_image_format(self, path, image_type, do_rename=True):
        """Give the fetched file its format extension (detected for VHD/VHDX)."""
        if image_type == 'iso':
            format_ext = 'iso'
        else:
            # Historically, the Hyper-V driver allowed VHDX images registered
            # as VHD. We'll continue to do so for now.
            format_ext = self._vhdutils.get_vhd_format(path)
        new_path = path + '.' + format_ext.lower()

        if do_rename:
            self._pathutils.rename(path, new_path)

        return new_path

    def get_image_format(self, context, image_id, instance=None):
        """Return the image's disk format, preferring instance metadata over
        a Glance lookup."""
        image_format = None
        if instance:
            image_format = instance.system_metadata['image_disk_format']
        if not image_format:
            image_info = images.get_info(context, image_id)
            image_format = image_info['disk_format']
        return image_format

    def cache_image(self, context, image_id,
                    image_type=None, trusted_certs=None):
        """Ensure *image_id* is present in the base-VHD cache.

        Returns (path, fetched): the local path and whether a download was
        actually performed.  Fetching is serialized per image id.
        """
        if not image_type:
            image_type = self.get_image_format(context, image_id)

        base_image_dir = self._pathutils.get_base_vhd_dir()
        base_image_path = os.path.join(base_image_dir, image_id)

        lock_name = "%s-cache.lock" % image_id

        @utils.synchronized(name=lock_name, external=True,
                            lock_path=base_image_dir)
        def fetch_image_if_not_existing():
            fetched = False
            image_path = None
            # Any known extension means the image is already cached.
            for format_ext in ['vhd', 'vhdx', 'iso']:
                test_path = base_image_path + '.' + format_ext
                if self._pathutils.exists(test_path):
                    image_path = test_path
                    self._update_image_timestamp(image_id)
                    break

            if not image_path:
                try:
                    self.fetch(context, image_id, base_image_path,
                               trusted_certs)
                    fetched = True
                    image_path = self.append_image_format(
                        base_image_path, image_type)
                except Exception:
                    # Remove the partial download on failure.
                    with excutils.save_and_reraise_exception():
                        if self._pathutils.exists(base_image_path):
                            self._pathutils.remove(base_image_path)

            return image_path, fetched

        return fetch_image_if_not_existing()

    def _verify_rescue_image(self, instance, rescue_image_id,
                             rescue_image_path):
        """Reject rescue images larger than the instance's root disk."""
        rescue_image_info = self._vhdutils.get_vhd_info(rescue_image_path)
        rescue_image_size = rescue_image_info['VirtualSize']
        flavor_disk_size = instance.flavor.root_gb * units.Gi

        if rescue_image_size > flavor_disk_size:
            err_msg = _('Using a rescue image bigger than the instance '
                        'flavor disk size is not allowed. '
                        'Rescue image size: %(rescue_image_size)s. '
                        'Flavor disk size:%(flavor_disk_size)s.') % dict(
                            rescue_image_size=rescue_image_size,
                            flavor_disk_size=flavor_disk_size)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

    def get_image_details(self, context, instance):
        """Glance metadata for the instance's image."""
        image_id = instance.image_ref
        return images.get_info(context, image_id)

    def _age_and_verify_cached_images(self, context, all_instances, base_dir):
        """Touch in-use cached images; optionally remove stale ones."""
        for img in self.originals:
            if img in self.used_images:
                # change the timestamp on the image so as to reflect the last
                # time it was used
                self._update_image_timestamp(img)
            elif CONF.image_cache.remove_unused_base_images:
                self._remove_if_old_image(img)

    def _update_image_timestamp(self, image):
        """Touch the image's base file and all of its resized copies."""
        backing_files = self._get_image_backing_files(image)
        for img in backing_files:
            os.utime(img, None)

    def _get_image_backing_files(self, image):
        """Return the base file plus any "<uuid>_<size>" resized copies."""
        base_file = self._pathutils.get_image_path(image)
        if not base_file:
            # not vhd or vhdx, ignore.
            return []

        backing_files = [base_file]
        resize_re = re.compile('%s_[0-9]+$' % image, re.IGNORECASE)
        for img in self.unexplained_images:
            match = resize_re.match(img)
            if match:
                backing_files.append(self._pathutils.get_image_path(img))

        return backing_files

    def _remove_if_old_image(self, image):
        """Remove cached files for *image* older than the configured age."""
        backing_files = self._get_image_backing_files(image)
        max_age_seconds = (
            CONF.image_cache.remove_unused_original_minimum_age_seconds)

        for img in backing_files:
            age_seconds = self._pathutils.get_age_of_file(img)
            if age_seconds > max_age_seconds:
                LOG.info("Removing old, unused image: %s", img)
                self._remove_old_image(img)

    def _remove_old_image(self, image_path):
        """Remove a cached file under its per-file lock (so a concurrent
        fetch/resize of the same file cannot race the removal)."""
        lock_path = os.path.dirname(image_path)
        lock_name = "%s-cache.lock" % os.path.basename(image_path)

        @utils.synchronized(name=lock_name, external=True,
                            lock_path=lock_path)
        def _image_synchronized_remove():
            self._pathutils.remove(image_path)

        _image_synchronized_remove()

    def update(self, context, all_instances):
        """Periodic task entry point: classify cached files and age them."""
        base_vhd_dir = self._pathutils.get_base_vhd_dir()

        running = self._list_running_instances(context, all_instances)
        self.used_images = running['used_images'].keys()
        all_files = self._list_base_images(base_vhd_dir)
        self.originals = all_files['originals']
        self.unexplained_images = all_files['unexplained_images']

        self._age_and_verify_cached_images(context, all_instances,
                                           base_vhd_dir)

    def _list_base_images(self, base_dir):
        """Split cache-dir entries into UUID-named originals and the rest."""
        unexplained_images = []
        originals = []

        for entry in os.listdir(base_dir):
            file_name, extension = os.path.splitext(entry)
            # extension has a leading '.'. E.g.: '.vhdx'
            if extension.lstrip('.').lower() not in ['vhd', 'vhdx']:
                # File is not an image. Ignore it.
                # imagecache will not store images of any other formats.
                continue

            if uuidutils.is_uuid_like(file_name):
                originals.append(file_name)
            else:
                unexplained_images.append(file_name)

        return {'unexplained_images': unexplained_images,
                'originals': originals}
'''
Test offering client cert to origin
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import re
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work"))
ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=False)
cafile = "{0}/signer.pem".format(Test.RunDirectory)
cafile2 = "{0}/signer2.pem".format(Test.RunDirectory)
# --clientverify: "" empty string because microserver does store_true for argparse, but options is a dictionary
server = Test.MakeOriginServer("server", ssl=True, options = { "--clientCA": cafile, "--clientverify": ""}, clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), clientkey="{0}/signed-foo.key".format(Test.RunDirectory))
server2 = Test.MakeOriginServer("server2", ssl=True, options = { "--clientCA": cafile2, "--clientverify": ""}, clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), clientkey="{0}/signed-bar.key".format(Test.RunDirectory))
server3 = Test.MakeOriginServer("server3")
server.Setup.Copy("ssl/signer.pem")
server.Setup.Copy("ssl/signer2.pem")
server.Setup.Copy("ssl/signed-foo.pem")
server.Setup.Copy("ssl/signed-foo.key")
server.Setup.Copy("ssl/signed2-foo.pem")
server.Setup.Copy("ssl/signed2-bar.pem")
server.Setup.Copy("ssl/signed-bar.key")
server2.Setup.Copy("ssl/signer.pem")
server2.Setup.Copy("ssl/signer2.pem")
server2.Setup.Copy("ssl/signed-foo.pem")
server2.Setup.Copy("ssl/signed-foo.key")
server2.Setup.Copy("ssl/signed2-foo.pem")
server2.Setup.Copy("ssl/signed2-bar.pem")
server2.Setup.Copy("ssl/signed-bar.key")
request_header = {"headers": "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed2-foo.pem")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed2-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_verify_test',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': '{0}'.format(ts.Variables.port),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.filename': 'signed-foo.pem',
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.filename': 'signed-foo.key',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr' : 1,
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port)
)
ts.Disk.remap_config.AddLine(
'map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port)
)
ts.Disk.ssl_server_name_yaml.AddLine(
'- fqdn: bar.com')
ts.Disk.ssl_server_name_yaml.AddLine(
' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir))
ts.Disk.ssl_server_name_yaml.AddLine(
' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir))
# Should succeed
tr = Test.AddTestRun("Connect with first client cert to first server")
# The first run starts the whole topology: ATS (gated on its port) plus both origins.
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.port))
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(server2)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
# example.com -> /case1 -> server, which accepts the default signed-foo client cert.
tr.Processes.Default.Command = "curl -H host:example.com http://127.0.0.1:{0}/case1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
# server2 does not trust the signed-foo cert, so the proxied connect must fail.
trfail = Test.AddTestRun("Connect with first client cert to second server")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should succeed
# bar.com triggers the sni.yaml override (signed2-bar cert), which server2 accepts.
trbar = Test.AddTestRun("Connect with signed2 bar to second server")
trbar.StillRunningAfter = ts
trbar.StillRunningAfter = server
trbar.StillRunningAfter = server2
trbar.Processes.Default.Command = "curl -H host:bar.com http://127.0.0.1:{0}/case2".format(ts.Variables.port)
trbar.Processes.Default.ReturnCode = 0
trbar.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
# ...and the first server does not trust signed2-bar.
trbarfail = Test.AddTestRun("Connect with signed2 bar cert to first server")
trbarfail.StillRunningAfter = ts
trbarfail.StillRunningAfter = server
trbarfail.StillRunningAfter = server2
trbarfail.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
trbarfail.Processes.Default.ReturnCode = 0
trbarfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
tr2 = Test.AddTestRun("Update config files")
# Update the SNI config
snipath = ts.Disk.ssl_server_name_yaml.AbsPath
recordspath = ts.Disk.records_config.AbsPath
tr2.Disk.File(snipath, id = "ssl_server_name_yaml", typename="ats:config"),
tr2.Disk.ssl_server_name_yaml.AddLine(
'- fqdn: bar.com')
tr2.Disk.ssl_server_name_yaml.AddLine(
' client_cert: {0}/signed-bar.pem'.format(ts.Variables.SSLDir))
tr2.Disk.ssl_server_name_yaml.AddLine(
' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir))
# recreate the records.config with the cert filename changed
tr2.Disk.File(recordspath, id = "records_config", typename="ats:config:records"),
tr2.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': '{0}'.format(ts.Variables.port),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.filename': 'signed2-foo.pem',
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.filename': 'signed-foo.key',
'proxy.config.url_remap.pristine_host_hdr' : 1,
})
tr2.StillRunningAfter = ts
tr2.StillRunningAfter = server
tr2.StillRunningAfter = server2
tr2.Processes.Default.Command = 'echo Updated configs'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr2.Processes.Default.Env = ts.Env
tr2.Processes.Default.ReturnCode = 0
# Parking this as a ready tester on a meaningless process
# Stall the test runs until the ssl_server_name reload has completed
# At that point the new ssl_server_name settings are ready to go
def ssl_server_name_reload_done(tsenv):
    """Build a readiness predicate for the autest framework.

    The returned callable polls the ATS diagnostics log until the
    'ssl_server_name.yaml finished loading' message has appeared twice
    (initial load + the reload triggered by traffic_ctl), i.e. until the
    reloaded SNI settings are actually in effect.
    """
    def done_reload(process, hasRunFor, **kw):
        # Count matching log lines into a scratch file in the run directory.
        count_cmd = "grep 'ssl_server_name.yaml finished loading' {0} | wc -l > {1}/test.out".format(ts.Disk.diags_log.Name, Test.RunDirectory)
        if subprocess.run(count_cmd, shell=True, env=tsenv).returncode != 0:
            return False
        # Ready only once the recorded count reaches exactly 2.
        check_cmd = "if [ -f {0}/test.out -a \"`cat {0}/test.out`\" = \"2\" ] ; then true; else false; fi".format(Test.RunDirectory)
        return subprocess.run(check_cmd, shell=True, env=tsenv).returncode == 0
    return done_reload
# Apply the rewritten configs with a live reload.
tr2reload = Test.AddTestRun("Reload config")
tr2reload.StillRunningAfter = ts
tr2reload.StillRunningAfter = server
tr2reload.StillRunningAfter = server2
tr2reload.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr2reload.Processes.Default.Env = ts.Env
tr2reload.Processes.Default.ReturnCode = 0
#Should succeed
# After the reload, bar.com uses signed-bar, which the first server trusts.
tr3bar = Test.AddTestRun("Make request with other bar cert to first server")
# Wait for the reload to complete
tr3bar.Processes.Default.StartBefore(server3, ready=ssl_server_name_reload_done(ts.Env))
tr3bar.StillRunningAfter = ts
tr3bar.StillRunningAfter = server
tr3bar.StillRunningAfter = server2
tr3bar.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
tr3bar.Processes.Default.ReturnCode = 0
tr3bar.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
# server2 no longer accepts bar.com's (now signed-bar) cert.
tr3barfail = Test.AddTestRun("Make request with other bar cert to second server")
tr3barfail.StillRunningAfter = ts
tr3barfail.StillRunningAfter = server
tr3barfail.StillRunningAfter = server2
tr3barfail.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
tr3barfail.Processes.Default.ReturnCode = 0
tr3barfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
#Should succeed
# The default client cert is now signed2-foo, which server2 trusts.
tr3 = Test.AddTestRun("Make request with other cert to second server")
# Wait for the reload to complete
tr3.StillRunningAfter = ts
tr3.StillRunningAfter = server
tr3.StillRunningAfter = server2
tr3.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
tr3.Processes.Default.ReturnCode = 0
tr3.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
# ...and the first server rejects signed2-foo.
tr3fail = Test.AddTestRun("Make request with other cert to first server")
tr3fail.StillRunningAfter = ts
tr3fail.StillRunningAfter = server
tr3fail.StillRunningAfter = server2
tr3fail.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
tr3fail.Processes.Default.ReturnCode = 0
tr3fail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Test the case of updating certificate contents without changing file name.
trupdate = Test.AddTestRun("Update client cert file in place")
trupdate.StillRunningAfter = ts
trupdate.StillRunningAfter = server
trupdate.StillRunningAfter = server2
# Make a meaningless config change on the path so the records.config reload logic will trigger
# Overwrite signed-bar.pem with the signed2-bar contents (name unchanged).
trupdate.Setup.CopyAs("ssl/signed2-bar.pem", ".", "{0}/signed-bar.pem".format(ts.Variables.SSLDir))
# in the config/ssl directory for records.config
# Likewise overwrite signed2-foo.pem with the signed-foo contents.
trupdate.Setup.CopyAs("ssl/signed-foo.pem", ".", "{0}/signed2-foo.pem".format(ts.Variables.SSLDir))
# touch the sni.yaml so its reload logic re-reads the (re-contented) cert files.
trupdate.Processes.Default.Command = 'traffic_ctl config set proxy.config.ssl.client.cert.path {0}/; touch {1}'.format(ts.Variables.SSLDir,snipath)
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
trupdate.Processes.Default.Env = ts.Env
trupdate.Processes.Default.ReturnCode = 0
trreload = Test.AddTestRun("Reload config after renaming certs")
trreload.StillRunningAfter = ts
trreload.StillRunningAfter = server
trreload.StillRunningAfter = server2
trreload.Processes.Default.Command = 'traffic_ctl config reload'
trreload.Processes.Default.Env = ts.Env
trreload.Processes.Default.ReturnCode = 0
#Should succeed
# signed-bar.pem now actually contains signed2-bar, which server2 trusts.
tr4bar = Test.AddTestRun("Make request with renamed bar cert to second server")
# Wait for the reload to complete
tr4bar.DelayStart = 10
tr4bar.StillRunningAfter = ts
tr4bar.StillRunningAfter = server
tr4bar.StillRunningAfter = server2
tr4bar.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
tr4bar.Processes.Default.ReturnCode = 0
tr4bar.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
tr4barfail = Test.AddTestRun("Make request with renamed bar cert to first server")
tr4barfail.StillRunningAfter = ts
tr4barfail.StillRunningAfter = server
tr4barfail.StillRunningAfter = server2
tr4barfail.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
tr4barfail.Processes.Default.ReturnCode = 0
tr4barfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
#Should succeed
# signed2-foo.pem now contains signed-foo, which the first server trusts.
tr4 = Test.AddTestRun("Make request with renamed foo cert to first server")
tr4.StillRunningAfter = ts
tr4.StillRunningAfter = server
tr4.StillRunningAfter = server2
tr4.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
tr4.Processes.Default.ReturnCode = 0
tr4.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
#Should fail
tr4fail = Test.AddTestRun("Make request with renamed foo cert to second server")
tr4fail.StillRunningAfter = ts
tr4fail.StillRunningAfter = server
tr4fail.StillRunningAfter = server2
tr4fail.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
tr4fail.Processes.Default.ReturnCode = 0
tr4fail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
| chitianhao/trafficserver | tests/gold_tests/tls/tls_client_cert.test.py | Python | apache-2.0 | 15,301 |
#-*- coding: utf-8 -*
"""
Доделать
выбор скорости
показать, если ли другие варианты, давать выбирать их
кнопку пояснение (аудио)
кнопку подсказка (
"""
"""
Модуль "Робот" для обучения программированию. Версия 2.0
1. знакомство с командами управления и закраски, обход стены
2. процедуры (функции)
3. условие (ветвление)
4. for
5. while дойти до стены, обойти стену, обойти две стены, пройти по закрашенным, по стене, закрасить всё поле
6. while закрасить коридоры
7. переменная, счетчик, текст в ячейках, while до стены и обратно
8. закрасить отмеченные (использование if) (есть одна задача на установку стен)
9. закрасить треугольниками, найти сумму, закрасить четные, пройти спиралью
10. поиск пути, закрасить все. Рекурсия (вводная задача рекурсии 7-0)
Разрешено любое использование при упоминании авторства и сайта progras.ru.
Часть задач была "подсмотрена" в PascalABC
Сайт: progras.ru
skype: boris_vlasenko
email: [email protected]
phone: +7(905)505-49-49
Пожелания к новой версии:
1. Добавить "пульт прямого управления"
2. Сделать кнопку для перезапуска задачи без закрытия формы
3. Сделать кнопку для прямого перехода в форум
# Далее - Чертежник, черепушка, еще что-нибудь (робот без клеток, например)
# Робота на JS и HTML5
# Робота в Вконтак - обучение программированию.
# Войну Роботов во Вконтакт (обучение программированию)
"""
from tkinter import Tk, Canvas, Label, Button, Frame
from random import randrange as rnd
import time, os
# Bootstrap: this module is meant to be imported (as `robot`) from task.py.
# If it is run directly instead, warn the user and create a starter task.py.
if __name__ != 'robot':
    print("Этот файл запускать не следует.")
    if not os.path.exists('task.py'):
        # Write the template the student is supposed to edit and run.
        f1 = open('task.py','w')
        f1.write("""#-*- coding: utf-8 -*
import robot
r = robot.rmap()
r.lm('task1-1')
def task():
	pass
#------- пишите код здесь -----
#------- пишите код здесь -----
r.start(task)
#Отступ слева (tab) сохранять!
#r.help() - Список команд и краткие примеры
#r.demo() - показать решение этой задачи (только результат, не текст программы)
#r.demoAll() - показать все задачи (примерно 20 минут)
#r.rt() - вправо
#r.rt(3)- вправо на 3
#r.dn() - вниз
#r.up() - вверх
#r.lt() - влево
#r.pt() - закрасить Paint
#r.cl() - закрашена ли клетка? Color
#r.fr() - свободно ли справа? freeRight
#r.fl() - свободно ли слева? freeLeft
#r.fu() - свободно ли сверху? freeUp
#r.fd() - свободно ли снизу? freeDown
#r.wr() - стена ли справа? freeRight
#r.wl() - стена ли слева? freeLeft
#r.wu() - стена ли сверху? freeUp
#r.wd() - стена ли снизу? freeDown
#red - красный
#blue - синий
#yellow - желтый
#green - зеленый
""")
        f1.close()
        if os.path.exists('task.py'): print("Был создан файл task.py - запустите его")
        else: print("""Не удалось создать task.py. Попробуйте создать его самостоятельно с таким содержанием:
#----начало файла task.py------------------------
#-*- coding: utf-8 -*
import robot
r = robot.rmap()
r.lm('task1')
r.help() # Список команд. Уберите, если не нужно
#------- пишите код здесь vvvv -----
#------- пишите код здесь ^^^^ -----
r.end()
#----конец файла task.py------------------------
""")
    else: print("Запустите task.py")
    # Author contact information (printed only on direct execution).
    print("""
Сайт: progras.ru
skype: boris_vlasenko
email: [email protected]
phone: +7(905)505-49-49
""")
class rmap():
_var = [1]
_nc = 0
_nr = 0
_r = 0
_c = 0
_size = 0
_w = 0
_d = 0
_NoneUpdate = False
_Nonelabel = False
_Nonegettext = False
_field = []
_endPoint = (0,0)
_robot = '' # рисунок Робота (синее кольцо)
_park = ''
_canvas = ''
sleep = 0.5
_task = ''
_solve = ''
_test = ''
_res = ''
_bum = 0
m = []
m.append('task1')
m.append('task2')
m.append('task3')
m.append('task4')
m.append('task5')
m.append('task6')
m.append('task7')
m.append('task8')
m.append('task9')
m.append('task10')
m.append('task11')
m.append('task12')
m.append('task13')
    class _v: # Canvas item ids for one cell (text, label ring, fill) so they can be deleted and re-stacked on repaint.
        text = ''
        label = ''
        color = ''
    class _Tcell():
        # Logical state of a single grid cell.
        color = ''  # fill color ('' = not painted)
        text = ''  # text written in the cell
        label = ''  # label (outline ring) color
        wUp = False  # wall on the top edge of this cell
        wLeft = False  # wall on the left edge of this cell
        v = ''  # _v instance holding the cell's canvas item ids
    def help(self):
        """Print the list of Robot commands (in Russian).

        Per-command usage examples are intended to come from r.help_full().
        """
        print("""
        Пояснение по каждой команде: print команда.__doc__
        Например:
        print r.color.__doc__
        ---=: Команды перемещения :=---
        r.rt() # Вправо
        r.lt() # Влево
        r.dn() # Вниз
        r.up() # Вверх
        r.jumpTo(r,c) # Прыжок в точку. Без особых указаний в задачах не использовать
        -=-=-=-=-=-=-=-=-=-=-=-=-=-==-=
        ---=: Команды изменения среды :=---
        r.pt([цвет]) # Закрасить указанным цветом. По умолчанию зеленым
        r.sw(направление) # Установить стену с указанной стороны
        r.settext(тест) # Вписать в клетку текст
        -=-=-=-=-=-=-=-=-=-=-=-=-=-==-=
        ---=: Команды обратной связи :=---
        r.cl() # Каким цветом закрашена клетка? r.color()
        r.label() # Какого цвета метка в клетке?
        r.gettext() # Какой текст в клетке?
        r.getCoords() # Где Робот?
        r.getCoordR() # В какой строке Робот?
        r.getCoordС() # В каком столбце Робот?
        r.fu() # Сверху свободно?
        r.fd() # Снизу свободно?
        r.fr() # Справа свободно?
        r.fl() # Слева свободно?
        r.wu() # Сверху стена?
        r.wd() # Снизу стена?
        r.wr() # Справа стена?
        r.wl() # Слева стена?
        r.isPark # Робот на парковке?
        -=-=-=-=-=-=-=-=-=-=-=-=-=-==-=
        ---=: Дополнительно :=---
        r.sleep = 0.4 # Установить размер задержки после каждого хода. Меньше значение - быстрее Робот.
        r._NoneUpdate = False # Отключить прорисовку поля
        r._NoneUpdate = True # Включить прорисовку поля
        r.demo() # Показать, что нужно сделать в текущей задаче
        r.demoAll() # Показать все задачи (с решениями, по очереди)
        r.randcolor() # Генерировать случайный цвет
        -=-=-=-=-=-=-=-=-=-=-=-=-=-==-=
        """)
    def help_full(self):
        """Print detailed usage examples.

        Not implemented in this version; the message directs users to the
        author's contacts. See r.help() for the plain command list.
        """
        print("""
        Не реализовано в данной версии.
        Если нужно - пишите на [email protected] или на сайте progras.ru
        """)
    def demo(self):
        """Run the stored reference solution for the current task.

        Example: r.demo()
        To speed it up: r.sleep = 0.01, or temporarily disable redraws with
        r._NoneUpdate = True before the call (and False after).
        """
        # The stored solution source in self._solve refers to a module-level
        # name `r`, so publish this instance under that name before exec'ing.
        global r
        r = self
        exec(self._solve)
    def demoAll(self):
        """Load every built-in map in order and run its reference solution.

        Example: r.demoAll()  (set r.sleep = 0 first to make the Robot fast)
        """
        global r
        r = self
        for x in r.m:
            r.lm(x)
            print(x)
            r.demo()
            r.pause()
    def __init__(self):
        # Build the Tk window: canvas for the field, a row of control
        # buttons, and two labels (task description and result).
        self._w = 4 # wall thickness, px
        self._d = 4 # paint rectangle is inset by this much on each side, px
        self.sleep = 0.5 # delay after each move
        self._font_size = self._size // 2
        self._tk = Tk()
        self._tk.geometry('+0+0')
        # Position the window roughly in the upper-left third of the screen.
        x = (self._tk.winfo_screenwidth() - self._tk.winfo_reqwidth()) / 3
        y = (self._tk.winfo_screenheight() - self._tk.winfo_reqheight()) / 4
        self._tk.wm_geometry("+%d+%d" % (x, y))
        self._tk.title('Robot-hobot')
        self._canvas = Canvas(self._tk, width=(self._size*(self._nc+1)), height=(self._size*(self._nr+1)), bg="gray")
        buttons = Frame(self._tk)
        self.task = Label (self._tk, justify = 'left')
        self.res = Label (self._tk, justify = 'left')
        # Control buttons: run user's task(), show the solution, restart the
        # current map, and advance to the next map.
        self._but_start = Button(buttons,text = 'start',width=10,height=1)
        self._but_start.bind('<ButtonRelease-1>',self.but1)
        self._but_demo = Button(buttons,text = 'demo',width=10,height=1)
        self._but_demo.bind('<ButtonRelease-1>',self.but_demo)
        self._but_reload = Button(buttons,text = 'reload',width=10,height=1)
        self._but_reload.bind('<ButtonRelease-1>',self.but_reload)
        self._but_load_next = Button(buttons,text = 'load next',width=10,height=1)
        self._but_load_next.bind('<ButtonRelease-1>',self.but_load_next)
        buttons.grid(row=0, column=0, sticky = "w")
        self._canvas.grid(row=1, column=0, sticky = "e")
        self._but_start.pack(side = "left")
        self._but_demo.pack(side = "left")
        self._but_reload.pack(side = "left")
        self._but_load_next.pack(side = "left")
        self.task.grid(row=3, column=0, sticky = "w")
        self.res.grid(row=4, column=0, sticky = "w")
##        self.loadmap()
def but_load_next(self,event):
print ("load next")
index = self.m.index(self._cur_map)
if index < len(self.m)-1:
self.lm(self.m[index+1])
else:
self.lm(self.m[0])
    def but_demo(self,event):
        # 'demo' button: replay the reference solution for the current map.
        print ("demo")
        self.demo()
    def but1(self,event):
        # 'start' button: run the user's task() (stored by start()).
        print ('start')
        #self.lm(self._cur_map)
        self.solve_task()
    def but_reload(self,event):
        # 'reload' button: reset the current map to its initial state.
        print ("reload")
        self.lm(self._cur_map)
    def clear (self):
        "Reset field data structures to an empty map (no redraw)."
        self._canvas.delete('all')
        self._field = []
        self._park = []
        self._Nonelabel = False
        self._NoneisPark = False
        self._Nonesettext = False
        self._test = ''
        self._res = ''
        self._bum = 0
        # Allocate (nr+1) x (nc+1) cells; indices run 0.._nr / 0.._nc and the
        # playable cells are rows/columns 1.._nr-1 / 1.._nc-1.
        for r in range(1,self._nr+2):
            row = []
            for c in range(1,self._nc+2):
                row.append (self._Tcell())
            self._field.append(row)
        for r in range (1,self._nr):
            for c in range(1,self._nc):
                self._field[r][c].text = ''
                self._field[r][c].color = ''
                self._field[r][c].label = ''
                self._field[r][c].wUp = False
                self._field[r][c].wLeft = False
                self._field[r][c].v = self._v()
        # Outer border: walls along the top/bottom rows and left/right columns.
        for c in range (1,self._nc):
            self._field[1][c].wUp = True
            self._field[self._nr][c].wUp = True
        for r in range (1,self._nr):
            self._field[r][1].wLeft = True
            self._field[r][self._nc].wLeft = True
        self._solve = ''
        self._r = 1
        self._c = 1
    def _paintMap(self):
        "Redraw the whole field from the stored cell data."
        # Remember the Robot's position and suspend the per-move delay
        # while the many intermediate draw calls run.
        remc = self._c
        remr = self._r
        size = self._size
        sleep = self.sleep
        self.sleep = 0
        self._bg = [self._canvas.create_rectangle(1,1,(size*(self._nc+1)), (size*(self._nr+1)), fill="gray")]
        # Grid lines plus row/column numbers along the edges.
        # NOTE(review): the row-number create_text is not appended to _bg,
        # unlike the column one below — looks like an oversight; confirm.
        for r in range (1, self._nr+1):
            self._bg.append(self._canvas.create_line(size,r*size,self._nc*size,r*size))
            if r < self._nr: self._canvas.create_text(size/2,r*size+size/2,text=r)
        for c in range (1, self._nc+1):
            self._bg.append(self._canvas.create_line(c*size,size,c*size,self._nr*size))
            if c < self._nc: self._bg.append(self._canvas.create_text(c*size+size/2,size/2,text=c))
        # Per-cell decorations: walls, fill color, label ring and text.
        for r in range (1,self._nr):
            for c in range(1,self._nc):
                self._r = r
                self._c = c
                if self._field[r][c].wUp: # wall above
                    self.setWall('up')
                if self._field[r][c].wLeft: # wall on the left
                    self.setWall('left')
                if self._field[r][c].color != '' : # fill color
                    self.paint(self._field[r][c].color)
                if self._field[r][c].label != '' : # label ring
                    d = self._d
                    x1 = self._size*(c)
                    x2 = self._size*(c+1)
                    y1 = self._size*(r)
                    y2 = self._size*(r+1)
                    self._canvas.delete(self._field[r][c].v.label)
                    self._field[r][c].v.label = self._canvas.create_rectangle(x1+d,y1+d,x2-d,y2-d, width = d-1, outline = self._field[r][c].label)
                    self._canvas.lift(self._robot)
                self.settext(self._field[r][c].text) # cell text
        # Border walls of the bottom row and the rightmost column.
        for self._c in range (1,self._nc):
            if self._field[self._nr][self._c].wUp: # wall above
                self.setWall('down')
        for self._r in range (1,self._nr):
            if self._field[self._r][self._nc].wLeft: # wall on the left
                self.setWall('right')
        r = self._endPoint[0]
        c = self._endPoint[1]
        self._canvas.delete(self._park)
        if r > 0 and c > 0:
            self._park = self._canvas.create_oval (c*size+6,r*size+6, c*size+size-6,r*size+size-6, width = 3, outline = 'yellow')
        # parking spot (end point)
        self.jumpTo((remr,remc))
        self._task = '\n'+self._task
        self.task.config(text = self._task)
        self.res.config()
        self._update()
        self.sleep = sleep
        #self.pause()
    def _update(self):
        "Redraw the canvas and apply the per-move delay, unless disabled."
        if not self._NoneUpdate:
            self._canvas.update()
            time.sleep(self.sleep)
    def start(self,fun):
        # Remember the user's task() (invoked by the 'start' button via
        # solve_task) and enter the Tk event loop.
        self.solve_task = fun
        self._tk.mainloop()
##Робот
    def pause(self,t=1):
        """Suspend execution for t seconds (default 1).

        Examples: r.pause(), r.pause(2)
        """
        time.sleep(t)
def left(self, a = 1):
"""Шаг влево
#-------------------
r.left()
#-------------------
r.lt()
#-------------------
r.lt(3)
#-------------------
"""
if a == 1:
if self.freeLeft():
self._c -= 1
self._canvas.move(self._robot,-self._size*a,0)
self._update()
else:
self._stop()
else :
for z in range(0,a):
self.left()
def right(self, a = 1):
""" Шаг вправо
#-------------------
r.right()
#-------------------
r.rt()
#-------------------
r.rt(5)
#-------------------
"""
if a == 1:
if self.freeRight():
self._c += 1
self._canvas.move(self._robot,self._size*a,0)
self._update()
else:
self._stop()
else :
for z in range(0,a):
self.right()
def up(self, a = 1):
"""Шаг вверх
#-------------------
r.up()
#-------------------
r.up(3)
#-------------------
"""
if a == 1:
if self.freeUp():
self._r -= 1
self._canvas.move(self._robot,0,-self._size*a)
self._update()
else:
self._stop()
else :
for z in range(0,a):
self.up()
def down(self, a = 1):
""" Шаг вниз
#-------------------
r.down()
#-------------------
r.dn()
#-------------------
r.dn(4)
#-------------------
"""
if a == 1:
if self.freeDown():
self._r += 1
self._canvas.move(self._robot,0,self._size*a)
self._update()
else:
self._stop()
else :
for z in range(0,a):
self.down()
def jumpTo(self,coord=(1,1)):
"""Прыжок в клетку с указанными координами. Через стены.
#-------------------
r.jumpTo((2,3)) # Робот окажется в третьем столбце второй строки
#-------------------
"""
r = coord[0]
c = coord[1]
if ( 0 < r < self._nc) and (0 < c < self._nc):
self._r = r
self._c = c
size = self._size
self._canvas.coords(self._robot, c*size+4,r*size+4, c*size+size-4,r*size+size-4)
self._canvas.lift(self._robot)
self._update()
else:
print("Попытка переместиться за пределы поля. Отказано.")
    def paint (self, color = 'green'):
        """Paint the current cell with *color* ('green' by default).

        Examples: r.paint(), r.pt(), r.pt('red'),
                  r.pt(r.randcolor()), r.pt(r.label())
        """
        d = self._d+1
        self._field[self._r][self._c].color = color
        x1 = self._size*(self._c)
        x2 = self._size*(self._c+1)
        y1 = self._size*(self._r)
        y2 = self._size*(self._r+1)
        # Replace any previous fill rectangle, then restack: cell text above
        # the fill, Robot and parking ring above everything.
        self._canvas.delete(self._field[self._r][self._c].v.color)
        self._field[self._r][self._c].v.color = self._canvas.create_rectangle(x1+d,y1+d,x2-d,y2-d, width = 0, fill = color)
        self._canvas.lift(self._field[self._r][self._c].v.text)
        self._canvas.lift(self._robot)
        self._canvas.lift(self._park)
        self._update()
def setWall (self, target):
""" Установить стену с указанной стороны
#-------------------
r.sw('up') # Установить стену сверху
#-------------------
r.sw('left') # Установить стену слева
#-------------------
r.sw('down') # Установить стену снизу
#-------------------
r.sw('right') # Установить стену справа
#-------------------
"""
size = self._size
w = self._w
if target == 'up':
r = self._r
c = self._c
x1 = size*(c)-1
x2 = size*(c+1)+1
y1 = size*(r)
y2 = size*(r+1)
self._field[r][c].wUp = True
self._canvas.create_line(x1,y1,x2,y1, width = w)
elif target == 'left':
r = self._r
c = self._c
x1 = size*(c)
x2 = size*(c+1)
y1 = size*(r)-1
y2 = size*(r+1)+1
self._field[r][c].wLeft = True
self._canvas.create_line(x1,y1,x1,y2, width = w)
elif target == 'down':
r = self._r+1
c = self._c
x1 = size*(c)-1
x2 = size*(c+1)+1
y1 = size*(r)
y2 = size*(r+1)
self._field[r][c].wDown = True
self._canvas.create_line(x1,y1,x2,y1, width = w)
elif target == 'right':
r = self._r
c = self._c+1
x1 = size*(c)
x2 = size*(c+1)
y1 = size*(r)-1
y2 = size*(r+1)+1
self._field[r][c].wRight = True
self._canvas.create_line(x1,y1,x1,y2, width = 4)
self._update()
def wallUp (self):
""" Возвращает истину, если сверху есть стена
#-------------------
if r.wallUp(): r.pt() # Закрасить, если сверху стена
#-------------------
if r.wu(): r.pt() # Закрасить, если сверху стена
#-------------------
if r.wu():
r.pt() # Закрасить, если сверху стена
r.rt() # Перейти вправо
#-------------------
while r.wu(): # Идти вправо, пока сверху есть стена
r.rt()
"""
return self._field[self._r][self._c].wUp
def wallDown (self):
""" Возвращает истину, если снизу есть стена
#-------------------
if r.wallDown(): r.pt() # Закрасить, если снизу стена
#-------------------
if r.wd(): r.pt() # Закрасить, если снизу стена
#-------------------
if r.wd():
r.pt() # Закрасить, если снизу стена
r.rt() # Перейти вправо
#-------------------
while r.wd(): # Идти вправо, пока снизу есть стена
r.rt()
"""
return self._field[self._r+1][self._c].wUp
def wallLeft (self):
""" Возвращает истину, если слева есть стена
#-------------------
if r.wallLeft(): r.pt() # Закрасить, если слева стена
#-------------------
if r.wl(): r.pt() # Закрасить, если слева стена
#-------------------
if r.wl():
r.pt() # Закрасить, если слева стена
r.dn() # Перейти вниз
#-------------------
while r.wl(): # Идти вниз, пока слева есть стена
r.dn()
"""
return self._field[self._r][self._c].wLeft
def wallRight (self):
""" Возвращает истину, если справа есть стена
#-------------------
if r.wallRight(): r.pt() # Закрасить, если справа стена
#-------------------
if r.wr(): r.pt() # Закрасить, если справа стена
#-------------------
if r.wr():
r.pt() # Закрасить, если справа стена
r.dn() # Перейти вниз
#-------------------
while r.wr(): # Идти вниз, пока справа есть стена
r.dn()
"""
return self._field[self._r][self._c+1].wLeft
def freeUp (self):
""" Возвращает истину, если сверху свободно (нет стены)
#-------------------
if r.freeUp(): r.pt() # Закрасить, если сверху свободно
#-------------------
if r.fu(): r.up() # Шагнуть вверх, если сверху свободно
#-------------------
if r.fu():
r.up() # Шагнуть вверх
r.pt() # Закрасить
r.dn() # Перейти вниз
#-------------------
while r.fu(): # Идти вверх, пока сверху свободно
r.up()
"""
return not self._field[self._r][self._c].wUp
def freeDown (self):
""" Возвращает истину, если снизу свободно (нет стены)
#-------------------
if r.freeDown(): r.pt() # Закрасить, если снизу свободно
#-------------------
if r.fd(): r.dn() # Шагнуть вверх, если снизу свободно
#-------------------
if r.fd():
r.dn() # Шагнуть снизу
r.pt() # Закрасить
r.up() # Перейти вверх
#-------------------
while r.fd(): # Идти вниз, пока снизу свободно
r.dn()
"""
return not self._field[self._r+1][self._c].wUp
def freeLeft (self):
""" Возвращает истину, если слева свободно (нет стены)
#-------------------
if r.freeLeft(): r.pt() # Закрасить, если слева свободно
#-------------------
if r.fl(): r.lt() # Шагнуть влево, если слева свободно
#-------------------
if r.fl():
r.lt() # Шагнуть влево
r.pt() # Закрасить
r.rt() # Перейти вправо
#-------------------
while r.fl(): # Идти влево, пока слева свободно
r.lt()
"""
return not self._field[self._r][self._c].wLeft
def freeRight (self):
""" Возвращает истину, если снизу свободно (нет стены)
#-------------------
if r.freeDown(): r.pt() # Закрасить, если снизу свободно
#-------------------
if r.fd(): r.dn() # Шагнуть вверх, если снизу свободно
#-------------------
if r.fd():
r.dn() # Шагнуть снизу
r.pt() # Закрасить
r.up() # Перейти вверх
#-------------------
while r.fd(): # Идти вниз, пока снизу свободно
r.dn()
"""
return not self._field[self._r][self._c+1].wLeft
def getCoords(self):
" Возвращает координаты в виде (row,column)"
return (self._r,self._c)
    def getCoordR(self):
        " Return the number of the row the Robot is currently in"
        return self._r
    def getCoordC(self):
        " Return the number of the column the Robot is currently in"
        return self._c
def isPark (self):
" Возвращает истину, если Робот находиться на парковке"
if self._NoneisPark: self.null()
else: return self._endPoint == self.getCoords()
def color (self):
""" Возвращает цвет, которым закрашена клетка
Можно использовать для проверки, закрашена ли клетка:
#-------------------
# Закрасить, если сверху закрашено
r.up()
if r.color():
r.dn()
r.pt()
else:
r.dn()
#-------------------
if r.color() == 'red': r.rt() # Вправо, если закрашено красным
#-------------------
"""
return self._field[self._r][self._c].color
def randcolor (self):
""" Возвращает случайный цвет
#-------------------
r.pt(r.randcolor()) # Закрасить случайным цветом
#-------------------
# Закрасить соседнюю клетку тем же цветом, что и текущая
x = r.color()
r.rt()
r.pt(x)
#-------------------
"""
cr = rnd(1,255,10)
cg = rnd(1,255,10)
cb = rnd(1,255,10)
color = "#%02X%02X%02X" %(cr,cg,cb)
return str(color)
def label (self):
""" Возвращает цвет метки текущей клетки
#-------------------
if r.label() == 'red': r.pt('red') # Закрасить клетку красным, если метка красная
#-------------------
"""
if self._Nonelabel: self.null()
else: return self._field[self._r][self._c].label
def gettext(self):
""" Возвращает текст, записанный в ячейке.
#-------------------
if r.gettext() != '': r.rt() # Перейти вправо, если в ячейке есть какой-нибудь текст
#-------------------
if r.gettext() == '3': r.rt() # Перейти вправо, если в ячейке записано 3
#-------------------
n = r.gettext()
if n: r.rt(n) # Перейти вправо на количество шагов, указанное в клетке
#-------------------
"""
if self._Nonegettext: self.null()
else: return self._field[self._r][self._c].text
    def settext(self,text):
        """Write *text* into the current cell (replacing previous text).

        Example: r.settext(3)
        """
        self._field[self._r][self._c].text = text
        # NOTE(review): d and x1..y2 below are computed but never used here.
        d = 1
        x1 = self._size*(self._c)
        x2 = self._size*(self._c+1)
        y1 = self._size*(self._r)
        y2 = self._size*(self._r+1)
        # Replace the previous text item with a centered bold label.
        self._canvas.delete(self._field[self._r][self._c].v.text)
        self._field[self._r][self._c].v.text = self._canvas.create_text(self._c*self._size+self._size/2,self._r*self._size+self._size/2,text =
            self._field[self._r][self._c].text, font = ('Helvetica', self._font_size,'bold'))
        self._update()
    def _stop (self):
        # Crash handler: the Robot tried to walk into a wall. Replace its
        # blue ring with a solid red disc and record the crash in _bum.
        print ("Bum!")
        self._bum = 1
        self._canvas.delete(self._robot)
        x = self._c
        y = self._r
        self._robot = self._canvas.create_oval(
            x*self._size+2*self._d,y*self._size+2*self._d,
            x*self._size+self._size-2*self._d,y*self._size+self._size-2*self._d,
            fill = '#FF0000')
    def null (self, *args):
        """Stub installed in place of commands forbidden by the current task."""
        print('Эта команда запрещена к использованию в данной задаче. Ищите другой способ')
        return ''
    def loadmap(self,mn=m[0],variant=0):
        """ Load a map (task) into the field.
        #-------------------
        r.loadmap('task10-5')
        #-------------------
        r.lm('task10-5') # load a task by name
        #-------------------
        r.lm(r.m[5]) # load a task by number
        #-------------------
        # print the full list of task names and numbers
        for x in r.m:
            print r.m.index(x),x
        #-------------------
        """
        self._tk.title(mn)
        self._cur_map = mn
        self._NoneUpdate = False
        # (row, col) target cell; (0, 0) presumably means "no explicit target"
        # -- confirm against the goal-checking code (not visible here).
        self._endPoint = (0, 0)
        # self._NoneUpdate = True
        # Each branch below configures field size, robot start, walls
        # (wUp/wLeft flags on cells, judging by their use) and the task text.
        if mn == 'task1':
            self._nc = 7
            self._nr = 5
            self._size = 30
            self.clear()
            self._r = 3
            self._c = 2
            self._solve = ''
            self._endPoint = (3,5)
            self._task = 'Необходимо перевести Робота по лабиринту\n' \
                         ' из начального положения в конечное.\n'
            self._field[2][2].wUp = True
            self._field[2][3].wUp = True
            self._field[2][4].wUp = True
            self._field[2][5].wUp = True
            self._field[4][2].wUp = True
            self._field[4][3].wUp = True
            self._field[4][4].wUp = True
            self._field[4][5].wUp = True
            self._field[2][4].wLeft = True
            self._field[3][3].wLeft = True
            self._field[3][5].wLeft = True
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task2':
            self._nc = 16
            self._nr = 4
            self._size = 30
            self.clear()
            self._r = 3
            self._c = 1
            self._solve = ''
            self._task = 'Составьте программу рисования узора.\n'
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task3':
            self._nc = 10
            self._nr = 5
            self._size = 30
            self.clear()
            self._r = 2
            self._c = 1
            self._endPoint = (2,9)
            self._solve = ''
            self._task = 'Необходимо провести Робота вдоль коридора\n' \
                         ' из начального положения в конечное,\n' \
                         ' заглядывая в каждый боковой коридор.'
            # Corridor with alternating short/long side branches.
            for i in range(2, 9):
                self._field[2][i].wUp = True
                if i%2 == 0:
                    self._field[3][i].wUp = True
                else:
                    self._field[4][i].wUp = True
                if i < 8:
                    self._field[3][i+1].wLeft = True
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task4':
            self._nc = 8
            self._nr = 12
            self._size = 30
            self.clear()
            self._r = rnd(1, self._nr)
            self._c = rnd(1, self._nc)
            # Diagonal band of three red cells per column.
            for i in range(0, 5):
                for j in range(0, 3):
                    self._field[6+2*j-i][2+i].label = 'red'
            self._solve = ''
            self._task = 'Составьте программу закрашивания\n' \
                         ' клеток поля, отмеченных звездочкой.\n'
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task5':
            self._nc = 11
            self._nr = 10
            self._r = 1
            self._c = 1
            self._size = 30
            self.clear()
            self._solve = ''
            self._task = 'Составьте программу рисования узора.'
        ##--------------------------------------------------------------------------------------------
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task6':
            self._nc = 25
            self._nr = 25
            self._r = 1
            self._c = 1
            self._size = 20
            self.clear()
            self._solve = ''
            self._task = 'Составьте программу рисования фигуры в виде буквы "Т".\n' \
                         ' Вертикальные и горизонтальные размеры пользователь вводит\n' \
                         ' с клавиатуры. Ввод данных можно осуществлять любым способом.\n'
        ##-------------------------------------------------------------------------------------------------------
        elif mn == 'task7':
            self._nc = 16
            self._nr = 11
            self._size = 25
            self.clear()
            self._r = rnd(1, self._nr)
            self._c = rnd(1, self._nc)
            self._field[3][2].wUp = True
            self._field[2][9].wUp = True
            self._field[3][12].wUp = True
            self._field[6][12].wUp = True
            self._field[7][3].wUp = True
            self._field[7][9].wUp = True
            self._field[8][6].wUp = True
            self._field[9][2].wUp = True
            self._field[9][11].wUp = True
            # The horizontal corridor itself (two wall rows, four cells wide).
            for i in range(0, 4):
                self._field[4][5+i].wUp = True
                self._field[5][5+i].wUp = True
            self._solve = ''
            self._task = 'Где-то в поле Робота находится горизонтальный коридор шириной в одну клетку\n' \
                         ' неизвестной длины. Робот из верхнего левого угла поля должен дойти до\n' \
                         ' коридора и закрасить клетки внутри него, как указано в задании. По полю\n' \
                         ' Робота в произвольном порядке располагаются стены, но расстояние \n' \
                         'между ними больше одной клетки.\n'
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task8':
            self._nc = 16
            self._nr = 11
            self._size = 25
            self.clear()
            self._r = rnd(1, self._nr)
            self._c = rnd(1, self._nc)
            # Vertical wall in column 6 with a one-cell gap at row 4.
            self._field[2][6].wLeft = True
            self._field[3][6].wLeft = True
            self._field[5][6].wLeft = True
            self._field[6][6].wLeft = True
            self._field[7][6].wLeft = True
            self._field[8][6].wLeft = True
            self._solve = ''
            self._task = 'Где-то в поле Робота находится вертикальная стена с отверстием в одну клетку,\n' \
                         ' размеры которой неизвестны. Робот из произвольной клетки должен дойти до\n' \
                         ' стены и закрасить клетки как показано в задании.\n'
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task9':
            self._nc = 20
            self._nr = 20
            self._size = 25
            self.clear()
            self._r = rnd(1, self._nr)
            self._c = rnd(1, self._nc)
            # Random green rectangle, clamped to stay inside the field.
            c = rnd(2,16)
            r = rnd(2,16)
            w = rnd(3,8)
            h = rnd(3,8)
            if c + w >= self._nc: w = self._nc-c
            # NOTE(review): the next line compares rows against self._nc (the
            # column count); harmless while nc == nr == 20, but likely meant _nr.
            if r + h >= self._nc: h = self._nr-r
            for rcount in range(0,h):
                for ccount in range(0,w):
                    self._field[r + rcount][c+ccount].label = 'green'
            self._solve = ''
            self._task = 'На поле находится квадрат из закрашенных клеток. Вычислить и вывести на экран площадь квадрата.\n'
        ##--------------------------------------------------------------------------------------------
        elif mn == 'task10':
            # Long hand-built corridor with side exits (see task text below).
            self._nc = 15
            self._nr = 11
            self._size = 30
            self.clear()
            self._r = 2
            self._c = 1
            self._field[2][1].wUp = True
            self._field[2][2].wUp = True
            self._field[2][4].wUp = True
            self._field[2][5].wUp = True
            self._field[2][6].wUp = True
            self._field[2][8].wUp = True
            self._field[2][9].wUp = True
            self._field[2][11].wUp = True
            self._field[2][12].wUp = True
            self._field[2][13].wLeft = True
            self._field[3][1].wUp = True
            self._field[3][2].wUp = True
            self._field[3][3].wUp = True
            self._field[3][4].wUp = True
            self._field[3][6].wUp = True
            self._field[3][7].wUp = True
            self._field[3][8].wUp = True
            self._field[3][10].wUp = True
            self._field[3][11].wUp = True
            self._field[3][12].wLeft = True
            self._field[4][3].wLeft = True
            self._field[4][3].wUp = True
            self._field[4][4].wUp = True
            self._field[4][5].wUp = True
            self._field[4][6].wUp = True
            self._field[4][8].wUp = True
            self._field[4][9].wUp = True
            self._field[4][10].wUp = True
            self._field[4][11].wUp = True
            self._field[4][13].wLeft = True
            self._field[5][3].wLeft = True
            self._field[5][4].wLeft = True
            self._field[5][4].wUp = True
            self._field[5][6].wUp = True
            self._field[5][7].wUp = True
            self._field[5][8].wUp = True
            self._field[5][10].wUp = True
            self._field[5][11].wUp = True
            self._field[5][12].wUp = True
            self._field[6][3].wLeft = True
            self._field[6][4].wUp = True
            self._field[6][5].wLeft = True
            self._field[7][3].wUp = True
            self._field[7][4].wLeft = True
            self._field[7][6].wUp = True
            self._field[7][7].wLeft = True
            self._field[8][4].wUp = True
            self._field[8][5].wUp = True
            self._field[8][6].wLeft = True
            self._field[8][7].wUp = True
            self._field[8][8].wLeft = True
            self._field[9][6].wUp = True
            self._field[9][7].wLeft = True
            self._field[9][8].wUp = True
            self._field[9][9].wUp = True
            self._field[9][10].wLeft = True
            self._field[10][7].wUp = True
            self._field[10][9].wLeft = True
            self._field[10][10].wLeft = True
            self._endPoint = (10,1)
            self._solve = """
            """
            self._task = 'Необходимо провести Робота по коридору шириной в одну клетку из начального положения до конца коридора, \n' \
                         'закрашивая при этом все клетки коридора, которые имеют выход. Выходы размером в одну клетку располагаются \n' \
                         'произвольно по всей длине коридора. Коридор заканчивается тупиком. Коридор имеет два горизонтальных и \n' \
                         'диагональный участки. Пример коридора показан на рисунке.\n'
        elif mn == 'task11':
            self._nc = 15
            self._nr = 11
            self._size = 30
            self.clear()
            self._r = rnd(1, self._nr)
            self._c = rnd(1, self._nc)
            # Fill every cell with a random digit 0..9 (stored as text).
            for i in range(1,self._nr):
                for j in range(1,self._nc):
                    self._field[i][j].text = str(rnd(0, 10))
            self._task = 'На поле 10х15 каждой в каждой клетке записана цифра (от 0 до 9).\n Закрасить квадрат 2х2 с наименьшей суммой значений клеток.'
        elif mn == 'task12':
            self._nc = 15
            self._nr = 6
            self._size = 30
            self.clear()
            self._r = 2
            self._c = 13
            self._field[2][2].wUp = True
            self._field[2][3].wLeft = True
            self._field[3][3].wLeft = True
            self._field[4][3].wLeft = True
            self._field[5][3].wUp = True
            self._field[5][4].wUp = True
            self._field[4][5].wLeft = True
            self._field[3][5].wLeft = True
            self._field[2][5].wLeft = True
            self._field[2][5].wUp = True
            self._field[2][6].wLeft = True
            self._field[3][6].wLeft = True
            self._field[4][6].wLeft = True
            self._field[5][6].wUp = True
            self._field[5][7].wUp = True
            self._field[5][8].wUp = True
            self._field[4][9].wLeft = True
            self._field[3][9].wLeft = True
            self._field[2][9].wLeft = True
            self._field[2][9].wUp = True
            self._field[2][10].wUp = True
            self._field[2][11].wLeft = True
            self._field[3][11].wLeft = True
            self._field[4][11].wLeft = True
            self._field[5][11].wUp = True
            self._field[4][12].wLeft = True
            self._field[3][12].wLeft = True
            self._field[2][12].wLeft = True
            self._field[2][12].wUp = True
            self._field[2][13].wUp = True
            self._task = 'Робот движется вдоль стены, профиль которой показан на рисунке,\n' \
                         ' от начального положения до конца стены. Необходимо закрасить\n' \
                         ' все внутренние углы стены, как показано на примере. Размеры стены\n могут быть произвольны.'
        elif mn == 'task13':
            self._nc = 20
            self._nr = 20
            self._size = 25
            self.clear()
            # NOTE(review): under Python 3 these '/' divisions yield floats;
            # rnd()/range presumably expect ints -- confirm (use // ?).
            self._r = rnd(self._nr/2, self._nr)
            self._c = rnd(self._nc/2, self._nc)
            col = rnd(2, self._nc/2)
            row = rnd(4, self._nr/2)
            height = rnd(4, self._nr-4)
            if row + height >= self._nr:
                height = self._nr - row-1
            for i in range(row, row+height):
                self._field[i][col].wLeft = True
            # (task13 assigns no _task/_solve text here.)
        ##--------------------------------------------------------------------------------------------
        ##--------------------------------------------------------------------------------------------
        # сделать прямое управление с демонстрацией датчиков и возможностей
        # при запуске робота создавать task.py и справочник :)
        # сделать робота без клеток !!!
        ##--------------------------------------------------------------------------------------------
        ##--------------------------------------------------------------------------------------------
        else:
            print(mn)
            self._task = "Нет задачи с таким номером"
        self._test = '-'
        # Resize the canvas to the configured field and (re)draw the robot.
        self._canvas.config(
            width=(self._size*(self._nc+1)),
            height=(self._size*(self._nr+1)))
        # NOTE(review): the robot oval is always drawn at cell (1,1) here,
        # regardless of self._r/self._c; presumably repositioned later -- confirm.
        x = y = 1
        # NOTE(review): d is immediately overwritten below; self._d is unused here.
        d = self._d
        d = 6
        self._robot = self._canvas.create_oval(
            x*self._size+d,y*self._size+d,
            x*self._size+self._size-d,y*self._size+self._size-d,
            outline = '#4400FF', width = 3)
        self._paintMap()
lm = loadmap
lt = left
rt = right
dn = down
pt = paint
sw = setWall
wu = wallUp
wd = wallDown
wl = wallLeft
wr = wallRight
fu = freeUp
fd = freeDown
fl = freeLeft
fr = freeRight
cl = color
| IlinArkady/IlinArkady | practice4/robot.py | Python | gpl-3.0 | 48,182 |
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archives a set of files.
"""
import ast
import optparse
import os
import sys
import zipfile
def DoZip(inputs, output, base_dir):
  """Create a zip archive at |output| containing every path in |inputs|.

  Entry names are the input paths made relative to |base_dir|.
  """
  archive = zipfile.ZipFile(output, 'w')
  try:
    for path in inputs:
      arcname = os.path.relpath(path, base_dir)
      archive.write(path, arcname)
  finally:
    archive.close()
def main():
  """Parse command-line flags and archive the requested files."""
  parser = optparse.OptionParser()
  parser.add_option('--inputs', help='List of files to archive.')
  parser.add_option('--output', help='Path to output archive.')
  parser.add_option('--base-dir',
                    default='.',
                    help='If provided, the paths in the archive will be '
                    'relative to this directory')
  options, _ = parser.parse_args()

  # --inputs is passed as a Python-literal list of paths.
  DoZip(ast.literal_eval(options.inputs), options.output, options.base_dir)


if __name__ == '__main__':
  sys.exit(main())
| AndroidOpenDevelopment/android_external_chromium_org | build/android/gn/zip.py | Python | bsd-3-clause | 1,012 |
# Generated regression driver: exercise sklearn2sql code generation for an
# SGDClassifier on the FourClass_10 dataset, targeting PostgreSQL.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SGDClassifier" , "FourClass_10" , "postgresql")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_10/ws_FourClass_10_SGDClassifier_postgresql_code_gen.py | Python | bsd-3-clause | 145 |
from devito.ir.iet.nodes import * # noqa
from devito.ir.iet.visitors import * # noqa
from devito.ir.iet.utils import * # noqa
from devito.ir.iet.efunc import * # noqa
from devito.ir.iet.algorithms import * # noqa
| opesci/devito | devito/ir/iet/__init__.py | Python | mit | 218 |
#!/usr/bin/env python
# hashlib is only available in python >= 2.5
try:
    import hashlib
    _digest = hashlib.sha1
except ImportError:
    # Pre-2.5 interpreters: the deprecated sha module offers the same digest.
    import sha
    _digest = sha.new
import sys
import os
# GITPYTHONLIB points at git's bundled python support modules.
sys.path.insert(0, os.getenv("GITPYTHONLIB","."))
from git_remote_helpers.util import die, debug, warn
from git_remote_helpers.git.repo import GitRepo
from git_remote_helpers.git.exporter import GitExporter
from git_remote_helpers.git.importer import GitImporter
from git_remote_helpers.git.non_local import NonLocalGit
def get_repo(alias, url):
    """Return a GitRepo for *url*, fully wired for remote-helper usage."""
    repo = GitRepo(url)
    repo.get_revs()
    repo.get_head()

    # A digest of the repo path namespaces the per-repo marks directory.
    digest = _digest()
    digest.update(repo.path)
    repo.hash = digest.hexdigest()

    repo.get_base_path = lambda base: os.path.join(
        base, 'info', 'fast-import', repo.hash)

    repo.prefix = 'refs/testgit/%s/' % alias
    debug("prefix: '%s'", repo.prefix)

    repo.gitdir = os.environ["GIT_DIR"]
    repo.alias = alias

    repo.exporter = GitExporter(repo)
    repo.importer = GitImporter(repo)
    repo.non_local = NonLocalGit(repo)

    return repo
def local_repo(repo, path):
    """Return a GitRepo at *path* sharing *repo*'s helper configuration."""
    local = GitRepo(path)
    local.non_local = None
    # Inherit everything that identifies this remote-helper session.
    for attr in ('gitdir', 'alias', 'prefix', 'hash', 'get_base_path'):
        setattr(local, attr, getattr(repo, attr))
    local.exporter = GitExporter(local)
    local.importer = GitImporter(local)
    return local
def do_capabilities(repo, args):
    """Prints the supported capabilities.

    Advertises import/export support, the helper's refspec, and the marks
    files that keep fast-import/export state between runs.
    """
    print "import"
    print "export"
    print "refspec refs/heads/*:%s*" % repo.prefix
    # Marks live under <gitdir>/info/fast-import/<repo.hash>/.
    dirname = repo.get_base_path(repo.gitdir)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    path = os.path.join(dirname, 'testgit.marks')
    print "*export-marks %s" % path
    # Only ask git to reuse marks that actually exist on disk.
    if os.path.exists(path):
        print "*import-marks %s" % path
    print # end capabilities
def do_list(repo, args):
    """Lists all known references.

    Bug: This will always set the remote head to master for non-local
    repositories, since we have no way of determining what the remote
    head is at clone time.
    """
    for ref in repo.revs:
        debug("? refs/heads/%s", ref)
        print "? refs/heads/%s" % ref
    # Advertise HEAD; fall back to master when unknown (see Bug note above).
    if repo.head:
        debug("@refs/heads/%s HEAD" % repo.head)
        print "@refs/heads/%s HEAD" % repo.head
    else:
        debug("@refs/heads/master HEAD")
        print "@refs/heads/master HEAD"
    print # end list
def update_local_repo(repo):
    """Return a local mirror of *repo*, cloning/updating first when remote."""
    if repo.local:
        return repo
    # Clone (idempotent) and fast-forward the mirror, then wrap it as a
    # local GitRepo sharing this session's configuration.
    checkout_path = repo.non_local.clone(repo.gitdir)
    repo.non_local.update(repo.gitdir)
    return local_repo(repo, checkout_path)
def do_import(repo, args):
    """Exports a fast-import stream from testgit for git to import.

    git sends one "import <ref>" line per ref, terminated by a blank line;
    the first ref arrives in *args*, the rest are read from stdin here.
    """
    if len(args) != 1:
        die("Import needs exactly one ref")
    if not repo.gitdir:
        die("Need gitdir to import")
    ref = args[0]
    refs = [ref]
    while True:
        line = sys.stdin.readline()
        if line == '\n':
            break
        if not line.startswith('import '):
            die("Expected import line.")
        # strip of leading 'import '
        ref = line[7:].strip()
        refs.append(ref)
    repo = update_local_repo(repo)
    repo.exporter.export_repo(repo.gitdir, refs)
    print "done"
def do_export(repo, args):
    """Imports a fast-import stream from git to testgit.

    After importing, pushes the result upstream when the url is remote and
    reports every updated ref back to git ("ok <ref>" lines).
    """
    if not repo.gitdir:
        die("Need gitdir to export")
    update_local_repo(repo)
    changed = repo.importer.do_import(repo.gitdir)
    if not repo.local:
        repo.non_local.push(repo.gitdir)
    for ref in changed:
        print "ok %s" % ref
    print
# Dispatch table: remote-helper command name (read from stdin) -> handler.
COMMANDS = {
    'capabilities': do_capabilities,
    'list': do_list,
    'import': do_import,
    'export': do_export,
}
def sanitize(value):
    """Strip the 'testgit::' transport prefix from *value*, if present."""
    prefix = 'testgit::'
    return value[len(prefix):] if value.startswith(prefix) else value
def read_one_line(repo):
    """Read one helper command from stdin and dispatch it.

    Returns False on EOF or on the blank line that signals shutdown,
    True after successfully dispatching a command.
    """
    line = sys.stdin.readline()
    if not line:
        warn("Unexpected EOF")
        return False

    words = line.strip().split()
    if not words:
        # Blank line means we're about to quit
        return False

    cmd, args = words[0], words[1:]
    debug("Got command '%s' with args '%s'", cmd, ' '.join(args))
    if cmd not in COMMANDS:
        die("Unknown command, %s", cmd)

    COMMANDS[cmd](repo, args)
    sys.stdout.flush()
    return True
def main(args):
    """Starts a new remote helper for the specified repository.

    *args* is sys.argv: [script, alias, url].
    """
    if len(args) != 3:
        die("Expecting exactly three arguments.")
        # NOTE(review): die() presumably terminates the process, making this
        # exit unreachable -- confirm in git_remote_helpers.util.
        sys.exit(1)
    if os.getenv("GIT_DEBUG_TESTGIT"):
        import git_remote_helpers.util
        git_remote_helpers.util.DEBUG = True
    alias = sanitize(args[1])
    url = sanitize(args[2])
    # Non-alphanumeric aliases (e.g. a url used directly) get a safe stand-in.
    if not alias.isalnum():
        warn("non-alnum alias '%s'", alias)
        alias = "tmp"
    args[1] = alias
    args[2] = url
    repo = get_repo(alias, url)
    debug("Got arguments %s", args[1:])
    # Serve commands until a blank line / EOF tells us to stop.
    more = True
    while (more):
        more = read_one_line(repo)
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| kevingessner/git | git-remote-testgit.py | Python | gpl-2.0 | 5,443 |
# Copyright (c) 2014 Benjamin Massey
# Dark Souls II stats calculator
# This program is available under the MIT License.
# Please see the file COPYING in this distribution
# for license information.
# Incase you got this file from elsewhere, the github
# page for it is: https://github.com/BenjaminMassey/Dark-Souls-2-Stats
# Contact me at [email protected]
# Interactive Dark Souls II character-stat calculator (console script).


def _ask(name):
    """Prompt the player for one stat and return it as an int."""
    return int(input("Please enter your " + name + ": "))


def _health_bonus(stat):
    """HP contributed by one levelled stat; every stat shares this curve."""
    if stat <= 20:
        return 2 * stat
    if stat < 51:
        return 40 + stat
    return 70


# Level (read for completeness; not used by any calculation below)
level = _ask("soul level")
LevelHealth = 0

# Vigor
# The health calculations are very strange, so the totals below are estimates.
vigor = _ask("vigor")

# Endurance: stamina grows by 2/point up to 20, then 1/point.
endurance = _ask("endurance")
stamina = 80 + (2 * endurance) if endurance < 21 else 80 + 40 + (endurance - 20)
print("Your character should have " + str(stamina) + " total stamina.")
LevelHealth += _health_bonus(endurance)

# Vitality: carry weight, current load percentage, and HP bonus.
vitality = _ask("vitality")
CarryWeight = (1.5 * vitality) + 38.5
if vitality > 29:
    print("Your vitality is too high to perfectly calculate, however it may be around " + str(CarryWeight) + " pounds.")
else:
    print("You can hold: " + str(CarryWeight) + " pounds.")
carrying = float(input("How many pounds are you carrying?: "))
CarryPercent = round((100 * carrying / CarryWeight), 1)
print("Since you are carrying about " + str(carrying) + " pounds, you should have around " + str(CarryPercent) + "% carry capacity.")
LevelHealth += _health_bonus(vitality)

# Attunement: slot count comes from fixed breakpoints.
# (Message text, including the original "attunemnet" typos, kept verbatim.)
attunement = _ask("attunement")
_SLOT_MESSAGES = (
    (10, "You should have no attunement slots"),
    (14, "You should have one attunement slot"),
    (16, "You should have two attunement slots"),
    (20, "You should have three attunemnet slots"),
    (25, "You should have four attunement slots"),
    (30, "You should have five attunemnet slots"),
    (40, "You should have six attunement slots"),
    (50, "You should have seven attunemnet slots"),
    (60, "You should have eight attunement slots"),
    (75, "You should have nine attunement slots"),
)
for _limit, _message in _SLOT_MESSAGES:
    if attunement < _limit:
        print(_message)
        break
else:
    print("You have the maximum attunement slots, 10")
LevelHealth += _health_bonus(attunement)

# The remaining stats only contribute to the HP pool.
strength = _ask("strength")
LevelHealth += _health_bonus(strength)
dexterity = _ask("dexterity")
LevelHealth += _health_bonus(dexterity)
adaptability = _ask("adaptability")
LevelHealth += _health_bonus(adaptability)
intelligence = _ask("intelligence")
LevelHealth += _health_bonus(intelligence)
faith = _ask("faith")
LevelHealth += _health_bonus(faith)

# Final health: vigor has three diminishing-return bands.
lowvigor = (30 * vigor) + 500 + LevelHealth
if vigor < 20:
    print("Your character should have around " + str(lowvigor) + " HP.")
midvigor = (20 * (vigor - 20)) + 500 + 600 + LevelHealth
if vigor >= 20 and vigor < 50:
    print("Your character should have around " + str(midvigor) + " HP.")
highvigor = (5 * (vigor - 50)) + 500 + 600 + 600 + LevelHealth
if vigor >= 50 and vigor < 100:
    print("Your character should have around " + str(highvigor) + " HP.")
if vigor >= 100:
    print("There must have been an error, since it appears that you have an impossible amount of vigor. Please try again.")
| BenjaminMassey/Dark-Souls-2-Stats | DS2Stats.py | Python | mit | 4,783 |
# -*- coding: utf8 -*-
import sys, os
sys.path.append(os.path.abspath('.'))
import re
from operator import attrgetter
import difflib
# Pylons model init sequence
import pylons.test
import logging
from quanthistling.config.environment import load_environment
from quanthistling.model.meta import Session, metadata
from quanthistling import model
import quanthistling.dictdata.books
from paste.deploy import appconfig
import functions
def annotate_crossrefs(entry):
    """Re-annotate the 'Vea ...' cross-reference in *entry*, if present."""
    # Drop stale crossreference annotations before re-detecting.
    for old in [a for a in entry.annotations if a.value == 'crossreference']:
        Session.delete(old)
    match = re.search(u"Vea ([^.]*)(?:\.|$)", entry.fullentry)
    if match:
        entry.append_annotation(
            match.start(1), match.end(1),
            u'crossreference', u'dictinterpretation')
def annotate_head(entry):
    """Split the bold head of *entry* on commas and insert one head per part.

    Returns the list of inserted heads.
    """
    # Remove stale head-related annotations first.
    stale_values = ('head', 'iso-639-3', 'doculect')
    for old in [a for a in entry.annotations if a.value in stale_values]:
        Session.delete(old)
    head_end = functions.get_last_bold_pos_at_start(entry)
    head_text = entry.fullentry[0:head_end]
    heads = []
    part_start = 0
    # Each ", "/"," separator (and the string end) terminates one head.
    for sep in re.finditer(r'(?:, ?|$)', head_text):
        heads.append(functions.insert_head(entry, part_start, sep.start(0)))
        part_start = sep.end(0)
    return heads
def annotate_pos(entry):
    """Annotate a parenthesised part-of-speech found right after the head."""
    # Clear any previous pos annotations.
    for old in [a for a in entry.annotations if a.value == 'pos']:
        Session.delete(old)
    head_end = functions.get_last_bold_pos_at_start(entry)
    # A "(...)" group counts as pos only when it starts at most one
    # character after the head; annotate its interior (without parentheses).
    bracket = re.compile(u"\(.*?\)").search(entry.fullentry, head_end)
    if bracket and bracket.start(0) < (head_end + 2):
        entry.append_annotation(
            bracket.start(0) + 1, bracket.end(0) - 1,
            u'pos', u'dictinterpretation')
def annotate_translations(entry):
    """Split the translation section on ',' / ';' and annotate each piece.

    The section starts after the head/pos, ends at the first bold span
    (start of the examples), and is skipped for 'Vea' cross-reference entries.
    """
    # delete translation annotations left over from a previous run
    trans_annotations = [ a for a in entry.annotations if a.value=='translation']
    for a in trans_annotations:
        Session.delete(a)
    translations_start = functions.get_pos_or_head_end(entry) + 1
    translations_end = len(entry.fullentry)
    # Pure cross-reference entries carry no translations.
    if re.match(u"\.? ?Vea ", entry.fullentry[translations_start:]):
        return
    first_bold_after_pos = functions.get_first_bold_start_in_range(entry, translations_start, translations_end)
    if first_bold_after_pos != -1:
        translations_end = first_bold_after_pos
    start = translations_start
    for match in re.finditer(u"(?:[,;] ?|$)", entry.fullentry[translations_start:translations_end]):
        mybreak = False
        # are we in a bracket? separators inside "(...)" must not split
        for m in re.finditer(r'\(.*?\)', entry.fullentry[translations_start:translations_end]):
            if match.start(0) >= m.start(0) and match.end(0) <= m.end(0):
                mybreak = True
        if not mybreak:
            # Map the separator back to absolute offsets in fullentry.
            end = match.start(0) + translations_start
            subsubstr = entry.fullentry[start:end]
            # Skip whitespace-only fragments between consecutive separators.
            if not(re.match(r"\s*$", subsubstr)):
                functions.insert_translation(entry, start, end)
            start = match.end(0) + translations_start
def annotate_examples(entry):
    """Annotate example pairs: bold runs become sources, the gaps targets.

    Adjacent bold annotations after the head/pos are merged into one
    example-src; the text up to the next bold span (or the entry end)
    becomes the matching example-tgt.
    """
    # delete example annotations
    ex_annotations = [ a for a in entry.annotations if a.value=='example-src' or a.value=='example-tgt']
    for a in ex_annotations:
        Session.delete(a)
    after_head_or_pos = functions.get_pos_or_head_end(entry) + 1
    # Cross-reference entries carry no examples.
    if re.match(u"\.? ?Vea ", entry.fullentry[after_head_or_pos:]):
        return
    first_bold_after_pos = functions.get_first_bold_start_in_range(entry, after_head_or_pos, len(entry.fullentry))
    if first_bold_after_pos == -1:
        return
    sorted_annotations = [ a for a in entry.annotations if a.value=='bold' and a.start > after_head_or_pos ]
    sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
    i = 0
    start_annotation = i
    end_annotation = i
    while i < len(sorted_annotations):
        # concat successive annotations: bold spans that touch (or sit one
        # character apart) belong to the same example source
        # NOTE(review): 'next' shadows the builtin of the same name.
        next = False
        if ( i < (len(sorted_annotations))-1 ):
            if ((sorted_annotations[i].end == sorted_annotations[i+1].start) or (sorted_annotations[i].end == (sorted_annotations[i+1].start-1))):
                end_annotation = i + 1
                next = True
        if not next:
            # is there another bold annotation after this one? the target
            # text runs up to it; otherwise up to the end of the entry
            if end_annotation < (len(sorted_annotations)-1):
                entry.append_annotation(sorted_annotations[start_annotation].start, sorted_annotations[end_annotation].end, u'example-src', u'dictinterpretation')
                entry.append_annotation(sorted_annotations[end_annotation].end, sorted_annotations[end_annotation+1].start, u'example-tgt', u'dictinterpretation')
            else:
                entry.append_annotation(sorted_annotations[start_annotation].start, sorted_annotations[end_annotation].end, u'example-src', u'dictinterpretation')
                entry.append_annotation(sorted_annotations[end_annotation].end, len(entry.fullentry), u'example-tgt', u'dictinterpretation')
            start_annotation = i + 1
            end_annotation = i + 1
        i = i + 1
def main(argv):
    """Annotate every dictionary entry of the shell1987 source.

    Usage: annotations_for_shell1987.py <ini_file>
    """
    bibtex_key = u"shell1987"
    if len(argv) < 2:
        print "call: annotations_for%s.py ini_file" % bibtex_key
        exit(1)
    ini_file = argv[1]
    conf = appconfig('config:' + ini_file, relative_to='.')
    if not pylons.test.pylonsapp:
        load_environment(conf.global_conf, conf.local_conf)
    # Create the tables if they don't already exist
    metadata.create_all(bind=Session.bind)
    # All dictionary datasets belonging to the shell1987 book.
    dictdatas = Session.query(model.Dictdata).join(
        (model.Book, model.Dictdata.book_id==model.Book.id)
    ).filter(model.Book.bibtex_key==bibtex_key).all()
    for dictdata in dictdatas:
        entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all()
        #entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=40,pos_on_page=9).all()
        #entries = []
        startletters = set()
        for e in entries:
            heads = annotate_head(e)
            # Collect the initial letters of main-entry heads for the index.
            if not e.is_subentry:
                for h in heads:
                    if len(h) > 0:
                        startletters.add(h[0].lower())
            annotate_pos(e)
            annotate_translations(e)
            annotate_examples(e)
            annotate_crossrefs(e)
        dictdata.startletters = unicode(repr(sorted(list(startletters))))
        Session.commit()
if __name__ == "__main__":
    main(sys.argv)
from flask import render_template, redirect, request, url_for, flash, jsonify
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from ..extensions import db
from ..models import User, Location
from .forms import LoginForm, RegistrationForm
from autocomplete.views import autocomplete_view
from sqlalchemy import func
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; the login field accepts e-mail or username."""
    form = LoginForm()
    if form.validate_on_submit():
        identifier = func.lower(form.email.data)
        # Look up by e-mail first, then fall back to username.
        user = (User.query.filter(func.lower(User.email) == identifier).first()
                or User.query.filter(func.lower(User.username) == identifier).first())
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            flash('Hey, {0}!'.format(user.username))
            # NOTE(review): 'next' is taken from the query string unvalidated --
            # possible open redirect; consider allowing only relative URLs.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the index page."""
    logout_user()
    flash('You have been logged out.')
    target = url_for('main.index')
    return redirect(target)
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account.

    Accounts are auto-confirmed; the e-mail confirmation flow below is
    intentionally disabled and kept commented out for future use.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User()
        user.email = form.email.data
        user.username = form.username.data
        user.location = form.location.data
        user.email_notifications = form.email_notifications.data
        user.password = form.password.data
        # Skip e-mail verification for now.
        user.confirmed = True
        db.session.add(user)
        db.session.commit()
        #token = user.generate_confirmation_token()
        #send_email(user.email, 'Confirm Your Account',
        #           'auth/email/confirm', user=user, token=token)
        #flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
def find_loc(query):
    """Return up to five locations whose name contains *query*.

    Each hit is shaped for the autocomplete widget:
    {'id': ..., 'title': ..., 'data': []}.
    """
    pattern = '%' + str(query) + '%'
    matches = Location.query.filter(Location.name.like(pattern)).limit(5)
    return [{'id': m.id, 'title': m.name, 'data': []} for m in matches]
@auth.route('/autocomplete')
def autocomplete():
    # Thin endpoint: delegate the location lookup to the shared
    # autocomplete view, rendering with 'autocomplete.html'.
    return autocomplete_view(find_loc, 'autocomplete.html')
| foodsnag/foodsnag-web | app/auth/views.py | Python | mit | 2,264 |
"""empty message
Revision ID: 177b55430b61
Revises: None
Create Date: 2015-01-29 22:29:18.963249
"""
# revision identifiers, used by Alembic.
revision = '177b55430b61'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the 'stream' table (Alembic autogenerated migration)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('stream',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('type', sa.String(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the 'stream' table, reverting upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('stream')
    ### end Alembic commands ###
| WatchPeopleCode/WatchPeopleCode | migrations/versions/177b55430b61_.py | Python | mit | 679 |
import tft_ir_api as IR
# FPTuner benchmark: 2x2 Jacobi iteration unrolled twice, built as an
# IR expression tree and handed to the tuner. Lines marked '#j' are the
# original (non-unrolled) pseudo-code kept for reference.
n = 2
unrolls = 2
low = 1.0
high = 10.0
# A: n x n matrix of tuner variables in [low, high], group id 0.
A = list()
for j in range(n):
    row = list()
    for i in range(n):
        row.append(IR.RealVE("a{}{}".format(i,j), 0, low, high))
    A.append(row)
# b: right-hand-side vector, group id 1.
b = list()
for i in range(n):
    b.append(IR.RealVE("b{}".format(i), 1, low, high))
# x: initial guess of all ones.
x = list()
for i in range(n):
    x.append(IR.FConst(1.0))
# g numbers each BE (binary-expression) node uniquely.
g=2
#j k = 0
#j while convergence not reached: # while loop
for k in range(unrolls): # replacement for while loop
    for i in range(n): # i loop
        sigma = IR.FConst(0.0)
        for j in range(n): # j loop
            if j != i:
                sigma = IR.BE("+", g, sigma, IR.BE("*", g, A[i][j], x[j]))
                g += 1
        # end j loop
        # NOTE(review): classic Jacobi divides by the diagonal A[i][i]; here
        # the divisor is A[i][j] with j left over from the inner loop
        # (== n-1). Confirm against the upstream benchmark before changing.
        x[i] = IR.BE("/", g, IR.BE("-", g, b[i], sigma), A[i][j])
        g += 1
    # end i loop
    #j check convergence
    #j k = k+1
# end while loop
print(x[0])
rs = x[0]
IR.TuneExpr(rs)
| soarlab/FPTuner | examples/micro/jacobi-n2u2.py | Python | mit | 916 |
from django.conf import settings
from django.template.loader import render_to_string
import mailgun
def send_unsubscribe_confirmation(signup):
    """Render and send the unsubscribe-confirmation e-mail for *signup*."""
    template_context = {
        'email': signup['email'],
        'key': signup['key'],
        'mooc_title': settings.MOOC_TITLE,
        'mooc_domain': settings.MOOC_DOMAIN
    }

    def _render(template):
        return render_to_string(template, template_context).strip()

    # Subject, plain-text and HTML bodies are rendered in that order.
    mailgun.api.send_email(
        signup['email'],
        settings.DEFAULT_FROM_EMAIL,
        _render('unsubscribe/emails/confirm-subject.txt'),
        _render('unsubscribe/emails/confirm.txt'),
        _render('unsubscribe/emails/confirm.html'),
        tags=['user_link']
    )
| p2pu/mechanical-mooc | unsubscribe/emails.py | Python | mit | 716 |
__all__ = ["ThreadWrap", "ExecuteGetResponse"]
import threading
import subprocess
import time
import Queue
import sys
import re
from browser.status import *
from log import VLOG
""" wrapper of basic thread where commands enqueued should run on same thread with
same session id for avoiding race condition.
since it uses condition var to sync the timeline between threads, it's thread safety.
ThreadWrap is truly run by ThreadWrap.start(), and then it in sequence runs its
task(aka, command), it does not quit until receive quit command wrapped in task.
Finally you can call its you can dynamically append new task by ThreadWrap.PostTask(cmd) """
class ThreadWrap(threading.Thread):
    """Per-session worker thread that executes queued commands sequentially.

    All commands for one session id run on this single thread to avoid race
    conditions; a shared condition variable synchronizes socket access with
    the creating thread (see run()).
    """

    def __init__(self, condition, session_id, session):
        threading.Thread.__init__(self, name=session_id)
        # use to sync between main thread and itself
        self.condition = condition
        # use to control its own state
        self.queue = Queue.Queue()
        self.session = session
        # tracing shared vars by any command
        self.status = Status(kOk)
        self.value = {}
        # delay enough time to make sure its parents thread acquire the condition first, so
        # that parent thread can add itself to notify table
        self.is_ready = False

    def run(self):
        """Main loop: pop and run commands until a quit command arrives."""
        while True:
            if not self.is_ready:
                # NOTE(review): busy-wait until the creator flips is_ready;
                # this burns CPU — a threading.Event would be cheaper. Confirm intent.
                continue
            if self.queue:
                # NOTE(review): a Queue.Queue instance is always truthy, so this
                # branch is taken unconditionally and get() blocks when empty;
                # the sleep() branch below is effectively unreachable.
                cmd = self.queue.get()
                self.status = cmd.Run()
                if hasattr(cmd, 'is_send_func_'):
                    # since in low level, switching between threads makes socket reset to NoneType, we
                    # use a condition var to sync between threads to make safety of socket
                    self.condition.acquire()
                    self.condition.notify()
                    self.condition.release()
                if hasattr(cmd, 'is_quit_func_'):
                    # is_quit_func_ is a dynamically attr where it is easily glued to cmd by
                    # cmd.is_quit_func_ = True, when run() notice the attr of cmd, the thread
                    # wrapper is finaly quit
                    return
            else:
                # block itself until waked by self.PostTask()
                # release the ownership of cpu
                time.sleep(0.05)

    def PostTask(self, cmd):
        """Enqueue *cmd* for execution on this thread (thread-safe)."""
        self.queue.put(cmd)
        return
""" since python' subprocess module does not support manual timeout setting.
This class binds the wanted commands and post the task to another thread which
can be under control in timeout setting calling thread.join(timeout) """
class ExecuteGetResponse(object):
    """Run a shell command with a manual timeout via a helper thread.

    The command is executed immediately on construction; results are read
    afterwards through GetResponse().
    """

    def __init__(self, cmd="", timeout=3):
        self.cmd = cmd            # shell command line to execute
        self.timeout = timeout    # seconds to wait before terminating
        self.process = None
        self.stdout = ""
        self.stderr = ""
        self.is_timeout = False
        # run the command right away; caller only needs GetResponse()
        self.Run()

    def Task(self):
        """Worker body: spawn the command and capture stdout/stderr."""
        self.process = subprocess.Popen(self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (self.stdout, self.stderr) = self.process.communicate()
        return

    def Run(self):
        """Run Task() in a thread; terminate the child if the join times out."""
        thread = threading.Thread(target=self.Task)
        thread.start()
        thread.join(self.timeout)
        if thread.is_alive():
            # join timed out: kill the child and wait for the worker to finish
            self.is_timeout = True
            self.process.terminate()
            thread.join()
        return

    # return status and response<string>
    def GetResponse(self):
        """Return ``(Status, stdout)``; non-kOk status on timeout or error output."""
        # handle timeout error
        if self.is_timeout:
            msg = "Xdb command timed out after %s seconds" % str(self.timeout)
            return (Status(kTimeout, msg), "")
        # handle command execute shell-like error, etc. command unregconize or spelled error
        if self.stderr:
            VLOG(3, "Xdb: %s - %s" % (self.cmd, self.stderr))
            return (Status(kUnknownError, "Failed to run Xdb command, is the Xdb server running?"), "")
        # handle adb execute error
        matchObj = re.search(r'error', self.stdout, re.I)
        if matchObj:
            VLOG(3, "Xdb: %s - %s" % (self.cmd, self.stdout))
            return (Status(kUnknownError, "Failed to run Xdb command, detailed message:" + self.stdout), "")
        return (Status(kOk), self.stdout)
| PeterWangIntel/crosswalk-webdriver-python | base/thread_wrap.py | Python | bsd-3-clause | 3,865 |
import os
import random
import pygame
import zwave.helper
class Player(pygame.sprite.Sprite):
    """Playable character sprite: movement, shooting, colliders and wave scaling."""

    def __init__(self, game, model = "01"):
        super(Player, self).__init__()
        # weapon state: gun with firing delay, damage range and live bullets
        self.weapon = {}
        self.weapon["type"] = "gun"
        self.weapon["delay"] = 20
        self.weapon["timer"] = 0
        self.weapon["damage"] = [35, 65]
        self.weapon["bullets"] = []
        self.life = 100
        self.total_life = 100
        self.speed = 2
        self.score = 0
        self.kills = {}
        self.kills["zombies"] = 0
        self.kills["headcrabs"] = 0
        ## init values ##
        self.game = game
        self.model = model
        self.size = 65 * game.scale
        self.angle = 0
        self.center = {}
        self.last = {}
        self.generate_position()
        path = os.path.join("assets", "img", "players", self.model, "sprite.png")
        self.image_base = zwave.helper.pygame_image(path, self.size)
        self.image = self.image_base
        self.rect = self.image.get_rect()
        self.rect.x = self.x
        self.rect.y = self.y
        self.set_colliders()

    def generate_position(self):
        """Place the player sprite at the center of the game view."""
        ## set position ##
        self.x = self.game.center["x"] - (self.size / 2)
        self.y = self.game.center["y"] - (self.size / 2)
        ## saves the actual position of the enemy, relative to game screen ##
        self.center["x"] = self.game.center["x"]
        self.center["y"] = self.game.center["y"]

    def set_colliders(self):
        """Create the full-sprite collider and a smaller 'touch' collider."""
        ## default collider, with same size of sprite image ##
        self.collider1 = pygame.sprite.GroupSingle(self)
        ## touch/collider2 is a small collider for player, that simulates a better "touch" for the player, ##
        ## without the large original image edges ##
        self.touch = pygame.sprite.Sprite()
        self.touch.up = self
        self.touch.size = int(self.size / 2)
        self.touch.image = pygame.surface.Surface((self.touch.size, self.touch.size))
        self.touch.image.fill((255, 0, 0))
        self.touch.image.set_colorkey((255, 0, 0))
        self.touch.rect = self.touch.image.get_rect()
        self.touch.rect.x = self.center["x"] - (self.touch.size / 2)
        self.touch.rect.y = self.center["y"] - (self.touch.size / 2)
        self.collider2 = pygame.sprite.GroupSingle(self.touch)

    def update_colliders(self):
        """Keep the small touch collider centered on the player."""
        ## update position of the second collider of enemy ##
        self.touch.rect.x = self.center["x"] - (self.touch.size / 2)
        self.touch.rect.y = self.center["y"] - (self.touch.size / 2)

    def collision(self, collider1, collider2):
        """Group-collide *collider2* against 'walls' or 'enemies' (by name)."""
        ## check collider 1 ##
        if collider1 == "walls":
            collider1 = self.game.map.collider["walls"]
        elif collider1 == "enemies":
            collider1 = self.game.enemies["colliders"]
        return pygame.sprite.groupcollide(collider2, collider1, False, False)

    def update_angle(self):
        """Rotate the sprite to face the mouse cursor."""
        ## update enemy angle based in player location ##
        self.angle = zwave.helper.angle_by_two_points(self.center, self.game.mouse)
        self.image = zwave.helper.pygame_rotate(self.image_base, self.angle)

    def update_position(self):
        """Move the view according to WASD keys, reverting on collisions."""
        ## check if had collision, if had, set last position of view ##
        if self.collision("walls", self.collider2) or self.collision("enemies", self.collider2):
            self.game.x = self.game.last["x"]
            self.game.y = self.game.last["y"]
        ## save current positon of view for future use ##
        self.game.last["x"] = self.game.x
        self.game.last["y"] = self.game.y
        ## make 'keys' variable with pressed keys
        keys = pygame.key.get_pressed()
        ## footsteps sound if the player is walking ##
        if keys[pygame.K_w] or keys[pygame.K_s] or keys[pygame.K_a] or keys[pygame.K_d]:
            if not self.game.sound["channels"]["steps"].get_busy():
                self.game.sound["channels"]["steps"].play(self.game.sound["steps"], -1)
        else:
            self.game.sound["channels"]["steps"].stop()
        ## picks speed for each axis ##
        velocity = zwave.helper.velocity_by_keys(self.speed * self.game.scale, keys)
        ## movement according to keys down ##
        if keys[pygame.K_w]:
            self.game.y -= velocity
        if keys[pygame.K_s]:
            self.game.y += velocity
        if keys[pygame.K_a]:
            self.game.x -= velocity
        if keys[pygame.K_d]:
            self.game.x += velocity

    def shot(self):
        """Fire a bullet towards the mouse if the weapon cooldown allows it."""
        ## checks if timer for the shot is zero ##
        if (self.weapon["timer"] == 0) and (self.alive()):
            ## check if the type of weapon is gun ##
            if self.weapon["type"] == "gun":
                angle = zwave.helper.angle_by_two_points(self.center, self.game.mouse)
                bullet = Bullet(angle, self.game)
                self.weapon["bullets"].append(bullet)
                ## gunshot sound ##
                self.game.sound["channels"]["attacks"].play(self.game.sound["gunshot"], 0)
                ## add timer for next gunshot ##
                self.weapon["timer"] = self.weapon["delay"]

    def update_bullets(self):
        """Advance live bullets; apply damage on enemy hits, kill on wall hits."""
        ## random damage by weapon damage range ##
        damage = random.randint(self.weapon["damage"][0], self.weapon["damage"][1])
        ## get all bullets instances ##
        for bullet in self.weapon["bullets"]:
            collider = bullet.collider()
            ## check collision with walls ##
            if self.collision("walls", collider):
                bullet.kill()
            ## check collision with enemies ##
            elif self.collision("enemies", collider):
                enemy = self.collision("enemies", collider)[bullet][0].up
                enemy.life -= damage
                bullet.kill()
            ## if had no collision ##
            else:
                bullet.update()

    def draw(self):
        """Draw bullets and both player colliders on the game screen."""
        for bullet in self.weapon["bullets"]:
            group = bullet.collider()
            group.draw(self.game.screen)
        self.collider1.draw(self.game.screen)
        self.collider2.draw(self.game.screen)

    def wave_update(self):
        """Scale the player up between waves: damage, fire rate, life, speed."""
        if self.weapon["damage"][0] < 100:
            self.weapon["damage"][0] += 10
            self.weapon["damage"][1] += 20
        if self.weapon["delay"] > 20:
            self.weapon["delay"] -= 3
        if self.total_life < 300:
            self.total_life += 10
        # heal up to 25 points, capped at total_life
        if self.life < self.total_life:
            if (self.total_life - self.life) >= 25:
                self.life += 25
            else:
                self.life += self.total_life - self.life
        if self.speed < 4:
            self.speed += 0.1

    def kill(self):
        """Remove the player and its touch collider; stop the footsteps sound."""
        if self.game.sound["channels"]["steps"].get_busy():
            self.game.sound["channels"]["steps"].stop()
        self.touch.kill()
        super(Player, self).kill()

    def update(self):
        """Per-frame update: cooldown, bullets, facing, position, colliders."""
        if self.life > 0:
            ## update gunshot timer ##
            if self.weapon["timer"] > 0:
                self.weapon["timer"] -= 1
            self.update_bullets()
            self.update_angle()
            self.update_position()
            self.update_colliders()
        elif self.alive():
            self.kill()
class Bullet(pygame.sprite.Sprite):
    """Projectile fired by the player, travelling along a fixed angle."""

    def __init__(self, angle, game):
        pygame.sprite.Sprite.__init__(self)
        ## init values ##
        # travel direction is opposite to the sprite's facing angle
        self.angle = angle - 180
        self.size = 10 * game.scale
        path = os.path.join("assets", "img", "bullet.png")
        self.image = zwave.helper.pygame_image(path, self.size)
        self.image = zwave.helper.pygame_rotate(self.image, angle)
        self.rect = self.image.get_rect()
        # spawn centered on the player
        self.rect.x = game.player.center["x"] - (self.size / 2)
        self.rect.y = game.player.center["y"] - (self.size / 2)
        self.velocity = zwave.helper.velocity_by_angle(35 * game.scale, self.angle)
        self.sgroup = pygame.sprite.GroupSingle(self)

    def update(self):
        """Advance the bullet one step along its trajectory."""
        self.rect.x -= self.velocity["x"]
        self.rect.y -= self.velocity["y"]

    def collider(self):
        """Return the single-sprite group used for collision tests."""
        return self.sgroup
| josefreittas/zwave | zwave/player.py | Python | mit | 8,177 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class MembershipRequest(models.Model):
    """Extend membership requests with newsletter opt-in / opt-out choices."""

    _inherit = "membership.request"

    # newsletters the requester wants to subscribe to
    distribution_list_ids = fields.Many2many(
        comodel_name="distribution.list",
        relation="membership_request_distribution_list_rel",
        column1="request_id",
        column2="list_id",
        string="Newsletters (opt-in)",
        domain=[("newsletter", "=", True)],
    )
    # newsletters the requester wants to be excluded from
    distribution_list_ids_opt_out = fields.Many2many(
        comodel_name="distribution.list",
        relation="membership_request_distribution_list_opt_out_rel",
        column1="request_id",
        column2="list_id",
        string="Newsletters (opt-out)",
        domain=[("newsletter", "=", True)],
    )
    request_type = fields.Selection(selection_add=[("n", "Other")])

    @api.constrains("distribution_list_ids", "distribution_list_ids_opt_out")
    def _check_distribution_list_ids(self):
        """
        The same distribution.list cannot be inside both fields
        distribution_list_ids and distribution_list_ids_opt_out.
        """
        for record in self:
            if set(record.distribution_list_ids.ids).intersection(
                set(record.distribution_list_ids_opt_out.ids)
            ):
                raise ValidationError(
                    _(
                        "You cannot add the same newsletter "
                        "to newsletters (opt-in) and newsletters (opt-out)."
                    )
                )

    def validate_request(self):
        """Validate the request, then propagate the newsletter choices to the
        partner: opt-in lists link the partner (and clear any opt-out entry),
        opt-out lists do the reverse.
        """
        self.ensure_one()
        res = super(MembershipRequest, self).validate_request()
        # ORM commands: (4, id) links the partner, (3, id) unlinks it
        self.distribution_list_ids.write(
            {
                "res_partner_opt_in_ids": [(4, self.partner_id.id)],
                "res_partner_opt_out_ids": [(3, self.partner_id.id)],
            }
        )
        self.distribution_list_ids_opt_out.write(
            {
                "res_partner_opt_out_ids": [(4, self.partner_id.id)],
                "res_partner_opt_in_ids": [(3, self.partner_id.id)],
            }
        )
        return res

    @api.model
    def _onchange_partner_id_vals(
        self, is_company, request_type, partner_id, technical_name
    ):
        """
        Keep Other as request type when the partner is a company
        """
        res = super(MembershipRequest, self)._onchange_partner_id_vals(
            is_company, request_type, partner_id, technical_name
        )
        if is_company and request_type == "n":
            res["value"]["request_type"] = "n"
        return res
| mozaik-association/mozaik | mozaik_communication/models/membership_request.py | Python | agpl-3.0 | 2,688 |
try:
import ujson as json
except:
import json
from SerializerBase import *
class SerializerUJson(SerializerBase):
    """JSON serializer backed by ujson when available (stdlib json otherwise)."""

    def __init__(self):
        self.__jslocation__ = "j.data.serializer.json"

    def dumps(self, obj, sort_keys=False, indent=False):
        """Serialize *obj* to a JSON string.

        ``indent`` is an int number of spaces, or a falsy value for compact
        output. The keyword is only forwarded when truthy: with the stdlib
        json fallback, ``indent=False`` would otherwise be treated as
        ``indent=0`` (newline-separated output), unlike ujson where 0 means
        compact — so the default produced different output depending on
        which backend was imported.
        """
        kwargs = {"ensure_ascii": False, "sort_keys": sort_keys}
        if indent:
            kwargs["indent"] = indent
        return json.dumps(obj, **kwargs)

    def loads(self, s):
        """Deserialize JSON from *s* (``str`` or UTF-8 encoded ``bytes``)."""
        if isinstance(s, bytes):
            s = s.decode('utf-8')
        return json.loads(s)
| Jumpscale/jumpscale_core8 | lib/JumpScale/data/serializers/SerializerUJson.py | Python | apache-2.0 | 470 |
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the DNSPark authentication options on *subparser*."""
    for flag, help_text in (
        ("--auth-username", "specify api key used to authenticate"),
        ("--auth-token", "specify token used authenticate"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """Lexicon DNS provider for the DNSPark v2 REST API."""

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.dnspark.com/v2')

    def authenticate(self):
        """Look up the configured domain and remember its DNSPark domain id."""
        payload = self._get('/dns/{0}'.format(self.options['domain']))

        if not payload['additional']['domain_id']:
            raise Exception('No domain found')

        self.domain_id = payload['additional']['domain_id']

    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, type, name, content):
        record = {
            'rname': self._relative_name(name),
            'rtype': type,
            'rdata': content
        }
        try:
            self._post('/dns/{0}'.format(self.domain_id), record)
        except requests.exceptions.HTTPError as e:
            # http 400 is ok here, because the record probably already exists.
            # BUGFIX: the previous code re-raised unconditionally, which made
            # the 400 check dead code and the documented behavior wrong.
            if e.response.status_code != 400:
                raise

        logger.debug('create_record: %s', True)
        return True

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, type=None, name=None, content=None):
        payload = self._get('/dns/{0}'.format(self.domain_id))

        records = []
        for record in payload['records']:
            processed_record = {
                'type': record['rtype'],
                'name': record['rname'],
                'ttl': record['ttl'],
                'content': record['rdata'],
                'id': record['record_id']
            }
            records.append(processed_record)

        # The endpoint has no server-side filters, so filter client side.
        if type:
            records = [record for record in records if record['type'] == type]
        if name:
            records = [record for record in records if record['name'] == self._full_name(name)]
        if content:
            records = [record for record in records if record['content'] == content]

        logger.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def update_record(self, identifier, type=None, name=None, content=None):
        data = {
            'ttl': self.options['ttl']
        }
        if type:
            data['rtype'] = type
        if name:
            data['rname'] = self._relative_name(name)
        if content:
            data['rdata'] = content

        self._put('/dns/{0}'.format(identifier), data)

        logger.debug('update_record: %s', True)
        return True

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            records = self.list_records(type, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)

        logger.debug('delete_records: %s', delete_record_id)

        for record_id in delete_record_id:
            self._delete('/dns/{0}'.format(record_id))
            # is always True at this point, if a non 200 response is returned an error is raised.
            logger.debug('delete_record: %s', True)

        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an authenticated JSON request; raise on any HTTP error."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
        default_auth = (self.options['auth_username'], self.options['auth_token'])

        r = requests.request(action, self.api_endpoint + url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers,
                             auth=default_auth)
        r.raise_for_status()  # if the request fails for any reason, throw an error.
        return r.json()
| tnwhitwell/lexicon | lexicon/providers/dnspark.py | Python | mit | 4,739 |
from fastapi.testclient import TestClient
from docs_src.behind_a_proxy.tutorial002 import app
client = TestClient(app)
# Expected OpenAPI document: the app's root_path must surface as the single
# entry in "servers" while the path itself stays unprefixed.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/app": {
            "get": {
                "summary": "Read Main",
                "operationId": "read_main_app_get",
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
            }
        }
    },
    "servers": [{"url": "/api/v1"}],
}
def test_openapi():
    """The served /openapi.json document must match ``openapi_schema``."""
    res = client.get("/openapi.json")
    assert res.status_code == 200
    assert res.json() == openapi_schema
def test_main():
    """/app must report the configured root_path along with the greeting."""
    res = client.get("/app")
    assert res.status_code == 200
    assert res.json() == {"message": "Hello World", "root_path": "/api/v1"}
| tiangolo/fastapi | tests/test_tutorial/test_behind_a_proxy/test_tutorial002.py | Python | mit | 993 |
import os
import shutil
import gym
import numpy as np
import pytest
from stable_baselines import (A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2,
TD3, TRPO, SAC)
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.vec_env import VecFrameStack, DummyVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.callbacks import CheckpointCallback
from stable_baselines.gail import ExpertDataset, generate_expert_traj
# Pre-recorded expert trajectories shipped with stable-baselines, used as
# demonstration data for the GAIL / behavior-cloning tests below.
EXPERT_PATH_PENDULUM = "stable_baselines/gail/dataset/expert_pendulum.npz"
EXPERT_PATH_DISCRETE = "stable_baselines/gail/dataset/expert_cartpole.npz"
@pytest.mark.parametrize("expert_env", [('Pendulum-v0', EXPERT_PATH_PENDULUM, True),
                                        ('CartPole-v1', EXPERT_PATH_DISCRETE, False)])
def test_gail(tmp_path, expert_env):
    """Train GAIL briefly on expert data, save/reload the model and evaluate it."""
    env_id, expert_path, load_from_memory = expert_env
    env = gym.make(env_id)

    traj_data = None
    if load_from_memory:
        # exercise the in-memory dataset path instead of loading from disk
        traj_data = np.load(expert_path)
        expert_path = None

    dataset = ExpertDataset(traj_data=traj_data, expert_path=expert_path, traj_limitation=10,
                            sequential_preprocessing=True)
    # Note: train for 1M steps to have a working policy
    model = GAIL('MlpPolicy', env, adversary_entcoeff=0.0, lam=0.92, max_kl=0.001,
                 expert_dataset=dataset, hidden_size_adversary=64, verbose=0)

    model.learn(300)
    model.save(str(tmp_path / "GAIL-{}".format(env_id)))
    model = model.load(str(tmp_path / "GAIL-{}".format(env_id)), env=env)
    model.learn(300)

    evaluate_policy(model, env, n_eval_episodes=5)
    del dataset, model
@pytest.mark.parametrize("generate_env", [
    (SAC, 'MlpPolicy', 'Pendulum-v0', 1, 10),
    (DQN, 'MlpPolicy', 'CartPole-v1', 1, 10),
    (A2C, 'MlpLstmPolicy', 'Pendulum-v0', 1, 10),
    (A2C, 'MlpLstmPolicy', 'CartPole-v1', 1, 10),
    (A2C, 'CnnPolicy', 'BreakoutNoFrameskip-v4', 8, 1),
])
def test_generate(tmp_path, generate_env):
    """Generate expert trajectories and check the saved dataset is consistent."""
    model, policy, env_name, n_env, n_episodes = generate_env

    if n_env > 1:
        env = make_atari_env(env_name, num_env=n_env, seed=0)
        model = model(policy, env, verbose=0)
    else:
        model = model(policy, env_name, verbose=0)

    dataset = generate_expert_traj(model, str(tmp_path / 'expert'), n_timesteps=300, n_episodes=n_episodes,
                                   image_folder=str(tmp_path / 'test_recorded_images'))

    # the dataset must contain exactly n_episodes episodes, all keys aligned
    assert set(dataset.keys()).issuperset(['actions', 'obs', 'rewards', 'episode_returns', 'episode_starts'])
    assert sum(dataset['episode_starts']) == n_episodes
    assert len(dataset['episode_returns']) == n_episodes
    n_timesteps = len(dataset['episode_starts'])

    for key, val in dataset.items():
        if key != 'episode_returns':
            assert val.shape[0] == n_timesteps, "inconsistent number of timesteps at '{}'".format(key)

    # round-trip: the .npz written to disk must match the in-memory dataset
    dataset_loaded = np.load(str(tmp_path / 'expert.npz'), allow_pickle=True)
    assert dataset.keys() == dataset_loaded.keys()
    for key in dataset.keys():
        assert (dataset[key] == dataset_loaded[key]).all(), "different data at '{}'".format(key)

    # Cleanup folder
    if os.path.isdir(str(tmp_path / 'test_recorded_images')):
        shutil.rmtree(str(tmp_path / 'test_recorded_images'))
def test_generate_callable(tmp_path):
    """
    Test generating expert trajectories with a callable.
    """
    env = gym.make("CartPole-v1")
    # Here the expert is a random agent
    def dummy_expert(_obs):
        # the observation is ignored: actions are sampled uniformly
        return env.action_space.sample()
    generate_expert_traj(dummy_expert, tmp_path / 'dummy_expert_cartpole', env, n_timesteps=0, n_episodes=10)
def test_pretrain_twice(tmp_path):
    """
    Test pretraining twice in the same execution.
    """
    dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
                            sequential_preprocessing=True, verbose=0)
    model = PPO2("MlpPolicy", "Pendulum-v0")
    # the second pretrain() call must not rebuild or clash with the graph
    model.pretrain(dataset, n_epochs=5)
    model.pretrain(dataset, n_epochs=5)
    del dataset, model
@pytest.mark.xfail(reason="Not Enough Memory", strict=False)
def test_pretrain_images(tmp_path):
    """Record image observations from Pong and pretrain a CNN policy on them."""
    env = make_atari_env("PongNoFrameskip-v4", num_env=1, seed=0)
    env = VecFrameStack(env, n_stack=3)
    model = PPO2('CnnPolicy', env)
    generate_expert_traj(model, str(tmp_path / 'expert_pong'), n_timesteps=0, n_episodes=1,
                         image_folder=str(tmp_path / 'pretrain_recorded_images'))

    expert_path = str(tmp_path / 'expert_pong.npz')
    dataset = ExpertDataset(expert_path=expert_path, traj_limitation=1, batch_size=32,
                            sequential_preprocessing=True)
    model.pretrain(dataset, n_epochs=2)

    shutil.rmtree(str(tmp_path / 'pretrain_recorded_images'))
    env.close()
    del dataset, model, env
def test_gail_callback(tmp_path):
    """GAIL training must work with a CheckpointCallback attached."""
    dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
                            sequential_preprocessing=True, verbose=0)
    model = GAIL("MlpPolicy", "Pendulum-v0", dataset)
    # save_freq < total_timesteps so at least one checkpoint is written
    checkpoint_callback = CheckpointCallback(save_freq=150, save_path=str(tmp_path / 'logs/gail/'), name_prefix='gail')
    model.learn(total_timesteps=301, callback=checkpoint_callback)
    shutil.rmtree(str(tmp_path / 'logs/gail/'))
    del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACKTR, GAIL, DDPG, PPO1, PPO2, SAC, TD3, TRPO])
def test_behavior_cloning_box(tmp_path, model_class):
    """
    Behavior cloning with continuous actions.

    Pretrains each supported algorithm on the Pendulum expert dataset
    and saves the resulting model.
    """
    dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
                            sequential_preprocessing=True, verbose=0)
    model = model_class("MlpPolicy", "Pendulum-v0")
    model.pretrain(dataset, n_epochs=5)
    model.save(str(tmp_path / "test-pretrain"))
    del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACER, ACKTR, DQN, GAIL, PPO1, PPO2, TRPO])
def test_behavior_cloning_discrete(tmp_path, model_class):
    """Behavior cloning with discrete actions (CartPole expert dataset)."""
    dataset = ExpertDataset(expert_path=EXPERT_PATH_DISCRETE, traj_limitation=10,
                            sequential_preprocessing=True, verbose=0)
    model = model_class("MlpPolicy", "CartPole-v1")
    model.pretrain(dataset, n_epochs=5)
    model.save(str(tmp_path / "test-pretrain"))
    del dataset, model
def test_dataset_param_validation():
    """ExpertDataset must reject missing data and mutually exclusive sources."""
    with pytest.raises(ValueError):
        # neither traj_data nor expert_path supplied
        ExpertDataset()

    traj_data = np.load(EXPERT_PATH_PENDULUM)
    with pytest.raises(ValueError):
        # both traj_data and expert_path supplied at once
        ExpertDataset(traj_data=traj_data, expert_path=EXPERT_PATH_PENDULUM)
def test_generate_vec_env_non_image_observation():
    """generate_expert_traj must accept a (non-image) vectorized environment."""
    env = DummyVecEnv([lambda: gym.make('CartPole-v1')] * 2)
    model = PPO2('MlpPolicy', env)
    model.learn(total_timesteps=300)
    generate_expert_traj(model, save_path='.', n_timesteps=0, n_episodes=5)
| hill-a/stable-baselines | tests/test_gail.py | Python | mit | 7,125 |
#!/usr/bin/env python
import argparse
import sys
import os
from shutil import rmtree
from KafNafParserPy import KafNafParser
from lxml import etree
from xml.sax.saxutils import escape
class Clexelt:
    """A lexical element (e.g. ``bank.n``) grouping its annotated instances."""

    def __init__(self, this_item, this_pos):
        self.item = this_item            # e.g. "bank.n"
        self.pos = this_pos              # part-of-speech label, e.g. "unk"
        self.instances = []              # Cinstance objects, in insertion order
        self.existing_instances = set()  # ids already added, for O(1) lookup

    def exists(self, instance_id):
        """Return True if an instance with *instance_id* was already added."""
        return instance_id in self.existing_instances

    def add_instance(self, this_instance):
        """Append *this_instance* and remember its id for exists() checks."""
        self.instances.append(this_instance)
        self.existing_instances.add(this_instance.id)

    def __repr__(self):
        return '{} {} {}'.format(self.item, self.pos, len(self.instances))

    def __iter__(self):
        return iter(self.instances)

    def create_xml_node(self):
        """Build the ``<lexelt>`` XML node wrapping all instance nodes."""
        node = etree.Element('lexelt')
        node.set('item', self.item)
        node.set('pos', self.pos)
        for instance in self.instances:
            node.append(instance.create_xml_node())
        return node
class Cinstance:
    """One annotated occurrence of a lexical element inside a document."""

    def __init__(self):
        self.id = ''          # unique instance id (filename + term id)
        self.docsrc = ''      # source document path
        self.tokens = []      # context tokens surrounding the target
        self.index_head = []  # indexes (into tokens) of the target head span
        self.key = ''         # gold sense key / synset label

    def create_xml_node(self):
        """Build the ``<instance>`` node, marking the head span with <head>."""
        node = etree.Element('instance')
        node.set('id', self.id)
        node.set('docsrc', self.docsrc)

        start_head = min(self.index_head)
        end_head = max(self.index_head) + 1

        pieces = ['<context>']
        for num_token, token in enumerate(self.tokens):
            if num_token == start_head:
                pieces.append(' <head>' + escape(token))
            elif num_token == end_head:
                pieces.append('</head> ' + escape(token))
            else:
                pieces.append(' ' + escape(token))
        pieces.append('</context>')

        node.append(etree.fromstring(''.join(pieces)))
        return node
def add_file(filename, data_lexelt, reftype='lexical_key'):
    """Parse a KAF/NAF file and add one Cinstance per sense-annotated term.

    Instances are grouped into *data_lexelt* (dict: "lemma.p" -> Clexelt).
    The context stored with each instance spans up to two sentences before
    and after the sentence containing the target term.
    NOTE(review): the *reftype* argument is currently unused; the accepted
    reference types are hard-coded in the loop below.
    """
    obj = KafNafParser(filename)

    # map each sentence to its tokens, remembering document sentence order
    tokens_per_sent = {}
    sent_for_token = {}
    sents_in_order = []
    for token in obj.get_tokens():
        sentid = token.get_sent()
        if sentid not in sents_in_order:
            sents_in_order.append(sentid)
        sent_for_token[token.get_id()] = sentid
        if sentid not in tokens_per_sent: tokens_per_sent[sentid] = []
        tokens_per_sent[sentid].append((token.get_id(), token.get_text()))

    annotated_lemmas = [] # LIST of (full_id, token ids, lemma,pos,synset)
    for term in obj.get_terms():
        # accept either a lexical_key reference or a WordNet-3.0 sense
        synset_label = None
        for ext_ref in term.get_external_references():
            if ext_ref.get_reftype() == 'lexical_key':
                synset_label = term.get_lemma()+'%'+ext_ref.get_reference()
            elif ext_ref.get_reftype() == 'sense' and ext_ref.get_resource() == 'WordNet-3.0':
                synset_label = ext_ref.get_reference()
            if synset_label is not None:
                break
        if synset_label is not None:
            annotated_lemmas.append((filename+'#'+term.get_id(), term.get_span().get_span_ids(), term.get_lemma(), term.get_pos(), synset_label))

    for full_id, token_ids, lemma,pos, synset_label in annotated_lemmas:
        #CREATE NEW INSTANCE
        this_key = lemma+'.'+pos.lower()[0]
        if this_key not in data_lexelt:
            data_lexelt[this_key] = Clexelt(this_key,pos)

        if not data_lexelt[this_key].exists(full_id):
            #Create the new instance
            new_instance = Cinstance()

            new_instance.id = full_id
            new_instance.docsrc = filename
            new_instance.key = synset_label
            tokens = []
            target_indexes = []
            this_sent = sent_for_token[token_ids[0]]
            index = sents_in_order.index(this_sent)
            # take a window of up to 2 sentences on each side of the target
            start_idx = max(index-2,0)
            end_idx = min(index+2,len(sents_in_order)-1)
            selected_sents = sents_in_order[start_idx:end_idx+1]
            num_token = 0
            for current_sent in selected_sents:
                for token_id, token_text in tokens_per_sent[str(current_sent)]:
                    tokens.append(token_text)
                    if token_id in token_ids:
                        target_indexes.append(num_token)
                    num_token += 1
            new_instance.tokens = tokens[:]
            new_instance.index_head = target_indexes[:]
            data_lexelt[this_key].add_instance(new_instance)
if __name__ == '__main__':
    # command-line interface: one input source (file or list of paths) and
    # an output folder that will be recreated from scratch
    parser = argparse.ArgumentParser(description='Creates IMS training file from KAF/NAF files', version='1.0')
    input_group = parser.add_mutually_exclusive_group(required=True)
    #input_group.add_argument('-d', dest='directory', help='Directory with NAF/KAF files')
    input_group.add_argument('-f', dest='file', help='Single KAF/NAF file')
    input_group.add_argument('-l', dest='file_paths', help='File with a list of paths to KAF/NAF files')
    #parser.add_argument('-e', dest='extension', choices = ['KAF','NAF'], help='Type of input files when input is a directory')
    parser.add_argument('-o', dest='output', required=True, help='Output folder')
    args = parser.parse_args()

    #if args.directory is not None and args.extension is None:
    #    print>>sys.stderr,'Extension of files must be specified when the input is a directory'
    #    sys.exit(-1)

    # collect instances per lexical element from all input files
    data_lexelt = {}
    if args.file_paths is not None:
        fi = open(args.file_paths,'r')
        for path in fi:
            print 'Processing %s' % path.strip()
            add_file(path.strip(), data_lexelt)
    else:
        add_file(args.file, data_lexelt)

    # normalize the output path and recreate the folder from scratch
    if args.output[-1] == '/':
        args.output = args.output[:-1]

    if os.path.exists(args.output):
        rmtree(args.output)
    os.mkdir(args.output)

    # one <item>.train.xml / <item>.train.key pair per lexical element,
    # plus a global word list next to the output folder
    fd_list = open(args.output+'.word_list','w')
    for item_key, lexelt in data_lexelt.items():
        fd_list.write('%s\n' % item_key)
        output_xml = os.path.join(args.output,item_key+'.train.xml')
        corpus = etree.Element('corpus')
        corpus.set('lang','en')
        corpus.append(lexelt.create_xml_node())
        train_tree = etree.ElementTree(corpus)
        train_tree.write(output_xml,encoding='UTF-8', pretty_print=True, xml_declaration=True)

        key_filename = os.path.join(args.output,item_key+'.train.key')
        fd_key = open(key_filename,'w')
        for instance in lexelt:
            fd_key.write('%s %s %s\n' % (lexelt.item, instance.id, instance.key))
        fd_key.close()
    fd_list.close()
    print 'List of words in %s' % fd_list.name
| rubenIzquierdo/it_makes_sense_WSD | convert_to_ims_training_format.py | Python | apache-2.0 | 6,874 |
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import (
hr_employment_status,
hr_employee,
)
| open-synergy/opnsynid-hr | hr_employee_employment_status/models/__init__.py | Python | agpl-3.0 | 194 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class MrpProduction(orm.Model):
    """ Model name: MrpProduction
    """
    _inherit = 'mrp.production'

    def _get_total_line(self, cr, uid, ids, fields, args, context=None):
        """ Fields function: aggregate linked sale order line quantities.

        Returns {production_id: {'total_line_todo', 'total_line_done',
        'total_line_remain', 'total_line_ok'}} for every id in *ids*.
        Productions without sale order lines get zeroed defaults (the
        previous version left them out of the result, which breaks
        function-field reads for those records).
        """
        # Pre-fill defaults so every requested id is present in the result:
        res = {}
        for mrp_id in ids:
            res[mrp_id] = {
                'total_line_todo': 0.0,
                'total_line_done': 0.0,
                'total_line_remain': 0.0,
                'total_line_ok': False,
                }
        if not ids:  # nothing requested: avoid an invalid "IN ()" query
            return res

        # Parameterized query: psycopg2 adapts the tuple of ids, instead of
        # formatting them into the SQL string directly.
        query = '''
            SELECT
                mrp_id,
                sum(product_uom_qty) as todo,
                sum(product_uom_maked_sync_qty) as done,
                sum(product_uom_qty) - sum(product_uom_maked_sync_qty) as
                    remain,
                sum(product_uom_qty) = sum(product_uom_maked_sync_qty) as ok
            FROM
                sale_order_line
            GROUP BY
                mrp_id
            HAVING
                mrp_id in %s;
            '''
        _logger.warning('Start query:\n\t:%s' % query)
        cr.execute(query, (tuple(ids), ))
        for item in cr.fetchall():
            res[item[0]]['total_line_todo'] = item[1]
            res[item[0]]['total_line_done'] = item[2]
            res[item[0]]['total_line_remain'] = item[3]
            res[item[0]]['total_line_ok'] = item[4]
        _logger.warning('End query')
        return res

    _columns = {
        'total_line_todo': fields.function(
            _get_total_line, method=True,
            type='float', string='Todo',
            store=False, multi=True),
        'total_line_done': fields.function(
            _get_total_line, method=True,
            type='float', string='Done',
            store=False, multi=True),
        'total_line_remain': fields.function(
            _get_total_line, method=True,
            type='float', string='Residui',
            store=False, multi=True),
        'total_line_ok': fields.function(
            _get_total_line, method=True,
            type='boolean', string='Completed',
            store=False, multi=True),
        }
| Micronaet/micronaet-production | production_accounting_external_total/total.py | Python | agpl-3.0 | 3,496 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.tools.translate import _
from openerp.tools import (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class XmlrpcOperation(orm.Model):
    """ Model name: XmlrpcOperation

    Extends the generic XMLRPC operation dispatcher with the 'invoice'
    operation, which forwards the prepared payload to the configured
    accounting XMLRPC server.
    """
    _inherit = 'xmlrpc.operation'

    # ------------------
    # Override function:
    # ------------------
    def execute_operation(self, cr, uid, operation, parameter, context=None):
        """ Execute the 'invoice' XMLRPC operation.

        Operations other than 'invoice' are delegated to the parent
        implementation untouched.

        @param operation: operation code, handled here only when 'invoice'
        @param parameter: payload dict sent to the remote server
        @param context: xmlrpc context dict
        @return: result dict returned by the remote server
        @raise osv.except_osv: when the XMLRPC call itself fails
        """
        if operation != 'invoice':
            # Dispatch unrelated operations *before* the try block, so an
            # error raised by another handler is not masked as a generic
            # "XMLRPC connecting server" failure (the original bare except
            # converted every error, including those of super()):
            return super(XmlrpcOperation, self).execute_operation(
                cr, uid, operation, parameter, context=context)

        try:
            server_pool = self.pool.get('xmlrpc.server')
            xmlrpc_server = server_pool.get_xmlrpc_server(
                cr, uid, context=context)
            res = xmlrpc_server.execute('invoice', parameter)
            if res.get('error', False):
                _logger.error(res['error'])
                # TODO raise
                # TODO confirm export!
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; log the full traceback before re-raising
            # as a user-visible error:
            _logger.exception('XMLRPC invoice operation failed')
            raise osv.except_osv(
                _('Connect error:'), _('XMLRPC connecting server'))
        return res
class AccountInvoice(orm.Model):
""" Add export function to invoice obj
"""
_inherit = 'account.invoice'
def server_action_xmlrpc_export_scheduled(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not ids:
ids = context.get('active_ids')
if not ids:
_logger.error('Error in selection invoice')
return False
total = 0
for invoice in self.browse(cr, uid, ids, context=context):
if not invoice.xmlrpc_scheduled and not invoice.xmlrpc_sync:
total += 1
self.xmlrpc_export_scheduled(
cr, uid, [invoice.id], context=context)
_logger.warning('Invoice to be imported, tot.: %s' % total)
return {}
def xmlrpc_export_scheduled(self, cr, uid, ids, context=None):
""" Schedule for import
"""
return self.write(cr, uid, ids, {
'xmlrpc_scheduled': True,
}, context=context)
    def xmlrpc_export_unscheduled(self, cr, uid, ids, context=None):
        """ Remove the invoices from the scheduled-for-import queue
            (clears the flag set by xmlrpc_export_scheduled)
        """
        return self.write(cr, uid, ids, {
            'xmlrpc_scheduled': False,
            }, context=context)
def _xmlrpc_clean_description(self, value, cut=False):
""" Remove \n and \t and return first 'cut' char
"""
value = value.replace('\n', ' ')
value = value.replace('\r', '')
value = value.replace('\t', ' ')
if cut:
return value[:cut]
else:
return value
def dummy_button(self, cr, uid, ids, context=None):
""" For show an icon as a button
"""
return True
def reset_xmlrpc_export_invoice(self, cr, uid, ids, context=None):
""" Remove sync status
"""
assert len(ids) == 1, 'No multi export for now' # TODO remove!!!
_logger.warning('Reset sync invoice: %s' % ids[0])
return self.write(cr, uid, ids, {
'xmlrpc_sync': False}, context=context)
    def xmlrpc_export_invoice(self, cr, uid, ids, context=None):
        """ Export current invoice

        Builds a fixed-width record file (header / detail / Fattura PA /
        foot columns, see ``mask`` below) for the single invoice in
        ``ids`` and pushes it to the accounting program through the
        'invoice' XMLRPC operation.  On remote 'OK' the invoice is
        flagged xmlrpc_sync and unscheduled; otherwise an osv error with
        the remote comment is raised.
        # TODO manage list of invoices?
        """
        def get_comment_line(self, parameter, value):
            """ Split line in comment line max 60 char

            Appends one fixed-width 'D' (description) record per 60-char
            chunk of ``value`` to parameter['input_file_string'].
            """
            value = (value or u'').strip()

            # -----------------------------------------------------------------
            # Replace some not ASCII char:
            # -----------------------------------------------------------------
            value = value.replace(u'€', u'EUR ')
            # NOTE(review): left operand below is presumably a non-breaking
            # space (U+00A0) being normalized to a plain space -- confirm:
            value = value.replace(u' ', u' ')
            value = value.replace(u'®', u' (R)')
            value = value.replace(u'™', u' TM')

            while value: # Split in 60 char:
                # TODO change filler space
                parameter['input_file_string'] += self.pool.get(
                    'xmlrpc.server').clean_as_ascii(
                        '%44sD%16s%-60s%237s\r\n' % (
                            '',
                            '',
                            self._xmlrpc_clean_description(
                                value, 60), # Remove \n
                            '',
                            ))
                value = value[60:]
            return True

        # ---------------------------------------------------------------------
        # Start procedure:
        # ---------------------------------------------------------------------
        if context is None:
            context = {}
        start_hour_default = '08:00' # Always 8

        assert len(ids) == 1, 'No multi export for now' # TODO remove!!!

        # TODO use with validate trigger for get the number
        parameter = {}

        # ---------------------------------------------------------------------
        # Access company record for extra parameters:
        # ---------------------------------------------------------------------
        picking_pool = self.pool.get('stock.picking')
        product_pool = self.pool.get('product.product')
        company_pool = self.pool.get('res.company')
        company_ids = company_pool.search(cr, uid, [], context=context)
        company = company_pool.browse(cr, uid, company_ids, context=context)[0]

        # Generate string for export file (fixed-width record layout):
        mask = '%s%s%s%s%s' % ( # 3 block for readability:
            '%-2s%-2s%-6s%-8s%-2s%-8s%-8s%-8s', # header
            '%-1s%-16s%-60s%-2s%10.2f%10.3f%-5s%-5s%-50s%-10s%-8s%1s%-8s%-8s', # row
            '%-2s%-20s%-10s%-8s%-24s%-1s%-16s%-1s%-10s%-10s', # Fattura PA
            '%-3s%-13s%2s', # foot
            '\r\n', # Win CR
            )

        parameter['input_file_string'] = ''
        last_picking = False # Last picking for reference:
        for invoice_temp in self.browse(cr, uid, ids, context=context):
            # Reload invoice with partner lang (translated descriptions):
            context['lang'] = invoice_temp.partner_id.lang or 'it_IT'
            invoice = self.browse(cr, uid, invoice_temp.id, context=context)
            partner = invoice.partner_id

            if not invoice.number:
                raise osv.except_osv(
                    _('XMLRPC sync error'),
                    _('Invoice must be validated!'))

            # -----------------------------------------------------------------
            # Note pre document:
            # -----------------------------------------------------------------
            if invoice.text_note_pre:
                get_comment_line(self, parameter, invoice.text_note_pre)

            ddt_number = ddt_date = ddt_destination = ''
            i_ddt = 0
            last_ddt = False
            previous_picking = False
            for line in invoice.invoice_line:
                # -------------------------------------------------------------
                # Order, Partner order, DDT reference:
                # -------------------------------------------------------------
                # destination (if not present DDT used invoice code):
                ddt_destination = invoice.partner_id.sql_destination_code or ''
                picking = line.generator_move_id.picking_id
                if picking and (not last_picking or last_picking != picking):
                    last_picking = picking # Save for not print again
                    # get_comment_line(self, parameter,
                    #    picking_pool.write_reference_from_picking(picking))

                    if picking.ddt_id: # If DDT is present
                        ddt = picking.ddt_id
                        ddt_number_block = ddt.name.split('/')
                        ddt_number = '%s-%s' % (
                            ddt_number_block[1], ddt_number_block[-1])
                        ddt_destination = \
                            ddt.destination_partner_id.sql_destination_code
                        ddt_date = ddt.date[:10]

                        # If DDT Block print ID:
                        if not last_ddt or ddt_number != last_ddt:
                            i_ddt += 1
                            last_ddt = ddt_number

                try: # Module: invoice_payment_cost (not in dep.)
                    refund_line = 'S' if line.refund_line else ' '
                except:
                    refund_line = ' '

                if invoice.mx_agent_id: # Agent set up in document
                    agent_code = invoice.mx_agent_id.sql_agent_code or \
                        invoice.mx_agent_id.sql_supplier_code or ''
                else: # use partner one's
                    agent_code = invoice.partner_id.agent_id.sql_agent_code \
                        or invoice.partner_id.agent_id.sql_supplier_code or ''

                # -------------------------------------------------------------
                # Note pre line:
                # -------------------------------------------------------------
                if line.text_note_pre:
                    for block in line.text_note_pre.split('\n'):
                        get_comment_line(self, parameter, block)

                # -------------------------------------------------------------
                # Fattura PA "long" fields:
                # -------------------------------------------------------------
                product = line.product_id

                # Description:
                if line.use_text_description:
                    description = line.name or ''
                else:
                    description = product.name or ''

                # Add extra duty code if needed:
                if invoice.fiscal_position.duty_print and product.duty_code:
                    description += '\nCustom code: %s' % product.duty_code

                # -------------------------------------------------------------
                # Invoice field "needed" Fattura PA:
                # -------------------------------------------------------------
                goods_description = \
                    invoice.goods_description_id.account_ref or ''
                carriage_condition = \
                    invoice.carriage_condition_id.account_ref or ''
                transportation_reason = \
                    invoice.transportation_reason_id.account_ref or ''
                transportation_method = \
                    invoice.transportation_method_id.account_ref or ''
                carrier_code = \
                    invoice.default_carrier_id.partner_id.sql_supplier_code \
                    or ''
                parcels = '%s' % invoice.parcels

                # TODO check error:
                # Direct invoice: goods, carriage, transportation, method
                if invoice.default_carrier_id and not carrier_code:
                    raise osv.except_osv(
                        _('XMLRPC error'),
                        _('Carrier need Account code!'))

                # -------------------------------------------------------------
                # LAST BLOCK: Reference for order / DDT yet wrote:
                # -------------------------------------------------------------
                if not previous_picking and picking: # For sure (needed?)
                    previous_picking = picking

                if picking and previous_picking != picking:
                    get_comment_line(
                        self, parameter,
                        picking_pool.write_reference_from_picking(picking))
                    previous_picking = picking

                # Start transport:
                start_transport = invoice.start_transport or ''
                if start_transport:
                    # GMT problem
                    start_dt = datetime.strptime(
                        start_transport, DEFAULT_SERVER_DATETIME_FORMAT) + \
                        timedelta(hours=2) # TODO Very bad GMT update!
                    start_transport = start_dt.strftime(
                        DEFAULT_SERVER_DATE_FORMAT)[:10].replace(
                            ' ', '').replace('-', '').replace('/', '')
                    start_transport += start_hour_default

                # -------------------------------------------------------------
                # DATA LINE:
                # -------------------------------------------------------------
                parameter['input_file_string'] += self.pool.get(
                    'xmlrpc.server').clean_as_ascii(
                        mask % (
                            # -------------------------------------------------
                            # Header:
                            # -------------------------------------------------
                            # Doc (2)
                            invoice.journal_id.account_code,
                            # Serie (2)
                            invoice.journal_id.account_serie,
                            # N.(6N) # val.
                            int(invoice.number.split('/')[-1]),
                            # Date (8)
                            '%s%s%s' % (
                                invoice.date_invoice[:4],
                                invoice.date_invoice[5:7],
                                invoice.date_invoice[8:10],
                                ),
                            # Transport reason (2)
                            invoice.transportation_reason_id.import_id or '',
                            # Customer code (8)
                            invoice.partner_id.sql_customer_code or '',
                            # Destination code (8)
                            invoice.destination_partner_id.sql_destination_code or '',
                            # Agent code (8)
                            agent_code,

                            # -------------------------------------------------
                            # Detail:
                            # -------------------------------------------------
                            # Tipo di riga 1 (D, R, T)
                            'R',
                            # Code (16)
                            product.default_code or '',
                            # Description (60)
                            self._xmlrpc_clean_description(description, 60),
                            # UOM (2)
                            product.uom_id.account_ref or '',
                            # Q. 10N (2 dec.)
                            line.quantity,
                            # Price 10N (3 dec.)
                            line.price_unit,
                            # Tax (5)
                            line.invoice_line_tax_id[0].account_ref \
                                if line.invoice_line_tax_id else '',
                            # Provv. (5)
                            0,
                            # Previous block discount:
                            # Discount (50)
                            line.multi_discount_rates or '',
                            # Discount numeric (10)
                            ('%s' % (line.discount or '')).replace('.', ','),
                            # Account (8)
                            line.account_id.account_ref or '',
                            # Refund (1)
                            refund_line,
                            (product.duty_code or '')[:8], # Duty (8)
                            ('%s' % (product.weight_net or '')).replace(
                                '.', ',')[:8],

                            # -------------------------------------------------
                            # Extra data for Fattura PA
                            # -------------------------------------------------
                            i_ddt,
                            ddt_number,
                            ddt_date.replace('-', ''),
                            ddt_destination, # or invoice if DDT not present

                            # -------------------------------------------------
                            # Extra data for invoice:
                            # -------------------------------------------------
                            goods_description,
                            carriage_condition,
                            transportation_reason,
                            transportation_method,
                            carrier_code,
                            parcels,

                            # -------------------------------------------------
                            # Foot:
                            # -------------------------------------------------
                            # Codice Pagamento 3
                            invoice.payment_term.import_id \
                                if invoice.payment_term else '',
                            start_transport,
                            invoice.xmlrpc_invoice_mode or '',
                            # TODO bank
                            ))

                # -------------------------------------------------------------
                # Fattura PA "long" fields:
                # -------------------------------------------------------------
                product = line.product_id
                # Extra data for Fattura PA:
                # 1. Description long (overflow past the 60-char column):
                if len(description) > 60:
                    get_comment_line(
                        self,
                        parameter,
                        self._xmlrpc_clean_description(description[60:]),
                        )

                # 2. Color:
                get_comment_line(
                    self,
                    parameter,
                    self._xmlrpc_clean_description(product.colour or ''),
                    )

                # 3. FSC Certified:
                if product.fsc_certified_id and company.fsc_certified and \
                        company.fsc_from_date <= invoice.date_invoice:
                    get_comment_line(self, parameter,
                        self._xmlrpc_clean_description(
                            product.fsc_certified_id.text or ''))

                # 4. PEFC Certified:
                if product.pefc_certified_id and company.pefc_certified and \
                        company.pefc_from_date<= invoice.date_invoice:
                    get_comment_line(self, parameter,
                        self._xmlrpc_clean_description(
                            product.pefc_certified_id.text or ''))

                # 5. Partic (partner-specific product description):
                if invoice.partner_id.use_partic:
                    partic = product_pool._xmlrpc_get_partic_description(
                        cr, uid, product.id, invoice.partner_id.id,
                        context=context)
                    get_comment_line(self, parameter,
                        self._xmlrpc_clean_description(partic))

                # -------------------------------------------------------------
                # Note post line:
                # -------------------------------------------------------------
                if line.text_note_post:
                    get_comment_line(self, parameter, line.text_note_post)

            # -----------------------------------------------------------------
            # End document dat
            # -----------------------------------------------------------------
            # BEFORE ALL:
            if previous_picking: # Always write last line comment:
                get_comment_line(
                    self, parameter,
                    picking_pool.write_reference_from_picking(
                        previous_picking))

            # A. End note comment:
            if invoice.text_note_post:
                text = invoice.text_note_post
                for block in text.split('\n'):
                    get_comment_line(self, parameter, block)

            # B. Text note for account position
            text = partner.property_account_position.text_note_invoice or ''
            if text:
                text = picking_pool._parser_template_substitute(invoice, text)
                for block in text.split('\n'):
                    get_comment_line(self, parameter, block)

            # C. Text comment for account position
            text = partner.property_account_position.text_comment_invoice or ''
            if text:
                text = picking_pool._parser_template_substitute(invoice, text)
                for block in text.split('\n'):
                    get_comment_line(self, parameter, block)

            # D. FSC PEFC Certified:
            try:
                if company.fsc_certified or company.pefc_certified:
                    text = company.xfc_document_note
                    for block in text.split('\n'):
                        get_comment_line(self, parameter, block)
            except:
                pass # no FSC Management

            # E. Split payment:
            try:
                if partner.split_payment:
                    text = \
                        'Operazione soggetta alla scissione dei pagamenti. ' +\
                        'Art. 17 ter DPR633/72'
                    get_comment_line(self, parameter, text)
            except:
                pass # no Split Payment Management

            # F. Force vector
            try:
                if invoice.force_vector:
                    text = 'Vettore:\n%s' % invoice.force_vector.strip()
                    for block in text.split('\n'):
                        get_comment_line(self, parameter, block)
            except:
                pass # Error do nothing

            # G. Private partner:
            # try:
            #     if partner.is_private:
            #         text = "COPIA, IL DOCUMENTO FISCALMENTE VALIDO E' " + \
            #             "ESCLUSIVAMENTE QUELLO DISPONIBILE NELL'AREA " + \
            #             "RISERVATA DELL'AGENZIA DELLE ENTRATE"
            #         get_comment_line(self, parameter, text)
            # except:
            #     pass # no Partner private

            # H. Privacy policy:
            try:
                privacy_policy = (
                    invoice.company_id.privacy_policy or '').strip()
                if privacy_policy:
                    text = 'Privacy: %s' % privacy_policy
                    for block in text.split('\n'):
                        get_comment_line(self, parameter, block)
            except:
                pass # Error do nothing

            # I. Custom duty block:
            try:
                if invoice.fiscal_position.duty_print:
                    duty_block = (invoice.duty_block or '').strip()
                    if duty_block:
                        for block in duty_block.split('\n'):
                            get_comment_line(self, parameter, block)
            except:
                pass # Error do nothing

        # XXX Remove used for extract file:
        # open('/home/thebrush/prova.csv', 'w').write(
        #     parameter['input_file_string'])
        # return False

        res = self.pool.get('xmlrpc.operation').execute_operation(
            cr, uid, 'invoice', parameter=parameter, context=context)

        result_string_file = res.get('result_string_file', False)
        if result_string_file:
            if result_string_file.startswith('OK'):
                # TODO test if number passed if for correct invoice number!
                self.write(cr, uid, ids, {
                    'xmlrpc_sync': True,
                    'xmlrpc_scheduled': False,
                    }, context=context)
                return True
            else:
                raise osv.except_osv(
                    _('Error import invoice:'),
                    _('Comment: %s' % result_string_file),
                    )

        # TODO write better error
        raise osv.except_osv(
            _('Sync error:'),
            _('Cannot sync with accounting! (return esit not present'),
            )
_columns = {
'xmlrpc_invoice_mode': fields.selection(
string=u'Modalità fattura contabile',
selection=[
('ac', 'Fattura di Acconto'),
# ('fd', 'Fattura Differita'),
# ('fa', 'Fattura Accompagnatoria'),
],
),
'xmlrpc_sync': fields.boolean('XMLRPC syncronized'),
'xmlrpc_scheduled': fields.boolean(
'XMLRPC Schedulata',
help='Schedulata per importazione automatica'),
'xmlrpc_note': fields.text('XMLRPC note'),
}
| Micronaet/micronaet-xmlrpc | xmlrpc_operation_invoice/operation.py | Python | agpl-3.0 | 26,194 |
from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import dask.array as da
except ImportError:
pass
import numpy as np
import pandas as pd
from xray import (align, concat, conventions, backends, Dataset, DataArray,
Variable, Coordinate, auto_combine, open_dataset,
set_options)
from xray.core import indexing, utils
from xray.core.pycompat import iteritems, OrderedDict
from . import (TestCase, unittest, InaccessibleArray, UnexpectedDataAccess,
requires_dask)
def create_test_data(seed=None):
    """Build a small deterministic Dataset used as a fixture by the tests.

    The dataset carries three random float variables over dims dim1/dim2/dim3,
    index coordinates for each dim plus a time axis, and a non-index
    'numbers' coordinate along dim3.
    """
    rng = np.random.RandomState(seed)
    var_dims = {'var1': ['dim1', 'dim2'],
                'var2': ['dim1', 'dim2'],
                'var3': ['dim3', 'dim1']}
    sizes = {'dim1': 8, 'dim2': 9, 'dim3': 10}
    ds = Dataset()
    ds['time'] = ('time', pd.date_range('2000-01-01', periods=20))
    ds['dim1'] = ('dim1', np.arange(sizes['dim1'], dtype='int64'))
    ds['dim2'] = ('dim2', 0.5 * np.arange(sizes['dim2']))
    ds['dim3'] = ('dim3', list('abcdefghij'))
    # sorted() keeps the insertion (and repr) order stable across runs:
    for name, dims in sorted(var_dims.items()):
        shape = tuple(sizes[d] for d in dims)
        ds[name] = (dims, rng.normal(size=shape), {'foo': 'variable'})
    ds.coords['numbers'] = ('dim3', np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3],
                                             dtype='int64'))
    return ds
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
    """In-memory store whose variables raise when their data is accessed,
    used to verify that operations stay lazy."""
    def get_variables(self):
        wrapped = {}
        for name, var in iteritems(self._variables):
            data = indexing.LazilyIndexedArray(InaccessibleArray(var.values))
            wrapped[name] = Variable(var.dims, data, var.attrs)
        return wrapped
class TestDataset(TestCase):
    def test_repr(self):
        """repr shows dims, coords, data vars and truncates long attrs."""
        data = create_test_data(seed=123)
        data.attrs['foo'] = 'bar'
        # need to insert str dtype at runtime to handle both Python 2 & 3
        expected = dedent("""\
        <xray.Dataset>
        Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)
        Coordinates:
        * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
        * dim1 (dim1) int64 0 1 2 3 4 5 6 7
        * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
        * dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
        numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
        Data variables:
        var1 (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 ...
        var2 (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
        var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
        Attributes:
        foo: bar""") % data['dim3'].dtype
        actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        with set_options(display_width=100):
            max_len = max(map(len, repr(data).split('\n')))
            assert 90 < max_len < 100

        expected = dedent("""\
        <xray.Dataset>
        Dimensions: ()
        Coordinates:
        *empty*
        Data variables:
        *empty*""")
        actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        # verify that ... doesn't appear for scalar coordinates
        data = Dataset({'foo': ('x', np.ones(10))}).mean()
        expected = dedent("""\
        <xray.Dataset>
        Dimensions: ()
        Coordinates:
        *empty*
        Data variables:
        foo float64 1.0""")
        actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        # verify long attributes are truncated
        data = Dataset(attrs={'foo': 'bar' * 1000})
        self.assertTrue(len(repr(data)) < 1000)
    def test_constructor(self):
        """Constructor rejects conflicting sizes/dims and accepts DataArrays."""
        x1 = ('x', 2 * np.arange(100))
        x2 = ('x', np.arange(1000))
        z = (['x', 'y'], np.arange(1000).reshape(100, 10))

        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            Dataset({'a': x1, 'b': x2})
        with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
            Dataset({'a': x1, 'x': z})
        with self.assertRaisesRegexp(TypeError, 'must be an array or'):
            Dataset({'x': (1, 2, 3, 4, 5, 6, 7)})
        with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
            Dataset({'x': 0, 'y': ('x', [1, 2, 3])})

        # verify handling of DataArrays
        expected = Dataset({'x': x1, 'z': z})
        actual = Dataset({'z': expected['z']})
        self.assertDatasetIdentical(expected, actual)
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
    def test_constructor_0d(self):
        """Scalar values of many types become 0d variables."""
        expected = Dataset({'x': ([], 1)})
        for arg in [1, np.array(1), expected['x']]:
            actual = Dataset({'x': arg})
            self.assertDatasetIdentical(expected, actual)

        d = pd.Timestamp('2000-01-01T12')
        args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
                np.datetime64('2000-01-01T00'), d, d.to_datetime()]
        for arg in args:
            print(arg)
            expected = Dataset({'x': ([], arg)})
            actual = Dataset({'x': arg})
            self.assertDatasetIdentical(expected, actual)
    def test_constructor_auto_align(self):
        """DataArray inputs are aligned with an outer join on indexes."""
        a = DataArray([1, 2], [('x', [0, 1])])
        b = DataArray([3, 4], [('x', [1, 2])])

        # verify align uses outer join
        expected = Dataset({'a': ('x', [1, 2, np.nan]),
                            'b': ('x', [np.nan, 3, 4])})
        actual = Dataset({'a': a, 'b': b})
        self.assertDatasetIdentical(expected, actual)

        # regression test for GH346
        self.assertIsInstance(actual.variables['x'], Coordinate)

        # variable with different dimensions
        c = ('y', [3, 4])
        expected2 = expected.merge({'c': c})
        actual = Dataset({'a': a, 'b': b, 'c': c})
        self.assertDatasetIdentical(expected2, actual)

        # variable that is only aligned against the aligned variables
        d = ('x', [3, 2, 1])
        expected3 = expected.merge({'d': d})
        actual = Dataset({'a': a, 'b': b, 'd': d})
        self.assertDatasetIdentical(expected3, actual)

        e = ('x', [0, 0])
        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            Dataset({'a': a, 'b': b, 'e': e})
    def test_constructor_compat(self):
        """compat= controls how conflicting coord values are resolved."""
        data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
                            ('y', ('z', [1, 1, 1]))])
        with self.assertRaisesRegexp(ValueError, 'conflicting value'):
            Dataset(data, compat='equals')
        expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
        actual = Dataset(data)
        self.assertDatasetIdentical(expected, actual)
        actual = Dataset(data, compat='broadcast_equals')
        self.assertDatasetIdentical(expected, actual)

        # argument order must not matter:
        data = OrderedDict([('y', ('z', [1, 1, 1])),
                            ('x', DataArray(0, coords={'y': 1}))])
        actual = Dataset(data)
        self.assertDatasetIdentical(expected, actual)

        original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
                           {'c': (('x', 'y'), np.zeros((2, 3)))})
        expected = Dataset({'a': ('x', np.ones(2)),
                            'b': ('y', np.ones(3))},
                           {'c': (('x', 'y'), np.zeros((2, 3)))})
        # use an OrderedDict to ensure test results are reproducible; otherwise
        # the order of appearance of x and y matters for the order of
        # dimensions in 'c'
        actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
                                      ('b', original['a'][0].drop('x'))]))
        self.assertDatasetIdentical(expected, actual)

        data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
        with self.assertRaisesRegexp(ValueError, 'conflicting value'):
            Dataset(data)

        data = {'x': DataArray(0, coords={'y': 1}), 'y': [1, 1]}
        actual = Dataset(data)
        expected = Dataset({'x': 0}, {'y': [1, 1]})
        self.assertDatasetIdentical(expected, actual)
    def test_constructor_with_coords(self):
        """A name may not appear in both data vars and coords."""
        with self.assertRaisesRegexp(ValueError, 'redundant variables and co'):
            Dataset({'a': ('x', [1])}, {'a': ('x', [1])})

        ds = Dataset({}, {'a': ('x', [1])})
        self.assertFalse(ds.data_vars)
        self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
    def test_properties(self):
        """dims / data_vars / indexes / coords expose the expected members."""
        ds = create_test_data()
        self.assertEqual(ds.dims,
                         {'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})

        self.assertItemsEqual(ds, list(ds.variables))
        self.assertItemsEqual(ds.keys(), list(ds.variables))
        self.assertNotIn('aasldfjalskdfj', ds.variables)
        self.assertIn('dim1', repr(ds.variables))
        self.assertEqual(len(ds), 8)

        self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
        self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
        self.assertIn('var1', ds.data_vars)
        self.assertNotIn('dim1', ds.data_vars)
        self.assertNotIn('numbers', ds.data_vars)
        self.assertEqual(len(ds.data_vars), 3)

        self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
        self.assertEqual(len(ds.indexes), 4)
        self.assertIn('dim1', repr(ds.indexes))

        self.assertItemsEqual(ds.coords,
                              ['time', 'dim1', 'dim2', 'dim3', 'numbers'])
        self.assertIn('dim1', ds.coords)
        self.assertIn('numbers', ds.coords)
        self.assertNotIn('var1', ds.coords)
        self.assertEqual(len(ds.coords), 5)

        self.assertEqual(Dataset({'x': np.int64(1),
                                  'y': np.float32([1, 2])}).nbytes, 16)
    def test_attr_access(self):
        """Variables and attrs are reachable as attributes; vars win on clash."""
        ds = Dataset({'tmin': ('x', [42], {'units': 'Celcius'})},
                     attrs={'title': 'My test data'})
        self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
        self.assertDataArrayIdentical(ds.tmin.x, ds.x)

        self.assertEqual(ds.title, ds.attrs['title'])
        self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])

        self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
        self.assertIn('units', set(dir(ds.tmin)))

        # should defer to variable of same name
        ds.attrs['tmin'] = -999
        self.assertEqual(ds.attrs['tmin'], -999)
        self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
    def test_variable(self):
        """Assigning tuples creates variables; shape mismatches raise."""
        a = Dataset()
        d = np.random.random((10, 3))
        a['foo'] = (('time', 'x',), d)
        self.assertTrue('foo' in a.variables)
        self.assertTrue('foo' in a)
        a['bar'] = (('time', 'x',), d)
        # order of creation is preserved
        self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
        self.assertTrue(all([a['foo'][i].values == d[i]
                             for i in np.ndindex(*d.shape)]))
        # try to add variable with dim (10,3) with data that's (3,10)
        with self.assertRaises(ValueError):
            a['qux'] = (('time', 'x'), d.T)
    def test_modify_inplace(self):
        """In-place assignment updates coords and enforces dim consistency."""
        a = Dataset()
        vec = np.random.random((10,))
        attributes = {'foo': 'bar'}
        a['x'] = ('x', vec, attributes)
        self.assertTrue('x' in a.coords)
        self.assertIsInstance(a.coords['x'].to_index(),
                              pd.Index)
        self.assertVariableIdentical(a.coords['x'], a.variables['x'])
        b = Dataset()
        b['x'] = ('x', vec, attributes)
        self.assertVariableIdentical(a['x'], b['x'])
        self.assertEqual(a.dims, b.dims)
        # this should work
        a['x'] = ('x', vec[:5])
        a['z'] = ('x', np.arange(5))
        with self.assertRaises(ValueError):
            # now it shouldn't, since there is a conflicting length
            a['x'] = ('x', vec[:4])
        arr = np.random.random((10, 1,))
        scal = np.array(0)
        with self.assertRaises(ValueError):
            a['y'] = ('y', arr)
        with self.assertRaises(ValueError):
            a['y'] = ('y', scal)
        self.assertTrue('y' not in a.dims)
    def test_coords_properties(self):
        """coords mapping supports len/iter/contains/getitem/repr/dims."""
        # use an OrderedDict for coordinates to ensure order across python
        # versions
        # use int64 for repr consistency on windows
        data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
                                    ('y', ('y', np.array([0, 1, 2], 'int64'))),
                                    ('foo', (['x', 'y'],
                                             np.random.randn(2, 3)))]),
                       OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
                                    ('b', np.int64(-10))]))

        self.assertEqual(4, len(data.coords))

        self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))

        self.assertVariableIdentical(data.coords['x'], data['x'].variable)
        self.assertVariableIdentical(data.coords['y'], data['y'].variable)

        self.assertIn('x', data.coords)
        self.assertIn('a', data.coords)
        self.assertNotIn(0, data.coords)
        self.assertNotIn('foo', data.coords)

        with self.assertRaises(KeyError):
            data.coords['foo']
        with self.assertRaises(KeyError):
            data.coords[0]

        expected = dedent("""\
        Coordinates:
        * x (x) int64 -1 -2
        * y (y) int64 0 1 2
        a (x) int64 4 5
        b int64 -10""")
        actual = repr(data.coords)
        self.assertEqual(expected, actual)

        self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
    def test_coords_modify(self):
        """coords supports item assignment, deletion and update()."""
        data = Dataset({'x': ('x', [-1, -2]),
                        'y': ('y', [0, 1, 2]),
                        'foo': (['x', 'y'], np.random.randn(2, 3))},
                       {'a': ('x', [4, 5]), 'b': -10})

        actual = data.copy(deep=True)
        actual.coords['x'] = ('x', ['a', 'b'])
        self.assertArrayEqual(actual['x'], ['a', 'b'])

        actual = data.copy(deep=True)
        actual.coords['z'] = ('z', ['a', 'b'])
        self.assertArrayEqual(actual['z'], ['a', 'b'])

        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            data.coords['x'] = ('x', [-1])

        actual = data.copy()
        del actual.coords['b']
        expected = data.reset_coords('b', drop=True)
        self.assertDatasetIdentical(expected, actual)

        with self.assertRaises(KeyError):
            del data.coords['not_found']

        with self.assertRaises(KeyError):
            del data.coords['foo']

        actual = data.copy(deep=True)
        actual.coords.update({'c': 11})
        expected = data.merge({'c': 11}).set_coords('c')
        self.assertDatasetIdentical(expected, actual)
    def test_coords_set(self):
        """set_coords / reset_coords move names between data vars and coords."""
        one_coord = Dataset({'x': ('x', [0]),
                             'yy': ('x', [1]),
                             'zzz': ('x', [2])})
        two_coords = Dataset({'zzz': ('x', [2])},
                             {'x': ('x', [0]),
                              'yy': ('x', [1])})
        all_coords = Dataset(coords={'x': ('x', [0]),
                                     'yy': ('x', [1]),
                                     'zzz': ('x', [2])})

        actual = one_coord.set_coords('x')
        self.assertDatasetIdentical(one_coord, actual)
        actual = one_coord.set_coords(['x'])
        self.assertDatasetIdentical(one_coord, actual)

        actual = one_coord.set_coords('yy')
        self.assertDatasetIdentical(two_coords, actual)

        actual = one_coord.set_coords(['yy', 'zzz'])
        self.assertDatasetIdentical(all_coords, actual)

        actual = one_coord.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)
        actual = two_coords.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)
        actual = all_coords.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)

        actual = all_coords.reset_coords(['yy', 'zzz'])
        self.assertDatasetIdentical(one_coord, actual)
        actual = all_coords.reset_coords('zzz')
        self.assertDatasetIdentical(two_coords, actual)

        with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
            one_coord.reset_coords('x')

        actual = all_coords.reset_coords('zzz', drop=True)
        expected = all_coords.drop('zzz')
        self.assertDatasetIdentical(expected, actual)
        expected = two_coords.drop('zzz')
        self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
    def test_coords_merge(self):
        """coords.merge is symmetric and rejects unalignable indexes."""
        orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
        other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
        expected = Dataset(coords={'a': ('x', [1, 2]),
                                   'b': ('x', ['a', 'b'])})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected, actual)

        other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
        with self.assertRaisesRegexp(ValueError, 'not aligned'):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
        with self.assertRaisesRegexp(ValueError, 'not aligned'):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
        with self.assertRaisesRegexp(ValueError, 'not aligned'):
            orig_coords.merge(other_coords)

        # conflicting non-index coords drop to a bare index:
        other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
        expected = Dataset(coords={'x': range(2)})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected, actual)

        other_coords = Dataset(coords={'x': np.nan}).coords
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
    def test_coords_merge_mismatched_shape(self):
        """Merging coords of different shapes should broadcast when values are
        compatible, including the all-NaN case."""
        orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
        other_coords = Dataset(coords={'a': 1}).coords
        # a scalar that broadcast-equals the array adds nothing new
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        # merging along a new dimension broadcasts to the combined shape
        other_coords = Dataset(coords={'a': ('y', [1])}).coords
        expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        # reversed merge order yields the transposed dimension order
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected.T, actual)
        # NaN values compare as broadcast-equal for merge purposes
        orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
        other_coords = Dataset(coords={'a': np.nan}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
    def test_equals_and_identical(self):
        """equals ignores attrs while identical requires them; both detect
        missing variables and coordinate/data-variable distinctions."""
        data = create_test_data(seed=42)
        self.assertTrue(data.equals(data))
        self.assertTrue(data.identical(data))
        # differing attrs: still equal, no longer identical
        data2 = create_test_data(seed=42)
        data2.attrs['foobar'] = 'baz'
        self.assertTrue(data.equals(data2))
        self.assertFalse(data.identical(data2))
        # removing a variable breaks equality
        del data2['time']
        self.assertFalse(data.equals(data2))
        # a None-named variable should still compare against itself
        data = create_test_data(seed=42).rename({'var1': None})
        self.assertTrue(data.equals(data))
        self.assertTrue(data.identical(data))
        # promoting coords to data variables changes both comparisons
        data2 = data.reset_coords()
        self.assertFalse(data2.equals(data))
        self.assertFalse(data2.identical(data))
    def test_equals_failures(self):
        """Comparison methods return False (not raise) for non-Dataset input."""
        data = create_test_data()
        self.assertFalse(data.equals('foo'))
        self.assertFalse(data.identical(123))
        self.assertFalse(data.broadcast_equals({1: 2}))
    def test_broadcast_equals(self):
        """broadcast_equals treats a scalar and a length-1 array coord as
        equivalent, while equals/identical do not."""
        data1 = Dataset(coords={'x': 0})
        data2 = Dataset(coords={'x': [0]})
        self.assertTrue(data1.broadcast_equals(data2))
        self.assertFalse(data1.equals(data2))
        self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
self.assertTrue(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
    @requires_dask
    def test_chunk(self):
        """chunk() converts in-memory variables to dask arrays with the
        requested (or whole-dimension default) chunk sizes."""
        data = create_test_data()
        for v in data.variables.values():
            self.assertIsInstance(v.data, np.ndarray)
        # unchunked datasets report empty chunks
        self.assertEqual(data.chunks, {})

        # default chunking: one chunk spanning each full dimension
        reblocked = data.chunk()
        for v in reblocked.variables.values():
            self.assertIsInstance(v.data, da.Array)
        expected_chunks = dict((d, (s,)) for d, s in data.dims.items())
        self.assertEqual(reblocked.chunks, expected_chunks)

        # explicit per-dimension chunk sizes, with uneven trailing chunks
        reblocked = data.chunk({'time': 5, 'dim1': 5, 'dim2': 5, 'dim3': 5})
        expected_chunks = {'time': (5,) * 4, 'dim1': (5, 3),
                           'dim2': (5, 4), 'dim3': (5, 5)}
        self.assertEqual(reblocked.chunks, expected_chunks)

        reblocked = data.chunk(expected_chunks)
        self.assertEqual(reblocked.chunks, expected_chunks)

        # reblock on already blocked data
        reblocked = reblocked.chunk(expected_chunks)
        self.assertEqual(reblocked.chunks, expected_chunks)
        self.assertDatasetIdentical(reblocked, data)

        # chunking a nonexistent dimension is an error
        with self.assertRaisesRegexp(ValueError, 'some chunks'):
            data.chunk({'foo': 10})
    @requires_dask
    def test_dask_is_lazy(self):
        """Operations on a dask-backed dataset stay lazy: only load()/.values
        should touch the (intentionally inaccessible) backing store."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        ds = open_dataset(store).chunk()

        with self.assertRaises(UnexpectedDataAccess):
            ds.load()
        with self.assertRaises(UnexpectedDataAccess):
            ds['var1'].values

        # these should not raise UnexpectedDataAccess:
        ds.var1.data
        ds.isel(time=10)
        ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
        ds.transpose()
        ds.mean()
        ds.fillna(0)
        ds.rename({'dim1': 'foobar'})
        ds.set_coords('var1')
        ds.drop('var1')
def test_isel(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.isel(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dims, ret.dims)
for d in data.dims:
if d in slicers:
self.assertEqual(ret.dims[d],
np.arange(data.dims[d])[slicers[d]].size)
else:
self.assertEqual(data.dims[d], ret.dims[d])
# Verify that the data is what we expect
for v in data:
self.assertEqual(data[v].dims, ret[v].dims)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in iteritems(slicers):
if d in data[v].dims:
inds = np.nonzero(np.array(data[v].dims) == d)[0]
for ind in inds:
slice_list[ind] = s
expected = data[v].values[slice_list]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.isel(not_a_dim=slice(0, 2))
ret = data.isel(dim1=0)
self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=0, dim1=0, dim2=slice(5))
self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes,
list(ret.indexes) + ['dim1', 'time'])
    def test_sel(self):
        """sel (label-based) selection matches the equivalent isel
        (position-based) selection for slices, datetimes, boolean arrays
        and timedeltas."""
        data = create_test_data()
        int_slicers = {'dim1': slice(None, None, 2),
                       'dim2': slice(2),
                       'dim3': slice(3)}
        loc_slicers = {'dim1': slice(None, None, 2),
                       'dim2': slice(0, 0.5),
                       'dim3': slice('a', 'c')}
        self.assertDatasetEqual(data.isel(**int_slicers),
                                data.sel(**loc_slicers))
        # datetime labels: scalar, slice, out-of-range slice, list, boolean
        data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
        self.assertDatasetEqual(data.isel(time=0),
                                data.sel(time='2000-01-01'))
        self.assertDatasetEqual(data.isel(time=slice(10)),
                                data.sel(time=slice('2000-01-01',
                                                    '2000-01-10')))
        self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
        times = pd.date_range('2000-01-01', periods=3)
        self.assertDatasetEqual(data.isel(time=slice(3)),
                                data.sel(time=times))
        self.assertDatasetEqual(data.isel(time=slice(3)),
                                data.sel(time=(data['time.dayofyear'] <= 3)))

        # timedelta index: selection by string offsets and slices
        td = pd.to_timedelta(np.arange(3), unit='days')
        data = Dataset({'x': ('td', np.arange(3)), 'td': td})
        self.assertDatasetEqual(data, data.sel(td=td))
        self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
        self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
        self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
        self.assertDatasetEqual(data.isel(td=slice(1, 3)),
                                data.sel(td=slice('1 days', '2 days')))
    def test_isel_points(self):
        """isel_points performs pointwise indexing along a new dimension:
        it names the new dim, validates indexer shapes/types, and keeps
        unindexed variables."""
        data = create_test_data()

        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        actual = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
                                  dim='test_coord')
        assert 'test_coord' in actual.coords
        assert actual.coords['test_coord'].shape == (len(pdim1), )

        # default name for the new dimension is 'points'
        actual = data.isel_points(dim1=pdim1, dim2=pdim2)
        assert 'points' in actual.coords
        np.testing.assert_array_equal(pdim1, actual['dim1'])

        # test that the order of the indexers doesn't matter
        self.assertDatasetIdentical(data.isel_points(dim1=pdim1, dim2=pdim2),
                                    data.isel_points(dim2=pdim2, dim1=pdim1))

        # make sure we're raising errors in the right places
        with self.assertRaisesRegexp(ValueError,
                                     'All indexers must be the same length'):
            data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
        with self.assertRaisesRegexp(ValueError,
                                     'dimension bad_key does not exist'):
            data.isel_points(bad_key=[1, 2])
        with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
            data.isel_points(dim1=[1.5, 2.2])
        with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
            data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
        with self.assertRaisesRegexp(ValueError,
                                     'Indexers must be 1 dimensional'):
            data.isel_points(dim1=1, dim2=2)
        with self.assertRaisesRegexp(ValueError,
                                     'Existing dimension names are not valid'):
            data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')

        # test to be sure we keep around variables that were not indexed
        ds = Dataset({'x': [1, 2, 3, 4], 'y': 0})
        actual = ds.isel_points(x=[0, 1, 2])
        self.assertDataArrayIdentical(ds['y'], actual['y'])

        # tests using index or DataArray as a dim
        stations = Dataset()
        stations['station'] = ('station', ['A', 'B', 'C'])
        stations['dim1s'] = ('station', [1, 2, 3])
        stations['dim2s'] = ('station', [4, 5, 1])

        actual = data.isel_points(dim1=stations['dim1s'],
                                  dim2=stations['dim2s'],
                                  dim=stations['station'])
        assert 'station' in actual.coords
        assert 'station' in actual.dims
        self.assertDataArrayIdentical(actual['station'].drop(['dim1', 'dim2']),
                                      stations['station'])

        # make sure we get the default points coordinate when a list is passed
        actual = data.isel_points(dim1=stations['dim1s'],
                                  dim2=stations['dim2s'],
                                  dim=['A', 'B', 'C'])
        assert 'points' in actual.coords

        # can pass a numpy array
        data.isel_points(dim1=stations['dim1s'],
                         dim2=stations['dim2s'],
                         dim=np.array([4, 5, 6]))
    def test_sel_points(self):
        """sel_points (label-based pointwise indexing) matches isel_points
        on equivalent labels, and supports inexact lookup via method='pad'."""
        data = create_test_data()

        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        expected = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
                                    dim='test_coord')
        actual = data.sel_points(dim1=data.dim1[pdim1], dim2=data.dim2[pdim2],
                                 dim3=data.dim3[pdim3], dim='test_coord')
        self.assertDatasetIdentical(expected, actual)

        # inexact labels resolved by forward-fill ('pad') lookup
        data = Dataset({'foo': (('x', 'y'), np.arange(9).reshape(3, 3))})
        expected = Dataset({'foo': ('points', [0, 4, 8])},
                           {'x': ('points', range(3)),
                            'y': ('points', range(3))})
        actual = data.sel_points(x=[0.1, 1.1, 2.5], y=[0, 1.2, 2.0],
                                 method='pad')
        self.assertDatasetIdentical(expected, actual)
    def test_sel_method(self):
        """sel's ``method`` argument (nearest/backfill) works on scalars and
        lists but not slices, and sel rejects non-indexer positional input."""
        data = create_test_data()

        # inexact-match methods need pandas >= 0.16
        if pd.__version__ >= '0.16':
            expected = data.sel(dim1=1)
            actual = data.sel(dim1=0.95, method='nearest')
            self.assertDatasetIdentical(expected, actual)

            expected = data.sel(dim2=[1.5])
            actual = data.sel(dim2=[1.45], method='backfill')
            self.assertDatasetIdentical(expected, actual)

        with self.assertRaisesRegexp(NotImplementedError, 'slice objects'):
            data.sel(dim2=slice(1, 3), method='ffill')

        with self.assertRaisesRegexp(TypeError, '``method``'):
            # this should not pass silently
            data.sel(data)
    def test_loc(self):
        """Dataset.loc accepts only dict lookups, mirrors sel, and is
        read-only."""
        data = create_test_data()
        expected = data.sel(dim3='a')
        actual = data.loc[dict(dim3='a')]
        self.assertDatasetIdentical(expected, actual)
        with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
            data.loc['a']
        # assignment through .loc is not supported on Dataset
        with self.assertRaises(TypeError):
            data.loc[dict(dim3='a')] = 0
    def test_reindex_like(self):
        """reindex_like conforms a dataset to another's indexes, filling
        missing labels with NaN (promoting dtypes where needed)."""
        data = create_test_data()
        data['letters'] = ('dim3', 10 * ['a'])

        # reindexing onto a subset is equivalent to slicing
        expected = data.isel(dim1=slice(10), time=slice(13))
        actual = data.reindex_like(expected)
        self.assertDatasetIdentical(actual, expected)

        # shifted dim3 labels: values shift, new labels filled with NaN;
        # object/int variables are promoted so they can hold NaN
        expected = data.copy(deep=True)
        expected['dim3'] = ('dim3', list('cdefghijkl'))
        expected['var3'][:-2] = expected['var3'][2:]
        expected['var3'][-2:] = np.nan
        expected['letters'] = expected['letters'].astype(object)
        expected['letters'][-2:] = np.nan
        expected['numbers'] = expected['numbers'].astype(float)
        expected['numbers'][:-2] = expected['numbers'][2:].values
        expected['numbers'][-2:] = np.nan
        actual = data.reindex_like(expected)
        self.assertDatasetIdentical(actual, expected)
    def test_reindex(self):
        """reindex accepts DataArray/ndarray/Index/dict targets, validates
        arguments, supports reordering, and fills unmatched labels with NaN."""
        data = create_test_data()
        self.assertDatasetIdentical(data, data.reindex())

        expected = data.isel(dim1=slice(10))
        actual = data.reindex(dim1=data['dim1'][:10])
        self.assertDatasetIdentical(actual, expected)

        actual = data.reindex(dim1=data['dim1'][:10].values)
        self.assertDatasetIdentical(actual, expected)

        actual = data.reindex(dim1=data['dim1'][:10].to_index())
        self.assertDatasetIdentical(actual, expected)

        # test dict-like argument
        actual = data.reindex({'dim1': data['dim1'][:10]})
        self.assertDatasetIdentical(actual, expected)
        with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
            data.reindex({'x': 0}, x=0)
        with self.assertRaisesRegexp(ValueError, 'dictionary'):
            data.reindex('foo')

        # out of order
        expected = data.sel(dim1=data['dim1'][:10:-1])
        actual = data.reindex(dim1=data['dim1'][:10:-1])
        self.assertDatasetIdentical(actual, expected)

        # regression test for #279
        expected = Dataset({'x': ('time', np.random.randn(5))})
        time2 = DataArray(np.arange(5), dims="time2")
        actual = expected.reindex(time=time2)
        self.assertDatasetIdentical(actual, expected)

        # another regression test
        ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
        expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
                            'x': [0, 1, 3]})
        expected['foo'][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        self.assertDatasetIdentical(expected, actual)
    def test_reindex_method(self):
        """reindex fill methods: 'backfill' takes the next value, 'pad' the
        previous; reindex_like honors the same methods."""
        ds = Dataset({'x': ('y', [10, 20])})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method='backfill')
        expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
        self.assertDatasetIdentical(expected, actual)

        actual = ds.reindex(y=y, method='pad')
        expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
        self.assertDatasetIdentical(expected, actual)

        alt = Dataset({'y': y})
        actual = ds.reindex_like(alt, method='pad')
        self.assertDatasetIdentical(expected, actual)
    def test_align(self):
        """align with join='inner'/'outer'/'left'/'right' intersects or
        unions indexes, filling the non-overlapping part with NaN; invalid
        join values and unknown kwargs raise."""
        left = create_test_data()
        # build a 'right' dataset whose dim3 labels are shifted by two
        right = left.copy(deep=True)
        right['dim3'] = ('dim3', list('cdefghijkl'))
        right['var3'][:-2] = right['var3'][2:]
        right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
        right['numbers'][:-2] = right['numbers'][2:]
        right['numbers'][-2:] = -10

        intersection = list('cdefghij')
        union = list('abcdefghijkl')

        left2, right2 = align(left, right, join='inner')
        self.assertArrayEqual(left2['dim3'], intersection)
        self.assertDatasetIdentical(left2, right2)

        left2, right2 = align(left, right, join='outer')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertArrayEqual(left2['dim3'], union)
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        # labels present on only one side are NaN on the other
        self.assertTrue(np.isnan(left2['var3'][-2:]).all())
        self.assertTrue(np.isnan(right2['var3'][:2]).all())

        left2, right2 = align(left, right, join='left')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertVariableEqual(left2['dim3'], left['dim3'])
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        self.assertTrue(np.isnan(right2['var3'][:2]).all())

        left2, right2 = align(left, right, join='right')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertVariableEqual(left2['dim3'], right['dim3'])
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        self.assertTrue(np.isnan(left2['var3'][-2:]).all())

        with self.assertRaisesRegexp(ValueError, 'invalid value for join'):
            align(left, right, join='foobar')
        with self.assertRaises(TypeError):
            align(left, right, foo='bar')
    def test_variable_indexing(self):
        """DataArrays support indexing by coordinate values, boolean
        conditions on coords, ranges, and label-based .loc lookup."""
        data = create_test_data()
        v = data['var1']
        d1 = data['dim1']
        d2 = data['dim2']
        self.assertVariableEqual(v, v[d1.values])
        self.assertVariableEqual(v, v[d1])
        self.assertVariableEqual(v[:3], v[d1 < 3])
        self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
        self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
        self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
        self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
    def test_drop_variables(self):
        """drop removes variables by name (string or list); dropping an index
        coordinate also removes it, and unknown names raise."""
        data = create_test_data()

        self.assertDatasetIdentical(data, data.drop([]))

        expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
        actual = data.drop('time')
        self.assertDatasetIdentical(expected, actual)
        actual = data.drop(['time'])
        self.assertDatasetIdentical(expected, actual)

        # dropping the 'dim1' index coord removes it from the dataset
        expected = Dataset(dict((k, data[k]) for
                                k in ['dim2', 'dim3', 'time', 'numbers']))
        actual = data.drop('dim1')
        self.assertDatasetIdentical(expected, actual)

        with self.assertRaisesRegexp(ValueError, 'cannot be found'):
            data.drop('not_found_here')
    def test_drop_index_labels(self):
        """drop(labels, dim) removes index labels along a dimension; labels
        absent from the index raise ValueError."""
        data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
                        'x': ['a', 'b']})

        # integer positions on a default (range) index
        actual = data.drop(1, 'y')
        expected = data.isel(y=[0, 2])
        self.assertDatasetIdentical(expected, actual)

        # string labels on an explicit index, including dropping all labels
        actual = data.drop(['a'], 'x')
        expected = data.isel(x=[1])
        self.assertDatasetIdentical(expected, actual)

        actual = data.drop(['a', 'b'], 'x')
        expected = data.isel(x=slice(0, 0))
        self.assertDatasetIdentical(expected, actual)

        with self.assertRaises(ValueError):
            # not contained in axis
            data.drop(['c'], dim='x')
    def test_copy(self):
        """copy(deep=False)/copy.copy share variable objects (but not the
        container), while copy(deep=True)/deepcopy duplicate them."""
        data = create_test_data()

        for copied in [data.copy(deep=False), copy(data)]:
            self.assertDatasetIdentical(data, copied)
            for k in data:
                v0 = data.variables[k]
                v1 = copied.variables[k]
                # shallow copy: same Variable objects
                self.assertIs(v0, v1)
            # but adding to the copy must not affect the original
            copied['foo'] = ('z', np.arange(5))
            self.assertNotIn('foo', data)

        for copied in [data.copy(deep=True), deepcopy(data)]:
            self.assertDatasetIdentical(data, copied)
            for k in data:
                v0 = data.variables[k]
                v1 = copied.variables[k]
                # deep copy: distinct Variable objects
                self.assertIsNot(v0, v1)
    def test_rename(self):
        """rename updates variable and dimension names (preserving values,
        attrs, encoding and variable types), rejects unknown names, and is
        lazy with respect to the underlying data."""
        data = create_test_data()
        newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
        renamed = data.rename(newnames)

        # build the expected mapping by applying the renames by hand
        variables = OrderedDict(data.variables)
        for k, v in iteritems(newnames):
            variables[v] = variables.pop(k)

        for k, v in iteritems(variables):
            dims = list(v.dims)
            for name, newname in iteritems(newnames):
                if name in dims:
                    dims[dims.index(name)] = newname

            self.assertVariableEqual(Variable(dims, v.values, v.attrs),
                                     renamed[k])
            self.assertEqual(v.encoding, renamed[k].encoding)
            self.assertEqual(type(v), type(renamed.variables[k]))

        self.assertTrue('var1' not in renamed)
        self.assertTrue('dim2' not in renamed)

        with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
            data.rename({'not_a_var': 'nada'})

        # verify that we can rename a variable without accessing the data
        var1 = data['var1']
        data['var1'] = (var1.dims, InaccessibleArray(var1.values))
        renamed = data.rename(newnames)
        with self.assertRaises(UnexpectedDataAccess):
            renamed['renamed_var1'].values
def test_rename_inplace(self):
times = pd.date_range('2000-01-01', periods=3)
data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
copied = data.copy()
renamed = data.rename({'x': 'y'})
data.rename({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(data, renamed)
self.assertFalse(data.equals(copied))
self.assertEquals(data.dims, {'y': 3, 't': 3})
# check virtual variables
self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
    def test_swap_dims(self):
        """swap_dims exchanges a dimension coordinate with another variable,
        round-trips, supports inplace, and validates the replacement dim."""
        original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
        expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
        actual = original.swap_dims({'x': 'y'})
        self.assertDatasetIdentical(expected, actual)
        # the new dimension variable becomes an index Coordinate
        self.assertIsInstance(actual.variables['y'], Coordinate)
        self.assertIsInstance(actual.variables['x'], Variable)

        roundtripped = actual.swap_dims({'y': 'x'})
        self.assertDatasetIdentical(original.set_coords('y'), roundtripped)

        actual = original.copy()
        actual.swap_dims({'x': 'y'}, inplace=True)
        self.assertDatasetIdentical(expected, actual)

        with self.assertRaisesRegexp(ValueError, 'cannot swap'):
            original.swap_dims({'y': 'x'})
        with self.assertRaisesRegexp(ValueError, 'replacement dimension'):
            original.swap_dims({'x': 'z'})
    def test_update(self):
        """update adds/overwrites variables; inplace=True returns the same
        object, inplace=False a new one; attrs from the other dataset are
        not merged in."""
        data = create_test_data(seed=0)
        expected = data.copy()
        var2 = Variable('dim1', np.arange(8))
        actual = data.update({'var2': var2})
        expected['var2'] = var2
        self.assertDatasetIdentical(expected, actual)

        actual = data.copy()
        actual_result = actual.update(data, inplace=True)
        # inplace update returns the mutated object itself
        self.assertIs(actual_result, actual)
        self.assertDatasetIdentical(expected, actual)

        actual = data.update(data, inplace=False)
        expected = data
        self.assertIsNot(actual, expected)
        self.assertDatasetIdentical(expected, actual)

        # attrs on the other dataset are ignored by update
        other = Dataset(attrs={'new': 'attr'})
        actual = data.copy()
        actual.update(other)
        self.assertDatasetIdentical(expected, actual)
    def test_update_auto_align(self):
        """update auto-aligns Dataset arguments to the target's indexes
        (filling with NaN), but raw dicts are not aligned and may conflict."""
        ds = Dataset({'x': ('t', [3, 4])})
        expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})

        actual = ds.copy()
        other = {'y': ('t', [5]), 't': [1]}
        # plain dicts are taken as-is, so the mismatched size is an error
        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            actual.update(other)
        # wrapping in a Dataset enables automatic alignment
        actual.update(Dataset(other))
        self.assertDatasetIdentical(expected, actual)

        actual = ds.copy()
        other = Dataset({'y': ('t', [5]), 't': [100]})
        actual.update(other)
        # no overlapping 't' labels, so 'y' is all-NaN after alignment
        expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
        self.assertDatasetIdentical(expected, actual)
    def test_merge(self):
        """merge combines datasets symmetrically, is idempotent, and raises
        on conflicting variables or coord/data-variable mismatches."""
        data = create_test_data()
        ds1 = data[['var1']]
        ds2 = data[['var3']]
        expected = data[['var1', 'var3']]
        actual = ds1.merge(ds2)
        self.assertDatasetIdentical(expected, actual)

        actual = ds2.merge(ds1)
        self.assertDatasetIdentical(expected, actual)

        actual = data.merge(data)
        self.assertDatasetIdentical(data, actual)
        actual = data.reset_coords(drop=True).merge(data)
        self.assertDatasetIdentical(data, actual)
        actual = data.merge(data.reset_coords(drop=True))
        self.assertDatasetIdentical(data, actual)

        # conflicting variable values cannot be merged
        with self.assertRaises(ValueError):
            ds1.merge(ds2.rename({'var3': 'var1'}))
        # a variable cannot be a coord in one input and data in the other
        with self.assertRaisesRegexp(ValueError, 'cannot merge'):
            data.reset_coords().merge(data)
        with self.assertRaisesRegexp(ValueError, 'cannot merge'):
            data.merge(data.reset_coords())
    def test_merge_broadcast_equals(self):
        """merge accepts scalar vs. array variables that broadcast-equal,
        keeping the higher-dimensional version (including NaN values)."""
        ds1 = Dataset({'x': 0})
        ds2 = Dataset({'x': ('y', [0, 0])})
        actual = ds1.merge(ds2)
        self.assertDatasetIdentical(ds2, actual)

        actual = ds2.merge(ds1)
        self.assertDatasetIdentical(ds2, actual)

        actual = ds1.copy()
        actual.update(ds2)
        self.assertDatasetIdentical(ds2, actual)

        # NaN scalars broadcast-equal NaN arrays too
        ds1 = Dataset({'x': np.nan})
        ds2 = Dataset({'x': ('y', [np.nan, np.nan])})
        actual = ds1.merge(ds2)
        self.assertDatasetIdentical(ds2, actual)
    def test_merge_compat(self):
        """merge's ``compat`` argument controls how strictly conflicting
        variables are compared; invalid compat values raise."""
        ds1 = Dataset({'x': 0})
        ds2 = Dataset({'x': 1})
        # differing values conflict under every comparison mode
        for compat in ['broadcast_equals', 'equals', 'identical']:
            with self.assertRaisesRegexp(ValueError, 'conflicting value'):
                ds1.merge(ds2, compat=compat)

        # scalar vs. broadcast-equal array: only stricter modes conflict
        ds2 = Dataset({'x': [0, 0]})
        for compat in ['equals', 'identical']:
            with self.assertRaisesRegexp(ValueError, 'conflicting value'):
                ds1.merge(ds2, compat=compat)

        # equal values but differing attrs conflict only under 'identical'
        ds2 = Dataset({'x': ((), 0, {'foo': 'bar'})})
        with self.assertRaisesRegexp(ValueError, 'conflicting value'):
            ds1.merge(ds2, compat='identical')

        with self.assertRaisesRegexp(ValueError, 'compat=\S+ invalid'):
            ds1.merge(ds2, compat='foobar')
    def test_merge_auto_align(self):
        """merge auto-aligns indexes; join='outer' is the default, with
        'left'/'right'/'inner' restricting the result's labels."""
        ds1 = Dataset({'a': ('x', [1, 2])})
        ds2 = Dataset({'b': ('x', [3, 4]), 'x': [1, 2]})
        # default (outer) join: union of labels, NaN where missing
        expected = Dataset({'a': ('x', [1, 2, np.nan]),
                            'b': ('x', [np.nan, 3, 4])})
        self.assertDatasetIdentical(expected, ds1.merge(ds2))
        self.assertDatasetIdentical(expected, ds2.merge(ds1))

        expected = expected.isel(x=slice(2))
        self.assertDatasetIdentical(expected, ds1.merge(ds2, join='left'))
        self.assertDatasetIdentical(expected, ds2.merge(ds1, join='right'))

        expected = expected.isel(x=slice(1, 2))
        self.assertDatasetIdentical(expected, ds1.merge(ds2, join='inner'))
        self.assertDatasetIdentical(expected, ds2.merge(ds1, join='inner'))
    def test_getitem(self):
        """__getitem__ returns a DataArray for a name, a sub-Dataset for a
        list of names, and dispatches dicts to isel; missing keys raise."""
        data = create_test_data()
        self.assertIsInstance(data['var1'], DataArray)
        self.assertVariableEqual(data['var1'], data.variables['var1'])
        with self.assertRaises(KeyError):
            data['notfound']
        with self.assertRaises(KeyError):
            data[['var1', 'notfound']]

        actual = data[['var1', 'var2']]
        expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
        self.assertDatasetEqual(expected, actual)

        actual = data['numbers']
        expected = DataArray(data['numbers'].variable, [data['dim3']],
                             name='numbers')
        self.assertDataArrayIdentical(expected, actual)

        # a dict key is positional indexing, equivalent to isel
        actual = data[dict(dim1=0)]
        expected = data.isel(dim1=0)
        self.assertDatasetIdentical(expected, actual)
    def test_getitem_hashable(self):
        """Arbitrary hashable objects (e.g. tuples) are valid variable names,
        and a tuple key is not interpreted as a list of names."""
        data = create_test_data()
        data[(3, 4)] = data['var1'] + 1
        expected = data['var1'] + 1
        expected.name = (3, 4)
        self.assertDataArrayIdentical(expected, data[(3, 4)])
        # a tuple of existing names is a single (missing) key, not a list
        with self.assertRaisesRegexp(KeyError, "('var1', 'var2')"):
            data[('var1', 'var2')]
    def test_virtual_variables(self):
        """Datetime components are accessible as virtual variables (e.g.
        'time.dayofyear'), support math, and become coordinates when
        selected; they also work on non-coordinate datetime variables."""
        # access virtual variables
        data = create_test_data()
        expected = DataArray(1 + np.arange(20), coords=[data['time']],
                             dims='time', name='dayofyear')
        self.assertDataArrayIdentical(expected, data['time.dayofyear'])
        self.assertArrayEqual(data['time.month'].values,
                              data.variables['time'].to_index().month)
        self.assertArrayEqual(data['time.season'].values, 'DJF')
        # test virtual variable math
        self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
        self.assertArrayEqual(np.sin(data['time.dayofyear']),
                              np.sin(1 + np.arange(20)))
        # ensure they become coordinates
        expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
        actual = data[['time.dayofyear']]
        self.assertDatasetEqual(expected, actual)
        # non-coordinate variables
        ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
        self.assertTrue((ds['t.year'] == 2000).all())
    def test_time_season(self):
        """The 't.season' virtual variable maps months to meteorological
        seasons (DJF/MAM/JJA/SON)."""
        ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
        # Jan-Feb DJF, Mar-May MAM, Jun-Aug JJA, Sep-Nov SON, Dec DJF
        expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
        self.assertArrayEqual(expected, ds['t.season'])
    def test_slice_virtual_variable(self):
        """Virtual variables support slicing and scalar indexing."""
        data = create_test_data()
        self.assertVariableEqual(data['time.dayofyear'][:10],
                                 Variable(['time'], 1 + np.arange(10)))
        self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
    def test_setitem(self):
        """__setitem__ accepts Variables, DataArrays, raw arrays, tuples and
        scalars; it validates dimensions and rejects fancy-index assignment."""
        # assign a variable
        var = Variable(['dim1'], np.random.randn(8))
        data1 = create_test_data()
        data1['A'] = var
        data2 = data1.copy()
        data2['A'] = var
        self.assertDatasetIdentical(data1, data2)
        # assign a dataset array
        dv = 2 * data2['A']
        data1['B'] = dv.variable
        data2['B'] = dv
        self.assertDatasetIdentical(data1, data2)
        # can't assign an ND array without dimensions
        with self.assertRaisesRegexp(ValueError,
                                     'dimensions .* must have the same len'):
            data2['C'] = var.values.reshape(2, 4)
        # but can assign a 1D array
        data1['C'] = var.values
        data2['C'] = ('C', var.values)
        self.assertDatasetIdentical(data1, data2)
        # can assign a scalar
        data1['scalar'] = 0
        data2['scalar'] = ([], 0)
        self.assertDatasetIdentical(data1, data2)
        # can't use the same dimension name as a scalar var
        with self.assertRaisesRegexp(ValueError, 'cannot merge'):
            data1['newvar'] = ('scalar', [3, 4, 5])
        # can't resize a used dimension
        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            data1['dim1'] = data1['dim1'][:5]
        # override an existing value
        data1['A'] = 3 * data2['A']
        self.assertVariableEqual(data1['A'], 3 * data2['A'])
        # dict keys (fancy-index assignment) are not supported
        with self.assertRaises(NotImplementedError):
            data1[{'x': 0}] = 0
    def test_setitem_auto_align(self):
        """Assigning DataArrays aligns them to the dataset's existing
        indexes, padding with NaN or truncating as needed."""
        ds = Dataset()
        ds['x'] = ('y', range(3))
        ds['y'] = 1 + np.arange(3)
        expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
        self.assertDatasetIdentical(ds, expected)

        # replacing the index realigns existing variables
        ds['y'] = DataArray(range(3), dims='y')
        expected = Dataset({'x': ('y', range(3))})
        self.assertDatasetIdentical(ds, expected)

        # a shorter DataArray is padded with NaN
        ds['x'] = DataArray([1, 2], dims='y')
        expected = Dataset({'x': ('y', [1, 2, np.nan])})
        self.assertDatasetIdentical(ds, expected)

        ds['x'] = 42
        expected = Dataset({'x': 42, 'y': range(3)})
        self.assertDatasetIdentical(ds, expected)

        # a longer DataArray is truncated to the existing index
        ds['x'] = DataArray([4, 5, 6, 7], dims='y')
        expected = Dataset({'x': ('y', [4, 5, 6])})
        self.assertDatasetIdentical(ds, expected)
def test_assign(self):
ds = Dataset()
actual = ds.assign(x = [0, 1, 2], y = 2)
expected = Dataset({'x': [0, 1, 2], 'y': 2})
self.assertDatasetIdentical(actual, expected)
self.assertEqual(list(actual), ['x', 'y'])
self.assertDatasetIdentical(ds, Dataset())
actual = actual.assign(y = lambda ds: ds.x ** 2)
expected = Dataset({'y': ('x', [0, 1, 4])})
self.assertDatasetIdentical(actual, expected)
actual = actual.assign_coords(z = 2)
expected = Dataset({'y': ('x', [0, 1, 4])}, {'z': 2})
self.assertDatasetIdentical(actual, expected)
ds = Dataset({'a': ('x', range(3))}, {'b': ('x', ['A'] * 2 + ['B'])})
actual = ds.groupby('b').assign(c = lambda ds: 2 * ds.a)
expected = ds.merge({'c': ('x', [0, 2, 4])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign(c = lambda ds: ds.a.sum())
expected = ds.merge({'c': ('x', [1, 1, 2])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign_coords(c = lambda ds: ds.a.sum())
expected = expected.set_coords('c')
self.assertDatasetIdentical(actual, expected)
    def test_delitem(self):
        """del removes variables; deleting an index coordinate also removes
        the dimension entry and the coordinate itself."""
        data = create_test_data()
        all_items = set(data)
        self.assertItemsEqual(data, all_items)
        del data['var1']
        self.assertItemsEqual(data, all_items - set(['var1']))
        del data['dim1']
        self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
        self.assertNotIn('dim1', data.dims)
        self.assertNotIn('dim1', data.coords)
    def test_squeeze(self):
        """squeeze drops length-1 dimensions (all, or a named subset) and
        rejects dimensions of length > 1."""
        data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
        for args in [[], [['x']], [['x', 'z']]]:
            def get_args(v):
                # restrict the requested dims to those present on each variable
                return [set(args[0]) & set(v.dims)] if args else []
            expected = Dataset(dict((k, v.squeeze(*get_args(v)))
                                    for k, v in iteritems(data.variables)))
            expected.set_coords(data.coords, inplace=True)
            self.assertDatasetIdentical(expected, data.squeeze(*args))
        # invalid squeeze
        with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
            data.squeeze('y')
    def test_groupby(self):
        """groupby over a coordinate yields (label, group) pairs in index
        order, exposes .groups, and applying identity round-trips."""
        data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
                       {'x': ('x', list('abc')),
                        'c': ('x', [0, 1, 0])})
        groupby = data.groupby('x')
        self.assertEqual(len(groupby), 3)
        expected_groups = {'a': 0, 'b': 1, 'c': 2}
        self.assertEqual(groupby.groups, expected_groups)
        expected_items = [('a', data.isel(x=0)),
                          ('b', data.isel(x=1)),
                          ('c', data.isel(x=2))]
        for actual, expected in zip(groupby, expected_items):
            self.assertEqual(actual[0], expected[0])
            self.assertDatasetEqual(actual[1], expected[1])

        # applying identity with squeeze=False reconstructs the dataset
        identity = lambda x: x
        for k in ['x', 'c', 'y']:
            actual = data.groupby(k, squeeze=False).apply(identity)
            self.assertDatasetEqual(data, actual)
    def test_groupby_returns_new_type(self):
        """apply may change the result type: Dataset groupby can return
        DataArrays and vice versa."""
        data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})

        actual = data.groupby('x').apply(lambda ds: ds['z'])
        expected = data['z']
        self.assertDataArrayIdentical(expected, actual)

        actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
        expected = data
        self.assertDatasetIdentical(expected, actual)
    def test_groupby_iter(self):
        """Iterating a groupby yields (label, sub-dataset) pairs whose
        variables match positional slices of the original."""
        data = create_test_data()
        for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
            self.assertEqual(data['dim1'][n], t)
            self.assertVariableEqual(data['var1'][n], sub['var1'])
            self.assertVariableEqual(data['var2'][n], sub['var2'])
            self.assertVariableEqual(data['var3'][:, n], sub['var3'])
    def test_groupby_errors(self):
        """groupby rejects multi-dimensional, unnamed, wrong-length and
        dimension-less group arguments with specific messages."""
        data = create_test_data()
        with self.assertRaisesRegexp(ValueError, 'must be 1 dimensional'):
            data.groupby('var1')
        with self.assertRaisesRegexp(ValueError, 'must have a name'):
            data.groupby(np.arange(10))
        with self.assertRaisesRegexp(ValueError, 'length does not match'):
            data.groupby(data['dim1'][:3])
        with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
            data.groupby(data.coords['dim1'].to_index())
    def test_groupby_reduce(self):
        """Reductions over a groupby collapse the grouped dimension; ungrouped
        variables are broadcast across the group labels."""
        data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
                        'xonly': ('x', np.random.randn(3)),
                        'yonly': ('y', np.random.randn(4)),
                        'letters': ('y', ['a', 'a', 'b', 'b'])})

        # grouping over 'x' with mean is equivalent to mean over 'y'
        expected = data.mean('y')
        expected['yonly'] = expected['yonly'].variable.expand_dims({'x': 3})
        actual = data.groupby('x').mean()
        self.assertDatasetAllClose(expected, actual)

        actual = data.groupby('x').mean('y')
        self.assertDatasetAllClose(expected, actual)

        # grouping by a non-index variable ('letters')
        letters = data['letters']
        expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
                            'xonly': (data['xonly'].mean().variable
                                      .expand_dims({'letters': 2})),
                            'yonly': data['yonly'].groupby(letters).mean()})
        actual = data.groupby('letters').mean()
        self.assertDatasetAllClose(expected, actual)
    def test_groupby_math(self):
        """Binary arithmetic between a groupby and a DataArray/Dataset
        broadcasts over the groups; unsupported operand combinations raise."""
        # groupby arithmetic may reorder dims, so compare after transposing
        reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')

        ds = create_test_data()
        for squeeze in [True, False]:
            grouped = ds.groupby('dim1', squeeze=squeeze)

            expected = reorder_dims(ds + ds.coords['dim1'])
            actual = grouped + ds.coords['dim1']
            self.assertDatasetIdentical(expected, reorder_dims(actual))

            actual = ds.coords['dim1'] + grouped
            self.assertDatasetIdentical(expected, reorder_dims(actual))

            ds2 = 2 * ds
            expected = reorder_dims(ds + ds2)
            actual = grouped + ds2
            self.assertDatasetIdentical(expected, reorder_dims(actual))

            actual = ds2 + grouped
            self.assertDatasetIdentical(expected, reorder_dims(actual))

        grouped = ds.groupby('numbers')
        zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
        expected = ((ds + Variable('dim3', np.zeros(10)))
                    .transpose('dim3', 'dim1', 'dim2', 'time'))
        actual = grouped + zeros
        self.assertDatasetEqual(expected, actual)

        actual = zeros + grouped
        self.assertDatasetEqual(expected, actual)

        # invalid operands: datasets with the grouped dim, scalars, groupbys
        with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
            grouped + ds
        with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
            ds + grouped
        with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
            grouped + 1
        with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
            grouped + grouped
        with self.assertRaisesRegexp(TypeError, 'in-place operations'):
            ds += grouped

        ds = Dataset({'x': ('time', np.arange(100)),
                      'time': pd.date_range('2000-01-01', periods=100)})
        with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
            ds + ds.groupby('time.month')
    def test_groupby_math_virtual(self):
        """Arithmetic with a groupby over a virtual variable ('t.day')."""
        ds = Dataset({'x': ('t', [1, 2, 3])},
                     {'t': pd.date_range('20100101', periods=3)})
        grouped = ds.groupby('t.day')
        # each day is its own group of size 1, so subtracting the group
        # mean yields all zeros
        actual = grouped - grouped.mean()
        expected = Dataset({'x': ('t', [0, 0, 0])},
                           ds[['t', 't.day']])
        self.assertDatasetIdentical(actual, expected)
    def test_groupby_nan(self):
        """NaN group labels are dropped rather than forming their own group."""
        # nan should be excluded from groupby
        ds = Dataset({'foo': ('x', [1, 2, 3, 4])},
                     {'bar': ('x', [1, 1, 2, np.nan])})
        actual = ds.groupby('bar').mean()
        expected = Dataset({'foo': ('bar', [1.5, 3]), 'bar': [1, 2]})
        self.assertDatasetIdentical(actual, expected)
    def test_resample_and_first(self):
        """Resampling: downsampling with how='first' and upsampling.

        Upsampling to a finer frequency inserts missing values at the new
        times, so the result matches a plain reindex regardless of ``how``.
        """
        times = pd.date_range('2000-01-01', freq='6H', periods=10)
        ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
                      'bar': ('time', np.random.randn(10), {'meta': 'data'}),
                      'time': times})
        # downsample 6H -> 1D: 'first' picks the first sample of each day
        actual = ds.resample('1D', dim='time', how='first')
        expected = ds.isel(time=[0, 4, 8])
        self.assertDatasetIdentical(expected, actual)
        # upsampling
        expected_time = pd.date_range('2000-01-01', freq='3H', periods=19)
        expected = ds.reindex(time=expected_time)
        for how in ['mean', 'sum', 'first', 'last', np.mean]:
            actual = ds.resample('3H', 'time', how=how)
            self.assertDatasetEqual(expected, actual)
    def test_to_array(self):
        """Dataset.to_array stacks data variables along a new dimension.

        The scalar variable 'a' is broadcast against 'b'; dataset attrs are
        carried over and the new dimension can be renamed via arguments.
        """
        ds = Dataset(OrderedDict([('a', 1), ('b', ('x', [1, 2, 3]))]),
                     coords={'c': 42}, attrs={'Conventions': 'None'})
        data = [[1, 1, 1], [1, 2, 3]]
        coords = {'x': range(3), 'c': 42, 'variable': ['a', 'b']}
        dims = ('variable', 'x')
        expected = DataArray(data, coords, dims, attrs=ds.attrs)
        actual = ds.to_array()
        self.assertDataArrayIdentical(expected, actual)
        # custom name for the stacking dimension and for the result array
        actual = ds.to_array('abc', name='foo')
        expected = expected.rename({'variable': 'abc'}).rename('foo')
        self.assertDataArrayIdentical(expected, actual)
    def test_to_and_from_dataframe(self):
        """Round-trip conversion between Dataset and pandas DataFrame.

        Covers: simple 1D data, coordinates appearing as columns, a 2D
        variable producing a MultiIndex, empty/pathological frames, and two
        regression tests (GH278 column ordering, GH449 non-unique columns).
        """
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list('abcdefghij')
        ds = Dataset(OrderedDict([('a', ('t', x)),
                                  ('b', ('t', y)),
                                  ('t', ('t', t))]))
        expected = pd.DataFrame(np.array([x, y]).T, columns=['a', 'b'],
                                index=pd.Index(t, name='t'))
        actual = ds.to_dataframe()
        # use the .equals method to check all DataFrame metadata
        assert expected.equals(actual), (expected, actual)
        # verify coords are included
        actual = ds.set_coords('b').to_dataframe()
        assert expected.equals(actual), (expected, actual)
        # check roundtrip
        self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
        # test a case with a MultiIndex
        w = np.random.randn(2, 3)
        ds = Dataset({'w': (('x', 'y'), w)})
        ds['y'] = ('y', list('abc'))
        exp_index = pd.MultiIndex.from_arrays(
            [[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
            names=['x', 'y'])
        expected = pd.DataFrame(w.reshape(-1), columns=['w'], index=exp_index)
        actual = ds.to_dataframe()
        self.assertTrue(expected.equals(actual))
        # check roundtrip
        self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
        # check pathological cases
        df = pd.DataFrame([1])
        actual = Dataset.from_dataframe(df)
        expected = Dataset({0: ('index', [1])})
        self.assertDatasetIdentical(expected, actual)
        df = pd.DataFrame()
        actual = Dataset.from_dataframe(df)
        expected = Dataset()
        self.assertDatasetIdentical(expected, actual)
        # regression test for GH278
        # use int64 to ensure consistent results for the pandas .equals method
        # on windows (which requires the same dtype)
        ds = Dataset({'x': pd.Index(['bar']),
                      'a': ('y', np.array([1], 'int64'))}).isel(x=0)
        # use .loc to ensure consistent results on Python 3
        actual = ds.to_dataframe().loc[:, ['a', 'x']]
        expected = pd.DataFrame([[1, 'bar']], index=pd.Index([0], name='y'),
                                columns=['a', 'x'])
        assert expected.equals(actual), (expected, actual)
        ds = Dataset({'x': np.array([0], 'int64'),
                      'y': np.array([1], 'int64')})
        actual = ds.to_dataframe()
        idx = pd.MultiIndex.from_arrays([[0], [1]], names=['x', 'y'])
        expected = pd.DataFrame([[]], index=idx)
        assert expected.equals(actual), (expected, actual)
        # regression test for GH449
        df = pd.DataFrame(np.zeros((2, 2)))
        df.columns = ['foo', 'foo']
        with self.assertRaisesRegexp(ValueError, 'non-unique columns'):
            Dataset.from_dataframe(df)
    def test_pickle(self):
        """Datasets survive a pickle round-trip, including their dims."""
        data = create_test_data()
        roundtripped = pickle.loads(pickle.dumps(data))
        self.assertDatasetIdentical(data, roundtripped)
        # regression test for #167:
        self.assertEqual(data.dims, roundtripped.dims)
    def test_lazy_load(self):
        """Opening from a store must not eagerly read variable data.

        Uses a store whose variables raise ``UnexpectedDataAccess`` when
        read: explicit loads must raise, while lazy indexing must not.
        """
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        for decode_cf in [True, False]:
            ds = open_dataset(store, decode_cf=decode_cf)
            with self.assertRaises(UnexpectedDataAccess):
                ds.load()
            with self.assertRaises(UnexpectedDataAccess):
                ds['var1'].values
            # these should not raise UnexpectedDataAccess:
            ds.isel(time=10)
            ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({'foo': (('a', 'b'), x), 'bar': (('b', y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna('a')
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna('b')
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', subset=['foo', 'bar'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna('b', subset=['foo'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna('b', subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', subset=[])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', subset=['bar'])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', how='all')
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', how='all', subset=['bar'])
expected = ds.isel(b=[0, 1, 2])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=1, subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=2)
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', thresh=4)
expected = ds.isel(b=[1, 2, 3])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', thresh=3)
expected = ds.isel(a=[1, 3])
self.assertDatasetIdentical(actual, ds)
with self.assertRaisesRegexp(ValueError, 'a single dataset dimension'):
ds.dropna('foo')
with self.assertRaisesRegexp(ValueError, 'invalid how'):
ds.dropna('a', how='somehow')
with self.assertRaisesRegexp(TypeError, 'must specify how or thresh'):
ds.dropna('a', how=None)
    def test_fillna(self):
        """Dataset.fillna with scalars, dicts, Datasets, DataArrays, groupby.

        Fill values are aligned to the dataset (left-aligned variables);
        keys that match no variable raise.
        """
        ds = Dataset({'a': ('x', [np.nan, 1, np.nan, 3])})
        # fill with -1
        actual = ds.fillna(-1)
        expected = Dataset({'a': ('x', [-1, 1, -1, 3])})
        self.assertDatasetIdentical(expected, actual)
        actual = ds.fillna({'a': -1})
        self.assertDatasetIdentical(expected, actual)
        other = Dataset({'a': -1})
        actual = ds.fillna(other)
        self.assertDatasetIdentical(expected, actual)
        actual = ds.fillna({'a': other.a})
        self.assertDatasetIdentical(expected, actual)
        # fill with range(4)
        b = DataArray(range(4), dims='x')
        actual = ds.fillna(b)
        expected = b.rename('a').to_dataset()
        self.assertDatasetIdentical(expected, actual)
        actual = ds.fillna(expected)
        self.assertDatasetIdentical(expected, actual)
        actual = ds.fillna(range(4))
        self.assertDatasetIdentical(expected, actual)
        # a shorter fill array still works: position 3 holds a real value
        actual = ds.fillna(b[:3])
        self.assertDatasetIdentical(expected, actual)
        # left align variables
        ds['b'] = np.nan
        # unmatched key 'c' is silently ignored; 'b' is left untouched
        actual = ds.fillna({'a': -1, 'c': 'foobar'})
        expected = Dataset({'a': ('x', [-1, 1, -1, 3]), 'b': np.nan})
        self.assertDatasetIdentical(expected, actual)
        with self.assertRaisesRegexp(ValueError, 'no overlapping'):
            ds.fillna({'x': 0})
        with self.assertRaisesRegexp(ValueError, 'no overlapping'):
            ds.fillna(Dataset(coords={'a': 0}))
        # groupby
        expected = Dataset({'a': ('x', range(4))})
        for target in [ds, expected]:
            target.coords['b'] = ('x', [0, 0, 1, 1])
        # each group gets its own fill value, keyed by group label
        actual = ds.groupby('b').fillna(DataArray([0, 2], dims='b'))
        self.assertDatasetIdentical(expected, actual)
        actual = ds.groupby('b').fillna(Dataset({'a': ('b', [0, 2])}))
        self.assertDatasetIdentical(expected, actual)
    def test_where(self):
        """Dataset.where masks values with NaN where the condition is False.

        The condition may be a Dataset, DataArray, raw ndarray, or scalar,
        and also works per-group through groupby.
        """
        ds = Dataset({'a': ('x', range(5))})
        expected = Dataset({'a': ('x', [np.nan, np.nan, 2, 3, 4])})
        actual = ds.where(ds > 1)
        self.assertDatasetIdentical(expected, actual)
        actual = ds.where(ds.a > 1)
        self.assertDatasetIdentical(expected, actual)
        actual = ds.where(ds.a.values > 1)
        self.assertDatasetIdentical(expected, actual)
        # scalar conditions: True keeps everything, False masks everything
        actual = ds.where(True)
        self.assertDatasetIdentical(ds, actual)
        expected = ds.copy(deep=True)
        expected['a'].values = [np.nan] * 5
        actual = ds.where(False)
        self.assertDatasetIdentical(expected, actual)
        # 2d
        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
        expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
        actual = ds.where(ds > 0)
        self.assertDatasetIdentical(expected, actual)
        # groupby: condition indexed by the group label
        ds = Dataset({'a': ('x', range(5))}, {'c': ('x', [0, 0, 1, 1, 1])})
        cond = Dataset({'a': ('c', [True, False])})
        expected = ds.copy(deep=True)
        expected['a'].values = [0, 1] + [np.nan] * 3
        actual = ds.groupby('c').where(cond)
        self.assertDatasetIdentical(expected, actual)
    def test_reduce(self):
        """Whole-dataset reductions and the several ways of passing ``dim``.

        A full reduction drops all coordinates; ``dim`` accepts a string,
        list, tuple, or empty tuple (reduce over everything) / empty list
        (reduce over nothing).
        """
        data = create_test_data()
        self.assertEqual(len(data.mean().coords), 0)
        actual = data.max()
        expected = Dataset(dict((k, v.max())
                                for k, v in iteritems(data.data_vars)))
        self.assertDatasetEqual(expected, actual)
        self.assertDatasetEqual(data.min(dim=['dim1']),
                                data.min(dim='dim1'))
        for reduct, expected in [('dim2', ['dim1', 'dim3', 'time']),
                                 (['dim2', 'time'], ['dim1', 'dim3']),
                                 (('dim2', 'time'), ['dim1', 'dim3']),
                                 ((), ['dim1', 'dim2', 'dim3', 'time'])]:
            actual = data.min(dim=reduct).dims
            # print statement left in for easier diagnosis on failure
            print(reduct, actual, expected)
            self.assertItemsEqual(actual, expected)
        # reducing over an empty list of dims is a no-op
        self.assertDatasetEqual(data.mean(dim=[]), data)
def test_reduce_bad_dim(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'Dataset does not contain'):
ds = data.mean(dim='bad_dim')
def test_reduce_non_numeric(self):
data1 = create_test_data(seed=44)
data2 = create_test_data(seed=44)
add_vars = {'var4': ['dim1', 'dim2']}
for v, dims in sorted(add_vars.items()):
size = tuple(data1.dims[d] for d in dims)
data = np.random.random_integers(0, 100, size=size).astype(np.str_)
data1[v] = (dims, data, {'foo': 'variable'})
self.assertTrue('var4' not in data1.mean())
self.assertDatasetEqual(data1.mean(), data2.mean())
self.assertDatasetEqual(data1.mean(dim='dim1'),
data2.mean(dim='dim1'))
    def test_reduce_strings(self):
        """min/max/argmin/argmax work on string variables (object, bytes,
        and unicode dtypes), comparing lexicographically."""
        expected = Dataset({'x': 'a'})
        ds = Dataset({'x': ('y', ['a', 'b'])})
        actual = ds.min()
        self.assertDatasetIdentical(expected, actual)
        expected = Dataset({'x': 'b'})
        actual = ds.max()
        self.assertDatasetIdentical(expected, actual)
        expected = Dataset({'x': 0})
        actual = ds.argmin()
        self.assertDatasetIdentical(expected, actual)
        expected = Dataset({'x': 1})
        actual = ds.argmax()
        self.assertDatasetIdentical(expected, actual)
        # fixed-width bytes dtype
        expected = Dataset({'x': b'a'})
        ds = Dataset({'x': ('y', np.array(['a', 'b'], 'S1'))})
        actual = ds.min()
        self.assertDatasetIdentical(expected, actual)
        # fixed-width unicode dtype
        expected = Dataset({'x': u'a'})
        ds = Dataset({'x': ('y', np.array(['a', 'b'], 'U1'))})
        actual = ds.min()
        self.assertDatasetIdentical(expected, actual)
    def test_reduce_dtypes(self):
        """Reductions over bool, unsigned, and complex dtypes (GH342, GH505)."""
        # regression test for GH342
        expected = Dataset({'x': 1})
        actual = Dataset({'x': True}).sum()
        self.assertDatasetIdentical(expected, actual)
        # regression test for GH505
        expected = Dataset({'x': 3})
        actual = Dataset({'x': ('y', np.array([1, 2], 'uint16'))}).sum()
        self.assertDatasetIdentical(expected, actual)
        expected = Dataset({'x': 1 + 1j})
        actual = Dataset({'x': ('y', [1, 1j])}).sum()
        self.assertDatasetIdentical(expected, actual)
    def test_reduce_keep_attrs(self):
        """Attrs are dropped by default and preserved with keep_attrs=True."""
        data = create_test_data()
        _attrs = {'attr1': 'value1', 'attr2': 2929}
        attrs = OrderedDict(_attrs)
        data.attrs = attrs
        # Test dropped attrs
        ds = data.mean()
        self.assertEqual(ds.attrs, {})
        for v in ds.data_vars.values():
            self.assertEqual(v.attrs, {})
        # Test kept attrs
        ds = data.mean(keep_attrs=True)
        self.assertEqual(ds.attrs, attrs)
        for k, v in ds.data_vars.items():
            self.assertEqual(v.attrs, data[k].attrs)
    def test_reduce_argmin(self):
        """argmin works with and without an explicit dim (regression #205)."""
        # regression test for #205
        ds = Dataset({'a': ('x', [0, 1])})
        expected = Dataset({'a': ([], 0)})
        actual = ds.argmin()
        self.assertDatasetIdentical(expected, actual)
        actual = ds.argmin('x')
        self.assertDatasetIdentical(expected, actual)
def test_reduce_scalars(self):
ds = Dataset({'x': ('a', [2, 2]), 'y': 2, 'z': ('b', [2])})
expected = Dataset({'x': 0, 'y': 0, 'z': 0})
actual = ds.var()
self.assertDatasetIdentical(expected, actual)
    def test_reduce_only_one_axis(self):
        """Dataset.reduce with a custom function that only accepts a single
        integer axis: works for one dim, raises when given zero or many."""
        def mean_only_one_axis(x, axis):
            if not isinstance(axis, (int, np.integer)):
                raise TypeError('non-integer axis')
            return x.mean(axis)
        ds = Dataset({'a': (['x', 'y'], [[0, 1, 2, 3, 4]])})
        expected = Dataset({'a': ('x', [2])})
        actual = ds.reduce(mean_only_one_axis, 'y')
        self.assertDatasetIdentical(expected, actual)
        # no dim -> axis=None; multiple dims -> axis is a tuple: both rejected
        with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
            ds.reduce(mean_only_one_axis)
        with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
            ds.reduce(mean_only_one_axis, ['x', 'y'])
def test_count(self):
ds = Dataset({'x': ('a', [np.nan, 1]), 'y': 0, 'z': np.nan})
expected = Dataset({'x': 1, 'y': 1, 'z': 0})
actual = ds.count()
self.assertDatasetIdentical(expected, actual)
    def test_apply(self):
        """Dataset.apply maps a function over data variables.

        Checks attr handling, keyword forwarding, and that variables the
        function cannot handle keep their identity while unused coords such
        as 'time' are dropped.
        """
        data = create_test_data()
        data.attrs['foo'] = 'bar'
        self.assertDatasetIdentical(data.apply(np.mean), data.mean())
        expected = data.mean(keep_attrs=True)
        actual = data.apply(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
        self.assertDatasetIdentical(expected, actual)
        self.assertDatasetIdentical(data.apply(lambda x: x, keep_attrs=True),
                                    data.drop('time'))
        def scale(x, multiple=1):
            # extra keyword arguments are forwarded by apply
            return multiple * x
        actual = data.apply(scale, multiple=2)
        self.assertDataArrayEqual(actual['var1'], 2 * data['var1'])
        self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
        actual = data.apply(np.asarray)
        expected = data.drop('time')  # time is not used on a data var
        self.assertDatasetEqual(expected, actual)
    def make_example_math_dataset(self):
        """Build the shared fixture for the arithmetic tests below.

        Returns a Dataset with a 1D 'bar' (x), a 2D float 'foo' (x, y)
        containing one NaN at [0, 0], a string coord 'abc' on x, and a
        numeric coord on y.
        """
        variables = OrderedDict(
            [('bar', ('x', np.arange(100, 400, 100))),
             ('foo', (('x', 'y'), 1.0 * np.arange(12).reshape(3, 4)))])
        coords = {'abc': ('x', ['a', 'b', 'c']),
                  'y': 10 * np.arange(4)}
        ds = Dataset(variables, coords)
        ds['foo'][0, 0] = np.nan
        return ds
    def test_dataset_number_math(self):
        """Identity arithmetic with scalars: unary +, +0 (both orders),
        0-d ndarray operands, and in-place +=."""
        ds = self.make_example_math_dataset()
        self.assertDatasetIdentical(ds, +ds)
        self.assertDatasetIdentical(ds, ds + 0)
        self.assertDatasetIdentical(ds, 0 + ds)
        self.assertDatasetIdentical(ds, ds + np.array(0))
        self.assertDatasetIdentical(ds, np.array(0) + ds)
        actual = ds.copy(deep=True)
        actual += 0
        self.assertDatasetIdentical(ds, actual)
    def test_unary_ops(self):
        """Unary/elementwise ops on a Dataset equal applying them per-variable,
        and ndarray-only methods are deliberately not exposed."""
        ds = self.make_example_math_dataset()
        self.assertDatasetIdentical(ds.apply(abs), abs(ds))
        self.assertDatasetIdentical(ds.apply(lambda x: x + 4), ds + 4)
        for func in [lambda x: x.isnull(),
                     lambda x: x.round(),
                     lambda x: x.astype(int)]:
            self.assertDatasetIdentical(ds.apply(func), func(ds))
        self.assertDatasetIdentical(ds.isnull(), ~ds.notnull())
        # don't actually patch these methods in
        with self.assertRaises(AttributeError):
            ds.item
        with self.assertRaises(AttributeError):
            ds.searchsorted
    def test_dataset_array_math(self):
        """Dataset (+/-) DataArray, Variable, and raw ndarray operands, in
        both orders and in-place; results broadcast over every variable."""
        ds = self.make_example_math_dataset()
        expected = ds.apply(lambda x: x - ds['foo'])
        self.assertDatasetIdentical(expected, ds - ds['foo'])
        self.assertDatasetIdentical(expected, -ds['foo'] + ds)
        self.assertDatasetIdentical(expected, ds - ds['foo'].variable)
        self.assertDatasetIdentical(expected, -ds['foo'].variable + ds)
        actual = ds.copy(deep=True)
        actual -= ds['foo']
        self.assertDatasetIdentical(expected, actual)
        expected = ds.apply(lambda x: x + ds['bar'])
        self.assertDatasetIdentical(expected, ds + ds['bar'])
        actual = ds.copy(deep=True)
        actual += ds['bar']
        self.assertDatasetIdentical(expected, actual)
        # raw ndarray operand on a single-variable dataset
        expected = Dataset({'bar': ds['bar'] + np.arange(3)})
        self.assertDatasetIdentical(expected, ds[['bar']] + np.arange(3))
        self.assertDatasetIdentical(expected, np.arange(3) + ds[['bar']])
    def test_dataset_dataset_math(self):
        """Dataset (+) Dataset / mapping operands, in-place identity
        preservation, comparison ops, and automatic alignment."""
        ds = self.make_example_math_dataset()
        self.assertDatasetIdentical(ds, ds + 0 * ds)
        self.assertDatasetIdentical(ds, ds + {'foo': 0, 'bar': 0})
        expected = ds.apply(lambda x: 2 * x)
        self.assertDatasetIdentical(expected, 2 * ds)
        self.assertDatasetIdentical(expected, ds + ds)
        self.assertDatasetIdentical(expected, ds + ds.data_vars)
        self.assertDatasetIdentical(expected, ds + dict(ds.data_vars))
        actual = ds.copy(deep=True)
        expected_id = id(actual)
        actual += ds
        self.assertDatasetIdentical(expected, actual)
        # in-place op must mutate the same object, not rebind
        self.assertEqual(expected_id, id(actual))
        # NaN != NaN, so elementwise equality matches notnull
        self.assertDatasetIdentical(ds == ds, ds.notnull())
        # operands are aligned to their common (inner) labels
        subsampled = ds.isel(y=slice(2))
        expected = 2 * subsampled
        self.assertDatasetIdentical(expected, subsampled + ds)
        self.assertDatasetIdentical(expected, ds + subsampled)
def test_dataset_math_auto_align(self):
ds = self.make_example_math_dataset()
subset = ds.isel(x=slice(2), y=[1, 3])
expected = 2 * subset
actual = ds + subset
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds.isel(x=slice(1)) + ds.isel(x=slice(1, None))
actual = ds + ds[['bar']]
expected = (2 * ds[['bar']]).merge(ds.coords)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + Dataset()
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
Dataset() + Dataset()
ds2 = Dataset(coords={'bar': 42})
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + ds2
# maybe unary arithmetic with empty datasets should raise instead?
self.assertDatasetIdentical(Dataset() + 1, Dataset())
for other in [ds.isel(x=slice(2)), ds.bar.isel(x=slice(0))]:
actual = ds.copy(deep=True)
other = ds.isel(x=slice(2))
actual += other
expected = ds + other.reindex_like(ds)
self.assertDatasetIdentical(expected, actual)
    def test_dataset_math_errors(self):
        """Invalid arithmetic raises, and failed in-place ops roll back."""
        ds = self.make_example_math_dataset()
        # cannot add a whole Dataset in-place to one of its arrays
        with self.assertRaises(TypeError):
            ds['foo'] += ds
        with self.assertRaises(TypeError):
            ds['foo'].variable += ds
        with self.assertRaisesRegexp(ValueError, 'must have the same'):
            ds += ds[['bar']]
        # verify we can rollback in-place operations if something goes wrong
        # nb. inplace datetime64 math actually will work with an integer array
        # but not floats thanks to numpy's inconsistent handling
        other = DataArray(np.datetime64('2000-01-01T12'), coords={'c': 2})
        actual = ds.copy(deep=True)
        with self.assertRaises(TypeError):
            actual += other
        # the failed += must leave 'actual' unchanged
        self.assertDatasetIdentical(actual, ds)
    def test_dataset_transpose(self):
        """Dataset.transpose (and the .T alias) reorders dims per variable,
        skipping dims a variable lacks; wrong dim lists raise."""
        ds = Dataset({'a': (('x', 'y'), np.random.randn(3, 4)),
                      'b': (('y', 'x'), np.random.randn(4, 3))})
        actual = ds.transpose()
        expected = ds.apply(lambda x: x.transpose())
        self.assertDatasetIdentical(expected, actual)
        actual = ds.T
        self.assertDatasetIdentical(expected, actual)
        actual = ds.transpose('x', 'y')
        expected = ds.apply(lambda x: x.transpose('x', 'y'))
        self.assertDatasetIdentical(expected, actual)
        ds = create_test_data()
        # no args: every variable's dims are reversed
        actual = ds.transpose()
        for k in ds:
            self.assertEqual(actual[k].dims[::-1], ds[k].dims)
        new_order = ('dim2', 'dim3', 'dim1', 'time')
        actual = ds.transpose(*new_order)
        for k in ds:
            # each variable keeps only the dims it has, in the new order
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            self.assertEqual(actual[k].dims, expected_dims)
        # dim list must name exactly the dataset's dims
        with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
            ds.transpose('dim1', 'dim2', 'dim3')
        with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
            ds.transpose('dim1', 'dim2', 'dim3', 'time', 'extra_dim')
    def test_dataset_diff_n1_simple(self):
        """First differences; default labels come from the upper edge."""
        ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
        actual = ds.diff('x')
        expected = Dataset({'foo': ('x', [0, 1, 0])})
        expected.coords['x'].values = [1, 2, 3]
        self.assertDatasetEqual(expected, actual)
    def test_dataset_diff_n1_lower(self):
        """First differences with label='lower' keep the lower-edge labels."""
        ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
        actual = ds.diff('x', label='lower')
        expected = Dataset({'foo': ('x', [0, 1, 0])})
        expected.coords['x'].values = [0, 1, 2]
        self.assertDatasetEqual(expected, actual)
    def test_dataset_diff_n1(self):
        """diff over 'dim2' matches np.diff per variable; variables without
        that dim (var3) and other coords pass through unchanged."""
        ds = create_test_data(seed=1)
        actual = ds.diff('dim2')
        expected = dict()
        expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1),
                                     [ds['dim1'].values,
                                      ds['dim2'].values[1:]],
                                     ['dim1', 'dim2'])
        expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1),
                                     [ds['dim1'].values,
                                      ds['dim2'].values[1:]],
                                     ['dim1', 'dim2'])
        expected['var3'] = ds['var3']
        expected = Dataset(expected, coords={'time': ds['time'].values})
        expected.coords['numbers'] = ('dim3', ds['numbers'].values)
        self.assertDatasetEqual(expected, actual)
    def test_dataset_diff_n2(self):
        """Second-order diff (n=2) matches np.diff(..., n=2); 'dim2' loses
        two labels accordingly."""
        ds = create_test_data(seed=1)
        actual = ds.diff('dim2', n=2)
        expected = dict()
        expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1, n=2),
                                     [ds['dim1'].values,
                                      ds['dim2'].values[2:]],
                                     ['dim1', 'dim2'])
        expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1, n=2),
                                     [ds['dim1'].values,
                                      ds['dim2'].values[2:]],
                                     ['dim1', 'dim2'])
        expected['var3'] = ds['var3']
        expected = Dataset(expected, coords={'time': ds['time'].values})
        expected.coords['numbers'] = ('dim3', ds['numbers'].values)
        self.assertDatasetEqual(expected, actual)
    def test_dataset_diff_exception_n_neg(self):
        """diff rejects a negative order ``n``."""
        ds = create_test_data(seed=1)
        with self.assertRaisesRegexp(ValueError, 'must be non-negative'):
            ds.diff('dim2', n=-1)
    def test_dataset_diff_exception_label_str(self):
        """diff rejects label values other than 'upper'/'lower'."""
        ds = create_test_data(seed=1)
        with self.assertRaisesRegexp(ValueError, '\'label\' argument has to'):
            ds.diff('dim2', label='raise_me')
    def test_real_and_imag(self):
        """.real and .imag take the component per variable, preserving attrs
        on both the dataset and the variables."""
        attrs = {'foo': 'bar'}
        ds = Dataset({'x': ((), 1 + 2j, attrs)}, attrs=attrs)
        expected_re = Dataset({'x': ((), 1, attrs)}, attrs=attrs)
        self.assertDatasetIdentical(ds.real, expected_re)
        expected_im = Dataset({'x': ((), 2, attrs)}, attrs=attrs)
        self.assertDatasetIdentical(ds.imag, expected_im)
| petercable/xray | xray/test/test_dataset.py | Python | apache-2.0 | 87,064 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class AccountMoveLine(models.Model):
    """Extend account.move.line with the link to sales order lines and the
    vendor-bill re-invoicing flow (creating expense SO lines from analytic
    entries)."""
    _inherit = 'account.move.line'
    # many2many to the SO lines this invoice line was generated from
    sale_line_ids = fields.Many2many(
        'sale.order.line',
        'sale_order_line_invoice_rel',
        'invoice_line_id', 'order_line_id',
        string='Sales Order Lines', readonly=True, copy=False)
    def _copy_data_extend_business_fields(self, values):
        """Propagate 'sale_line_ids' when a move line is copied (the field
        itself has copy=False)."""
        # OVERRIDE to copy the 'sale_line_ids' field as well.
        super(AccountMoveLine, self)._copy_data_extend_business_fields(values)
        values['sale_line_ids'] = [(6, None, self.sale_line_ids.ids)]
    def _prepare_analytic_line(self):
        """ Note: This method is called only on the move.line that having an analytic account, and
            so that should create analytic entries.
        """
        values_list = super(AccountMoveLine, self)._prepare_analytic_line()
        # filter the move lines that can be reinvoiced: a cost (negative amount) analytic line without SO line but with a product can be reinvoiced
        move_to_reinvoice = self.env['account.move.line']
        for index, move_line in enumerate(self):
            # values_list is positionally aligned with self (one dict per line)
            values = values_list[index]
            if 'so_line' not in values:
                if move_line._sale_can_be_reinvoice():
                    move_to_reinvoice |= move_line
        # insert the sale line in the create values of the analytic entries
        if move_to_reinvoice:
            map_sale_line_per_move = move_to_reinvoice._sale_create_reinvoice_sale_line()
            for values in values_list:
                sale_line = map_sale_line_per_move.get(values.get('move_id'))
                if sale_line:
                    values['so_line'] = sale_line.id
        return values_list
    def _sale_can_be_reinvoice(self):
        """ determine if the generated analytic line should be reinvoiced or not.
            For Vendor Bill flow, if the product has a 'reinvoice policy' and is a cost, then we will find the SO on which reinvoice the AAL
        """
        self.ensure_one()
        # already linked to a sale line: never reinvoice twice
        if self.sale_line_ids:
            return False
        uom_precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        # a cost means debit >= credit (float_compare != 1 <=> credit <= debit)
        return float_compare(self.credit or 0.0, self.debit or 0.0, precision_digits=uom_precision_digits) != 1 and self.product_id.expense_policy not in [False, 'no']
    def _sale_create_reinvoice_sale_line(self):
        """Create (in batch) or find the expense sale.order.line for each move
        line in self, and return a map {move_line.id: sale.order.line}."""
        sale_order_map = self._sale_determine_order()
        sale_line_values_to_create = []  # the list of creation values of sale line to create.
        existing_sale_line_cache = {}  # in the sales_price-delivery case, we can reuse the same sale line. This cache will avoid doing a search each time the case happen
        # `map_move_sale_line` is map where
        #   - key is the move line identifier
        #   - value is either a sale.order.line record (existing case), or an integer representing the index of the sale line to create in
        #     the `sale_line_values_to_create` (not existing case, which will happen more often than the first one).
        map_move_sale_line = {}
        for move_line in self:
            sale_order = sale_order_map.get(move_line.id)
            # no reinvoice as no sales order was found
            if not sale_order:
                continue
            # raise if the sale order is not currently open
            if sale_order.state != 'sale':
                message_unconfirmed = _('The Sales Order %s linked to the Analytic Account %s must be validated before registering expenses.')
                messages = {
                    'draft': message_unconfirmed,
                    'sent': message_unconfirmed,
                    'done': _('The Sales Order %s linked to the Analytic Account %s is currently locked. You cannot register an expense on a locked Sales Order. Please create a new SO linked to this Analytic Account.'),
                    'cancel': _('The Sales Order %s linked to the Analytic Account %s is cancelled. You cannot register an expense on a cancelled Sales Order.'),
                }
                raise UserError(messages[sale_order.state] % (sale_order.name, sale_order.analytic_account_id.name))
            price = move_line._sale_get_invoice_price(sale_order)
            # find the existing sale.line or keep its creation values to process this in batch
            sale_line = None
            if move_line.product_id.expense_policy == 'sales_price' and move_line.product_id.invoice_policy == 'delivery':  # for those case only, we can try to reuse one
                map_entry_key = (sale_order.id, move_line.product_id.id, price)  # cache entry to limit the call to search
                sale_line = existing_sale_line_cache.get(map_entry_key)
                if sale_line:  # already search, so reuse it. sale_line can be sale.order.line record or index of a "to create values" in `sale_line_values_to_create`
                    map_move_sale_line[move_line.id] = sale_line
                    existing_sale_line_cache[map_entry_key] = sale_line
                else:  # search for existing sale line
                    sale_line = self.env['sale.order.line'].search([
                        ('order_id', '=', sale_order.id),
                        ('price_unit', '=', price),
                        ('product_id', '=', move_line.product_id.id),
                        ('is_expense', '=', True),
                    ], limit=1)
                    if sale_line:  # found existing one, so keep the browse record
                        map_move_sale_line[move_line.id] = existing_sale_line_cache[map_entry_key] = sale_line
                    else:  # should be created, so use the index of creation values instead of browse record
                        # save value to create it
                        sale_line_values_to_create.append(move_line._sale_prepare_sale_line_values(sale_order, price))
                        # store it in the cache of existing ones
                        existing_sale_line_cache[map_entry_key] = len(sale_line_values_to_create) - 1  # save the index of the value to create sale line
                        # store it in the map_move_sale_line map
                        map_move_sale_line[move_line.id] = len(sale_line_values_to_create) - 1  # save the index of the value to create sale line
            else:  # save its value to create it anyway
                sale_line_values_to_create.append(move_line._sale_prepare_sale_line_values(sale_order, price))
                map_move_sale_line[move_line.id] = len(sale_line_values_to_create) - 1  # save the index of the value to create sale line
        # create the sale lines in batch
        new_sale_lines = self.env['sale.order.line'].create(sale_line_values_to_create)
        # build result map by replacing index with newly created record of sale.order.line
        result = {}
        for move_line_id, unknown_sale_line in map_move_sale_line.items():
            if isinstance(unknown_sale_line, int):  # index of newly created sale line
                result[move_line_id] = new_sale_lines[unknown_sale_line]
            elif isinstance(unknown_sale_line, models.BaseModel):  # already record of sale.order.line
                result[move_line_id] = unknown_sale_line
        return result
    def _sale_determine_order(self):
        """ Get the mapping of move.line with the sale.order record on which its analytic entries should be reinvoiced
            :return a dict where key is the move line id, and value is sale.order record (or None).
        """
        analytic_accounts = self.mapped('analytic_account_id')
        # link the analytic account with its open SO by creating a map: {AA.id: sale.order}, if we find some analytic accounts
        mapping = {}
        if analytic_accounts:  # first, search for the open sales order
            sale_orders = self.env['sale.order'].search([('analytic_account_id', 'in', analytic_accounts.ids), ('state', '=', 'sale')], order='create_date DESC')
            for sale_order in sale_orders:
                mapping[sale_order.analytic_account_id.id] = sale_order
            analytic_accounts_without_open_order = analytic_accounts.filtered(lambda account: not mapping.get(account.id))
            if analytic_accounts_without_open_order:  # then, fill the blank with not open sales orders
                sale_orders = self.env['sale.order'].search([('analytic_account_id', 'in', analytic_accounts_without_open_order.ids)], order='create_date DESC')
            for sale_order in sale_orders:
                mapping[sale_order.analytic_account_id.id] = sale_order
        # map of AAL index with the SO on which it needs to be reinvoiced. Maybe be None if no SO found
        return {move_line.id: mapping.get(move_line.analytic_account_id.id) for move_line in self}
    def _sale_prepare_sale_line_values(self, order, price):
        """ Generate the sale.line creation value from the current move line """
        self.ensure_one()
        # append after the last existing line; 100 is the default sequence start
        last_so_line = self.env['sale.order.line'].search([('order_id', '=', order.id)], order='sequence desc', limit=1)
        last_sequence = last_so_line.sequence + 1 if last_so_line else 100
        fpos = order.fiscal_position_id or order.fiscal_position_id._get_fiscal_position(order.partner_id)
        taxes = fpos.map_tax(self.product_id.taxes_id)
        return {
            'order_id': order.id,
            'name': self.name,
            'sequence': last_sequence,
            'price_unit': price,
            'tax_id': [x.id for x in taxes],
            'discount': 0.0,
            'product_id': self.product_id.id,
            'product_uom': self.product_uom_id.id,
            'product_uom_qty': 0.0,
            'is_expense': True,
        }
    def _sale_get_invoice_price(self, order):
        """ Based on the current move line, compute the price to reinvoice the analytic line that is going to be created (so the
            price of the sale line).
        """
        self.ensure_one()
        unit_amount = self.quantity
        amount = (self.credit or 0.0) - (self.debit or 0.0)
        # 'sales_price' policy: reinvoice at the pricelist price, not at cost
        if self.product_id.expense_policy == 'sales_price':
            return order.pricelist_id._get_product_price(
                self.product_id,
                1.0,
                self.product_uom_id,
                date=order.date_order,
            )
        uom_precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        if float_is_zero(unit_amount, precision_digits=uom_precision_digits):
            return 0.0
        # Prevent unnecessary currency conversion that could be impacted by exchange rate
        # fluctuations
        if self.company_id.currency_id and amount and self.company_id.currency_id == order.currency_id:
            return abs(amount / unit_amount)
        price_unit = abs(amount / unit_amount)
        currency_id = self.company_id.currency_id
        if currency_id and currency_id != order.currency_id:
            price_unit = currency_id._convert(price_unit, order.currency_id, order.company_id, order.date_order or fields.Date.today())
        return price_unit
| jeremiahyan/odoo | addons/sale/models/account_move_line.py | Python | gpl-3.0 | 11,377 |
#!/usr/bin/python
# Copyright 2012. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.bfgroup.xyz/b2/LICENSE.txt)
# Test correct "-p" option handling.
import BoostBuild
t = BoostBuild.Tester(["-d1"], pass_toolset=False)
t.write("file.jam", """\
prefix = "echo \\"" ;
suffix = "\\"" ;
if $(NT)
{
prefix = "(echo " ;
suffix = ")" ;
}
actions go
{
$(prefix)stdout$(suffix)
>&2 $(prefix)stderr$(suffix)
}
ECHO "{{{" $(XXX) "}}}" ;
ALWAYS all ;
go all ;
""")
t.run_build_system(["-ffile.jam", "-sXXX=1"], stderr="")
t.expect_output_lines("{{{ 1 }}}")
t.expect_output_lines("stdout")
t.expect_output_lines("stderr")
t.expect_nothing_more()
t.run_build_system(["-ffile.jam", "-sXXX=2", "-p0"], stderr="")
t.expect_output_lines("{{{ 2 }}}")
t.expect_output_lines("stdout")
t.expect_output_lines("stderr")
t.expect_nothing_more()
t.run_build_system(["-ffile.jam", "-sXXX=3", "-p1"], stderr="")
t.expect_output_lines("{{{ 3 }}}")
t.expect_output_lines("stdout")
t.expect_output_lines("stderr*", False)
t.expect_nothing_more()
t.run_build_system(["-ffile.jam", "-sXXX=4", "-p2"], stderr="stderr\n")
t.expect_output_lines("{{{ 4 }}}")
t.expect_output_lines("stdout*", False)
t.expect_output_lines("stderr*", False)
t.expect_nothing_more()
t.run_build_system(["-ffile.jam", "-sXXX=5", "-p3"], stderr="stderr\n")
t.expect_output_lines("{{{ 5 }}}")
t.expect_output_lines("stdout")
t.expect_output_lines("stderr*", False)
t.expect_nothing_more()
t.cleanup()
| qianqians/abelkhan | cpp_component/3rdparty/boost/tools/build/test/core_action_output.py | Python | lgpl-2.1 | 1,560 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-08-04 09:36
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the ArticleTag model and a
    # many-to-many relation from ArticlePost to its tags, and refreshes the
    # auto-captured default on ArticlePost.created.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article', '0004_auto_20170803_0210'),
    ]
    operations = [
        migrations.CreateModel(
            name='ArticleTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(max_length=500)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tag', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # NOTE(review): the literal timestamp below is the moment
        # `makemigrations` ran — an artifact of the model using a
        # non-callable default (e.g. datetime.now()) instead of a callable.
        migrations.AlterField(
            model_name='articlepost',
            name='created',
            field=models.DateTimeField(default=datetime.datetime(2017, 8, 4, 9, 36, 43, 480013, tzinfo=utc)),
        ),
        migrations.AddField(
            model_name='articlepost',
            name='article_tag',
            field=models.ManyToManyField(blank=True, related_name='article_tag', to='article.ArticleTag'),
        ),
    ]
| glon/django_test | article/migrations/0005_auto_20170804_0936.py | Python | apache-2.0 | 1,345 |
# coding=utf-8
import time
import copy
from mycodo.inputs.base_input import AbstractInput
from mycodo.inputs.sensorutils import is_device
def constraints_pass_measure_range(mod_input, value):
    """
    Validate the user-selected measurement range option.

    :param mod_input: SQL object with user-saved Input options
    :param value: str, the selected range
    :return: tuple of (all checks passed bool, list of error strings,
             mod_input passed through unchanged)
    """
    errors = []
    valid_ranges = ('1000', '2000', '3000', '5000')
    all_passed = value in valid_ranges
    if not all_passed:
        errors.append("Invalid range")
    return all_passed, errors, mod_input
# Measurements
# Single measurement channel (index 0): CO2 concentration in ppm
measurements_dict = {
    0: {
        'measurement': 'co2',
        'unit': 'ppm'
    }
}
# Input information
# Describes the sensor to Mycodo: identity, UI options, and the custom
# options/actions rendered on the Input configuration page.
INPUT_INFORMATION = {
    'input_name_unique': 'MH_Z19',
    'input_manufacturer': 'Winsen',
    'input_name': 'MH-Z19',
    'input_library': 'serial',
    'measurements_name': 'CO2',
    'measurements_dict': measurements_dict,
    'url_datasheet': 'https://www.winsen-sensor.com/d/files/PDF/Infrared%20Gas%20Sensor/NDIR%20CO2%20SENSOR/MH-Z19%20CO2%20Ver1.0.pdf',
    'message': 'This is the version of the sensor that does not include the ability to conduct '
               'automatic baseline correction (ABC). See the B version of the sensor if you wish to use ABC.',
    'options_enabled': [
        'uart_location',
        'uart_baud_rate',
        'period',
        'pre_output'
    ],
    'options_disabled': ['interface'],
    'interfaces': ['UART'],
    # Sensor factory defaults: UART on the Pi's primary serial port, 9600 baud
    'uart_location': '/dev/ttyAMA0',
    'uart_baud_rate': 9600,
    'custom_options': [
        {
            'id': 'measure_range',
            'type': 'select',
            'default_value': '5000',
            'options_select': [
                ('1000', '0 - 1000 ppmv'),
                ('2000', '0 - 2000 ppmv'),
                ('3000', '0 - 3000 ppmv'),
                ('5000', '0 - 5000 ppmv'),
            ],
            'required': True,
            'constraints_pass': constraints_pass_measure_range,
            'name': 'Measurement Range',
            'phrase': 'Set the measuring range of the sensor'
        }
    ],
    'custom_actions_message': 'Zero point calibration: activate the sensor in a 400 ppmv CO2 environment (outside '
                              'air), allow to run for 20 minutes, then press the Calibrate Zero Point button.<br>Span '
                              'point calibration: activate the sensor in an environment with a stable CO2 concentration'
                              ' between 1000 and 2000 ppmv (2000 recommended), allow to run for 20 minutes, enter the '
                              'ppmv value in the Span Point (ppmv) input field, then press the Calibrate Span Point '
                              'button. If running a span point calibration, run a zero point calibration first. A span '
                              'point calibration is not necessary and should only be performed if you know what you are'
                              ' doing and can accurately produce a 2000 ppmv environment.',
    'custom_actions': [
        {
            'id': 'calibrate_zero_point',
            'type': 'button',
            'name': 'Calibrate Zero Point'
        },
        {
            'id': 'span_point_value_ppmv',
            'type': 'integer',
            'default_value': 2000,
            'name': 'Span Point (ppmv)',
            'phrase': 'The ppmv concentration for a span point calibration'
        },
        {
            'id': 'calibrate_span_point',
            'type': 'button',
            'name': 'Calibrate Span Point'
        }
    ]
}
class InputModule(AbstractInput):
    """A sensor support class that monitors the MH-Z19's CO2 concentration."""

    def __init__(self, input_dev, testing=False):
        super(InputModule, self).__init__(input_dev, testing=testing, name=__name__)

        self.ser = None  # serial.Serial connection to the sensor
        self.measuring = None  # True while a measurement is in progress
        self.calibrating = None  # True while a calibration is in progress
        self.measure_range = None  # populated from saved custom options

        if not testing:
            self.setup_custom_options(
                INPUT_INFORMATION['custom_options'], input_dev)
            self.initialize_input()

    def initialize_input(self):
        """Open the serial port and apply the configured measurement range."""
        import serial

        if is_device(self.input_dev.uart_location):
            try:
                self.ser = serial.Serial(
                    port=self.input_dev.uart_location,
                    baudrate=self.input_dev.baud_rate,
                    timeout=1,
                    writeTimeout=5)
            except serial.SerialException:
                self.logger.exception('Opening serial')
        else:
            self.logger.error('Could not open "{dev}". Check the device location is correct.'.format(
                dev=self.input_dev.uart_location))

        if self.measure_range:
            self.set_measure_range(self.measure_range)

        time.sleep(0.1)

    def get_measurement(self):
        """Gets the MH-Z19's CO2 concentration in ppmv."""
        if not self.ser:
            self.logger.error("Error 101: Device not set up. See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.")
            return

        self.return_dict = copy.deepcopy(measurements_dict)

        # Wait for any in-progress calibration to finish before reading
        while self.calibrating:
            time.sleep(0.1)
        self.measuring = True

        try:
            self.ser.flushInput()
            # 0x86: "read gas concentration" command; last byte is checksum
            self.ser.write(bytearray([0xff, 0x01, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79]))
            time.sleep(.01)
            resp = self.ser.read(9)

            if not resp:
                self.logger.debug("No response")
            elif len(resp) < 4:
                self.logger.debug("Too few values in response '{}'".format(resp))
            elif resp[0] != 0xff or resp[1] != 0x86:
                # First two bytes must be the 0xff start byte and the
                # 0x86 command echo (was mislabeled "Bad checksum")
                self.logger.error("Bad response header")
            else:
                # CO2 ppmv is a 16-bit big-endian value in bytes 2 and 3
                co2 = (resp[2] * 256) + resp[3]
                self.value_set(0, co2)
        except:
            self.logger.exception("get_measurement()")
        finally:
            self.measuring = False

        return self.return_dict

    def set_measure_range(self, measure_range):
        """
        Sets the measurement range. Options are: '1000', '2000', '3000', or '5000' (ppmv)

        :param measure_range: string
        :return: None
        """
        # 0x99: "set detection range" command; bytes 6-7 hold the range
        # (big-endian) and the last byte is the frame checksum
        commands = {
            '1000': [0xff, 0x01, 0x99, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x7b],
            '2000': [0xff, 0x01, 0x99, 0x00, 0x00, 0x00, 0x07, 0xd0, 0x8f],
            '3000': [0xff, 0x01, 0x99, 0x00, 0x00, 0x00, 0x0b, 0xb8, 0xa3],
            '5000': [0xff, 0x01, 0x99, 0x00, 0x00, 0x00, 0x13, 0x88, 0xcb],
        }
        if measure_range not in commands:
            # Previously returned the string "out of range", contradicting the
            # documented None return; log so the misconfiguration is visible
            self.logger.error("Invalid measurement range: '{}'".format(measure_range))
            return
        self.ser.write(bytearray(commands[measure_range]))

    def calibrate_span_point(self, args_dict):
        """
        Span Point Calibration
        from https://github.com/UedaTakeyuki/mh-z19

        :param args_dict: dict of custom-action inputs; must contain an
            integer 'span_point_value_ppmv'
        """
        if 'span_point_value_ppmv' not in args_dict:
            self.logger.error("Cannot conduct span point calibration without a ppmv value")
            return
        if not isinstance(args_dict['span_point_value_ppmv'], int):
            self.logger.error("ppmv value does not represent an integer: '{}', type: {}".format(
                args_dict['span_point_value_ppmv'], type(args_dict['span_point_value_ppmv'])))
            return

        # Wait for any in-progress measurement to finish first
        while self.measuring:
            time.sleep(0.1)
        self.calibrating = True

        try:
            self.logger.info("Conducting span point calibration with a value of {} ppmv".format(
                args_dict['span_point_value_ppmv']))
            # Split the ppmv value into high and low bytes for the 0x88 command
            b3 = args_dict['span_point_value_ppmv'] // 256
            b4 = args_dict['span_point_value_ppmv'] % 256
            c = self.checksum([0x01, 0x88, b3, b4])
            self.ser.write(bytearray([0xff, 0x01, 0x88, b3, b4, 0x00, 0x0b, 0xb8, c]))
            time.sleep(0.1)
        except:
            # Bug fix: Logger.exception() requires a message argument;
            # calling it with none raised a TypeError that masked the error
            self.logger.exception("calibrate_span_point()")
        finally:
            self.calibrating = False

    def calibrate_zero_point(self, args_dict):
        """
        Zero Point Calibration
        from https://github.com/UedaTakeyuki/mh-z19

        :param args_dict: dict of custom-action inputs (unused)
        """
        # Wait for any in-progress measurement to finish first
        while self.measuring:
            time.sleep(0.1)
        self.calibrating = True

        try:
            self.logger.info("Conducting zero point calibration")
            # 0x87: "zero point calibration" command
            self.ser.write(bytearray([0xff, 0x01, 0x87, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78]))
            time.sleep(0.1)
        except:
            # Bug fix: Logger.exception() requires a message argument
            self.logger.exception("calibrate_zero_point()")
        finally:
            self.calibrating = False

    @staticmethod
    def checksum(array):
        """
        Compute the MH-Z19 frame checksum (two's complement of the low byte
        of the payload sum).

        The trailing "% 0x100" is a bug fix: when sum(array) was a multiple
        of 256 the original expression evaluated to 256, which raises
        ValueError when placed into a bytearray.
        """
        return (0xff - (sum(array) % 0x100) + 1) % 0x100
| kizniche/Mycodo | mycodo/inputs/mh_z19.py | Python | gpl-3.0 | 9,171 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, ismount, \
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import get_data_dir, get_tmp_dir, DiskFileRouter
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900  # seconds; fallback when 'rsync_timeout' is unset
# Install the configured eventlet hub before any greenthreads are spawned
hubs.use_hub(get_hub())
def _do_listdir(partition, replication_cycle):
return (((partition + replication_cycle) % 10) == 0)
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
    def __init__(self, conf, logger=None):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object
        """
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='object-replicator')
        self.devices_dir = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
        self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
        # In servers-per-port mode no single port identifies this node, so
        # port is left as None and device-locality is decided differently.
        self.port = None if self.servers_per_port else \
            int(conf.get('bind_port', 6200))
        self.concurrency = int(conf.get('concurrency', 1))
        self.stats_interval = int(conf.get('stats_interval', '300'))
        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
        self.next_check = time.time() + self.ring_check_interval
        # Random starting point in the 0-9 cycle staggers full partition
        # listdirs (see _do_listdir) across processes and passes.
        self.replication_cycle = random.randint(0, 9)
        self.partition_times = []
        # 'run_pause' is the deprecated name for 'interval'
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.rsync_timeout = int(conf.get('rsync_timeout',
                                          DEFAULT_RSYNC_TIMEOUT))
        self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
        self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
        self.rsync_compress = config_true_value(
            conf.get('rsync_compress', 'no'))
        self.rsync_module = conf.get('rsync_module', '').rstrip('/')
        if not self.rsync_module:
            self.rsync_module = '{replication_ip}::object'
        self.http_timeout = int(conf.get('http_timeout', 60))
        self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = os.path.join(self.recon_cache_path, "object.recon")
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.node_timeout = float(conf.get('node_timeout', 10))
        # Resolve the sync strategy (self.rsync or self.ssync) once, here
        self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.default_headers = {
            'Content-Length': '0',
            'user-agent': 'object-replicator %s' % os.getpid()}
        self.rsync_error_log_line_length = \
            int(conf.get('rsync_error_log_line_length', 0))
        self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                         False))
        self.handoff_delete = config_auto_int_value(
            conf.get('handoff_delete', 'auto'), 0)
        if any((self.handoff_delete, self.handoffs_first)):
            self.logger.warning('Handoff only mode is not intended for normal '
                                'operation, please disable handoffs_first and '
                                'handoff_delete before the next '
                                'normal rebalance')
        self._df_router = DiskFileRouter(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
    def _get_my_replication_ips(self):
        """
        Return the replication IPs of all ring devices that are local to
        this node (matching one of our IPs and our configured port),
        across every storage policy.
        """
        my_replication_ips = set()
        ips = whataremyips()
        for policy in POLICIES:
            self.load_object_ring(policy)
            for local_dev in [dev for dev in policy.object_ring.devs
                              if dev and dev['replication_ip'] in ips and
                              dev['replication_port'] == self.port]:
                my_replication_ips.add(local_dev['replication_ip'])
        return list(my_replication_ips)
    # Just exists for doc anchor point
    def sync(self, node, job, suffixes, *args, **kwargs):
        """
        Synchronize local suffix directories from a partition with a remote
        node.

        Dispatches to rsync() or ssync() via self.sync_method, which is
        resolved from the 'sync_method' config option in __init__.

        :param node: the "dev" entry for the remote node to sync with
        :param job: information about the partition being synced
        :param suffixes: a list of suffixes which need to be pushed

        :returns: boolean and dictionary, boolean indicating success or failure
        """
        return self.sync_method(node, job, suffixes, *args, **kwargs)
    def load_object_ring(self, policy):
        """
        Make sure the policy's rings are loaded.

        Loading is idempotent; a ring already loaded from swift_dir is
        simply returned.

        :param policy: the StoragePolicy instance
        :returns: appropriate ring object
        """
        policy.load_ring(self.swift_dir)
        return policy.object_ring
    def _rsync(self, args):
        """
        Execute the rsync binary to replicate a partition.

        stdout and stderr are merged and read fully before wait() so the
        child cannot block on a full pipe; the whole invocation is bounded
        by self.rsync_timeout.

        :returns: return code of rsync process. 0 is successful
        """
        start_time = time.time()
        ret_val = None
        try:
            with Timeout(self.rsync_timeout):
                proc = subprocess.Popen(args,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
                results = proc.stdout.read()
                ret_val = proc.wait()
        except Timeout:
            self.logger.error(_("Killing long-running rsync: %s"), str(args))
            proc.kill()
            return 1  # failure response code
        total_time = time.time() - start_time
        for result in results.split('\n'):
            if result == '':
                continue
            # "cd+" itemized-changes lines are directory-creation noise
            if result.startswith('cd+'):
                continue
            if not ret_val:
                self.logger.info(result)
            else:
                self.logger.error(result)
        if ret_val:
            error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
                {'args': str(args), 'ret': ret_val}
            if self.rsync_error_log_line_length:
                # Optionally truncate noisy rsync error lines for the logs
                error_line = error_line[:self.rsync_error_log_line_length]
            self.logger.error(error_line)
        else:
            # Only log at info level when rsync actually transferred something
            log_method = self.logger.info if results else self.logger.debug
            log_method(
                _("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
                {'src': args[-2], 'dst': args[-1], 'time': total_time})
        return ret_val
    def rsync(self, node, job, suffixes):
        """
        Uses rsync to implement the sync method. This was the first
        sync method in Swift.

        :returns: (success boolean, empty dict) -- rsync cannot report
                  per-object state, so the candidate-deletion dict is
                  always empty
        """
        if not os.path.exists(job['path']):
            return False, {}
        args = [
            'rsync',
            '--recursive',
            '--whole-file',
            '--human-readable',
            '--xattrs',
            '--itemize-changes',
            '--ignore-existing',
            '--timeout=%s' % self.rsync_io_timeout,
            '--contimeout=%s' % self.rsync_io_timeout,
            '--bwlimit=%s' % self.rsync_bwlimit,
            # Skip in-flight temp files (6-char random suffix, ".xxx.yyyyyy")
            '--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
        ]
        if self.rsync_compress and \
                job['region'] != node['region']:
            # Allow for compression, but only if the remote node is in
            # a different region than the local one.
            args.append('--compress')
        rsync_module = rsync_module_interpolation(self.rsync_module, node)
        had_any = False
        for suffix in suffixes:
            spath = join(job['path'], suffix)
            if os.path.exists(spath):
                args.append(spath)
                had_any = True
        if not had_any:
            return False, {}
        data_dir = get_data_dir(job['policy'])
        args.append(join(rsync_module, node['device'],
                    data_dir, job['partition']))
        return self._rsync(args) == 0, {}
    def ssync(self, node, job, suffixes, remote_check_objs=None):
        """
        Uses ssync to implement the sync method.

        :returns: (success boolean, dict of candidate objects for deletion)
                  as produced by the ssync Sender
        """
        return ssync_sender.Sender(
            self, node, job, suffixes, remote_check_objs)()
    def check_ring(self, object_ring):
        """
        Check to see if the ring has been updated

        The check is rate-limited to once per ring_check_interval.

        :param object_ring: the ring to check

        :returns: False if the ring has changed since the last check (the
                  caller should abort the current pass), True otherwise
        """
        if time.time() > self.next_check:
            self.next_check = time.time() + self.ring_check_interval
            if object_ring.has_changed():
                return False
        return True
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        The partition is pushed to every primary node for it; once enough
        pushes succeed (all of them, or handoff_delete of them), the local
        handoff copy is removed.

        :param job: a dict containing info about the partition to be replicated
        """

        def tpool_get_suffixes(path):
            # Run the listdir in a real thread so the hub isn't blocked
            return [suff for suff in os.listdir(path)
                    if len(suff) == 3 and isdir(join(path, suff))]

        self.replication_count += 1
        self.logger.increment('partition.delete.count.%s' % (job['device'],))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        failure_devs_info = set()
        begin = time.time()
        handoff_partition_deleted = False
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            synced_remote_regions = {}
            delete_objs = None
            if suffixes:
                for node in job['nodes']:
                    self.stats['rsync'] += 1
                    kwargs = {}
                    # With ssync, ask the remote to re-check objects we
                    # already pushed to its region so we only delete what
                    # every region has confirmed
                    if node['region'] in synced_remote_regions and \
                            self.conf.get('sync_method', 'rsync') == 'ssync':
                        kwargs['remote_check_objs'] = \
                            synced_remote_regions[node['region']]
                    # candidates is a dict(hash=>timestamp) of objects
                    # for deletion
                    success, candidates = self.sync(
                        node, job, suffixes, **kwargs)
                    if success:
                        # Tell the remote to rehash the pushed suffixes
                        with Timeout(self.http_timeout):
                            conn = http_connect(
                                node['replication_ip'],
                                node['replication_port'],
                                node['device'], job['partition'], 'REPLICATE',
                                '/' + '-'.join(suffixes), headers=headers)
                            conn.getresponse().read()
                        if node['region'] != job['region']:
                            synced_remote_regions[node['region']] = viewkeys(
                                candidates)
                    else:
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                    responses.append(success)
                # Only objects confirmed by EVERY remote region are safe
                # to delete locally (set intersection across regions)
                for cand_objs in synced_remote_regions.values():
                    if delete_objs is None:
                        delete_objs = cand_objs
                    else:
                        delete_objs = delete_objs & cand_objs
            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= \
                    self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job['nodes']) and \
                    all(responses)
            if delete_handoff:
                self.stats['remove'] += 1
                if (self.conf.get('sync_method', 'rsync') == 'ssync' and
                        delete_objs is not None):
                    self.logger.info(_("Removing %s objects"),
                                     len(delete_objs))
                    _junk, error_paths = self.delete_handoff_objs(
                        job, delete_objs)
                    # if replication works for a hand-off device and it failed,
                    # the remote devices which are target of the replication
                    # from the hand-off device will be marked. Because cleanup
                    # after replication failed means replicator needs to
                    # replicate again with the same info.
                    if error_paths:
                        failure_devs_info.update(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in job['nodes']])
                else:
                    self.delete_partition(job['path'])
                    handoff_partition_deleted = True
            elif not suffixes:
                # Nothing left in the partition dir; just remove it
                self.delete_partition(job['path'])
                handoff_partition_deleted = True
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
            self._add_failure_stats(failure_devs_info)
        finally:
            target_devs_info = set([(target_dev['replication_ip'],
                                     target_dev['device'])
                                    for target_dev in job['nodes']])
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            if not handoff_partition_deleted:
                self.handoffs_remaining += 1
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.delete.timing', begin)
    def delete_partition(self, path):
        """Remove an entire partition tree, off-hub via the thread pool."""
        self.logger.info(_("Removing partition: %s"), path)
        tpool.execute(shutil.rmtree, path)
    def delete_handoff_objs(self, job, delete_objs):
        """
        Remove the given object hash dirs from a handoff partition, pruning
        each now-empty suffix dir as we go.

        :param job: the handoff job dict (provides obj_path and partition)
        :param delete_objs: iterable of object hashes to remove
        :returns: (list of removed object paths, list of paths whose suffix
                   dir cleanup failed unexpectedly)
        """
        success_paths = []
        error_paths = []
        for object_hash in delete_objs:
            object_path = storage_directory(job['obj_path'], job['partition'],
                                            object_hash)
            tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
            suffix_dir = dirname(object_path)
            try:
                # Only succeeds when the suffix dir is now empty
                os.rmdir(suffix_dir)
                success_paths.append(object_path)
            except OSError as e:
                # ENOENT/ENOTEMPTY are expected (already gone / still has
                # other objects); anything else is a real error
                if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                    error_paths.append(object_path)
                    self.logger.exception(
                        "Unexpected error trying to cleanup suffix dir:%r",
                        suffix_dir)
        return success_paths, error_paths
    def update(self, job):
        """
        High-level method that replicates a single partition.

        Compares local suffix hashes against each primary peer's (via
        REPLICATE) and pushes only the suffixes that differ.

        :param job: a dict containing info about the partition to be replicated
        """
        self.replication_count += 1
        self.logger.increment('partition.update.count.%s' % (job['device'],))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        target_devs_info = set()
        failure_devs_info = set()
        begin = time.time()
        df_mgr = self._df_router[job['policy']]
        try:
            # Compute local suffix hashes (in a real thread); periodically
            # force a full listdir based on the 0-9 replication cycle
            hashed, local_hash = tpool_reraise(
                df_mgr._get_hashes, job['device'],
                job['partition'], job['policy'],
                do_listdir=_do_listdir(
                    int(job['partition']),
                    self.replication_cycle))
            self.suffix_hash += hashed
            self.logger.update_stats('suffix.hashes', hashed)
            attempts_left = len(job['nodes'])
            synced_remote_regions = set()
            random.shuffle(job['nodes'])
            # Fall back to handoff nodes if a primary reports unmounted
            nodes = itertools.chain(
                job['nodes'],
                job['policy'].object_ring.get_more_nodes(
                    int(job['partition'])))
            while attempts_left > 0:
                # If this throws StopIteration it will be caught way below
                node = next(nodes)
                target_devs_info.add((node['replication_ip'], node['device']))
                attempts_left -= 1
                # if we have already synced to this remote region,
                # don't sync again on this replication pass
                if node['region'] in synced_remote_regions:
                    continue
                try:
                    with Timeout(self.http_timeout):
                        resp = http_connect(
                            node['replication_ip'], node['replication_port'],
                            node['device'], job['partition'], 'REPLICATE',
                            '', headers=headers).getresponse()
                        if resp.status == HTTP_INSUFFICIENT_STORAGE:
                            self.logger.error(
                                _('%(replication_ip)s/%(device)s '
                                  'responded as unmounted'), node)
                            # Don't charge this attempt; try a handoff node
                            attempts_left += 1
                            failure_devs_info.add((node['replication_ip'],
                                                   node['device']))
                            continue
                        if resp.status != HTTP_OK:
                            self.logger.error(_("Invalid response %(resp)s "
                                                "from %(ip)s"),
                                              {'resp': resp.status,
                                               'ip': node['replication_ip']})
                            failure_devs_info.add((node['replication_ip'],
                                                   node['device']))
                            continue
                        remote_hash = pickle.loads(resp.read())
                        del resp
                    suffixes = [suffix for suffix in local_hash if
                                local_hash[suffix] !=
                                remote_hash.get(suffix, -1)]
                    if not suffixes:
                        self.stats['hashmatch'] += 1
                        continue
                    # Recalculate the differing suffixes to avoid pushing
                    # based on stale hashes, then re-diff
                    hashed, recalc_hash = tpool_reraise(
                        df_mgr._get_hashes,
                        job['device'], job['partition'], job['policy'],
                        recalculate=suffixes)
                    self.logger.update_stats('suffix.hashes', hashed)
                    local_hash = recalc_hash
                    suffixes = [suffix for suffix in local_hash if
                                local_hash[suffix] !=
                                remote_hash.get(suffix, -1)]
                    self.stats['rsync'] += 1
                    success, _junk = self.sync(node, job, suffixes)
                    # Ask the remote to rehash the suffixes we just pushed
                    with Timeout(self.http_timeout):
                        conn = http_connect(
                            node['replication_ip'], node['replication_port'],
                            node['device'], job['partition'], 'REPLICATE',
                            '/' + '-'.join(suffixes),
                            headers=headers)
                        conn.getresponse().read()
                    if not success:
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                    # add only remote region when replicate succeeded
                    if success and node['region'] != job['region']:
                        synced_remote_regions.add(node['region'])
                    self.suffix_sync += len(suffixes)
                    self.logger.update_stats('suffix.syncs', len(suffixes))
                except (Exception, Timeout):
                    failure_devs_info.add((node['replication_ip'],
                                           node['device']))
                    self.logger.exception(_("Error syncing with node: %s") %
                                          node)
            self.suffix_count += len(local_hash)
        except (Exception, Timeout):
            failure_devs_info.update(target_devs_info)
            self._add_failure_stats(failure_devs_info)
            self.logger.exception(_("Error syncing partition"))
        finally:
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.update.timing', begin)
    def stats_line(self):
        """
        Logs various stats for the currently running replication pass.
        """
        if self.replication_count:
            # Guard against a zero elapsed time on the first instant
            elapsed = (time.time() - self.start) or 0.000001
            rate = self.replication_count / elapsed
            self.logger.info(
                _("%(replicated)d/%(total)d (%(percentage).2f%%)"
                  " partitions replicated in %(time).2fs (%(rate).2f/sec, "
                  "%(remaining)s remaining)"),
                {'replicated': self.replication_count, 'total': self.job_count,
                 'percentage': self.replication_count * 100.0 / self.job_count,
                 'time': time.time() - self.start, 'rate': rate,
                 'remaining': '%d%s' % compute_eta(self.start,
                                                   self.replication_count,
                                                   self.job_count)})
            self.logger.info(_('%(success)s successes, %(failure)s failures')
                             % self.stats)
            if self.suffix_count:
                self.logger.info(
                    _("%(checked)d suffixes checked - "
                      "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                    {'checked': self.suffix_count,
                     'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
                     'synced': (self.suffix_sync * 100.0) / self.suffix_count})
                # Sort in place so min/median/max can be read directly
                self.partition_times.sort()
                self.logger.info(
                    _("Partition times: max %(max).4fs, "
                      "min %(min).4fs, med %(med).4fs"),
                    {'max': self.partition_times[-1],
                     'min': self.partition_times[0],
                     'med': self.partition_times[
                         len(self.partition_times) // 2]})
        else:
            self.logger.info(
                _("Nothing replicated for %s seconds."),
                (time.time() - self.start))
    def kill_coros(self):
        """Utility function that kills all coroutines currently running."""
        # Copy the set: killing mutates coroutines_running during iteration
        for coro in list(self.run_pool.coroutines_running):
            try:
                coro.kill(GreenletExit)
            except GreenletExit:
                # The greenlet may already be exiting; nothing to do
                pass
    def heartbeat(self):
        """
        Loop that runs in the background during replication.  It
        periodically logs progress.

        Runs forever; the spawning greenthread is killed at the end of
        each replication pass.
        """
        while True:
            eventlet.sleep(self.stats_interval)
            self.stats_line()
    def detect_lockups(self):
        """
        In testing, the pool.waitall() call very occasionally failed to return.
        This is an attempt to make sure the replicator finishes its replication
        pass in some eventuality.
        """
        while True:
            eventlet.sleep(self.lockup_timeout)
            # No partitions completed in a whole lockup interval: assume the
            # worker coroutines are stuck and kill them
            if self.replication_count == self.last_replication_count:
                self.logger.error(_("Lockup detected.. killing live coros."))
                self.kill_coros()
            self.last_replication_count = self.replication_count
    def build_replication_jobs(self, policy, ips, override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy

        :param policy: the (replication-type) StoragePolicy to build jobs for
        :param ips: list of this host's IPs, used to find local ring devices
        :param override_devices: if set, only build jobs for these devices
        :param override_partitions: if set, only build jobs for these
                                    partitions
        :returns: list of job dicts (path, device, obj_path, nodes, delete,
                  policy, partition, region)
        """
        jobs = []
        df_mgr = self._df_router[policy]
        self.all_devs_info.update(
            [(dev['replication_ip'], dev['device'])
             for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [dev for dev in policy.object_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            found_local = True
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                # Unmounted device: count every ring device as failed so
                # the pass is not reported as fully successful
                self._add_failure_stats(
                    [(failure_dev['replication_ip'],
                      failure_dev['device'])
                     for failure_dev in policy.object_ring.devs
                     if failure_dev])
                self.logger.warning(
                    _('%s is not mounted'), local_dev['device'])
                continue
            # Clean up abandoned temp files from interrupted PUTs
            unlink_older_than(tmp_path, time.time() -
                              df_mgr.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue
                if (partition.startswith('auditor_status_') and
                        partition.endswith('.json')):
                    # ignore auditor status files
                    continue
                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    # Peers to push to: all primaries except ourselves
                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             # delete=True means we are not a primary for
                             # this partition (a handoff copy)
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    # Non-numeric partition dir name
                    if part_nodes:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in nodes])
                    else:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in policy.object_ring.devs
                             if failure_dev])
                    continue
        if not found_local:
            self.logger.error("Can't find itself in policy with index %d with"
                              " ips %s and with port %s in ring file, not"
                              " replicating",
                              int(policy), ", ".join(ips), self.port)
        return jobs
    def collect_jobs(self, override_devices=None, override_partitions=None,
                     override_policies=None):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be rsynced.

        :param override_devices: if set, only jobs on these devices
            will be returned
        :param override_partitions: if set, only jobs on these partitions
            will be returned
        :param override_policies: if set, only jobs in these storage
            policies will be returned
        """
        jobs = []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            # Skip replication if next_part_power is set. In this case
            # every object is hard-linked twice, but the replicator can't
            # detect them and would create a second copy of the file if not
            # yet existing - and this might double the actual transferred
            # and stored data
            next_part_power = getattr(
                policy.object_ring, 'next_part_power', None)
            if next_part_power is not None:
                self.logger.warning(
                    _("next_part_power set in policy '%s'. Skipping"),
                    policy.name)
                continue
            if policy.policy_type == REPL_POLICY:
                if (override_policies is not None and
                        str(policy.idx) not in override_policies):
                    continue
                # ensure rings are loaded for policy
                self.load_object_ring(policy)
                jobs += self.build_replication_jobs(
                    policy, ips, override_devices=override_devices,
                    override_partitions=override_partitions)
        # Shuffle so concurrent replicators don't converge on the same
        # partitions in the same order
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
    def replicate(self, override_devices=None, override_partitions=None,
                  override_policies=None):
        """Run a replication pass"""
        self.start = time.time()
        self.suffix_count = 0
        self.suffix_sync = 0
        self.suffix_hash = 0
        self.replication_count = 0
        self.last_replication_count = -1
        # Advance the 0-9 cycle that staggers full listdirs (see _do_listdir)
        self.replication_cycle = (self.replication_cycle + 1) % 10
        self.partition_times = []
        self.my_replication_ips = self._get_my_replication_ips()
        self.all_devs_info = set()
        self.handoffs_remaining = 0
        stats = eventlet.spawn(self.heartbeat)
        lockup_detector = eventlet.spawn(self.detect_lockups)
        eventlet.sleep()  # Give spawns a cycle
        current_nodes = None
        try:
            self.run_pool = GreenPool(size=self.concurrency)
            jobs = self.collect_jobs(override_devices=override_devices,
                                     override_partitions=override_partitions,
                                     override_policies=override_policies)
            for job in jobs:
                current_nodes = job['nodes']
                if override_devices and job['device'] not in override_devices:
                    continue
                if override_partitions and \
                        job['partition'] not in override_partitions:
                    continue
                dev_path = join(self.devices_dir, job['device'])
                if self.mount_check and not ismount(dev_path):
                    self._add_failure_stats([(failure_dev['replication_ip'],
                                              failure_dev['device'])
                                             for failure_dev in job['nodes']])
                    self.logger.warning(_('%s is not mounted'), job['device'])
                    continue
                if self.handoffs_first and not job['delete']:
                    # in handoffs first mode, we won't process primary
                    # partitions until rebalance was successful!
                    if self.handoffs_remaining:
                        self.logger.warning(_(
                            "Handoffs first mode still has handoffs "
                            "remaining.  Aborting current "
                            "replication pass."))
                        break
                if not self.check_ring(job['policy'].object_ring):
                    self.logger.info(_("Ring change detected. Aborting "
                                       "current replication pass."))
                    return
                try:
                    if isfile(job['path']):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job['path'])
                        os.remove(job['path'])
                        continue
                except OSError:
                    continue
                if job['delete']:
                    self.run_pool.spawn(self.update_deleted, job)
                else:
                    self.run_pool.spawn(self.update, job)
            current_nodes = None
            # Bound the final wait so a stuck worker can't hang the pass
            with Timeout(self.lockup_timeout):
                self.run_pool.waitall()
        except (Exception, Timeout):
            if current_nodes:
                self._add_failure_stats([(failure_dev['replication_ip'],
                                          failure_dev['device'])
                                         for failure_dev in current_nodes])
            else:
                self._add_failure_stats(self.all_devs_info)
            self.logger.exception(_("Exception in top-level replication loop"))
            self.kill_coros()
        finally:
            stats.kill()
            lockup_detector.kill()
            self.stats_line()
            self.stats['attempted'] = self.replication_count
def run_once(self, *args, **kwargs):
self._zero_stats()
self.logger.info(_("Running object replicator in script mode."))
override_devices = list_from_csv(kwargs.get('devices'))
override_partitions = list_from_csv(kwargs.get('partitions'))
override_policies = list_from_csv(kwargs.get('policies'))
if not override_devices:
override_devices = None
if not override_partitions:
override_partitions = None
if not override_policies:
override_policies = None
self.replicate(
override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices):
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object replicator in daemon mode."))
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
| psachin/swift | swift/obj/replicator.py | Python | apache-2.0 | 38,417 |
import sys
def DPChange(money, coins):
    """Return the minimum number of coins needed to make change for *money*.

    Coins may be reused any number of times.  Classic bottom-up dynamic
    program: ``min_num_coins[m]`` is the fewest coins summing to ``m``.

    :param money: target amount (non-negative int)
    :param coins: iterable of positive coin denominations
    :return: minimum coin count; for amounts unreachable with the given
             denominations the ``sys.maxsize``-based sentinel is returned
             (unchanged from the original behaviour).
    """
    min_num_coins = [sys.maxsize] * (money + 1)
    min_num_coins[0] = 0  # zero coins make zero change
    for m in range(1, money + 1):
        for coin in coins:
            # Using this coin is valid and strictly better than the best so far.
            if m >= coin and min_num_coins[m - coin] + 1 < min_num_coins[m]:
                min_num_coins[m] = min_num_coins[m - coin] + 1
    return min_num_coins[money]
if __name__ == "__main__":
    # Usage: python MinNumCoin.py <input-file>
    # Input format -- line 1: target amount; line 2: comma-separated coins.
    path = sys.argv[1]  # renamed from 'file' to avoid shadowing the builtin
    with open(path) as f:
        money = int(f.readline())
        coins = [int(c) for c in f.readline().split(',')]
    print(DPChange(money, coins))
# https://djangosnippets.org/snippets/690/
import re
from django.template.defaultfilters import slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Compute a slug for ``value`` that is unique within ``queryset`` and
    store it on ``instance``.

    ``slug_field_name`` names the slug field (also used for the uniqueness
    lookup).  ``queryset`` defaults to the model's ``_default_manager.all()``
    with the current instance excluded.  On collision, ``-2``, ``-3``, ...
    is appended, truncating the base slug if needed so the result still
    fits the field's ``max_length``.
    """
    slug_field = instance._meta.get_field(slug_field_name)
    slug = getattr(instance, slug_field.attname)
    max_length = slug_field.max_length

    # Build the candidate slug, honouring the field's max_length.
    slug = slugify(value)
    if max_length:
        slug = slug[:max_length]
    slug = _slug_strip(slug, slug_separator)
    base_slug = slug

    # Restrict the uniqueness check to other rows of the same model.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
        if instance.pk:
            queryset = queryset.exclude(pk=instance.pk)

    # Add '-2', '-3', ... until the slug is unique.  An empty base slug also
    # forces at least one suffixed attempt.
    suffix_number = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        candidate = base_slug
        suffix = '%s%s' % (slug_separator, suffix_number)
        if max_length and len(candidate) + len(suffix) > max_length:
            candidate = candidate[:max_length - len(suffix)]
            candidate = _slug_strip(candidate, slug_separator)
        slug = '%s%s' % (candidate, suffix)
        suffix_number += 1

    setattr(instance, slug_field.attname, slug)
def _slug_strip(value, separator='-'):
"""
Cleans up a slug by removing slug separator characters that occur at the
beginning or end of a slug.
If an alternate separator is used, it will also replace any instances of
the default '-' separator with the new separator.
"""
separator = separator or ''
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
# Remove multiple instances and if an alternate separator is provided,
# replace the default '-' separator.
if separator != re_sep:
value = re.sub('%s+' % re_sep, separator, value)
# Remove separator from the beginning and end of the slug.
if separator:
if separator != '-':
re_sep = re.escape(separator)
value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
return value | thelabnyc/wagtail_blog | blog/utils.py | Python | apache-2.0 | 2,644 |
#!/usr/bin/env python
# ------------------------------------------------------------
# lexical_specs.py
#
# Trinity language lexicographical specifications
#
# Authors:
# Victor De Ponte, 05-38087, <[email protected]>
# Francisco Martinez, 09-10502, <[email protected]>
# ------------------------------------------------------------
from lexer.token import OneLineComment, Token
################################################################################
############################# Tokens specification #############################
################################################################################
class Tk_Comment(OneLineComment):
_pattern = r'#.*$'
_name = 'Comment'
class Tk_str(Token):
    """String literal token: double-quoted, backslash escapes allowed."""
    _pattern = r'"([^"\\]|\\.)*"'
    _name = 'String'
    def __init__(self, pattern=None):
        super(Tk_str, self).__init__(pattern=pattern)
        # String tokens display their matched value when printed.
        self._shows_value = True
    def match(self, line_no, col_no, inputString):
        """Delegate to Token.match, then strip the surrounding quotes from
        the matched token's stored value (escape sequences kept verbatim)."""
        ret = super(Tk_str, self).match(line_no, col_no, inputString)
        if ret is not None:
            ret._value = ret._value[1:len(ret._value)-1]
        return ret
    def getSpan(self):
        """Source span of the token: the quote-stripped value plus the two
        removed quote characters."""
        if self._value is not None:
            return len(self._value) + 2
        else:
            return 0
class Tk_ID(Token):
_pattern = r'[a-zA-Z][a-zA-Z0-9_]*'
_name = 'Id'
def __init__(self, pattern=None):
super(Tk_ID, self).__init__(pattern=pattern)
self._shows_value = True
class Tk_num(Token):
    """Numeric literal token: optional leading '-', digits, optional
    fractional part."""
    _pattern = r'[-]?([0-9]+)(\.[0-9]+)?'
    _name = 'Number'
    def __init__(self, pattern=None):
        super(Tk_num, self).__init__(pattern=pattern)
        # Numeric tokens display their matched value when printed.
        self._shows_value = True
    def match(self, line_no, col_no, inputString):
        """Match a number and cache its numeric (int/float) form.

        NOTE(review): the converted value is stored on *self* and read from
        self._value, while the matched token is *ret*; Tk_str.match, by
        contrast, mutates ret.  Verify against the Token base class whether
        match() returns self -- otherwise _shown_value lands on the wrong
        object.
        """
        ret = super(Tk_num, self).match(line_no, col_no, inputString)
        if ret is not None:
            self._shown_value = float(self._value) if '.' in self._value else int(self._value)
        return ret
class Tk_true(Token):
_pattern = r'\btrue\b'
_name = 'True'
class Tk_false(Token):
_pattern = r'\bfalse\b'
_name = 'False'
class Tk_bool(Token):
_pattern = r'\bboolean\b'
_name = 'Reserved word \'boolean\''
class Tk_number(Token):
_pattern = r'\bnumber\b'
_name = 'Reserved word \'number\''
class Tk_mat(Token):
_pattern = r'\bmatrix\b'
_name = 'Reserved word \'matrix\''
class Tk_row(Token):
_pattern = r'\brow\b'
_name = 'Reserved word \'row\''
class Tk_col(Token):
_pattern = r'\bcol\b'
_name = 'Reserved word \'col\''
class Tk_not(Token):
_pattern = r'\bnot\b'
_name = 'Reserved word \'not\''
class Tk_div(Token):
_pattern = r'\bdiv\b'
_name = '\'div\' operator'
class Tk_mod(Token):
_pattern = r'\bmod\b'
_name = '\'mod\' operator'
class Tk_print(Token):
_pattern = r'\bprint\b'
_name = 'Reserved word \'print\''
class Tk_use(Token):
_pattern = r'\buse\b'
_name = 'Reserved word \'use\''
class Tk_in(Token):
_pattern = r'\bin\b'
_name = 'Reserved word \'in\''
class Tk_end(Token):
_pattern = r'\bend\b'
_name = 'Reserved word \'end\''
class Tk_set(Token):
_pattern = r'\bset\b'
_name = 'Reserved word \'set\''
class Tk_read(Token):
_pattern = r'\bread\b'
_name = 'Reserved word \'read\''
class Tk_if(Token):
_pattern = r'\bif\b'
_name = 'Reserved word \'if\''
class Tk_then(Token):
_pattern = r'\bthen\b'
_name = 'Reserved word \'then\''
class Tk_else(Token):
_pattern = r'\belse\b'
_name = 'Reserved word \'else\''
class Tk_for(Token):
_pattern = r'\bfor\b'
_name = 'Reserved word \'for\''
class Tk_do(Token):
_pattern = r'\bdo\b'
_name = 'Reserved word \'do\''
class Tk_while(Token):
_pattern = r'\bwhile\b'
_name = 'Reserved word \'while\''
class Tk_function(Token):
_pattern = r'\bfunction\b'
_name = 'Reserved word \'function\''
class Tk_ret(Token):
_pattern = r'\breturn\b'
_name = 'Reserved word \'return\''
class Tk_beg(Token):
_pattern = r'\bbegin\b'
_name = 'Reserved word \'begin\''
class Tk_prog(Token):
_pattern = r'\bprogram\b'
_name = 'Reserved word \'program\''
class Tk_mplus(Token):
_pattern = r'\.\+\.'
_name = '.+. operator'
class Tk_mminus(Token):
_pattern = r'\.-\.'
_name = '.-. operator'
class Tk_mtimes(Token):
_pattern = r'\.\*\.'
_name = '.*. operator'
class Tk_mrdiv(Token):
_pattern = r'\./\.'
_name = './. operator'
class Tk_mrmod(Token):
_pattern = r'\.%\.'
_name = '.%. operator'
class Tk_mdiv(Token):
_pattern = r'\.div\.'
_name = '.div. operator'
class Tk_mmod(Token):
_pattern = r'\.mod\.'
_name = '.mod. operator'
class Tk_eq(Token):
_pattern = r'=='
_name = 'Equivalence'
class Tk_neq(Token):
_pattern = r'/='
_name = 'Inequivalence'
class Tk_leq(Token):
_pattern = r'<='
_name = 'Less or equal than'
class Tk_geq(Token):
_pattern = r'>='
_name = 'Greater or equal than'
class Tk_comma(Token):
_pattern = r','
_name = 'Comma'
class Tk_colon(Token):
_pattern = r':'
_name = 'Colon'
class Tk_scolon(Token):
_pattern = r';'
_name = 'Semicolon'
class Tk_obrace(Token):
_pattern = r'\{'
_name = 'Opening brace {'
class Tk_cbrace(Token):
_pattern = r'\}'
_name = 'Closing brace }'
class Tk_oparen(Token):
_pattern = r'\('
_name = 'Opening parenthesis ('
class Tk_cparen(Token):
_pattern = r'\)'
_name = 'Closing parenthesis )'
class Tk_obrack(Token):
_pattern = r'\['
_name = 'Opening bracket ['
class Tk_cbrack(Token):
_pattern = r'\]'
_name = 'Closing bracket ]'
class Tk_and(Token):
_pattern = r'&'
_name = 'Ampersand'
class Tk_or(Token):
_pattern = r'\|'
_name = 'Pipe'
class Tk_assign(Token):
_pattern = r'='
_name = 'Assign'
class Tk_great(Token):
_pattern = r'>'
_name = 'Greater than'
class Tk_less(Token):
_pattern = r'<'
_name = 'Less than'
class Tk_plus(Token):
_pattern = r'\+'
_name = 'Plus'
class Tk_minus(Token):
_pattern = r'-'
_name = 'Minus'
class Tk_times(Token):
_pattern = r'\*'
_name = 'Times'
class Tk_rdiv(Token):
_pattern = r'/'
_name = 'Into'
class Tk_rmod(Token):
_pattern = r'%'
_name = '% operator'
class Tk_trans(Token):
_pattern = r'\''
_name = 'Transpose'
# All token classes of the Trinity lexer, in declaration order.
# NOTE(review): the ordering here looks deliberate -- reserved words precede
# Tk_ID, multi-character operators (e.g. '.+.', '==', '<=') precede their
# single-character prefixes, and Tk_minus precedes Tk_num -- which suggests a
# first-match lexer.  Confirm against the lexer driver that consumes this
# list before reordering anything.
token_classes = [
    Tk_Comment,
    Tk_str,
    Tk_true,
    Tk_false,
    Tk_bool,
    Tk_number,
    Tk_mat,
    Tk_row,
    Tk_col,
    Tk_not,
    Tk_div,
    Tk_mod,
    Tk_print,
    Tk_use,
    Tk_in,
    Tk_end,
    Tk_set,
    Tk_read,
    Tk_if,
    Tk_then,
    Tk_else,
    Tk_for,
    Tk_do,
    Tk_while,
    Tk_function,
    Tk_ret,
    Tk_beg,
    Tk_prog,
    Tk_ID,
    Tk_minus,
    Tk_num,
    Tk_mplus,
    Tk_mminus,
    Tk_mtimes,
    Tk_mrdiv,
    Tk_mrmod,
    Tk_mdiv,
    Tk_mmod,
    Tk_eq,
    Tk_neq,
    Tk_leq,
    Tk_geq,
    Tk_comma,
    Tk_colon,
    Tk_scolon,
    Tk_obrace,
    Tk_cbrace,
    Tk_oparen,
    Tk_cparen,
    Tk_obrack,
    Tk_cbrack,
    Tk_and,
    Tk_or,
    Tk_assign,
    Tk_great,
    Tk_less,
    Tk_plus,
    Tk_times,
    Tk_rdiv,
    Tk_rmod,
    Tk_trans
]
################################################################################
######################### End of Tokens specification ##########################
################################################################################
| throoze/trinity | lang/lexical_specs.py | Python | gpl-3.0 | 7,459 |
"""
Menu Model [DiamondQuest]
Defines a menu.
Author(s): Wilfrantz Dede, Jason C. McDonald, Stanislav Schmidt
"""
# LICENSE (BSD-3-Clause)
# Copyright (c) 2020 MousePaw Media.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# CONTRIBUTING
# See https://www.mousepawmedia.com/developers for information
# on how to contribute to our projects.
import abc
import collections
from enum import Enum
import pygame
from diamondquest.common import FontAttributes, FontAttributeDefaults
from diamondquest.model.game import GameModel
class MenuItem(abc.ABC):
    """Abstract base for anything that can appear in a menu.

    Subclasses provide ``text`` and ``text_attributes``; this base class
    supplies key-down listener registration and dispatch.

    Attributes
    ----------
    key_down_listeners : dict
        Maps a key code to the list of handlers registered for it.
    """

    def __init__(self):
        self.key_down_listeners = collections.defaultdict(list)

    @property
    @abc.abstractmethod
    def text(self):
        """The text of the menu item."""

    @property
    @abc.abstractmethod
    def text_attributes(self):
        """The text attributes of the menu item."""

    def add_key_down_listener(self, key, listener):
        """Register a key down listener (idempotent per key/listener pair).

        Parameters
        ----------
        key : int
            The key press that should be handled.
        listener : function
            Zero-argument handler for the key press; its return value is
            ignored.
        """
        listeners = self.key_down_listeners[key]
        if listener not in listeners:
            listeners.append(listener)

    def remove_key_down_listener(self, key, listener):
        """Unregister a key listener.

        Parameters
        ----------
        key : int
            The key press that was handled by the listener.
        listener : function
            The listener to remove.

        Returns
        -------
        bool
            True when the listener was registered and has been removed,
            False otherwise.
        """
        try:
            self.key_down_listeners[key].remove(listener)
        except ValueError:
            return False
        return True

    def handle_key_press(self, key):
        """Invoke every listener registered for ``key``.

        Parameters
        ----------
        key : int
            The key that was pressed.
        """
        for listener in self.key_down_listeners[key]:
            listener()
class TextItem(MenuItem):
    """A non-interactive menu entry consisting of static text."""

    def __init__(self, text, attributes=FontAttributeDefaults.MENU):
        super().__init__()
        self.raw_text = text          # the literal string to display
        self.attributes = attributes  # font attributes used to render it
        # icon

    @property
    def text(self):
        """The displayed text (never changes)."""
        return self.raw_text

    @property
    def text_attributes(self):
        """Font attributes for rendering the text."""
        return self.attributes
class ButtonType(Enum):
    """How a ButtonItem responds to user input."""
    STATIC = 0  # text never changes
    SCROLL = 1  # left/right arrows scroll through options
    INPUT = 2  # user can type into button text
class ButtonItem(MenuItem):
    """An interactive menu item.

    Wraps a TextItem for its display text and carries a ButtonType
    describing how the button reacts to input.
    """
    def __init__(
        self,
        text,
        attributes=FontAttributeDefaults.MENU,
        button_type=ButtonType.STATIC,
    ):
        super().__init__()
        self.text_item = TextItem(text, attributes)
        self.button_type = button_type
    @property
    def text(self):
        """Displayed text (delegates to the wrapped TextItem)."""
        return self.text_item.text
    @property
    def text_attributes(self):
        """Font attributes (delegates to the wrapped TextItem)."""
        return self.text_item.text_attributes
class MenuType(Enum):
    """Identifies which menu definition MenuModel should serve."""
    GAME = 0  # the standard in-game menu built in MenuModel.initialize
    DEV = 1  # the (currently empty) dev menu -- presumably debug tooling; verify
class MenuModel:
    """The model for the menu.

    Class-level state holds the registry of all menus and which one is in
    use; each instance holds a title, an ordered list of MenuItems, and the
    position of the currently focused selectable (button) item.
    """

    menu_items = {}  # a dictionary storing button instances
    menus = {}  # a dictionary storing menu instances
    menu_in_use = MenuType.GAME  # which menu the game is currently using

    @classmethod
    def initialize(cls):
        """Build the standard menus and wire up their item listeners."""
        cls.menu_items["text_existing_miner"] = TextItem(text="Existing Miner")
        cls.menu_items["scroll_existing_miner"] = ButtonItem(
            text="<none>", button_type=ButtonType.SCROLL
        )
        cls.menu_items["text_new_miner"] = TextItem(text="New Miner")
        cls.menu_items["input_new_miner"] = ButtonItem(
            text="Enter Name", button_type=ButtonType.INPUT
        )
        cls.menu_items["scroll_music_volume"] = ButtonItem(
            text="Music: 10", button_type=ButtonType.SCROLL
        )
        cls.menu_items["scroll_sound_volume"] = ButtonItem(
            text="Sound: 10", button_type=ButtonType.SCROLL
        )
        cls.menu_items["button_quit"] = ButtonItem(text="QUIT")
        # Pressing Enter while QUIT is focused stops the game.
        cls.menu_items["button_quit"].add_key_down_listener(
            pygame.K_RETURN, lambda: GameModel.stop_game()
        )
        cls.menus[MenuType.GAME] = MenuModel(
            title="DiamondQuest",
            items=[
                cls.menu_items["text_existing_miner"],
                cls.menu_items["scroll_existing_miner"],
                cls.menu_items["text_new_miner"],
                cls.menu_items["input_new_miner"],
                cls.menu_items["scroll_music_volume"],
                cls.menu_items["scroll_sound_volume"],
                cls.menu_items["button_quit"],
            ],
        )
        cls.menus[MenuType.DEV] = MenuModel(title="DevMenu", items=[])

    @classmethod
    def get_menu(cls, menu_type=None):
        """Called by the View to get the contents of the menu.

        Raises ValueError when no menu is registered for ``menu_type``.
        """
        # If no specific menu is requested, get the default.
        if menu_type is None:
            menu_type = cls.menu_in_use
        if menu_type not in cls.menus:
            raise ValueError(f"No such menu type {menu_type}")
        return cls.menus[menu_type]

    @classmethod
    def use_menu(cls, menu_type):
        """Select which menu to use by default."""
        cls.menu_in_use = menu_type

    def __init__(self, title, items):
        self.title = TextItem(title)
        self.items = items
        # Indices (into self.items) of the interactive items.
        self.selectable_items = [
            i for i, item in enumerate(items) if isinstance(item, ButtonItem)
        ]
        # Position within selectable_items; -1 means nothing is selectable.
        self.which_selected = 0 if len(self.selectable_items) > 0 else -1

    @property
    def selected_item_idx(self):
        """Index (into self.items) of the focused item, or -1 if none."""
        if self.which_selected == -1:
            return -1
        return self.selectable_items[self.which_selected]

    def __iter__(self):
        # BUG FIX: the iterator was previously created but not returned,
        # so __iter__ returned None and iterating a MenuModel raised
        # TypeError.
        return iter(self.items)

    @classmethod
    def select_next_item(cls):
        """Move focus to the next selectable item, wrapping around."""
        menu = cls.get_menu()
        n_items = len(menu.selectable_items)
        if n_items == 0:
            # BUG FIX: guard menus with no selectable items (e.g. the DEV
            # menu) against modulo-by-zero.
            return
        menu.which_selected = (menu.which_selected + 1) % n_items

    @classmethod
    def select_prev_item(cls):
        """Move focus to the previous selectable item, wrapping around."""
        menu = cls.get_menu()
        n_items = len(menu.selectable_items)
        if n_items == 0:
            # BUG FIX: same modulo-by-zero guard as select_next_item.
            return
        menu.which_selected = (menu.which_selected - 1 + n_items) % n_items

    @classmethod
    def get_selected_item(cls):
        """Return the currently focused MenuItem, or None if none."""
        menu = cls.get_menu()
        idx = menu.selected_item_idx
        # BUG FIX: 0 is a valid item index; only -1 means "nothing
        # selectable".  The old `idx > 0` check silently dropped a
        # selectable item in the first menu slot.
        if idx >= 0:
            return menu.items[idx]
        else:
            return None
| mousepawgames/diamondquest | src/diamondquest/model/menu/menu.py | Python | gpl-3.0 | 8,321 |
import tools
def logout():
'''
Logs out of the current user
'''
return tools.term('pkill -u $USER')
def get_cpu_percent():
'''
Gets the current CPU percent
'''
return tools.term("vmstat 1 2 | tail -1 | awk '{print 100-$15}'")
def get_cpu_temp():
    '''
    Gets the current CPU temperature as whole degrees (thermal_zone0
    reports millidegrees, hence the /1000).  Returns -1 when the sysfs
    read comes back empty (the -1000 sentinel divided by 1000).
    '''
    temp_str = tools.term('cat /sys/class/thermal/thermal_zone0/temp')
    return int((int(temp_str) if len(temp_str) else -1000) / 1000)
def get_mem_used():
'''
Gets the current amount of memory being used
'''
txt = tools.term('vmstat -s | egrep -m2 -o "[0-9]+" | tail -1')
return int(int(txt) / 1000)
def get_network_ssid():
'''
Gets the currently connected network SSID
'''
return tools.term('iwgetid -r')
def get_battery_capacity():
'''
Gets the current battery capacity
'''
cap_str = tools.term('cat /sys/class/power_supply/BAT0/capacity')
return int(cap_str) if len(cap_str) else -1
def get_battery_state():
'''
Gets the battery state (Charging, Not Charging)
'''
return tools.term('cat /sys/class/power_supply/BAT0/status')
def get_active_window_name():
'''
Gets the active window name/title
'''
return tools.term('xdotool getwindowfocus getwindowname')
def get_workspace():
'''
Gets the current workspace number
'''
workspace = tools.term('xprop -root _NET_CURRENT_DESKTOP | grep -o "[0-9]*"')
return int(workspace) if len(workspace) else 0
def get_user():
'''
Gets the currently logged in user's username
'''
return tools.term('id -u -n')
def get_alsa_volume():
'''
Gets the volume level reported by alsamixer
'''
txt = tools.term('amixer sget Master | egrep -o "[0-9]+%" | head -1')
txt = txt.replace('%', '')
return int(txt)
def set_alsa_volume(percent):
    '''
    Sets the master volume level using amixer
    :param percent: The volume percent to set to (numeric)
    '''
    # NOTE(review): `percent` is interpolated directly into a shell command
    # string; callers must pass a trusted numeric value (tools.term appears
    # to execute this via a shell -- verify).
    return tools.term("amixer sset 'Master' %s%%" % (percent))
| TSedlar/gr8bar | src/modules/linux.py | Python | gpl-3.0 | 2,039 |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ["CHROMEDRIVER"], chrome_options=chrome_options)
driver.implicitly_wait(5)
try:
print driver.current_url
switch_to_app(driver)
print driver.current_url
print "wait for devtools open"
wait_window_handles(driver, 2)
print driver.window_handles
print "switch to devtools"
switch_to_devtools(driver)
print "click Console panel"
devtools_click_tab(driver, "console")
print "check if there is warning message in console panel"
elems = driver.find_elements_by_class_name("console-message-text")
output = ""
if len(elems) > 2:
for i in range(len(elems)):
if "MediaElementAudioSource" in elems[i].get_attribute("innerHTML"):
output = elems[i].get_attribute("innerHTML")
break
elif len(elems) == 1:
output = elems[0].get_attribute("innerHTML")
else:
output = ""
print output
assert("MediaElementAudioSource" not in output)
assert(output is "")
finally:
driver.quit()
| nwjs/nw.js | test/sanity/issue5787-MediaElementAudioSource/test.py | Python | mit | 1,377 |
"""
Django settings for csp project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import logging
from datetime import timedelta
import dj_database_url
import dj_redis_url
import django
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v1*ah#)@vyov!7c@n&c2^-*=8d)-d!u9@#c4o*@k=1(1!jul6&'
HASHID_KEY = 'ho(f%5a9dl_*)(*h2n6v#&yk5+mbc8u58uhlbexoqkj@d)0h6='
# SECURITY WARNING: don't run with debug turned on in production!
# BUG FIX: environment variables are strings, so os.environ.get('DEBUG',
# False) returned the truthy string 'False' whenever the variable was set
# at all -- even DEBUG=False enabled debug mode.  Parse the value
# explicitly instead (same pattern as IS_SANDBOX further down this file).
DEBUG = os.environ.get('DEBUG', 'False').lower() in ('true', '1', 'yes')
TEMPLATE_DEBUG = DEBUG
APPEND_SLASH = True
# Allow all host headers
ALLOWED_HOSTS = ['*']
PRODUCTION_HOSTS = ['daemo.herokuapp.com', 'daemo.stanford.edu']
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',),
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# 'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
# 'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {'read': 'Read scope', 'write': 'Write scope'}
}
ACCESS_TOKEN_EXPIRE_SECONDS = 604800
OAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth2_provider.Application'
OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'oauth2_provider.AccessToken'
MIGRATION_MODULES = {
'oauth2_provider': 'crowdsourcing.migrations.oauth2_provider',
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.postgres',
'oauth2_provider',
'corsheaders',
'compressor',
'crispy_forms',
'rest_framework',
'ws4redis',
'crowdsourcing',
'mturk'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'crowdsourcing.middleware.active.CustomActiveViewMiddleware',
'crowdsourcing.middleware.requirement.RequirementMiddleware',
'django.middleware.security.SecurityMiddleware',
# 'corsheaders.middleware.CorsPostCsrfMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'csp.urls'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.AllowAllUsersModelBackend',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'static/django_templates'), os.path.join(BASE_DIR, 'static/mturk')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ws4redis.context_processors.default',
]
},
},
]
WSGI_APPLICATION = 'csp.wsgi.application'
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
# Email
EMAIL_HOST = 'localhost'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_ENABLED = True
EMAIL_SENDER = '[email protected]'
EMAIL_SENDER_DEV = ''
EMAIL_SENDER_PASSWORD_DEV = ''
EMAIL_BACKEND = "crowdsourcing.backends.sendgrid_backend.SendGridBackend"
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', '')
# Email messages
EMAIL_NOTIFICATIONS_INTERVAL = os.environ.get('EMAIL_NOTIFICATIONS_INTERVAL', 30)
# Others
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
if float(django.get_version()[0:3]) < 1.8:
FIXTURE_DIRS = (
os.path.join(BASE_DIR, 'fixtures')
)
# Stripe
STRIPE_SECRET_KEY = os.environ.get('STRIPE_SECRET_KEY', '')
STRIPE_PUBLIC_KEY = os.environ.get('STRIPE_PUBLIC_KEY', '')
REGISTRATION_ALLOWED = os.environ.get('REGISTRATION_ALLOWED', False)
PASSWORD_RESET_ALLOWED = True
LOGIN_URL = '/login'
USERNAME_MAX_LENGTH = 30
# CORS
CORS_ORIGIN_ALLOW_ALL = True
CORS_REPLACE_HTTPS_REFERER = True
# Use only to restrict to specific servers/domains
# CORS_ORIGIN_WHITELIST = (
# 'stanford-qa.com',
# )
CORS_URLS_REGEX = r'^/api/done/*$'
CORS_ALLOW_METHODS = (
'GET',
'POST',
'OPTIONS'
)
HALF_OFF = True
NON_PROFIT_EMAILS = ['.edu', '.org']
SITE_HOST = os.environ.get('SITE_HOST', 'https://daemo.org')
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379')
REDIS_CONNECTION = dj_redis_url.parse(REDIS_URL)
DISCOURSE_API_KEY = os.environ.get('DISCOURSE_API_KEY', '')
DISCOURSE_BASE_URL = os.environ.get('DISCOURSE_BASE_URL', 'https://forum.daemo.org')
DISCOURSE_SSO_SECRET = os.environ.get('DISCOURSE_SSO_SECRET', 'ku_&j@77ghe6%-6788fg)^dmc4f((jx)w=o!q%+h!teydc7zes')
DISCOURSE_TOPIC_TASKS = os.environ.get('DISCOURSE_TOPIC_TASKS', None)
if DISCOURSE_TOPIC_TASKS is not None:
DISCOURSE_TOPIC_TASKS = int(DISCOURSE_TOPIC_TASKS)
MAX_TASKS_IN_PROGRESS = int(os.environ.get('MAX_TASKS_IN_PROGRESS', 8))
# Task Expiration
TASK_EXPIRATION_BEAT = os.environ.get('TASK_EXPIRATION_BEAT', 1)
DEFAULT_TASK_TIMEOUT = timedelta(hours=os.environ.get('DEFAULT_TASK_TIMEOUT', 8))
# MTurk
MTURK_CLIENT_ID = os.environ.get('MTURK_CLIENT_ID', 'INVALID')
MTURK_CLIENT_SECRET = os.environ.get('MTURK_CLIENT_SECRET', 'INVALID')
MTURK_HOST = os.environ.get('MTURK_HOST', 'mechanicalturk.sandbox.amazonaws.com')
MTURK_WORKER_HOST = os.environ.get('MTURK_WORKER_HOST', 'https://workersandbox.mturk.com/mturk/externalSubmit')
ID_HASH_MIN_LENGTH = 8
MTURK_WORKER_USERNAME = 'mturk'
MTURK_QUALIFICATIONS = os.environ.get('MTURK_QUALIFICATIONS', True)
MTURK_BEAT = os.environ.get('MTURK_BEAT', 1)
AWS_DAEMO_KEY = os.environ.get('AWS_DAEMO_KEY')
MTURK_ONLY = os.environ.get('MTURK_ONLY', False)
MTURK_COMPLETION_TIME = int(os.environ.get('MTURK_COMPLETION_TIME', 12))
MTURK_THRESHOLD = 0.61
POST_TO_MTURK = os.environ.get('POST_TO_MTURK', True)
MTURK_SYS_QUALIFICATIONS = os.environ.get('MTURK_SYS_QUALIFICATIONS', True)
WORKER_SPLIT_PERCENT = float(os.environ.get('WORKER_SPLIT_PERCENTILE', 0.75))
# AWS
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME', 'daemo')
AWS_S3_FILE_OVERWRITE = False
# Celery
BROKER_URL = REDIS_URL
BROKER_POOL_LIMIT = None
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'America/Los_Angeles'
FEED_BOOMERANG = 1
BOOMERANG_MIDPOINT = 1.99
BOOMERANG_MAX = 3.0
BOOMERANG_WORKERS_NEEDED = int(os.environ.get('BOOMERANG_WORKERS_NEEDED', 15))
HEART_BEAT_BOOMERANG = int(os.environ.get('HEART_BEAT_BOOMERANG', 5))
BOOMERANG_LAMBDA = float(os.environ.get('BOOMERANG_LAMBDA', 0.6))
BOOMERANG_TASK_ALPHA = float(os.environ.get('BOOMERANG_TASK_ALPHA', 0.3))
BOOMERANG_REQUESTER_ALPHA = float(os.environ.get('BOOMERANG_REQUESTER_ALPHA', 0.4))
BOOMERANG_PLATFORM_ALPHA = float(os.environ.get('BOOMERANG_PLATFORM_ALPHA', 0.5))
MIN_RATINGS_REQUIRED = 5
COLLECTIVE_REJECTION_THRESHOLD = 7
IS_SANDBOX = os.environ.get('SANDBOX', 'False') == 'True'
DAEMO_FIRST = True
AUTO_APPROVE_FREQ = os.environ.get('AUTO_APPROVE_FREQ', 8) # hours
EXPIRE_RETURNED_TASKS = os.environ.get('EXPIRE_RETURNED_TASKS', 2) # days
# Sessions
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_HOST = REDIS_CONNECTION['HOST']
SESSION_REDIS_PORT = REDIS_CONNECTION['PORT']
SESSION_REDIS_DB = REDIS_CONNECTION['DB']
SESSION_REDIS_PASSWORD = REDIS_CONNECTION['PASSWORD']
SESSION_REDIS_PREFIX = 'session'
# Web-sockets
WS4REDIS_CONNECTION = {
'host': REDIS_CONNECTION['HOST'],
'port': REDIS_CONNECTION['PORT'],
'db': REDIS_CONNECTION['DB'],
'password': REDIS_CONNECTION['PASSWORD'],
}
WEBSOCKET_URL = '/ws/'
WS4REDIS_EXPIRE = 1800
# WS4REDIS_HEARTBEAT = '--heartbeat--'
WS4REDIS_PREFIX = 'ws'
WS_API_URLS = ['/ws/bot']
# Payments (Stripe)
DAEMO_WORKER_PAY = timedelta(minutes=int(os.environ.get('DAEMO_WORKER_PAY', 60)))
DAEMO_CHARGEBACK_FEE = 0.005
STRIPE_CHARGE_LIFETIME = timedelta(days=90)
from utils import ws4redis_process_request
WS4REDIS_PROCESS_REQUEST = ws4redis_process_request
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("Daemo", '[email protected]'), # add more team members
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
SERVER_EMAIL = '[email protected]'
CELERY_REDIS_MAX_CONNECTIONS = 10
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
MIN_WORKERS_FOR_STATS = 10
WORKER_ACTIVITY_DAYS = 30
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
},
'suppress_deprecated': {
'()': 'csp.settings.SuppressDeprecated'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false', 'suppress_deprecated'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True
},
'console': {
'level': 'INFO',
'filters': ['suppress_deprecated'],
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
class SuppressDeprecated(logging.Filter):
    """Logging filter that silences Django deprecation-warning records."""

    # Substrings that identify the deprecation warnings to drop.
    # NOTE: the trailing colon on the last entry is deliberate and preserved
    # from the original matching behavior.
    _DEPRECATION_MARKERS = (
        'RemovedInDjango18Warning',
        'RemovedInDjango19Warning',
        'RemovedInDjango110Warning:',
    )

    def filter(self, record):
        # Returning False suppresses the record.
        message = record.getMessage()
        for marker in self._DEPRECATION_MARKERS:
            if marker in message:
                return False
        return True
PYTHON_VERSION = 2
# Machine-local overrides are optional; a missing or broken local_settings
# module is not fatal, but the error is surfaced in development.
try:
    from local_settings import *
except Exception as e:
    if DEBUG:
        # BaseException.message has been deprecated since Python 2.6 (and is
        # gone in Python 3); printing the exception itself is portable and
        # shows the full error text, including multi-argument exceptions.
        print(e)
CELERYBEAT_SCHEDULE = {
# 'mturk-push-tasks': {
# 'task': 'mturk.tasks.mturk_publish',
# 'schedule': timedelta(minutes=int(MTURK_BEAT)),
# },
'pay-workers': {
'task': 'crowdsourcing.tasks.pay_workers',
'schedule': DAEMO_WORKER_PAY,
},
# 'expire-hits': {
# 'task': 'mturk.tasks.expire_hits',
# 'schedule': timedelta(minutes=int(TASK_EXPIRATION_BEAT)),
# },
'expire-tasks': {
'task': 'crowdsourcing.tasks.expire_tasks',
'schedule': timedelta(minutes=int(TASK_EXPIRATION_BEAT)),
},
'auto-approve-tasks': {
'task': 'crowdsourcing.tasks.auto_approve_tasks',
'schedule': timedelta(minutes=4),
},
'email-notifications': {
'task': 'crowdsourcing.tasks.email_notifications',
'schedule': timedelta(minutes=int(EMAIL_NOTIFICATIONS_INTERVAL)),
},
'update-feed-boomerang': {
'task': 'crowdsourcing.tasks.update_feed_boomerang',
'schedule': timedelta(minutes=HEART_BEAT_BOOMERANG),
}
}
# Secure Settings
if not DEBUG:
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_HSTS_SECONDS = 31536000
# SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = True
CSRF_TRUSTED_ORIGINS = [
'daemo.herokuapp.com', 'daemo.stanford.edu',
'daemo-staging.herokuapp.com', 'daemo-staging.stanford.edu',
'daemo.org', 'www.daemo.org', 'daemo-test.herokuapp.com',
'sandbox.daemo.org', 'www.sandbox.daemo.org'
]
REQUIRED_CONFIGS = ['AWS_DAEMO_KEY', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'STRIPE_SECRET_KEY',
'STRIPE_PUBLIC_KEY', 'SITE_HOST']
for config in REQUIRED_CONFIGS:
if config not in locals() and config not in globals():
print("Required configuration parameter is missing: {}".format(config))
exit(-1)
| crowdresearch/daemo | csp/settings.py | Python | mit | 15,090 |
# -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from ooi.log import log as logging
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Controller(object):
    """Abstract base class for OOI API controllers.

    Only stores the shared construction-time state; concrete controllers
    implement the actual request handling.
    """

    def __init__(self, app, openstack_version):
        # Application this controller delegates to -- presumably the wrapped
        # WSGI app; confirm against callers.
        self.app = app
        # OpenStack version identifier supplied by the caller (format not
        # visible from this module).
        self.openstack_version = openstack_version
| alvarolopez/ooi | ooi/api/base.py | Python | apache-2.0 | 900 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add an optional ``author`` foreign key (to the user model) on Document."""

    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('documents', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='document',
            name='author',
            # null/blank allowed so existing rows need no backfill.
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| danjac/ownblock | ownblock/ownblock/apps/documents/migrations/0002_document_author.py | Python | mit | 566 |
#!/usr/bin/env python2
import math
from sys import stdout
# Colored message ANSI constants
g_green = chr(27) + "[32m" if stdout.isatty() else ""
g_yellow = chr(27) + "[33m" if stdout.isatty() else ""
g_normal = chr(27) + "[0m" if stdout.isatty() else ""
def printStatsOfList(results, label='Statistics', summaryOnly=False):
    """Print descriptive statistics for the numbers yielded by *results*.

    Prints either a one-line colored "mean +/- relative stddev%" summary
    (``summaryOnly=True``) or a full table (mean, population/sample std
    deviation, median, min, max).  Prints nothing for an empty iterable.

    NOTE(review): Python 2 code (print statements).  If *results* yields
    ints, ``total*total/n`` below performs integer division -- confirm that
    callers always feed floats.
    """
    # Single pass: accumulate sum, sum of squares and count, while keeping
    # every value for the order statistics (median/min/max).
    total = totalSq = n = 0
    allOfThem = []
    for a in results:
        total += a
        totalSq += a*a
        n += 1
        allOfThem.append(a)
    if n == 0:
        return
    # Population variance via E[x^2] - E[x]^2; clamp tiny negative values
    # caused by floating-point cancellation.
    varianceFull = (totalSq - total*total/n)/n
    if varianceFull < 0.:
        varianceFull = 0.
    if n > 1:
        # Sample (Bessel-corrected) variance, clamped the same way.
        variance = (totalSq - total*total/n)/(n-1)
        if variance < 0.:
            variance = 0.
    else:
        variance = 0.
    srted = sorted(allOfThem)
    if summaryOnly:
        # One-line green summary; trailing comma keeps the cursor on the line.
        s = g_green + ("%6.2f" % (total/n)) + " +/- " + "%6.2f%%" + g_normal
        print s % ((100*math.sqrt(variance)*n/total) if total > 0 else 0.),
    else:
        print "\n", g_yellow+label+g_normal, ":"
        samplesNo = len(allOfThem)
        measurements = [
            ("Average value", total/n),
            ("Std deviation", math.sqrt(varianceFull)),
            ("Sample stddev", math.sqrt(variance)),
            # Median: middle element for odd counts, mean of the two middle
            # elements for even counts.
            ("Median",
             srted[samplesNo/2]
             if samplesNo % 2
             else 0.5*(srted[samplesNo/2 - 1] + srted[samplesNo/2])),
            ("Min", srted[0]),
            ("Max", srted[-1]),
            (g_green+"Overall", (str(total/n)+" +/- "+"%2.1f%%"+g_normal) %
             ((100*math.sqrt(variance)*n/total) if total > 0 else 0.))
        ]
        # NOTE(review): this loop variable shadows the *label* parameter.
        for label, value in measurements:
            print "%*s:" % (15, label),
            if isinstance(value, str):
                print value
            else:
                print "%.5f" % value
def readListOfIntegersOrFloatsFromStdin():
    """Yield one float per line read from stdin, stopping at EOF.

    NOTE(review): the bare ``except`` also ends the stream on the first
    unparsable line and swallows KeyboardInterrupt alike -- intentional
    best-effort input loop, but worth confirming.
    """
    while True:
        try:
            a = float(raw_input())
            yield a
        except:
            break
if __name__ == "__main__":
printStatsOfList(readListOfIntegersOrFloatsFromStdin())
| ttsiodras/HexSpeak | contrib/stats.py | Python | gpl-2.0 | 2,072 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_binary import JarRules, JvmBinary, Skip
from pants.backend.jvm.tasks.detect_duplicates import DuplicateDetector
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_mkdir, safe_mkdir_for, touch
from pants_test.backend.jvm.tasks.jvm_binary_task_test_base import JvmBinaryTaskTestBase
class DuplicateDetectorTest(JvmBinaryTaskTestBase):
@classmethod
def task_type(cls):
return DuplicateDetector
def setUp(self):
super().setUp()
self.classes_dir = os.path.join(self.test_workdir, "classes")
safe_mkdir(self.classes_dir)
def generate_class(name):
path = os.path.join(self.classes_dir, name)
touch(path)
return path
test_class_path = generate_class("com/twitter/Test.class")
duplicate_class_path = generate_class("com/twitter/commons/Duplicate.class")
unique_class_path = generate_class("org/apache/Unique.class")
unicode_class_path = generate_class("cucumber/api/java/zh_cn/假如.class")
def generate_jar(path, *class_name):
jar_path = os.path.join(self.test_workdir, "jars", path)
safe_mkdir_for(jar_path)
with open_zip(jar_path, "w") as zipfile:
for clazz in class_name:
zipfile.write(clazz, os.path.relpath(clazz, self.classes_dir))
return jar_path
self.test_jar = generate_jar("test.jar", test_class_path, duplicate_class_path)
self.dups_jar = generate_jar("dups.jar", duplicate_class_path, unique_class_path)
self.no_dups_jar = generate_jar("no_dups.jar", unique_class_path)
self.unicode_jar = generate_jar("unicode_class.jar", unicode_class_path)
def resolved_jarlib(name, jar_path):
resolved_jar = ResolvedJar(
M2Coordinate(org="org.example", name=name, rev="0.0.1"),
cache_path=jar_path,
pants_path=jar_path,
)
jar_dep = JarDependency(org="org.example", name=name, rev="0.0.1")
jar_library = self.make_target(
spec=f"3rdparty:{name}", target_type=JarLibrary, jars=[jar_dep]
)
return jar_library, resolved_jar
self.test_jarlib, self.test_resolved_jar = resolved_jarlib("test", self.test_jar)
self.dups_jarlib, self.dups_resolved_jar = resolved_jarlib("dups", self.dups_jar)
self.no_dups_jarlib, self.no_dups_resolved_jar = resolved_jarlib(
"no_dups", self.no_dups_jar
)
self.unicode_jarlib, self.unicode_resolved_jar = resolved_jarlib(
"unicode", self.unicode_jar
)
def _setup_external_duplicate(self):
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing",
target_type=JvmBinary,
dependencies=[self.test_jarlib, self.dups_jarlib],
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_jars_for_targets([self.test_jarlib], "default", [self.test_resolved_jar])
classpath.add_jars_for_targets([self.dups_jarlib], "default", [self.dups_resolved_jar])
return task, jvm_binary
def test_duplicate_found_external(self):
self.set_options(fail_fast=False)
task, jvm_binary = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
("org.example-dups-0.0.1.jar", "org.example-test-0.0.1.jar"): {
"com/twitter/commons/Duplicate.class"
}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_skip(self):
self.set_options(fail_fast=False, skip=True)
task, _ = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
self.assertEqual(None, conflicts_by_binary)
def test_duplicate_excluded_file(self):
self.set_options(fail_fast=False, excludes=[], exclude_files=["Duplicate.class"])
task, jvm_binary = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
self.assertEqual({}, conflicts_by_binary)
def _setup_internal_duplicate(self):
java_library = self.make_target(
spec="src/java/com/twitter:lib", target_type=JavaLibrary, sources=[]
)
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing", target_type=JvmBinary, dependencies=[java_library]
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(java_library, [("default", self.classes_dir)])
classpath.add_for_target(jvm_binary, [("default", self.classes_dir)])
return task, jvm_binary
def test_duplicate_found_internal(self):
self.set_options(fail_fast=False)
task, jvm_binary = self._setup_internal_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
("src/java/com/twitter:lib", "src/java/com/twitter:thing"): {
"com/twitter/Test.class",
"com/twitter/commons/Duplicate.class",
"org/apache/Unique.class",
"cucumber/api/java/zh_cn/假如.class",
}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_excluded_internal(self):
self.set_options(
fail_fast=False, excludes=[], exclude_files=["Duplicate.class", "假如.class"]
)
task, jvm_binary = self._setup_internal_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
("src/java/com/twitter:lib", "src/java/com/twitter:thing"): {
"com/twitter/Test.class",
"org/apache/Unique.class",
}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_found_mixed(self):
self.set_options(fail_fast=False)
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing",
target_type=JvmBinary,
dependencies=[self.test_jarlib],
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(jvm_binary, [("default", self.classes_dir)])
classpath.add_jars_for_targets([self.test_jarlib], "default", [self.test_resolved_jar])
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
("org.example-test-0.0.1.jar", "src/java/com/twitter:thing"): {
"com/twitter/Test.class",
"com/twitter/commons/Duplicate.class",
}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_not_found(self):
self.set_options(fail_fast=True)
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing",
target_type=JvmBinary,
dependencies=[self.no_dups_jarlib, self.unicode_jarlib],
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_jars_for_targets(
[self.no_dups_jarlib], "default", [self.no_dups_resolved_jar]
)
classpath.add_jars_for_targets(
[self.unicode_jarlib], "default", [self.unicode_resolved_jar]
)
conflicts_by_binary = task.execute()
self.assertEqual({}, conflicts_by_binary)
def test_fail_fast_error_raised(self):
self.set_options(fail_fast=True)
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing",
target_type=JvmBinary,
dependencies=[self.test_jarlib],
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(jvm_binary, [("default", self.classes_dir)])
classpath.add_jars_for_targets([self.test_jarlib], "default", [self.test_resolved_jar])
with self.assertRaises(TaskError):
task.execute()
def test_is_excluded_default(self):
task = self.create_task(self.context())
self.assertFalse(task._is_excluded("foo"))
self.assertFalse(task._is_excluded("foo/BCKEY.DSA"))
# excluded_files: No directroy
self.assertTrue(task._is_excluded(".DS_Store"))
# excluded_files: Mixed case
self.assertTrue(task._is_excluded("NOTICE.txt"))
# excluded_files: Leading directory
self.assertTrue(task._is_excluded("/foo/bar/dependencies"))
# excluded_dirs:
self.assertTrue(task._is_excluded("META-INF/services/foo"))
# excluded_patterns:
self.assertTrue(task._is_excluded("META-INF/BCKEY.RSA"))
def test_is_excluded_pattern(self):
self.set_options(exclude_patterns=[r".*/garbage\."])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded("foo/garbage.txt"))
def test_is_excluded_files(self):
self.set_options(excludes=None, exclude_files=["bckey.dsa"])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded("foo/BCKEY.DSA"))
# Defaults are now overridden
self.assertFalse(task._is_excluded("NOTICE.txt"))
def test_is_excluded_files_again(self):
self.set_options(exclude_dirs=["org/duplicated"])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded("org/duplicated/FOO"))
# Defaults are now overridden
self.assertFalse(task._is_excluded("META-INF/services/foo"))
def _setup_external_duplicate_with_rules(self, rules):
jvm_binary = self.make_target(
spec="src/java/com/twitter:thing",
target_type=JvmBinary,
dependencies=[self.test_jarlib, self.dups_jarlib],
deploy_jar_rules=rules,
)
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_jars_for_targets([self.test_jarlib], "default", [self.test_resolved_jar])
classpath.add_jars_for_targets([self.dups_jarlib], "default", [self.dups_resolved_jar])
return task, jvm_binary
def test_duplicate_rule_skip(self):
self.set_options(fail_fast=False)
task, _ = self._setup_external_duplicate_with_rules(
rules=JarRules([Skip("^com/twitter/commons/Duplicate.class$")])
)
conflicts_by_binary = task.execute()
self.assertEqual({}, conflicts_by_binary)
def test_duplicate_rule_no_match(self):
self.set_options(fail_fast=False)
task, jvm_binary = self._setup_external_duplicate_with_rules(
rules=JarRules([Skip("^com/twitter/commons/DoesNotExist.class$")])
)
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
("org.example-dups-0.0.1.jar", "org.example-test-0.0.1.jar"): {
"com/twitter/commons/Duplicate.class"
}
}
}
self.assertEqual(expected, conflicts_by_binary)
| wisechengyi/pants | tests/python/pants_test/backend/jvm/tasks/test_detect_duplicates.py | Python | apache-2.0 | 12,183 |
from django.contrib.gis.db import models
from django.db.models import Q
import operator
class PriorityDepartmentsManager(models.Manager):
    """Manager whose default queryset is restricted to the priority cities.

    Each department is identified by its state plus FDID (fire department
    identifier).
    """

    # state/fdid pairs for the priority departments.
    DEPARTMENTS = {
        'Austin': {'state': 'TX', 'fdid': 'WP801'},
        'Arlington': {'state': 'VA', 'fdid': '01300'},
        'Chicago': {'state': 'IL', 'fdid': 'CS931'},
        'Phoenix': {'state': 'AZ', 'fdid': '08203'},
        'Mesa': {'state': 'AZ', 'fdid': '08183'},
        'Miami': {'state': 'FL', 'fdid': '01032'},
        'Memphis': {'state': 'TN', 'fdid': '79553'},
        'Seattle': {'state': 'WA', 'fdid': '17M15'},
        'Los Angeles': {'state': 'CA', 'fdid': '19105'},
        'Boston': {'state': 'MA', 'fdid': '25035'},
        'San Diego': {'state': 'CA', 'fdid': '37140'},
        'Detroit': {'state': 'MI', 'fdid': '08207'},
        'Atlanta': {'state': 'GA', 'fdid': '06001'},
        'Alexandria': {'state': 'VA', 'fdid': '51000'},
        'Worcester': {'state': 'MA', 'fdid': '27348'},
        'Nashville': {'state': 'TN', 'fdid': '19532'},
        'Charleston': {'state': 'SC', 'fdid': '10302'},
    }

    def get_priority_cities_filter(self):
        # OR together one Q(state=..., fdid=...) per department.
        # NOTE(review): relies on the Python 2 ``reduce`` builtin; under
        # Python 3 this needs ``functools.reduce``.
        return reduce(operator.or_, [Q(**value) for key, value in self.DEPARTMENTS.items()])

    def get_query_set(self):
        # Legacy (pre-Django 1.6) hook name; restricts the default queryset.
        return super(PriorityDepartmentsManager, self).get_query_set().filter(self.get_priority_cities_filter())
| garnertb/rogue_geonode | geoshape/firestation/managers.py | Python | gpl-3.0 | 1,358 |
from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, Subnet
class TestDescribeSubnets(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeSubnetsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<subnetSet>
<item>
<subnetId>subnet-9d4a7b6c</subnetId>
<state>available</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.1.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<defaultForAz>false</defaultForAz>
<mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
<tagSet/>
</item>
<item>
<subnetId>subnet-6e7f829e</subnetId>
<state>available</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.0.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<defaultForAz>false</defaultForAz>
<mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
<tagSet/>
</item>
</subnetSet>
</DescribeSubnetsResponse>
"""
def test_get_all_subnets(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_subnets(
['subnet-9d4a7b6c', 'subnet-6e7f829e'],
filters=OrderedDict([('state', 'available'),
('vpc-id', ['subnet-9d4a7b6c', 'subnet-6e7f829e'])]))
self.assert_request_parameters({
'Action': 'DescribeSubnets',
'SubnetId.1': 'subnet-9d4a7b6c',
'SubnetId.2': 'subnet-6e7f829e',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'available',
'Filter.2.Name': 'vpc-id',
'Filter.2.Value.1': 'subnet-9d4a7b6c',
'Filter.2.Value.2': 'subnet-6e7f829e'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], Subnet)
self.assertEqual(api_response[0].id, 'subnet-9d4a7b6c')
self.assertEqual(api_response[1].id, 'subnet-6e7f829e')
class TestCreateSubnet(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<subnet>
<subnetId>subnet-9d4a7b6c</subnetId>
<state>pending</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.1.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<tagSet/>
</subnet>
</CreateSubnetResponse>
"""
def test_create_subnet(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_subnet(
'vpc-1a2b3c4d', '10.0.1.0/24', 'us-east-1a')
self.assert_request_parameters({
'Action': 'CreateSubnet',
'VpcId': 'vpc-1a2b3c4d',
'CidrBlock': '10.0.1.0/24',
'AvailabilityZone': 'us-east-1a'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, Subnet)
self.assertEquals(api_response.id, 'subnet-9d4a7b6c')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.vpc_id, 'vpc-1a2b3c4d')
self.assertEquals(api_response.cidr_block, '10.0.1.0/24')
self.assertEquals(api_response.available_ip_address_count, 251)
self.assertEquals(api_response.availability_zone, 'us-east-1a')
class TestDeleteSubnet(AWSMockServiceTestCase):
    """DeleteSubnet: request parameters and boolean response parsing."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned EC2 XML response served by the mock HTTP layer.
        return b"""
            <DeleteSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <return>true</return>
            </DeleteSubnetResponse>
        """

    def test_delete_subnet(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_subnet('subnet-9d4a7b6c')
        # Signature-related parameters vary per run and are ignored.
        self.assert_request_parameters({
            'Action': 'DeleteSubnet',
            'SubnetId': 'subnet-9d4a7b6c'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEquals(api_response, True)
class TestModifySubnetAttribute(AWSMockServiceTestCase):
    """ModifySubnetAttribute: boolean flag serialization and response."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned EC2 XML response served by the mock HTTP layer.
        return b"""
            <ModifySubnetAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
                <return>true</return>
            </ModifySubnetAttributeResponse>
        """

    def test_modify_subnet_attribute_map_public_ip_on_launch(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.modify_subnet_attribute(
            'subnet-1a2b3c4d', map_public_ip_on_launch=True)
        # Python True must be serialized as the lowercase string 'true'.
        self.assert_request_parameters({
            'Action': 'ModifySubnetAttribute',
            'SubnetId': 'subnet-1a2b3c4d',
            'MapPublicIpOnLaunch.Value': 'true'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| Asana/boto | tests/unit/vpc/test_subnet.py | Python | mit | 6,551 |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Let the user remove a playlist entry."""
__author__ = \
'[email protected] (Jeff Posnick) and [email protected] (JJ Behrens)'
from playlistpicker.handlers.basehandler import BaseHandler
from playlistpicker.utils import channels as channelutils
from playlistpicker.utils import youtube as youtubeutils
class RemovePlaylistEntryHandler(BaseHandler):
    """Handle POST requests that delete a single entry from a playlist."""

    @BaseHandler.oauth2_decorator.oauth_required
    @BaseHandler.authorize_playlist
    @BaseHandler.playlist_entry_uri_required
    def post(self, playlist_id):
        """Delete one playlist entry on YouTube, then re-render the playlist.

        The decorators presumably guarantee an OAuth session, playlist
        authorization, and that self.playlist_uri / self.playlist_entry_id
        are populated -- confirm in BaseHandler.
        """
        # Act on YouTube as the playlist owner, not as the current viewer.
        yt_service_for_owner = youtubeutils.create_youtube_service(
            self.owner_oauth_token)
        response = yt_service_for_owner.DeletePlaylistVideoEntry(
            self.playlist_uri, self.playlist_entry_id)
        assert response
        # Rebuild the playlist page and notify channel listeners of the
        # change.
        youtubeutils.write_playlist(self, yt_service_for_owner, playlist_id,
                                    channelutils.notify_playlist_listeners)
| jjinux/party-playlist-picker | playlistpicker/handlers/removeplaylistentryhandler.py | Python | apache-2.0 | 1,455 |
# Patchwork - automated patch tracking system
# Copyright (C) 2016 Stephen Finucane <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import email.parser
from rest_framework.generics import ListAPIView
from rest_framework.generics import RetrieveAPIView
from rest_framework.reverse import reverse
from rest_framework.serializers import SerializerMethodField
from patchwork.api.base import BaseHyperlinkedModelSerializer
from patchwork.api.filters import CoverLetterFilterSet
from patchwork.api.embedded import PersonSerializer
from patchwork.api.embedded import ProjectSerializer
from patchwork.api.embedded import SeriesSerializer
from patchwork.models import CoverLetter
class CoverLetterListSerializer(BaseHyperlinkedModelSerializer):
    """List-view serializer for cover letters (no headers or body content)."""

    web_url = SerializerMethodField()
    project = ProjectSerializer(read_only=True)
    submitter = PersonSerializer(read_only=True)
    mbox = SerializerMethodField()
    series = SeriesSerializer(read_only=True)
    comments = SerializerMethodField()

    def get_web_url(self, instance):
        # Absolute URL of the cover letter's HTML page.
        request = self.context.get('request')
        return request.build_absolute_uri(instance.get_absolute_url())

    def get_mbox(self, instance):
        # Absolute URL of the cover letter's mbox download.
        request = self.context.get('request')
        return request.build_absolute_uri(instance.get_mbox_url())

    def get_comments(self, cover):
        # Absolute URL of the per-cover-letter comment list endpoint.
        return self.context.get('request').build_absolute_uri(
            reverse('api-cover-comment-list', kwargs={'pk': cover.id}))

    def to_representation(self, instance):
        # NOTE(stephenfin): This is here to ensure our API looks the same even
        # after we changed the series-patch relationship from M:N to 1:N. It
        # will be removed in API v2
        data = super(CoverLetterListSerializer, self).to_representation(
            instance)
        data['series'] = [data['series']] if data['series'] else []
        return data

    class Meta:
        model = CoverLetter
        fields = ('id', 'url', 'web_url', 'project', 'msgid', 'date', 'name',
                  'submitter', 'mbox', 'series', 'comments')
        read_only_fields = fields
        # Fields only exposed from API version 1.1 onwards.
        versioned_fields = {
            '1.1': ('web_url', 'mbox', 'comments'),
        }
        extra_kwargs = {
            'url': {'view_name': 'api-cover-detail'},
        }
class CoverLetterDetailSerializer(CoverLetterListSerializer):
    """Detail-view serializer: adds parsed mail headers and body content."""

    headers = SerializerMethodField()

    def get_headers(self, instance):
        """Return the stored mail headers as a dict.

        A header that occurs once maps to a single string; a repeated header
        maps to a list of all its values.
        """
        headers = {}
        if instance.headers:
            # Headers-only parse (second argument True): the body is skipped.
            parsed = email.parser.Parser().parsestr(instance.headers, True)
            for key in parsed.keys():
                headers[key] = parsed.get_all(key)
                # Let's return a single string instead of a list if only one
                # header with this key is present
                if len(headers[key]) == 1:
                    headers[key] = headers[key][0]
        return headers

    class Meta:
        model = CoverLetter
        fields = CoverLetterListSerializer.Meta.fields + (
            'headers', 'content')
        read_only_fields = fields
        extra_kwargs = CoverLetterListSerializer.Meta.extra_kwargs
        versioned_fields = CoverLetterListSerializer.Meta.versioned_fields
class CoverLetterList(ListAPIView):
    """List cover letters."""

    serializer_class = CoverLetterListSerializer
    # Both attribute spellings are set -- presumably to cover old and new
    # django-filter versions; confirm against the configured filter backend.
    filter_class = filterset_class = CoverLetterFilterSet
    search_fields = ('name',)
    ordering_fields = ('id', 'name', 'date', 'submitter')
    ordering = 'id'

    def get_queryset(self):
        # Defer the large text columns; the list serializer never reads them.
        return CoverLetter.objects.all()\
            .select_related('project', 'submitter', 'series')\
            .defer('content', 'headers')
class CoverLetterDetail(RetrieveAPIView):
    """Show a cover letter."""

    serializer_class = CoverLetterDetailSerializer

    def get_queryset(self):
        # No defer() here: the detail serializer needs headers and content.
        return CoverLetter.objects.all()\
            .select_related('project', 'submitter', 'series')
| stephenfin/patchwork | patchwork/api/cover.py | Python | gpl-2.0 | 3,941 |
#!/usr/bin/env python
# coding=utf-8
from src.sequence import Sequence
class FastaReader(object):
    """Parses FASTA-formatted text into Sequence objects."""

    def __init__(self):
        # Accumulates every sequence ever parsed; repeated read() calls
        # extend this list rather than replacing it.
        self.seqs = []

    def read(self, io_buffer):
        """Read FASTA records from *io_buffer* and return all sequences.

        The header of a record is the first whitespace-delimited token after
        '>'.  Returns self.seqs (including records from earlier read() calls).
        """
        header = ''
        bases = []  # per-record chunks; joined once to avoid O(n^2) concat
        for line in io_buffer:
            if line[0] == '>':
                if header:
                    # Save the previous record before starting a new one.
                    self.seqs.append(Sequence(header, ''.join(bases)))
                header = line[1:].strip().split()[0]  # next record's header
                bases = []
            else:
                bases.append(line.strip())
        if header:
            # Add the final record.  The guard fixes a bug where an empty
            # input buffer produced a bogus Sequence('', '') entry.
            self.seqs.append(Sequence(header, ''.join(bases)))
        return self.seqs
| Arabidopsis-Information-Portal/GAG | src/fasta_reader.py | Python | mit | 681 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2010 fatih
# Copyright (C) 2010-2011, 2014, 2018-2021 Philipp Wolfer
# Copyright (C) 2012, 2014, 2018 Wieland Hoffmann
# Copyright (C) 2013 Ionuț Ciocîrlan
# Copyright (C) 2013-2014, 2018-2021 Laurent Monin
# Copyright (C) 2014, 2017 Sophist-UK
# Copyright (C) 2016 Frederik “Freso” S. Olesen
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2017 Shen-Ta Hsieh
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import builtins
from collections import namedtuple
from collections.abc import Iterator
import re
import unittest
from unittest.mock import Mock
from test.picardtestcase import PicardTestCase
from picard import util
from picard.const.sys import IS_WIN
from picard.util import (
extract_year_from_date,
find_best_match,
is_absolute_path,
iter_files_from_objects,
iter_unique,
limited_join,
make_filename_from_title,
pattern_as_regex,
sort_by_similarity,
tracknum_and_title_from_filename,
tracknum_from_filename,
uniqify,
wildcards_to_regex_pattern,
)
# ensure _() is defined
if '_' not in builtins.__dict__:
builtins.__dict__['_'] = lambda a: a
class ReplaceWin32IncompatTest(PicardTestCase):
    """Tests for util.replace_win32_incompat (illegal filename characters)."""

    @unittest.skipUnless(IS_WIN, "windows test")
    def test_correct_absolute_win32(self):
        # On Windows the leading drive-letter colon is kept intact.
        self.assertEqual(util.replace_win32_incompat("c:\\test\\te\"st/2"),
                             "c:\\test\\te_st/2")
        self.assertEqual(util.replace_win32_incompat("c:\\test\\d:/2"),
                             "c:\\test\\d_/2")

    @unittest.skipUnless(not IS_WIN, "non-windows test")
    def test_correct_absolute_non_win32(self):
        self.assertEqual(util.replace_win32_incompat("/test/te\"st/2"),
                             "/test/te_st/2")
        self.assertEqual(util.replace_win32_incompat("/test/d:/2"),
                             "/test/d_/2")

    def test_correct_relative(self):
        # Each of the forbidden characters " * : < > ? | becomes '_'.
        self.assertEqual(util.replace_win32_incompat("A\"*:<>?|b"),
                             "A_______b")
        self.assertEqual(util.replace_win32_incompat("d:tes<t"),
                             "d_tes_t")

    def test_incorrect(self):
        self.assertNotEqual(util.replace_win32_incompat("c:\\test\\te\"st2"),
                                "c:\\test\\te\"st2")
class MakeFilenameTest(PicardTestCase):
    """Tests for make_filename_from_title (fallbacks and path separators)."""

    def test_filename_from_title(self):
        # Missing / empty / whitespace-only titles fall back to the default.
        self.assertEqual(make_filename_from_title(), _("No Title"))
        self.assertEqual(make_filename_from_title(""), _("No Title"))
        self.assertEqual(make_filename_from_title(" "), _("No Title"))
        self.assertEqual(make_filename_from_title(default="New Default"), "New Default")
        self.assertEqual(make_filename_from_title("", "New Default"), "New Default")
        # Path separators are never allowed in a filename.
        self.assertEqual(make_filename_from_title("/"), "_")

    @unittest.skipUnless(IS_WIN, "windows test")
    def test_filename_from_title_win32(self):
        # Backslash and colon are only illegal on Windows.
        self.assertEqual(make_filename_from_title("\\"), "_")
        self.assertEqual(make_filename_from_title(":"), "_")

    @unittest.skipUnless(not IS_WIN, "non-windows test")
    def test_filename_from_title_non_win32(self):
        self.assertEqual(make_filename_from_title(":"), ":")
class ExtractYearTest(PicardTestCase):
def test_string(self):
self.assertEqual(extract_year_from_date(""), None)
self.assertEqual(extract_year_from_date(2020), None)
self.assertEqual(extract_year_from_date("2020"), 2020)
self.assertEqual(extract_year_from_date('2020-02-28'), 2020)
self.assertEqual(extract_year_from_date('2015.02'), 2015)
self.assertEqual(extract_year_from_date('2015; 2015'), None)
# test for the format as supported by ID3 (https://id3.org/id3v2.4.0-structure): yyyy-MM-ddTHH:mm:ss
self.assertEqual(extract_year_from_date('2020-07-21T13:00:00'), 2020)
def test_mapping(self):
self.assertEqual(extract_year_from_date({}), None)
self.assertEqual(extract_year_from_date({'year': 'abc'}), None)
self.assertEqual(extract_year_from_date({'year': '2020'}), 2020)
self.assertEqual(extract_year_from_date({'year': 2020}), 2020)
self.assertEqual(extract_year_from_date({'year': '2020-02-28'}), None)
class SanitizeDateTest(PicardTestCase):
def test_correct(self):
self.assertEqual(util.sanitize_date("2006--"), "2006")
self.assertEqual(util.sanitize_date("2006--02"), "2006")
self.assertEqual(util.sanitize_date("2006 "), "2006")
self.assertEqual(util.sanitize_date("2006 02"), "")
self.assertEqual(util.sanitize_date("2006.02"), "")
self.assertEqual(util.sanitize_date("2006-02"), "2006-02")
def test_incorrect(self):
self.assertNotEqual(util.sanitize_date("2006--02"), "2006-02")
self.assertNotEqual(util.sanitize_date("2006.03.02"), "2006-03-02")
class SanitizeFilenameTest(PicardTestCase):
def test_replace_slashes(self):
self.assertEqual(util.sanitize_filename("AC/DC"), "AC_DC")
def test_custom_replacement(self):
self.assertEqual(util.sanitize_filename("AC/DC", "|"), "AC|DC")
def test_win_compat(self):
self.assertEqual(util.sanitize_filename("AC\\/DC", win_compat=True), "AC__DC")
@unittest.skipUnless(IS_WIN, "windows test")
def test_replace_backslashes(self):
self.assertEqual(util.sanitize_filename("AC\\DC"), "AC_DC")
@unittest.skipIf(IS_WIN, "non-windows test")
def test_keep_backslashes(self):
self.assertEqual(util.sanitize_filename("AC\\DC"), "AC\\DC")
class TranslateArtistTest(PicardTestCase):
def test_latin(self):
self.assertEqual("thename", util.translate_from_sortname("thename", "sort, name"))
def test_kanji(self):
self.assertEqual("Tetsuya Komuro", util.translate_from_sortname("小室哲哉", "Komuro, Tetsuya"))
# see _reverse_sortname(), cases with 3 or 4 chunks
self.assertEqual("c b a", util.translate_from_sortname("小室哲哉", "a, b, c"))
self.assertEqual("b a, d c", util.translate_from_sortname("小室哲哉", "a, b, c, d"))
def test_kanji2(self):
self.assertEqual("Ayumi Hamasaki & Keiko", util.translate_from_sortname("浜崎あゆみ & KEIKO", "Hamasaki, Ayumi & Keiko"))
def test_cyrillic(self):
self.assertEqual("Pyotr Ilyich Tchaikovsky", util.translate_from_sortname("Пётр Ильич Чайковский", "Tchaikovsky, Pyotr Ilyich"))
class FormatTimeTest(PicardTestCase):
def test(self):
self.assertEqual("?:??", util.format_time(0))
self.assertEqual("0:00", util.format_time(0, display_zero=True))
self.assertEqual("3:00", util.format_time(179750))
self.assertEqual("3:00", util.format_time(179500))
self.assertEqual("2:59", util.format_time(179499))
self.assertEqual("59:59", util.format_time(3599499))
self.assertEqual("1:00:00", util.format_time(3599500))
self.assertEqual("1:02:59", util.format_time(3779499))
class HiddenFileTest(PicardTestCase):
    """Tests for util.is_hidden (dotfile detection, POSIX only)."""
    @unittest.skipUnless(not IS_WIN, "non-windows test")
    def test(self):
        # Only the file component decides: a dotted parent directory alone
        # does not make the file itself hidden.
        self.assertTrue(util.is_hidden('/a/b/.c.mp3'))
        self.assertTrue(util.is_hidden('/a/.b/.c.mp3'))
        self.assertFalse(util.is_hidden('/a/.b/c.mp3'))
class TagsTest(PicardTestCase):
def test_display_tag_name(self):
dtn = util.tags.display_tag_name
self.assertEqual(dtn('tag'), 'tag')
self.assertEqual(dtn('tag:desc'), 'tag [desc]')
self.assertEqual(dtn('tag:'), 'tag')
self.assertEqual(dtn('tag:de:sc'), 'tag [de:sc]')
self.assertEqual(dtn('originalyear'), 'Original Year')
self.assertEqual(dtn('originalyear:desc'), 'Original Year [desc]')
self.assertEqual(dtn('~length'), 'Length')
self.assertEqual(dtn('~lengthx'), '~lengthx')
self.assertEqual(dtn(''), '')
class LinearCombinationTest(PicardTestCase):
    """Tests for util.linear_combination_of_weights: weighted average of (value, weight) pairs."""
    def test_0(self):
        # an empty list yields 0.0 rather than raising
        parts = []
        self.assertEqual(util.linear_combination_of_weights(parts), 0.0)
    def test_1(self):
        parts = [(1.0, 1), (1.0, 1), (1.0, 1)]
        self.assertEqual(util.linear_combination_of_weights(parts), 1.0)
    def test_2(self):
        # zero-weight entries contribute nothing
        parts = [(0.0, 1), (0.0, 0), (1.0, 0)]
        self.assertEqual(util.linear_combination_of_weights(parts), 0.0)
    def test_3(self):
        parts = [(0.0, 1), (1.0, 1)]
        self.assertEqual(util.linear_combination_of_weights(parts), 0.5)
    def test_4(self):
        parts = [(0.5, 4), (1.0, 1)]
        self.assertEqual(util.linear_combination_of_weights(parts), 0.6)
    def test_5(self):
        parts = [(0.95, 100), (0.05, 399), (0.0, 1), (1.0, 0)]
        self.assertEqual(util.linear_combination_of_weights(parts), 0.2299)
    def test_6(self):
        # values outside [0, 1] are rejected
        parts = [(-0.5, 4)]
        self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
    def test_7(self):
        # negative weights are rejected
        parts = [(0.5, -4)]
        self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
    def test_8(self):
        parts = [(1.5, 4)]
        self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
    def test_9(self):
        # NOTE(review): the double parentheses do NOT build a nested tuple --
        # parts is the flat tuple (1.5, 4), so the TypeError comes from
        # unpacking scalar items. If a tuple-of-tuples was intended it would
        # need to be ((1.5, 4),), but that input raises ValueError instead.
        # Confirm the intent before "fixing" this.
        parts = ((1.5, 4))
        self.assertRaises(TypeError, util.linear_combination_of_weights, parts)
class AlbumArtistFromPathTest(PicardTestCase):
def test_album_artist_from_path(self):
aafp = util.album_artist_from_path
file_1 = r"/10cc/Original Soundtrack/02 I'm Not in Love.mp3"
file_2 = r"/10cc - Original Soundtrack/02 I'm Not in Love.mp3"
file_3 = r"/Original Soundtrack/02 I'm Not in Love.mp3"
file_4 = r"/02 I'm Not in Love.mp3"
file_5 = r"/10cc - Original Soundtrack - bonus/02 I'm Not in Love.mp3"
self.assertEqual(aafp(file_1, '', ''), ('Original Soundtrack', '10cc'))
self.assertEqual(aafp(file_2, '', ''), ('Original Soundtrack', '10cc'))
self.assertEqual(aafp(file_3, '', ''), ('Original Soundtrack', ''))
self.assertEqual(aafp(file_4, '', ''), ('', ''))
self.assertEqual(aafp(file_5, '', ''), ('Original Soundtrack - bonus', '10cc'))
self.assertEqual(aafp(file_1, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_2, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_3, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_4, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_1, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_2, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_3, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_4, '', 'artist'), ('', 'artist'))
self.assertEqual(aafp(file_1, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_2, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_3, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_4, 'album', 'artist'), ('album', 'artist'))
for name in ('', 'x', '/', '\\', '///'):
self.assertEqual(aafp(name, '', 'artist'), ('', 'artist'))
# test Strip disc subdirectory
self.assertEqual(aafp(r'/artistx/albumy/CD 1/file.flac', '', ''), ('albumy', 'artistx'))
self.assertEqual(aafp(r'/artistx/albumy/the DVD 23 B/file.flac', '', ''), ('albumy', 'artistx'))
self.assertEqual(aafp(r'/artistx/albumy/disc23/file.flac', '', ''), ('albumy', 'artistx'))
self.assertNotEqual(aafp(r'/artistx/albumy/disc/file.flac', '', ''), ('albumy', 'artistx'))
class IsAbsolutePathTest(PicardTestCase):
def test_is_absolute(self):
self.assertTrue(is_absolute_path('/foo/bar'))
self.assertFalse(is_absolute_path('foo/bar'))
self.assertFalse(is_absolute_path('./foo/bar'))
self.assertFalse(is_absolute_path('../foo/bar'))
@unittest.skipUnless(IS_WIN, "windows test")
def test_is_absolute_windows(self):
self.assertTrue(is_absolute_path('D:/foo/bar'))
self.assertTrue(is_absolute_path('D:\\foo\\bar'))
self.assertTrue(is_absolute_path('\\foo\\bar'))
# Paths to Windows shares
self.assertTrue(is_absolute_path('\\\\foo\\bar'))
self.assertTrue(is_absolute_path('\\\\foo\\bar\\'))
self.assertTrue(is_absolute_path('\\\\foo\\bar\\baz'))
class CompareBarcodesTest(PicardTestCase):
def test_same(self):
self.assertTrue(util.compare_barcodes('0727361379704', '0727361379704'))
self.assertTrue(util.compare_barcodes('727361379704', '727361379704'))
self.assertTrue(util.compare_barcodes('727361379704', '0727361379704'))
self.assertTrue(util.compare_barcodes('0727361379704', '727361379704'))
self.assertTrue(util.compare_barcodes(None, None))
self.assertTrue(util.compare_barcodes('', ''))
self.assertTrue(util.compare_barcodes(None, ''))
self.assertTrue(util.compare_barcodes('', None))
def test_not_same(self):
self.assertFalse(util.compare_barcodes('0727361379704', '0727361379705'))
self.assertFalse(util.compare_barcodes('727361379704', '1727361379704'))
self.assertFalse(util.compare_barcodes('0727361379704', None))
self.assertFalse(util.compare_barcodes(None, '0727361379704'))
class MbidValidateTest(PicardTestCase):
    """Tests for util.mbid_validate (MusicBrainz identifier / UUID validation)."""
    def test_ok(self):
        # valid UUIDs are accepted case-insensitively; wrong characters,
        # wrong lengths and wrong separators are rejected
        self.assertTrue(util.mbid_validate('2944824d-4c26-476f-a981-be849081942f'))
        self.assertTrue(util.mbid_validate('2944824D-4C26-476F-A981-be849081942f'))
        self.assertFalse(util.mbid_validate(''))
        self.assertFalse(util.mbid_validate('Z944824d-4c26-476f-a981-be849081942f'))
        self.assertFalse(util.mbid_validate('22944824d-4c26-476f-a981-be849081942f'))
        self.assertFalse(util.mbid_validate('2944824d-4c26-476f-a981-be849081942ff'))
        self.assertFalse(util.mbid_validate('2944824d-4c26.476f-a981-be849081942f'))
    def test_not_ok(self):
        # non-string input is a programming error, not a validation failure
        self.assertRaises(TypeError, util.mbid_validate, 123)
        self.assertRaises(TypeError, util.mbid_validate, None)
SimMatchTest = namedtuple('SimMatchTest', 'similarity name')
class SortBySimilarity(PicardTestCase):
def setUp(self):
super().setUp()
self.test_values = [
SimMatchTest(similarity=0.74, name='d'),
SimMatchTest(similarity=0.61, name='a'),
SimMatchTest(similarity=0.75, name='b'),
SimMatchTest(similarity=0.75, name='c'),
]
def candidates(self):
yield from self.test_values
def test_sort_by_similarity(self):
results = [result.name for result in sort_by_similarity(self.candidates)]
self.assertEqual(results, ['b', 'c', 'd', 'a'])
def test_findbestmatch(self):
no_match = SimMatchTest(similarity=-1, name='no_match')
best_match = find_best_match(self.candidates, no_match)
self.assertEqual(best_match.result.name, 'b')
self.assertEqual(best_match.similarity, 0.75)
self.assertEqual(best_match.num_results, 4)
def test_findbestmatch_nomatch(self):
self.test_values = []
no_match = SimMatchTest(similarity=-1, name='no_match')
best_match = find_best_match(self.candidates, no_match)
self.assertEqual(best_match.result.name, 'no_match')
self.assertEqual(best_match.similarity, -1)
self.assertEqual(best_match.num_results, 0)
class GetQtEnum(PicardTestCase):
def test_get_qt_enum(self):
from PyQt5.QtCore import QStandardPaths
values = util.get_qt_enum(QStandardPaths, QStandardPaths.LocateOption)
self.assertIn('LocateFile', values)
self.assertIn('LocateDirectory', values)
self.assertNotIn('DesktopLocation', values)
class LimitedJoin(PicardTestCase):
def setUp(self):
super().setUp()
self.list = [str(x) for x in range(0, 10)]
def test_1(self):
expected = '0+1+...+8+9'
result = limited_join(self.list, 5, '+', '...')
self.assertEqual(result, expected)
def test_2(self):
expected = '0+1+2+3+4+5+6+7+8+9'
result = limited_join(self.list, -1)
self.assertEqual(result, expected)
result = limited_join(self.list, len(self.list))
self.assertEqual(result, expected)
result = limited_join(self.list, len(self.list) + 1)
self.assertEqual(result, expected)
def test_3(self):
expected = '0,1,2,3,…,6,7,8,9'
result = limited_join(self.list, len(self.list) - 1, ',')
self.assertEqual(result, expected)
class IterFilesFromObjectsTest(PicardTestCase):
def test_iterate_only_unique(self):
f1 = Mock()
f2 = Mock()
f3 = Mock()
obj1 = Mock()
obj1.iterfiles = Mock(return_value=[f1, f2])
obj2 = Mock()
obj2.iterfiles = Mock(return_value=[f2, f3])
result = iter_files_from_objects([obj1, obj2])
self.assertTrue(isinstance(result, Iterator))
self.assertEqual([f1, f2, f3], list(result))
class IterUniqifyTest(PicardTestCase):
def test_unique(self):
items = [1, 2, 3, 2, 3, 4]
result = uniqify(items)
self.assertEqual([1, 2, 3, 4], result)
class IterUniqueTest(PicardTestCase):
def test_unique(self):
items = [1, 2, 3, 2, 3, 4]
result = iter_unique(items)
self.assertTrue(isinstance(result, Iterator))
self.assertEqual([1, 2, 3, 4], list(result))
class TracknumFromFilenameTest(PicardTestCase):
def test_returns_expected_tracknumber(self):
tests = (
(2, '2.mp3'),
(2, '02.mp3'),
(2, '002.mp3'),
(None, 'Foo.mp3'),
(1, 'Foo 0001.mp3'),
(1, '1 song.mp3'),
(99, '99 Foo.mp3'),
(42, '42. Foo.mp3'),
(None, '20000 Feet.mp3'),
(242, 'track no 242.mp3'),
(77, 'Track no. 77 .mp3'),
(242, 'track-242.mp3'),
(242, 'track nr 242.mp3'),
(242, 'track_242.mp3'),
(1, 'artist song 2004 track01 xxxx.ogg'),
(1, 'artist song 2004 track-no-01 xxxx.ogg'),
(1, 'artist song 2004 track-no_01 xxxx.ogg'),
(1, '01_foo.mp3'),
(1, '01ābc.mp3'),
(1, '01abc.mp3'),
(11, "11 Linda Jones - Things I've Been Through 08.flac"),
(1, "01 artist song [2004] (02).mp3"),
(1, "01 artist song [04].mp3"),
(7, "artist song [2004] [7].mp3"),
# (7, "artist song [2004] (7).mp3"),
(7, 'artist song [2004] [07].mp3'),
(7, 'artist song [2004] (07).mp3'),
(4, 'xx 01 artist song [04].mp3'),
(None, 'artist song-(666) (01) xxx.ogg'),
(None, 'song-70s 69 comment.mp3'),
(13, "2_13 foo.mp3"),
(13, "02-13 foo.mp3"),
(None, '1971.mp3'),
(42, '1971 Track 42.mp3'),
(None, "artist song [2004].mp3"),
(None, '0.mp3'),
(None, 'track00.mp3'),
(None, 'song [2004] [1000].mp3'),
(None, 'song 2015.mp3'),
(None, '2015 song.mp3'),
(None, '30,000 Pounds of Bananas.mp3'),
(None, 'Dalas 1 PM.mp3'),
(None, "Don't Stop the 80's.mp3"),
(None, 'Symphony no. 5 in D minor.mp3'),
(None, 'Song 2.mp3'),
(None, '80s best of.mp3'),
(None, 'best of 80s.mp3'),
# (None, '99 Luftballons.mp3'),
(7, '99 Luftballons Track 7.mp3'),
(None, 'Margin 0.001.mp3'),
(None, 'All the Small Things - blink‐182.mp3'),
(None, '99.99 Foo.mp3'),
(5, '٠٥ فاصله میان دو پرده.mp3'),
(23, '23 foo.mp3'),
(None, '²³ foo.mp3'),
)
for expected, filename in tests:
tracknumber = tracknum_from_filename(filename)
self.assertEqual(expected, tracknumber, filename)
class TracknumAndTitleFromFilenameTest(PicardTestCase):
def test_returns_expected_tracknumber(self):
tests = (
((None, 'Foo'), 'Foo.mp3'),
(('1', 'Track 0001'), 'Track 0001.mp3'),
(('99', 'Foo'), '99 Foo.mp3'),
(('42', 'Foo'), '0000042 Foo.mp3'),
(('2', 'Foo'), '0000002 Foo.mp3'),
((None, '20000 Feet'), '20000 Feet.mp3'),
((None, '20,000 Feet'), '20,000 Feet.mp3'),
)
for expected, filename in tests:
result = tracknum_and_title_from_filename(filename)
self.assertEqual(expected, result)
def test_namedtuple(self):
result = tracknum_and_title_from_filename('0000002 Foo.mp3')
self.assertEqual(result.tracknumber, '2')
self.assertEqual(result.title, 'Foo')
class PatternAsRegexTest(PicardTestCase):
def test_regex(self):
regex = pattern_as_regex(r'/^foo.*/')
self.assertEqual(r'^foo.*', regex.pattern)
self.assertFalse(regex.flags & re.IGNORECASE)
self.assertFalse(regex.flags & re.MULTILINE)
def test_regex_flags(self):
regex = pattern_as_regex(r'/^foo.*/', flags=re.MULTILINE | re.IGNORECASE)
self.assertEqual(r'^foo.*', regex.pattern)
self.assertTrue(regex.flags & re.IGNORECASE)
self.assertTrue(regex.flags & re.MULTILINE)
def test_regex_extra_flags(self):
regex = pattern_as_regex(r'/^foo.*/im', flags=re.VERBOSE)
self.assertEqual(r'^foo.*', regex.pattern)
self.assertTrue(regex.flags & re.VERBOSE)
self.assertTrue(regex.flags & re.IGNORECASE)
self.assertTrue(regex.flags & re.MULTILINE)
def test_regex_raises(self):
with self.assertRaises(re.error):
pattern_as_regex(r'/^foo(.*/')
def test_wildcard(self):
regex = pattern_as_regex(r'(foo?)\\*\?\*', allow_wildcards=True)
self.assertEqual(r'^\(foo.\)\\.*\?\*$', regex.pattern)
self.assertFalse(regex.flags & re.IGNORECASE)
self.assertFalse(regex.flags & re.MULTILINE)
def test_wildcard_flags(self):
regex = pattern_as_regex(r'(foo)*', allow_wildcards=True, flags=re.MULTILINE | re.IGNORECASE)
self.assertEqual(r'^\(foo\).*$', regex.pattern)
self.assertTrue(regex.flags & re.IGNORECASE)
self.assertTrue(regex.flags & re.MULTILINE)
def test_string_match(self):
regex = pattern_as_regex(r'(foo)*', allow_wildcards=False)
self.assertEqual(r'\(foo\)\*', regex.pattern)
self.assertFalse(regex.flags & re.IGNORECASE)
self.assertFalse(regex.flags & re.MULTILINE)
def test_string_match_flags(self):
regex = pattern_as_regex(r'(foo)*', allow_wildcards=False, flags=re.MULTILINE | re.IGNORECASE)
self.assertEqual(r'\(foo\)\*', regex.pattern)
self.assertTrue(regex.flags & re.IGNORECASE)
self.assertTrue(regex.flags & re.MULTILINE)
class WildcardsToRegexPatternTest(PicardTestCase):
def test_wildcard_pattern(self):
pattern = 'fo?o*'
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual('fo.o.*', regex)
re.compile(regex)
def test_escape(self):
pattern = 'f\\?o\\*o?o*\\[o'
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual('f\\?o\\*o.o.*\\[o', regex)
re.compile(regex)
def test_character_group(self):
pattern = '[abc*?xyz]]'
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual('[abc*?xyz]\\]', regex)
re.compile(regex)
def test_character_group_escape_square_brackets(self):
pattern = '[a[b\\]c]'
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual('[a[b\\]c]', regex)
re.compile(regex)
def test_open_character_group(self):
pattern = '[abc*?xyz['
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual('\\[abc.*.xyz\\[', regex)
re.compile(regex)
def test_special_chars(self):
pattern = ']()\\^$|'
regex = wildcards_to_regex_pattern(pattern)
self.assertEqual(re.escape(pattern), regex)
re.compile(regex)
| musicbrainz/picard | test/test_utils.py | Python | gpl-2.0 | 24,993 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..common import *
from ..extractor import VideoExtractor
from json import loads
class QiE(VideoExtractor):
    """Stream extractor for qie.tv / live.qq.com (企鹅直播) live rooms."""
    name = "QiE (企鹅直播)"
    # Last updated: 2015-11-24
    # Known stream qualities; ids must match the keys returned by the room API.
    stream_types = [
        {'id': 'normal', 'container': 'flv', 'video_profile': '标清'},
        {'id': 'middle', 'container': 'flv', 'video_profile': '550'},
        {'id': 'middle2', 'container': 'flv', 'video_profile': '900'},
    ]
    # map from human-readable profile to stream id, e.g. '550' -> 'middle'
    id_dic = {i['video_profile']:(i['id']) for i in stream_types}
    api_endpoint = 'http://www.qie.tv/api/v1/room/{room_id}'
    @staticmethod
    def get_vid_from_url(url):
        """Extracts video ID from live.qq.com.
        """
        html = get_content(url)
        return match1(html, r'room_id\":(\d+)')
    def download_playlist_by_url(self, url, **kwargs):
        # Playlists make no sense for a live stream; intentionally a no-op.
        pass
    def prepare(self, **kwargs):
        """Query the room API and populate self.title and self.streams."""
        if self.url:
            self.vid = self.get_vid_from_url(self.url)
        content = get_content(self.api_endpoint.format(room_id = self.vid))
        content = loads(content)
        self.title = content['data']['room_name']
        rtmp_url = content['data']['rtmp_url']
        #stream_avalable = [i['name'] for i in content['data']['stream']]
        stream_available = {}
        # the base quality is always present; extra bitrates are optional
        stream_available['normal'] = rtmp_url + '/' + content['data']['rtmp_live']
        if len(content['data']['rtmp_multi_bitrate']) > 0:
            for k , v in content['data']['rtmp_multi_bitrate'].items():
                stream_available[k] = rtmp_url + '/' + v
        for s in self.stream_types:
            if s['id'] in stream_available.keys():
                quality_id = s['id']
                url = stream_available[quality_id]
                # size is unknown for live streams, hence 0
                self.streams[quality_id] = {
                    'container': 'flv',
                    'video_profile': s['video_profile'],
                    'size': 0,
                    'url': url
                }
    def extract(self, **kwargs):
        # give every prepared stream the 'src' list expected downstream
        for i in self.streams:
            s = self.streams[i]
            s['src'] = [s['url']]
        if 'stream_id' in kwargs and kwargs['stream_id']:
            # Extract the stream
            stream_id = kwargs['stream_id']
            if stream_id not in self.streams:
                log.e('[Error] Invalid video format.')
                log.e('Run \'-i\' command with no specific video format to view all available formats.')
                exit(2)
        else:
            # Extract stream with the best quality
            stream_id = self.streams_sorted[0]['id']
        # NOTE(review): `s` here is whatever stream the loop above visited
        # last, not the one selected via stream_id; the assignment is
        # redundant since every stream's 'src' was already set. Confirm
        # before relying on it.
        s['src'] = [s['url']]
# Module-level entry points expected by the you_get extractor dispatcher.
site = QiE()
download = site.download_by_url
download_playlist = playlist_not_supported('QiE')
from django import forms
from .models import SignUp
class ContactForm(forms.Form):
    """Unbound contact form: optional name, required email and message."""
    full_name = forms.CharField(required=False)
    email = forms.EmailField()
    message = forms.CharField()
class SignUpForm(forms.ModelForm):
    """ModelForm for SignUp that only accepts .edu email addresses."""
    class Meta:
        model = SignUp
        fields = ['full_name', 'email']
        ### exclude = ['full_name']
    def clean_email(self):
        """Validate that the email's top-level domain is .edu.

        Raises forms.ValidationError for non-.edu addresses. EmailField has
        already validated the basic user@domain structure at this point.
        """
        email = self.cleaned_data.get('email')
        # Use rsplit(...)[ -1] instead of strict tuple unpacking: domains such
        # as "mail.usc.edu" contain more than one dot, and the previous
        # two-way unpacking raised an unhandled ValueError ("too many values
        # to unpack") instead of a form validation error.
        provider = email.rsplit('@', 1)[-1]
        extension = provider.rsplit('.', 1)[-1]
        # if not provider.rsplit('.', 1)[0] == 'USC':
        #     raise forms.ValidationError("Please make sure you use your USC email.")
        if not extension == "edu":
            raise forms.ValidationError("Please use a valid .EDU email address")
        return email
    def clean_full_name(self):
        full_name = self.cleaned_data.get('full_name')
        # Placeholder: additional name validation can go here.
        return full_name
#!/usr/bin/env python3
# Author: Stefan Toman
if __name__ == '__main__':
    # read the two integer operands, one per line
    first = int(input())
    second = int(input())
    # print sum, difference and product, each on its own line
    for result in (first + second, first - second, first * second):
        print(result)
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Daniel Drizhuk, [email protected], 2017
# - Mario Lassnig, [email protected], 2017
# - Paul Nilsson, [email protected], 2017-2020
import collections
import subprocess # Python 2/3
try:
import commands # Python 2
except Exception:
pass
import json
import os
import platform
import ssl
import sys
try:
import urllib.request # Python 3
import urllib.error # Python 3
import urllib.parse # Python 3
except Exception:
import urllib # Python 2
import urllib2 # Python 2
import pipes
from .filehandling import write_file
from .auxiliary import is_python3
from .config import config
from .constants import get_pilot_version
import logging
logger = logging.getLogger(__name__)
# NOTE(review): `_ctx` is a namedtuple *class*, not an instance; the code
# later assigns user_agent/capath/cacert/ssl_context on the class object
# itself, which works but is unconventional -- `ctx` below is the cleaner
# replacement for the same role.
_ctx = collections.namedtuple('_ctx', 'ssl_context user_agent capath cacert')
# anisyonk: public copy of `_ctx` to avoid logic break since ssl_context is reset inside the request() -- FIXME
# anisyonk: public instance, should be properly initialized by `https_setup()`
# anisyonk: use lightweight class definition instead of namedtuple since tuple is immutable and we don't need/use any tuple features here
ctx = type('ctx', (object,), dict(ssl_context=None, user_agent='Pilot2 client', capath=None, cacert=None))
def _tester(func, *args):
    """Return the first argument that is not None and satisfies ``func``.

    Example: _tester(lambda x: x % 3 == 0, 1, 2, 3, 4, 5, 6) returns 3,
    while _tester(lambda x: x % 3 == 0, 1, 2) returns None.

    :param func: predicate, function(arg) -> boolean.
    :param args: candidate values, checked in order.
    :return: first matching argument, or None when nothing matches.
    """
    return next((candidate for candidate in args
                 if candidate is not None and func(candidate)), None)
def capath(args=None):
    """Locate a directory holding CA certificates.

    Candidate locations, in priority order:

    1. :option:`--capath` from the command line arguments
    2. the :envvar:`X509_CERT_DIR` environment variable
    3. ``/etc/grid-security/certificates``

    :param args: arguments parsed by `argparse` (may be None).
    :returns: `str` -- first candidate that is an existing directory, or `None`.
    """
    candidates = (
        args.capath if args else None,
        os.environ.get('X509_CERT_DIR'),
        '/etc/grid-security/certificates',
    )
    return _tester(os.path.isdir, *candidates)
def cacert_default_location():
    """Return the default POSIX location of the user's X509 proxy certificate.

    The conventional grid proxy path is ``/tmp/x509up_uXXX`` where ``XXX``
    is the numeric user ID.

    :returns: `str` -- default x509 proxy path, or `None` on platforms
        without `os.getuid` (e.g. Windows).
    """
    try:
        return '/tmp/x509up_u%s' % str(os.getuid())
    except AttributeError:
        # os.getuid() only exists on POSIX systems
        # (logger.warning instead of the deprecated logger.warn alias)
        logger.warning('No UID available? System not POSIX-compatible... trying to continue')
    return None
def cacert(args=None):
    """Locate a CA or X509 proxy certificate file.

    Candidate locations, in priority order:

    1. :option:`--cacert` from the command line arguments
    2. the :envvar:`X509_USER_PROXY` environment variable
    3. the default proxy path ``/tmp/x509up_uXXX`` (XXX = user ID)

    :param args: arguments parsed by `argparse` (may be None).
    :returns: `str` -- first candidate that is an existing regular file, or `None`.
    """
    candidates = (
        args.cacert if args else None,
        os.environ.get('X509_USER_PROXY'),
        cacert_default_location(),
    )
    return _tester(os.path.isfile, *candidates)
def https_setup(args=None, version=None):
    """
    Sets up the context for future HTTPS requests:
    1. Selects the certificate paths
    2. Sets up :mailheader:`User-Agent`
    3. Tries to create `ssl.SSLContext` for future use (falls back to :command:`curl` if fails)
    Mutates the module-level `_ctx` and `ctx` objects as a side effect.
    :param args: arguments, parsed by `argparse`
    :param str version: pilot version string (for :mailheader:`User-Agent`)
    """
    version = version or get_pilot_version()
    # e.g. 'pilot/2.0.0 (Python 3.6.4; Linux x86_64)'
    _ctx.user_agent = 'pilot/%s (Python %s; %s %s)' % (version,
                                                       sys.version.split()[0],
                                                       platform.system(),
                                                       platform.machine())
    logger.debug('User-Agent: %s' % _ctx.user_agent)
    _ctx.capath = capath(args)
    _ctx.cacert = cacert(args)
    if sys.version_info < (2, 7, 9):  # by anisyonk: actually SSL context should work, but prior to 2.7.9 there is no automatic hostname/certificate validation
        logger.warn('Python version <2.7.9 lacks SSL contexts -- falling back to curl')
        _ctx.ssl_context = None
    else:
        try:
            _ctx.ssl_context = ssl.create_default_context(capath=_ctx.capath,
                                                          cafile=_ctx.cacert)
        except Exception as e:
            logger.warn('SSL communication is impossible due to SSL error: %s -- falling back to curl' % str(e))
            _ctx.ssl_context = None
    # anisyonk: clone `_ctx` to avoid logic break since ssl_context is reset inside the request() -- FIXME
    ctx.capath = _ctx.capath
    ctx.cacert = _ctx.cacert
    ctx.user_agent = _ctx.user_agent
    try:
        ctx.ssl_context = ssl.create_default_context(capath=ctx.capath, cafile=ctx.cacert)
        # load the proxy as client certificate -- a grid proxy bundles
        # certificate and key in one file
        ctx.ssl_context.load_cert_chain(ctx.cacert)
    except Exception as e:  # redandant try-catch protection, should work well for both python2 & python3 -- CLEAN ME later (anisyonk)
        logger.warn('Failed to initialize SSL context .. skipped, error: %s' % str(e))
def request(url, data=None, plain=False, secure=True):
    """
    This function sends a request using HTTPS.
    Sends :mailheader:`User-Agent` and certificates previously being set up by `https_setup`.
    If `ssl.SSLContext` is available, uses `urllib2` as a request processor. Otherwise uses :command:`curl`.
    If ``data`` is provided, encodes it as a URL form data and sends it to the server.
    Treats the request as JSON unless a parameter ``plain`` is `True`.
    If JSON is expected, sends ``Accept: application/json`` header.
    :param string url: the URL of the resource
    :param dict data: data to send
    :param boolean plain: if true, treats the response as a plain text.
    :param secure: Boolean (default: True, ie use certificates)
    Usage:
    .. code-block:: python
        :emphasize-lines: 2
        https_setup(args, PILOT_VERSION)  # sets up ssl and other stuff
        response = request('https://some.url', {'some':'data'})
    Returns:
        - :keyword:`dict` -- if everything went OK
        - `str` -- if ``plain`` parameter is `True`
        - `None` -- if something went wrong
    """
    _ctx.ssl_context = None  # certificates are not available on the grid, use curl
    # NOTE(review): since ssl_context is forced to None above, the urllib
    # branch below is only reachable when secure=False -- confirm intent.
    logger.debug('server update dictionary = \n%s' % str(data))
    # get the filename and strdata for the curl config file
    filename, strdata = get_vars(url, data)
    # write the strdata to file
    writestatus = write_file(filename, strdata)
    # get the config option for the curl command
    dat = get_curl_config_option(writestatus, url, data, filename)
    if _ctx.ssl_context is None and secure:
        req = get_curl_command(plain, dat)
        try:
            status, output = execute_request(req)
        except Exception as e:
            logger.warning('exception: %s' % e)
            return None
        else:
            # non-zero curl exit status means the request failed
            if status != 0:
                logger.warn('request failed (%s): %s' % (status, output))
                return None
            # return output if plain otherwise return json.loads(output)
            if plain:
                return output
            else:
                try:
                    ret = json.loads(output)
                except Exception as e:
                    logger.warning('json.loads() failed to parse output=%s: %s' % (output, e))
                    return None
                else:
                    return ret
    else:
        req = execute_urllib(url, data, plain, secure)
        context = _ctx.ssl_context if secure else None
        if is_python3():  # Python 3
            ec, output = get_urlopen_output(req, context)
            if ec:
                return None
        else:  # Python 2
            ec, output = get_urlopen2_output(req, context)
            if ec:
                return None
        # output is a response object here; read it as text or parse as JSON
        return output.read() if plain else json.load(output)
def get_curl_command(plain, dat):
    """
    Get the curl command.
    :param plain: if True, do not request a JSON response (Boolean).
    :param dat: curl config option (string), as built by get_curl_config_option().
    :return: curl command (string).
    """
    # The proxy file (_ctx.cacert) is passed for --cert, --cacert and --key
    # alike: a grid proxy bundles certificate and key in one file.
    # Note the precedence on the '-H ... if not plain' line: the conditional
    # covers the whole concatenation, so plain=True drops the Accept header.
    req = 'curl -sS --compressed --connect-timeout %s --max-time %s '\
          '--capath %s --cert %s --cacert %s --key %s '\
          '-H %s %s %s' % (config.Pilot.http_connect_timeout, config.Pilot.http_maxtime,
                           pipes.quote(_ctx.capath or ''), pipes.quote(_ctx.cacert or ''),
                           pipes.quote(_ctx.cacert or ''), pipes.quote(_ctx.cacert or ''),
                           pipes.quote('User-Agent: %s' % _ctx.user_agent),
                           "-H " + pipes.quote('Accept: application/json') if not plain else '',
                           dat)
    logger.info('request: %s' % req)
    return req
def get_vars(url, data):
    """Build the curl config payload and the name of its config file.

    Each key/value pair in ``data`` becomes one URL-encoded ``data="..."``
    line for a curl config file. The file name is derived from the last
    path component of the URL, plus the job id when one is present.

    :param url: request URL (string).
    :param data: dictionary to be URL-encoded (dictionary).
    :return: filename (string), strdata (string).
    """
    lines = []
    for key in data:
        try:
            encoded = urllib.parse.urlencode({key: data[key]})  # Python 3
        except Exception:
            encoded = urllib.urlencode({key: data[key]})  # Python 2
        lines.append('data="%s"\n' % encoded)
    strdata = ''.join(lines)

    jobid = '_%s' % data['jobId'] if 'jobId' in list(data.keys()) else ''

    # write data to temporary config file
    filename = '%s/curl_%s%s.config' % (os.getenv('PILOT_HOME'), os.path.basename(url), jobid)

    return filename, strdata
def get_curl_config_option(writestatus, url, data, filename):
    """Return the data-related part of the curl command line.

    If the config file was written successfully, point curl at it with
    ``--config``; otherwise fall back to URL-encoding the data directly
    into the request URL.

    :param writestatus: outcome of the write_file() call (Boolean).
    :param url: request URL (string).
    :param data: data structure (dictionary).
    :param filename: curl config file name (string).
    :return: config option (string).
    """
    if writestatus:
        return '--config %s %s' % (filename, url)

    # logger.warning instead of the deprecated logger.warn alias
    logger.warning('failed to create curl config file (will attempt to urlencode data directly)')
    # note the precedence: the conditional covers the whole concatenation,
    # so falsy data yields the quoted empty string
    try:
        return pipes.quote(url + '?' + urllib.parse.urlencode(data) if data else '')  # Python 3
    except Exception:
        return pipes.quote(url + '?' + urllib.urlencode(data) if data else '')  # Python 2
def execute_request(req):
    """Run the prepared curl command in a subshell.

    :param req: curl command line (string).
    :return: exit status (int), captured output (string).
    """
    try:
        # Python 3
        return subprocess.getstatusoutput(req)
    except Exception:
        # Python 2
        return commands.getstatusoutput(req)
def execute_urllib(url, data, plain, secure):
    """Build the urllib request object for a server update.

    :param url: request URL (string).
    :param data: data to send (dictionary).
    :param plain: if True, do not request a JSON response (Boolean).
    :param secure: if True, add the pilot User-Agent header (Boolean).
    :return: urllib request object.
    """
    try:
        # Python 3: urlopen() requires the POST body as bytes -- passing the
        # plain urlencode() str raised TypeError at request time
        req = urllib.request.Request(url, urllib.parse.urlencode(data).encode('utf-8'))  # Python 3
    except Exception:
        req = urllib2.Request(url, urllib.urlencode(data))  # Python 2
    if not plain:
        req.add_header('Accept', 'application/json')
    if secure:
        req.add_header('User-Agent', _ctx.user_agent)
    return req
def get_urlopen_output(req, context):
    """Perform the urlopen request (Python 3 path) and report its outcome.

    :param req: urllib request object.
    :param context: ssl context (or None).
    :return: ec (int, 0 on success, -1 on failure), output (response object,
        or empty string on failure).
    """
    ec = -1
    output = ""
    try:
        output = urllib.request.urlopen(req, context=context)
    except urllib.error.HTTPError as e:
        # the server responded with an HTTP error status
        # (logger.warning instead of the deprecated logger.warn alias)
        logger.warning('server error (%s): %s' % (e.code, e.read()))
    except urllib.error.URLError as e:
        # the connection itself failed (DNS, refused, SSL, ...)
        logger.warning('connection error: %s' % e.reason)
    else:
        ec = 0
    return ec, output
def get_urlopen2_output(req, context):
    """
    Open the urllib2 (Python 2) request and return the response object.

    :param req: urllib2 request object.
    :param context: ssl context used for the connection.
    :return: ec (int; 0 on success, -1 on failure), output (response object
        from urlopen, or "" on failure).
    """
    ec = -1
    output = ""
    try:
        output = urllib2.urlopen(req, context=context)
    except urllib2.HTTPError as e:
        # logger.warn is a deprecated alias; use warning()
        logger.warning('server error (%s): %s' % (e.code, e.read()))
    except urllib2.URLError as e:
        logger.warning('connection error: %s' % e.reason)
    else:
        ec = 0
    return ec, output
| PalNilsson/pilot2 | pilot/util/https.py | Python | apache-2.0 | 12,783 |
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2015 Alterra, Wageningen-UR
# Allard de Wit and Iwan Supit ([email protected]), July 2015
# Approach based on LINTUL N/P/K made by Joost Wolf
from ...base_classes import StatesTemplate, ParamTemplate, SimulationObject, \
AfgenTrait, RatesTemplate
from ...decorators import prepare_rates, prepare_states
from ...traitlets import HasTraits, Float, Int, Instance
class NPK_Demand_Uptake(SimulationObject):
    """Calculates the crop N/P/K demand and its uptake from the soil.

    Crop N/P/K demand is calculated as the difference between the
    actual N/P/K concentration (kg N/P/K per kg biomass) in the
    vegetative plant organs (leaves, stems and roots) and the maximum
    N/P/K concentration for each organ. N/P/K uptake is then estimated
    as the minimum of supply from the soil and demand from the crop.

    Nitrogen fixation (leguminous plants) is calculated by assuming that a
    fixed fraction of the daily N demand is supplied by nitrogen fixation.
    The remaining part has to be supplied by the soil.

    The N/P/K demand of the storage organs is calculated in a somewhat
    different way because it is assumed that the demand from the storage
    organs is fulfilled by translocation of N/P/K from the leaves, stems
    and roots. Therefore the uptake of the storage organs is calculated
    as the minimum of the translocatable N/P/K (supply) and the demand from
    the storage organs. Moreover, there is a time coefficient for translocation
    which takes into account that there is a delay in the availability of
    translocatable N/P/K.

    **Simulation parameters**

    ============  ============================================= =======  ======================
     Name          Description                                   Type     Unit
    ============  ============================================= =======  ======================
    NMAXLV_TB      Maximum N concentration in leaves as          TCr     kg N kg-1 dry biomass
                   function of DVS
    PMAXLV_TB      As for P                                      TCr     kg P kg-1 dry biomass
    KMAXLV_TB      As for K                                      TCr     kg K kg-1 dry biomass

    NMAXRT_FR      Maximum N concentration in roots as fraction  SCr     -
                   of maximum N concentration in leaves
    PMAXRT_FR      As for P                                      SCr     -
    KMAXRT_FR      As for K                                      SCr     -

    NMAXST_FR      Maximum N concentration in stems as fraction  SCr     -
                   of maximum N concentration in leaves
    PMAXST_FR      As for P                                      SCr     -
    KMAXST_FR      As for K                                      SCr     -

    NMAXSO         Maximum N concentration in storage organs     SCr     kg N kg-1 dry biomass
    PMAXSO         As for P                                      SCr     kg P kg-1 dry biomass
    KMAXSO         As for K                                      SCr     kg K kg-1 dry biomass

    NCRIT_FR       Critical N concentration as fraction of       SCr     -
                   maximum N concentration for vegetative
                   plant organs as a whole (leaves + stems)
    PCRIT_FR       As for P                                      SCr     -
    KCRIT_FR       As for K                                      SCr     -
    TCNT           Time coefficient for N translation to         SCr     days
                   storage organs
    TCPT           As for P                                      SCr     days
    TCKT           As for K                                      SCr     days
    NFIX_FR        fraction of crop nitrogen uptake by           SCr     kg N kg-1 dry biomass
                   biological fixation
    DVS_NPK_STOP   Development stage after which no nutrients    SCr     -
                   are taken up from the soil by the crop.
    ============  ============================================= ======= =======================

    **State variables**

    =======  ================================================= ==== ============
     Name     Description                                      Pbl      Unit
    =======  ================================================= ==== ============
    NDEMLV     N Demand in living leaves                         N   |kg N ha-1|
    NDEMST     N Demand in living stems                          N   |kg N ha-1|
    NDEMRT     N Demand in living roots                          N   |kg N ha-1|
    NDEMSO     N Demand in storage organs                        N   |kg N ha-1|

    PDEMLV     P Demand in living leaves                         N   |kg P ha-1|
    PDEMST     P Demand in living stems                          N   |kg P ha-1|
    PDEMRT     P Demand in living roots                          N   |kg P ha-1|
    PDEMSO     P Demand in storage organs                        N   |kg P ha-1|

    KDEMLV     K Demand in living leaves                         N   |kg K ha-1|
    KDEMST     K Demand in living stems                          N   |kg K ha-1|
    KDEMRT     K Demand in living roots                          N   |kg K ha-1|
    KDEMSO     K Demand in storage organs                        N   |kg K ha-1|
    =======  ================================================= ==== ============

    **Rate variables**

    =======  ================================================= ==== ================
     Name     Description                                      Pbl      Unit
    =======  ================================================= ==== ================
    RNULV     Rate of N uptake in leaves                        Y   |kg N ha-1 d-1|
    RNUST     Rate of N uptake in stems                         Y   |kg N ha-1 d-1|
    RNURT     Rate of N uptake in roots                         Y   |kg N ha-1 d-1|
    RNUSO     Rate of N uptake in storage organs                Y   |kg N ha-1 d-1|

    RPULV     Rate of P uptake in leaves                        Y   |kg P ha-1 d-1|
    RPUST     Rate of P uptake in stems                         Y   |kg P ha-1 d-1|
    RPURT     Rate of P uptake in roots                         Y   |kg P ha-1 d-1|
    RPUSO     Rate of P uptake in storage organs                Y   |kg P ha-1 d-1|

    RKULV     Rate of K uptake in leaves                        Y   |kg K ha-1 d-1|
    RKUST     Rate of K uptake in stems                         Y   |kg K ha-1 d-1|
    RKURT     Rate of K uptake in roots                         Y   |kg K ha-1 d-1|
    RKUSO     Rate of K uptake in storage organs                Y   |kg K ha-1 d-1|

    RNUPTAKE  Total rate of N uptake                            Y   |kg N ha-1 d-1|
    RPUPTAKE  Total rate of P uptake                            Y   |kg P ha-1 d-1|
    RKUPTAKE  Total rate of K uptake                            Y   |kg K ha-1 d-1|
    RNFIX     Rate of N fixation                                Y   |kg K ha-1 d-1|
    =======  ================================================= ==== ================

    **Signals send or handled**

    None

    **External dependencies**

    ================  =================================== ====================  ===========
     Name              Description                         Provided by            Unit
    ================  =================================== ====================  ===========
    DVS               Crop development stage              DVS_Phenology           -
    TRA               Crop transpiration                  Evapotranspiration     |cm d-1|
    TRAMX             Potential crop transpiration        Evapotranspiration     |cm d-1|
    NAVAIL            Total available N from soil         NPK_Soil_Dynamics      |kg ha-1|
    PAVAIL            Total available P from soil         NPK_Soil_Dynamics      |kg ha-1|
    KAVAIL            Total available K from soil         NPK_Soil_Dynamics      |kg ha-1|
    NTRANSLOCATABLE   Translocatable amount of N from     NPK_Translocation      |kg ha-1|
                      stems, Leaves and roots
    PTRANSLOCATABLE   As for P                            NPK_Translocation      |kg ha-1|
    KTRANSLOCATABLE   As for K                            NPK_Translocation      |kg ha-1|
    ================  =================================== ====================  ===========
    """

    class Parameters(ParamTemplate):
        NMAXLV_TB = AfgenTrait()  # maximum N concentration in leaves as function of dvs
        PMAXLV_TB = AfgenTrait()  # maximum P concentration in leaves as function of dvs
        KMAXLV_TB = AfgenTrait()  # maximum K concentration in leaves as function of dvs
        NMAXRT_FR = Float(-99.)  # maximum N concentration in roots as fraction of maximum N concentration in leaves
        PMAXRT_FR = Float(-99.)  # maximum P concentration in roots as fraction of maximum P concentration in leaves
        KMAXRT_FR = Float(-99.)  # maximum K concentration in roots as fraction of maximum K concentration in leaves
        NMAXST_FR = Float(-99.)  # maximum N concentration in stems as fraction of maximum N concentration in leaves
        PMAXST_FR = Float(-99.)  # maximum P concentration in stems as fraction of maximum P concentration in leaves
        KMAXST_FR = Float(-99.)  # maximum K concentration in stems as fraction of maximum K concentration in leaves
        NMAXSO = Float(-99.)  # maximum N concentration in storage organs [kg N kg-1 dry biomass]
        PMAXSO = Float(-99.)  # maximum P concentration in storage organs [kg P kg-1 dry biomass]
        KMAXSO = Float(-99.)  # maximum K concentration in storage organs [kg K kg-1 dry biomass]
        TCNT = Float(-99.)  # time coefficient for N translocation to storage organs [days]
        TCPT = Float(-99.)  # time coefficient for P translocation to storage organs [days]
        TCKT = Float(-99.)  # time coefficient for K translocation to storage organs [days]
        NFIX_FR = Float(-99.)  # fraction of crop nitrogen uptake by biological fixation
        DVS_NPK_STOP = Float(-99.)  # development stage above which no crop N-P-K uptake occurs

    class StateVariables(StatesTemplate):
        # N/P/K demand per organ [kg ha-1]
        NDEMLV = Float(-99.)
        NDEMST = Float(-99.)
        NDEMRT = Float(-99.)
        NDEMSO = Float(-99.)

        PDEMLV = Float(-99.)
        PDEMST = Float(-99.)
        PDEMRT = Float(-99.)
        PDEMSO = Float(-99.)

        KDEMLV = Float(-99.)
        KDEMST = Float(-99.)
        KDEMRT = Float(-99.)
        KDEMSO = Float(-99.)

    class RateVariables(RatesTemplate):
        RNULV = Float(-99.)  # N uptake rate [kg ha-1 d -1]
        RNUST = Float(-99.)
        RNURT = Float(-99.)
        RNUSO = Float(-99.)

        RPULV = Float(-99.)  # P uptake rate [kg ha-1 d -1]
        RPUST = Float(-99.)
        RPURT = Float(-99.)
        RPUSO = Float(-99.)

        RKULV = Float(-99.)  # K uptake rate [kg ha-1 d -1]
        RKUST = Float(-99.)
        RKURT = Float(-99.)
        RKUSO = Float(-99.)

        RNUPTAKE = Float(-99.)  # Total N uptake rate [kg ha-1 d -1]
        RPUPTAKE = Float(-99.)
        RKUPTAKE = Float(-99.)
        RNFIX = Float(-99.)

    def initialize(self, day, kiosk, parvalues):
        """
        :param day: start date of the simulation
        :param kiosk: variable kiosk of this PCSE instance
        :param parvalues: a ParameterProvider with parameter key/value pairs
        """
        self.params = self.Parameters(parvalues)
        self.kiosk = kiosk

        # All uptake rates are published so other components (e.g. soil and
        # translocation modules) can read them from the kiosk.
        self.rates = self.RateVariables(kiosk,
            publish=["RNULV", "RNUST", "RNURT", "RNUSO",
                     "RPULV", "RPUST", "RPURT", "RPUSO",
                     "RKULV", "RKUST", "RKURT", "RKUSO",
                     "RNUPTAKE", "RPUPTAKE", "RKUPTAKE", "RNFIX"])

        self.states = self.StateVariables(kiosk,
            NDEMLV=0., NDEMST=0., NDEMRT=0., NDEMSO=0.,
            PDEMLV=0., PDEMST=0., PDEMRT=0., PDEMSO=0.,
            KDEMLV=0., KDEMST=0., KDEMRT=0., KDEMSO=0.)

    @prepare_rates
    def calc_rates(self, day, drv):
        # Compute the daily N/P/K uptake rates from soil supply, crop demand
        # and (for N) biological fixation.
        r = self.rates
        s = self.states
        p = self.params

        NAVAIL = self.kiosk["NAVAIL"]  # total mineral N from soil and fertiliser [kg ha-1]
        PAVAIL = self.kiosk["PAVAIL"]  # total mineral P from soil and fertiliser [kg ha-1]
        KAVAIL = self.kiosk["KAVAIL"]  # total mineral K from soil and fertiliser [kg ha-1]

        TRA = self.kiosk["TRA"]
        TRAMX = self.kiosk["TRAMX"]
        DVS = self.kiosk["DVS"]

        NTRANSLOCATABLE = self.kiosk["NTRANSLOCATABLE"]  # N supply to storage organs [kg ha-1]
        PTRANSLOCATABLE = self.kiosk["PTRANSLOCATABLE"]  # P supply to storage organs [kg ha-1]
        KTRANSLOCATABLE = self.kiosk["KTRANSLOCATABLE"]  # K supply to storage organs [kg ha-1]

        # total NPK demand of leaves, stems and roots
        NDEMTO = s.NDEMLV + s.NDEMST + s.NDEMRT
        PDEMTO = s.PDEMLV + s.PDEMST + s.PDEMRT
        KDEMTO = s.KDEMLV + s.KDEMST + s.KDEMRT

        # NPK uptake rate in storage organs (kg N ha-1 d-1)
        # is the mimimum of supply and demand divided by the
        # time coefficient for N/P/K translocation
        r.RNUSO = min(s.NDEMSO, NTRANSLOCATABLE)/p.TCNT
        r.RPUSO = min(s.PDEMSO, PTRANSLOCATABLE)/p.TCPT
        r.RKUSO = min(s.KDEMSO, KTRANSLOCATABLE)/p.TCKT

        # No nutrients are absorbed after development stage DVS_NPK_STOP or
        # when severe water shortage occurs i.e. TRANRF <= 0.01
        TRANRF = TRA/TRAMX
        if DVS < p.DVS_NPK_STOP and TRANRF > 0.01:
            NutrientLIMIT = 1.0
        else:
            NutrientLIMIT = 0.

        # biological nitrogen fixation supplies a fixed fraction of N demand
        r.RNFIX = (max(0., p.NFIX_FR * NDEMTO) * NutrientLIMIT)

        # NPK uptake rate from soil: limited by both remaining demand and
        # soil availability
        r.RNUPTAKE = (max(0., min(NDEMTO - r.RNFIX, NAVAIL)) * NutrientLIMIT)
        r.RPUPTAKE = (max(0., min(PDEMTO, PAVAIL)) * NutrientLIMIT)
        r.RKUPTAKE = (max(0., min(KDEMTO, KAVAIL)) * NutrientLIMIT)

        # NPK uptake rate per organ: total uptake distributed proportionally
        # to each organ's share of the total demand.
        # if no demand then uptake rate = 0.
        if NDEMTO == 0.:
            r.RNULV = r.RNUST = r.RNURT = 0.
        else:
            r.RNULV = (s.NDEMLV / NDEMTO) * (r.RNUPTAKE + r.RNFIX)
            r.RNUST = (s.NDEMST / NDEMTO) * (r.RNUPTAKE + r.RNFIX)
            r.RNURT = (s.NDEMRT / NDEMTO) * (r.RNUPTAKE + r.RNFIX)

        if PDEMTO == 0.:
            r.RPULV = r.RPUST = r.RPURT = 0.
        else:
            r.RPULV = (s.PDEMLV / PDEMTO) * r.RPUPTAKE
            r.RPUST = (s.PDEMST / PDEMTO) * r.RPUPTAKE
            r.RPURT = (s.PDEMRT / PDEMTO) * r.RPUPTAKE

        if KDEMTO == 0.:
            r.RKULV = r.RKUST = r.RKURT = 0.
        else:
            r.RKULV = (s.KDEMLV / KDEMTO) * r.RKUPTAKE
            r.RKUST = (s.KDEMST / KDEMTO) * r.RKUPTAKE
            r.RKURT = (s.KDEMRT / KDEMTO) * r.RKUPTAKE

    @prepare_states
    def integrate(self, day, delt=1.0):
        # Recompute the N/P/K demand states from current organ weights and
        # the maximum allowed concentration per organ.
        states = self.states

        # published states from the kiosk
        DVS = self.kiosk["DVS"]
        WLV = self.kiosk["WLV"]
        WST = self.kiosk["WST"]
        WRT = self.kiosk["WRT"]
        WSO = self.kiosk["WSO"]

        ANLV = self.kiosk["ANLV"]
        ANST = self.kiosk["ANST"]
        ANRT = self.kiosk["ANRT"]
        ANSO = self.kiosk["ANSO"]

        APLV = self.kiosk["APLV"]
        APST = self.kiosk["APST"]
        APRT = self.kiosk["APRT"]
        APSO = self.kiosk["APSO"]

        AKLV = self.kiosk["AKLV"]
        AKST = self.kiosk["AKST"]
        AKRT = self.kiosk["AKRT"]
        AKSO = self.kiosk["AKSO"]

        params = self.params

        # Maximum NPK concentrations in leaves [kg N kg-1 DM]
        NMAXLV = params.NMAXLV_TB(DVS)
        PMAXLV = params.PMAXLV_TB(DVS)
        KMAXLV = params.KMAXLV_TB(DVS)

        # Maximum NPK concentrations in stems and roots [kg N kg-1 DM]
        NMAXST = params.NMAXST_FR * NMAXLV
        NMAXRT = params.NMAXRT_FR * NMAXLV
        NMAXSO = params.NMAXSO

        PMAXST = params.PMAXST_FR * PMAXLV
        PMAXRT = params.PMAXRT_FR * PMAXLV
        PMAXSO = params.PMAXSO

        KMAXST = params.KMAXST_FR * KMAXLV
        KMAXRT = params.KMAXRT_FR * KMAXLV
        KMAXSO = params.KMAXSO

        # N demand [kg ha-1]: gap between the maximum possible amount and
        # the amount currently present in each organ (never negative)
        states.NDEMLV = max(NMAXLV*WLV - ANLV, 0.)  # maybe should be divided by one day, see equation 5 Shibu etal 2010
        states.NDEMST = max(NMAXST*WST - ANST, 0.)
        states.NDEMRT = max(NMAXRT*WRT - ANRT, 0.)
        states.NDEMSO = max(NMAXSO*WSO - ANSO, 0.)

        # P demand [kg ha-1]
        states.PDEMLV = max(PMAXLV*WLV - APLV, 0.)
        states.PDEMST = max(PMAXST*WST - APST, 0.)
        states.PDEMRT = max(PMAXRT*WRT - APRT, 0.)
        states.PDEMSO = max(PMAXSO*WSO - APSO, 0.)

        # K demand [kg ha-1]
        states.KDEMLV = max(KMAXLV*WLV - AKLV, 0.)
        states.KDEMST = max(KMAXST*WST - AKST, 0.)
        states.KDEMRT = max(KMAXRT*WRT - AKRT, 0.)
        states.KDEMSO = max(KMAXSO*WSO - AKSO, 0.)
| jajberni/pcse_web | main/pcse/crop/nutrients/npk_demand_uptake.py | Python | apache-2.0 | 17,101 |
import json
import time
import git
import discord
import os
import aiohttp
from cogs.utils.dataIO import dataIO
from urllib.parse import quote as uriquote
try:
from lxml import etree
except ImportError:
from bs4 import BeautifulSoup
from urllib.parse import parse_qs, quote_plus
#from cogs.utils import common
# @common.deprecation_warn()
def load_config():
    """Return the selfbot's main settings from settings/config.json."""
    with open('settings/config.json') as cfg:
        return json.load(cfg)
# @common.deprecation_warn()
def load_optional_config():
    """Return the optional settings from settings/optional_config.json."""
    with open('settings/optional_config.json') as cfg:
        return json.load(cfg)
# @common.deprecation_warn()
def load_moderation():
    """Return the moderation settings from settings/moderation.json."""
    with open('settings/moderation.json', 'r') as f:
        return json.load(f)
# @common.deprecation_warn()
def load_notify_config():
    """Return the notification settings from settings/notify.json."""
    with open('settings/notify.json', 'r') as f:
        return json.load(f)
# @common.deprecation_warn()
def load_log_config():
    """Return the logging settings from settings/log.json."""
    with open('settings/log.json', 'r') as f:
        return json.load(f)
def has_passed(oldtime):
    """Return the current time if at least 20 seconds have elapsed since
    *oldtime*, otherwise False."""
    if time.time() - 20.0 < oldtime:
        return False
    return time.time()
def set_status(bot):
    """Translate the bot's configured default_status string into a
    discord.Status value (anything unrecognised maps to invisible)."""
    status_map = {
        'idle': discord.Status.idle,
        'dnd': discord.Status.dnd,
    }
    return status_map.get(bot.default_status, discord.Status.invisible)
def user_post(key_users, user):
    """Per-user cooldown check for keyword notifications.

    :param key_users: mapping of user -> [last_post_time, cooldown_seconds].
    :param user: key into key_users.
    :return: (allowed (bool), [timestamp, cooldown]). When allowed, the new
        timestamp is also persisted to settings/log.json.
    """
    if time.time() - float(key_users[user][0]) < float(key_users[user][1]):
        # Still inside the cooldown window; do not persist anything.
        return False, [time.time(), key_users[user][1]]
    else:
        log = dataIO.load_json("settings/log.json")
        now = time.time()
        log["keyusers"][user] = [now, key_users[user][1]]
        dataIO.save_json("settings/log.json", log)
        return True, [now, key_users[user][1]]
def gc_clear(gc_time):
    """Return a fresh timestamp once an hour has passed since *gc_time*,
    otherwise False."""
    return False if time.time() - 3600.0 < gc_time else time.time()
def game_time_check(oldtime, interval):
    """Return the current time once *interval* seconds have elapsed since
    *oldtime*, otherwise False (used to pace game-status rotation)."""
    due = time.time() - float(interval) >= oldtime
    return time.time() if due else False
def avatar_time_check(oldtime, interval):
    """Return the current time once *interval* seconds have elapsed since
    *oldtime*, otherwise False (used to pace avatar rotation)."""
    if time.time() - float(interval) >= oldtime:
        return time.time()
    return False
def update_bot(message):
    """Check the git remote for selfbot updates.

    :param message: truthy for an interactive check (returns False when the
        repo is current); False forces a changelog of the last 4 commits.
    :return: False when up to date (interactive mode), otherwise a
        discord.Embed describing up to 4 pending commits.
    """
    g = git.cmd.Git(working_dir=os.getcwd())
    branch = g.execute(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    g.execute(["git", "fetch", "origin", branch])
    update = g.execute(["git", "remote", "show", "origin"])
    if ('up to date' in update or 'fast-forward' in update) and message:
        return False
    else:
        if message is False:
            version = 4
        else:
            # number of commits the remote branch is ahead of the local one
            version = g.execute(["git", "rev-list", "--right-only", "--count", "{0}...origin/{0}".format(branch)])
        version = description = str(int(version))
        # cap the changelog at 4 entries
        if int(version) > 4:
            version = "4"
        commits = g.execute(["git", "rev-list", "--max-count={0}".format(version), "origin/{0}".format(branch)])
        commits = commits.split('\n')
        em = discord.Embed(color=0x24292E, title='Latest changes for the selfbot:', description='{0} release(s) behind.'.format(description))
        for i in range(int(version)):
            i = i - 1  # Change i to i -1 to let the formatters below work
            title = g.execute(["git", "log", "--format=%ar", "-n", "1", commits[i]])
            field = g.execute(["git", "log", "--pretty=oneline", "--abbrev-commit", "--shortstat", commits[i], "^{0}".format(commits[i + 1])])
            field = field[8:].strip()
            link = 'https://github.com/appu1232/Discord-Selfbot/commit/%s' % commits[i]
            em.add_field(name=title, value='{0}\n[Code changes]({1})'.format(field, link), inline=False)
        em.set_thumbnail(url='https://image.flaticon.com/icons/png/512/25/25231.png')
        em.set_footer(text='Full project: https://github.com/appu1232/Discord-Selfbot')
        return em
def cmd_prefix_len():
    """Length of the configured command prefix string."""
    return len(load_config()['cmd_prefix'])
def embed_perms(message):
    """Check whether embeds may be posted in the message's channel.

    Defaults to True when permissions cannot be resolved (e.g. objects
    without ``permissions_in``, such as DM contexts).
    :param message: discord.Message to check.
    :return: bool
    """
    try:
        return message.author.permissions_in(message.channel).embed_links
    except AttributeError:
        # Narrowed from a bare ``except``: only a missing attribute should
        # fall back to True; any other failure now surfaces.
        return True
def get_user(message, user):
    """Resolve a guild member from a message.

    Resolution order: first mention in the message, then member lookup by
    name, then by numeric id.
    :param message: discord.Message providing mentions and guild context.
    :param user: user-supplied name or id string.
    :return: the matching member, or None.
    """
    try:
        member = message.mentions[0]
    except:
        member = message.guild.get_member_named(user)
    if not member:
        try:
            member = message.guild.get_member(int(user))
        except ValueError:
            # `user` was not numeric; give up on the id lookup
            pass
    if not member:
        return None
    return member
def find_channel(channel_list, text):
    """Resolve a channel from raw user input.

    Accepts a bare numeric id, a channel mention (``<#1234>``) or a
    channel name.
    :param channel_list: iterable of channel objects to search.
    :param text: user-supplied identifier (string).
    :return: the matching channel, or None.
    """
    if text.isdigit():
        found_channel = discord.utils.get(channel_list, id=int(text))
    elif text.startswith("<#") and text.endswith(">"):
        # Bug fix: channel ids are ints in discord.py, so the stripped
        # mention must be converted -- comparing a str against an int id
        # previously never matched.
        found_channel = discord.utils.get(
            channel_list,
            id=int(text.replace("<", "").replace(">", "").replace("#", "")))
    else:
        found_channel = discord.utils.get(channel_list, name=text)
    return found_channel
async def get_google_entries(query):
    """Scrape Google search results for *query*.

    Parses the result page with lxml when available, falling back to
    BeautifulSoup (see the module-level import guard). If Google refuses the
    scrape (non-200), falls back to the Custom Search API using keys from
    optional_config.
    :param query: search terms (string).
    :return: (list of result URLs, parsed document root), or
        (None, first API result URL) on the API fallback path.
    """
    url = 'https://www.google.com/search?q={}'.format(uriquote(query))
    params = {
        'safe': 'off',
        'lr': 'lang_en',
        'h1': 'en'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
    }

    entries = []

    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params, headers=headers) as resp:
            if resp.status != 200:
                # Scrape blocked: use the Custom Search API instead
                config = load_optional_config()
                async with session.get("https://www.googleapis.com/customsearch/v1?q=" + quote_plus(query) + "&start=" + '1' + "&key=" + config['google_api_key'] + "&cx=" + config['custom_search_engine']) as resp:
                    result = json.loads(await resp.text())
                return None, result['items'][0]['link']
            try:
                # lxml path (etree only exists if the lxml import succeeded)
                root = etree.fromstring(await resp.text(), etree.HTMLParser())
                search_nodes = root.findall(".//div[@class='g']")
                for node in search_nodes:
                    url_node = node.find('.//h3/a')
                    if url_node is None:
                        continue
                    url = url_node.attrib['href']
                    if not url.startswith('/url?'):
                        continue
                    # result links are wrapped as /url?q=<target>&...
                    url = parse_qs(url[5:])['q'][0]
                    entries.append(url)
            except NameError:
                # BeautifulSoup fallback path
                root = BeautifulSoup(await resp.text(), 'html.parser')
                for result in root.find_all("div", class_='g'):
                    url_node = result.find('h3')
                    if url_node:
                        for link in url_node.find_all('a', href=True):
                            url = link['href']
                            if not url.startswith('/url?'):
                                continue
                            url = parse_qs(url[5:])['q'][0]
                            entries.append(url)
    return entries, root
def attach_perms(message):
    """Return True if files may be attached in the message's channel."""
    return message.author.permissions_in(message.channel).attach_files
def parse_prefix(bot, text):
    """Substitute prefix placeholders in *text*.

    ``[c]`` becomes the command prefix (first entry when a list is
    configured) and ``[b]`` becomes the bot prefix.
    :param bot: bot instance exposing cmd_prefix and bot_prefix.
    :param text: template string containing the placeholders.
    :return: text with placeholders replaced.
    """
    prefix = bot.cmd_prefix
    # isinstance instead of `type(...) is list` (idiomatic type check)
    if isinstance(prefix, list):
        prefix = prefix[0]
    return text.replace("[c]", prefix).replace("[b]", bot.bot_prefix)
| Bluscream/Discord-Selfbot | cogs/utils/checks.py | Python | gpl-3.0 | 7,337 |
""" Class to read binary information using various predefined types. """
#
# Copyright (c) 2007 Michael van Tellingen <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Only implement required information to retrieve video and audio information
import datetime
import struct
import cStringIO
from videoparser.streams import endian
class BinaryStream(object):
def __init__(self, fileobj, filesize, endianess=endian.little):
self._endianess = endianess
self._fileobj = fileobj
self._filesize = filesize
def __del__(self):
self.close()
def read(self, length):
if not length:
return ''
return self._fileobj.read(length)
def tell(self):
return self._fileobj.tell()
def seek(self, position):
return self._fileobj.seek(position)
def close(self):
return self._fileobj.close()
def bytes_left(self):
return self._fileobj.tell() < self._filesize
def set_endianess(self, endianess):
self._endianess = endianess
def get_endianess(self):
return self._endianess
def unpack(self, type, length):
""" Shorthand for unpack which uses the endianess defined with
set_endianess(), used internally."""
data = self.read(length)
assert len(data) == length, "Unexpected end of stream"
try:
if self._endianess == endian.big:
return struct.unpack('>' + type, data)[0]
else:
return struct.unpack('<' + type, data)[0]
except struct.error:
print len(data)
print "Unable to unpack '%r'" % data
raise
def read_float(self):
""" Read a 32bit float."""
return self.unpack('f', 4)
def read_qtfloat_32(self):
""" Read a 32bits quicktime float."""
# This comes from hachoir
return self.read_int16() + float(self.read_uint16()) /65535
def read_qt_ufloat32(self):
""" Read a 32bits quicktime float."""
# This comes from hachoir
return self.read_uint16() + float(self.read_uint16()) /65535
def read_uint64(self):
""" Read an unsigned 64bit integer."""
return self.unpack('Q', 8)
def read_int64(self):
""" Read an signed 64bit integer."""
return self.unpack('q', 4)
def read_uint32(self):
""" Read an unsigned 32bit integer."""
return self.unpack('I', 4)
def read_int32(self):
""" Read an signed 32bit integer."""
return self.unpack('i', 4)
def read_uint16(self):
""" Read an unsigned 16bit integer."""
return self.unpack('H', 2)
def read_int16(self):
""" Read an signed 16bit integer."""
return self.unpack('h', 2)
def read_uint8(self):
""" Read an unsigned 8bit integer."""
return ord(self.read(1))
def read_int8(self):
""" Read a signed 8bit integer."""
return struct.unpack('b', self.read(1))[0]
def read_dword(self):
return self.read(4)
def read_word(self):
return self.read(2)
def read_qword(self):
return self.read(8)
def read_byte(self):
return self.read(1)
def read_fourcc(self):
return self.read(4)
def read_timestamp_mac(self):
""" Read a timestamp in mac format (seconds sinds 1904) """
timestamp_base = datetime.datetime(1904, 1, 1, 0, 0)
timestamp_value = datetime.timedelta(seconds=self.read_uint32())
return timestamp_base + timestamp_value
def read_timestamp_win(self):
timestamp_base = datetime.datetime(1601, 1, 1, 0, 0, 0)
timestamp_value = datetime.timedelta(
microseconds=self.read_uint64()/10)
return timestamp_base + timestamp_value
# TODO: FIXME
def read_wchars(self, len, null_terminated=False):
data = self.read(len * 2)
# String is null terminated, remove the null char
if null_terminated:
data = data[:-2]
if self._endianess == endian.big:
return unicode(data, "UTF-16-BE")
else:
return unicode(data, "UTF-16-LE")
def read_subsegment(self, length):
data = self.read(length)
return BinaryStream(cStringIO.StringIO(data), len(data),
self._endianess)
def convert_uintvar(self, data, endianess=None):
""" Convert a string of variable length to an integer """
# using struct.unpack is twice as fast as this function, however
# it's not flexible enough
if endianess is None:
endianess = self._endianess
if endianess == endian.big:
data = data[::-1]
mask = 0
value = ord(data[0])
for octet in data[1:]:
mask += 8
value += (ord(octet) << mask)
return value
# ASF Specification requires the guid type, which is 128 bits aka 16 bytes
def read_guid(self):
# See http://www.ietf.org/rfc/rfc4122.txt for specification
# The version number is in the most significant 4 bits of the time
# stamp (bits 4 through 7 of the time_hi_and_version field).
# Python 2.5 includes a built-in guid module, which should be used
# retrieve version
position = self.tell()
self.seek(position + 6)
version = self.read_uint16() >> 12
self.seek(position)
#print repr([hex(ord(x)) for x in self.read(16)])
self.seek(position)
time_low = self.read_uint32()
time_mid = self.read_uint16()
time_hi = self.read_uint16()
clock_seq_hi = self.read_uint8()
clock_seq_low = self.read_uint8()
node = self.read(6)
#print "uuid version = %d - %X" % (version, time_low)
if version == 1:
node = self.convert_uintvar(node, endian.big)
else:
node = self.convert_uintvar(node, endian.big)
return "%08X-%04X-%04X-%X%X-%012X" % (time_low,
time_mid,
time_hi,
clock_seq_hi,
clock_seq_low,
node)
def read_waveformatex(self):
obj = self.WAVEFORMATEX()
obj.codec_id = self.read_uint16()
obj.channels = self.read_uint16()
obj.sample_rate = self.read_uint32()
obj.bit_rate = self.read_uint32()
obj.block_alignment = self.read_uint16()
obj.bits_per_sample = self.read_uint16()
obj.codec_size = self.read_uint16()
obj.codec_data = self.read_subsegment(obj.codec_size)
return obj
def read_bitmapinfoheader(self):
obj = self.BITMAPINFOHEADER()
obj.format_data_size = self.read_uint32()
obj.image_width = self.read_uint32()
obj.image_height = self.read_uint32()
obj.reserved = self.read_uint16()
obj.bpp = self.read_uint16()
obj.compression_id = self.read(4)
obj.image_size = self.read_uint32()
obj.h_pixels_meter = self.read_uint32()
obj.v_pixels_meter = self.read_uint32()
obj.colors = self.read_uint32()
obj.important_colors = self.read_uint32()
obj.codec_data = self.read_subsegment(obj.format_data_size -
40)
return obj
class BITMAPINFOHEADER(object):
def __repr__(self):
buffer = "BITMAPINFOHEADER structure: \n"
buffer += " %-35s : %s\n" % ("Format Data Size", self.format_data_size)
buffer += " %-35s : %s\n" % ("Image Width", self.image_width)
buffer += " %-35s : %s\n" % ("Image Height", self.image_height)
buffer += " %-35s : %s\n" % ("Reserved", self.reserved)
buffer += " %-35s : %s\n" % ("Bits Per Pixel Count", self.bpp)
buffer += " %-35s : %s\n" % ("Compression ID", self.compression_id)
buffer += " %-35s : %s\n" % ("Image Size", self.image_size)
buffer += " %-35s : %s\n" % ("Horizontal Pixels Per Meter", self.h_pixels_meter)
buffer += " %-35s : %s\n" % ("Vertical Pixels Per Meter", self.v_pixels_meter)
buffer += " %-35s : %s\n" % ("Colors Used Count", self.colors)
buffer += " %-35s : %s\n" % ("Important Colors Count", self.important_colors)
buffer += " %-35s : %s\n" % ("Codec Specific Data", self.codec_data)
return buffer
# Used in ASF and AVI parser, contains audio information
class WAVEFORMATEX(object):
codec_ids = {
0x2004: "A_REAL/COOK",
0x2003: "A_REAL/28_8",
0x2002: "A_REAL/14_4",
0x0130: "A_REAL/SIPR",
0x0270: "A_REAL/ATRC",
0x2001: "A_DTS",
0x2000: "A_AC3",
0x162: "WMAP",
0x161: "WMA2",
0x160: "WMA2",
0x50: "MP2",
0x55: "MP3",
0x1: "A_PCM/INT/LIT",
'unknown': "???",
}
def __repr__(self):
buffer = "WAVEFORMATEX structure: \n"
buffer += " %-35s : %s\n" % ("Codec ID / Format Tag", self.codec_id)
buffer += " %-35s : %s\n" % ("Number of Channels", self.channels)
buffer += " %-35s : %s\n" % ("Samples Per Second", self.sample_rate)
buffer += " %-35s : %s\n" % ("Average Number of Bytes Per Second", self.bit_rate)
buffer += " %-35s : %s\n" % ("Block Alignment", self.block_alignment)
buffer += " %-35s : %s\n" % ("Bits Per Sample", self.bits_per_sample)
buffer += " %-35s : %s\n" % ("Codec Specific Data Size",self.codec_size)
buffer += " %-35s : %s\n" % ("Codec Specific Data", repr(self.codec_data))
return buffer
| kevdes/videoparser | streams/binary.py | Python | cc0-1.0 | 11,649 |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | hjson
{
"json": "obj"
}
"""
from __future__ import with_statement
import sys
import hjson
import pkg_resources # part of setuptools
HELP="""Hjson, the Human JSON.
Usage:
hjson [options]
hjson [options] <input>
hjson (-h | --help)
hjson (-V | --version)
Options:
-h --help Show this screen.
-j Output as formatted JSON.
-c Output as JSON.
-V --version Show version.
""";
def showerr(msg):
    """Print *msg* to stderr, followed by a newline."""
    sys.stderr.write("%s\n" % msg)
def main():
    """Entry point: parse argv, read Hjson from stdin or a file, and write
    it back out in the requested format (hjson, formatted JSON, or compact
    JSON)."""
    format = 'hjson'
    args = []
    # Minimal hand-rolled option parsing (see HELP for the grammar)
    for arg in sys.argv[1:]:
        if arg == '-h' or arg == '--help':
            showerr(HELP)
            return
        elif arg == '-j': format = 'json'
        elif arg == '-c': format = 'compact'
        elif arg == '-V' or arg == '--version':
            showerr('Hjson ' + pkg_resources.require("Hjson")[0].version)
            return
        elif arg[0] == '-':
            showerr(HELP)
            raise SystemExit('unknown option ' + arg)
        else:
            args.append(arg)

    outfile = sys.stdout
    # Zero positional args: read stdin; one: read that file; more: error
    if len(args) == 0:
        infile = sys.stdin
    elif len(args) == 1:
        infile = open(args[0], 'r')
    else:
        showerr(HELP)
        raise SystemExit('unknown options')

    with infile:
        try:
            obj = hjson.load(infile, use_decimal=True)
        except ValueError:
            # Surface parse errors as a clean exit message
            raise SystemExit(sys.exc_info()[1])

    with outfile:
        if format == 'json':
            hjson.dumpJSON(obj, outfile, use_decimal=True, indent='  ')
        elif format == 'compact':
            hjson.dumpJSON(obj, outfile, use_decimal=True, separators=(',', ':'))
        else:
            hjson.dump(obj, outfile, use_decimal=True)
        outfile.write('\n')
# Allow direct execution as a script (e.g. `python -m hjson.tool`)
if __name__ == '__main__':
    main()
| mnahm5/django-estore | Lib/site-packages/hjson/tool.py | Python | mit | 1,894 |
#!/usr/bin/python
import json
import requests
class AttrDict(dict):
    """Dictionary whose keys are also accessible as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self
class API(object):
    """Thin JSON REST client (Python 2) with client-certificate auth.

    Because some API payload keys ("from", "items") collide with Python
    keywords / dict methods when exposed as attributes on AttrDict, they are
    renamed with a leading underscore on decode and restored on encode."""

    # payload keys that must be prefixed with "_" on the Python side
    reserved = ["from", "items"]

    def __init__(self, _url, _cert):
        (self.url, self.cert) = (_url, _cert)

    @staticmethod
    def decode(o):
        # Recursively turn parsed JSON into AttrDicts, renaming reserved
        # keys to their underscore-prefixed aliases.
        if isinstance(o, list):
            o = [API.decode(v) for v in o]
        elif isinstance(o, dict):
            o = AttrDict({k: API.decode(o[k]) for k in o})
            for r in API.reserved:
                if r in o:
                    o["_" + r] = o[r]
                    del o[r]
        return o

    @staticmethod
    def encode(o):
        # Inverse of decode(): restore reserved keys before serialisation.
        if isinstance(o, list):
            o = [API.encode(v) for v in o]
        elif isinstance(o, dict):
            o = AttrDict({k: API.encode(o[k]) for k in o})
            for r in API.reserved:
                if "_" + r in o:
                    o[r] = o["_" + r]
                    del o["_" + r]
        return o

    def request(self, method, url, raw, **kwargs):
        # Serialise any payload, issue the request, and either return the
        # raw bytes or the decoded JSON. Non-2xx responses raise.
        if "data" in kwargs:
            kwargs["data"] = json.dumps(API.encode(kwargs["data"]))

        print "%-6s %s" % (method, self.url + url)
        r = requests.request(method, self.url + url, cert=self.cert, **kwargs)
        if r.status_code / 100 != 2:
            raise Exception(r.text)

        if raw:
            return r.content
        else:
            return API.decode(json.loads(r.text))

    def delete(self, url, raw=False):
        return self.request("DELETE", url, raw)

    def get(self, url, raw=False):
        return self.request("GET", url, raw)

    def post(self, url, data, raw=False):
        return self.request("POST", url, raw, data=data)

    def put(self, url, data, raw=False):
        return self.request("PUT", url, raw, data=data)
| RedHatEMEA/aws-ose3 | target/k8s.py | Python | apache-2.0 | 1,886 |
#!/usr/bin/env python
'''
Copyright (c) 2012 Jeremy Parks ( xanthic.9478 )
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Purpose: Generates a crafting guide based on current market prices
'''
import urllib, json, time, threading, datetime, math
import items_l, name_list
from Queue import Queue
from collections import defaultdict
from ftplib import FTP
# FTP Login
ftp_url = "text"
ftp_user = "goes"
ftp_pass = "here"
# Dictionary of all the items we need prices of
recipeDict = items_l.items
# list of items we compute the cost of that is used by every craft
'''
Dictionary structure
Key: tier(t1,t2,t3,t4,t5)
Key: ore, wood, cloth, leather, bone, claw, fang, scale, totem, venom, blood, ingot, plank, bolt, dowel, plated_dowel, thread, small_haft, large_haft, leather_section, string, lump(sometimes)
name: full name
cost: buy cost or computed make cost
recipe: items to build, or None for base items
'''
items = {}
insigs = {}
total_buy = defaultdict(int)
total_cost = 0
# Store our xp needed to reach each level of crafting
xp_to_level = [0]
#threaded function to get info about an item
class getItem(threading.Thread):
	"""Worker thread that fetches one item's market price.

	Tries gw2spidy first and falls back to guildwarstrade; retries
	forever (sleeping 1s between attempts) until one source answers.
	The fetched JSON is stored on self.result for get_result().
	"""
	def __init__(self,itemid,tier,sub_type,level,name):
		self.url1 = "http://www.gw2spidy.com/api/v0.9/json/item/"
		self.url2 = "http://www.guildwarstrade.com/api/public/item?id="
		self.itemid = itemid
		self.tier = tier
		self.sub_type = sub_type
		self.level = level    # recipe quality ('fine1'...); None for base materials
		self.nm = name        # short name key; only meaningful when level is set
		self.result = None
		threading.Thread.__init__(self)
	def get_result(self):
		# Tag the fetched price record with the lookup keys so the
		# consumer can file it into the right nested dict slot.
		self.result['result']['tier'] = self.tier
		self.result['result']['sub_type'] = self.sub_type
		if not self.level == None:
			self.result['result']['level'] = self.level
			self.result['result']['sname'] = self.nm
		return self.result
	# Function for Guildwarstrade prices: adapt its response to the
	# same shape gw2spidy returns so downstream code is source-agnostic.
	def gwt(self,result,item):
		f = json.load(result)
		self.result = {}
		self.result['result'] = {}
		self.result['result']['min_sale_unit_price'] = f['sell']
		self.result['result']['name'] = name_list.names[item]
		self.result['result']['data_id'] = item
	def run(self):
		# Retry loop: primary source, then backup, then sleep and repeat.
		while(1):
			try:
				f = urllib.urlopen(self.url1+self.itemid)
				self.result = json.load(f)
				break
			except Exception, err:
				# Primary failed; try the backup price site.
				print 'ERROR: %s. Trying backup website.\n' % str(err)
				try:
					f = urllib.urlopen(self.url2+self.itemid)
					self.gwt(f,self.itemid)
					break
				except Exception, err:
					print 'ERROR: %s. Backup website failed.\n' % str(err)
			time.sleep(1)
# Get our item data using threads and a Queue
def getItemDict():
	"""Fetch prices for every base material into the global `items` dict.

	A producer thread spawns one getItem worker per material and feeds
	it through a bounded Queue (size 3, throttling concurrent fetches);
	the consumer joins each worker and files the price under
	items[tier][sub_type].
	"""
	def producer(q):
		for tier in recipeDict:
			for sub_type in recipeDict[tier]:
				thread = getItem(recipeDict[tier][sub_type],tier,sub_type,None,None)
				thread.start()
				q.put(thread,True)
	def consumer(q):
		num = 1
		den = 0
		# Total number of fetches expected, for the progress counter.
		for tier in recipeDict:
			den += len(recipeDict[tier])
		while num <= den:
			thread = q.get(True)
			thread.join()
			tmp = thread.get_result()
			items.setdefault(tmp['result']['tier'],{})
			items[tmp['result']['tier']].setdefault(tmp['result']['sub_type'],{})
			items[tmp['result']['tier']][tmp['result']['sub_type']]['name'] = tmp['result']['name']
			items[tmp['result']['tier']][tmp['result']['sub_type']]['cost'] = tmp['result']['min_sale_unit_price']
			# Base materials are bought, never crafted.
			items[tmp['result']['tier']][tmp['result']['sub_type']]['recipe'] = None
			print str(num) +' of '+ str(den)
			num += 1
	q = Queue(3)
	p_thread = threading.Thread(target=producer, args=(q,))
	c_thread = threading.Thread(target=consumer, args=(q,))
	p_thread.start()
	c_thread.start()
	p_thread.join()
	c_thread.join()
# Get our insignia price data using threads and a Queue
def getInsigDict():
	"""Fetch buy prices for every insignia/inscription into `insigs`.

	Same bounded producer/consumer pattern as getItemDict(), but the
	lookup is four levels deep: insigs[tier][sub_type][level][name].
	"""
	def producer2(q):
		for tier in items_l.insig_list:
			for sub_type in items_l.insig_list[tier]:
				for level in items_l.insig_list[tier][sub_type]:
					for name in items_l.insig_list[tier][sub_type][level]:
						thread = getItem(items_l.insig_list[tier][sub_type][level][name],tier,sub_type,level,name)
						thread.start()
						q.put(thread,True)
	def consumer2(q):
		num = 1
		den = 0
		# Count every (tier, sub_type, level) entry for the progress counter.
		for tier in items_l.insig_list:
			for sub_type in items_l.insig_list[tier]:
				for level in items_l.insig_list[tier][sub_type]:
					den += len(items_l.insig_list[tier][sub_type][level])
		while num <= den:
			thread = q.get(True)
			thread.join()
			tmp = thread.get_result()
			tier = tmp['result']['tier']
			sub_type = tmp['result']['sub_type']
			level = tmp['result']['level']
			name = tmp['result']['sname']
			insigs.setdefault(tier,{})
			insigs[tier].setdefault(sub_type,{})
			insigs[tier][sub_type].setdefault(level,{})
			insigs[tier][sub_type][level].setdefault(name,{})
			insigs[tier][sub_type][level][name]['name'] = tmp['result']['name']
			insigs[tier][sub_type][level][name]['cost'] = tmp['result']['min_sale_unit_price']
			# These records are market buys; no crafting recipe attached.
			insigs[tier][sub_type][level][name]['recipe'] = None
			print str(num) +' of '+ str(den)
			num += 1
	q = Queue(3)
	p_thread = threading.Thread(target=producer2, args=(q,))
	c_thread = threading.Thread(target=consumer2, args=(q,))
	p_thread.start()
	c_thread.start()
	p_thread.join()
	c_thread.join()
# add some costs to the dict
def appendCosts():
	"""Add vendor-priced materials and derive crafted-component costs.

	First seeds `items` with fixed vendor prices (threads, lumps),
	then walks every tier computing the make-cost of each refined
	part, inscription/insignia, and finished item as the sum of its
	recipe ingredients' costs.  Order matters: parts before
	insignias before finished items, since each stage reads the
	costs filled in by the previous one.
	"""
	# Vendor-supplied materials with fixed copper prices.
	items['t1']['thread'] = {'name':'Spool of Jute Thread','cost':8,'recipe':None}
	items['t1']['lump'] = {'name':'Lump of Tin','cost':8,'recipe':None}
	items['t2']['thread'] = {'name':'Spool of Wool Thread','cost':16,'recipe':None}
	items['t3']['thread'] = {'name':'Spool of Cotton Thread','cost':24,'recipe':None}
	items['t3']['lump'] = {'name':'Lump of Coal','cost':16,'recipe':None}
	items['t4']['thread'] = {'name':'Spool of Linen Thread','cost':32,'recipe':None}
	items['t4']['lump'] = {'name':'Lump of Primordium','cost':48,'recipe':None}
	items['t5']['thread'] = {'name':'Spool of Silk Thread','cost':48,'recipe':None}
	# generate placeholders in items for parts
	for tier in items:
		# Refined components: cost = sum(ingredient count * ingredient cost).
		for o in ['ingot','plank','bolt','dowel','plated_dowel','leather_section','small_haft','large_haft','string']:
			items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.itec[tier][o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
		# Inscription/insignia slots per quality; rare only exists at t5.
		items[tier]['insc'] = {'fine1':{},'fine2':{},'master':{}}
		items[tier]['insig'] = {'fine1':{},'fine2':{},'master':{}}
		if tier == 't5':
			items[tier]['insc']['rare'] = {}
			items[tier]['insig']['rare'] = {}
		for typ in ['insc','insig']:
			for stier in items_l.itec[tier][typ]:
				for keyv in items_l.itec[tier][typ][stier]:
					items[tier][typ][stier][keyv] = {'name':tier+'_'+keyv,'cost':0,'recipe':items_l.itec[tier][typ][stier][keyv]}
					for o in items[tier][typ][stier][keyv]['recipe']:
						items[tier][typ][stier][keyv]['cost'] += items[tier][typ][stier][keyv]['recipe'][o]*items[tier][o]['cost']
		# Finished items per discipline (weaponcraft, huntsman, armorcraft,
		# leatherwork, tailor) costed from the parts computed above.
		for o in items_l.wc:
			items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.wc[o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
		for o in items_l.ht:
			items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.ht[o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
		for o in items_l.ac:
			items[tier][o] = {'name':tier+'_ac_'+o,'cost':0,'recipe':items_l.ac[o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
		for o in items_l.lw:
			items[tier][o] = {'name':tier+'_lw_'+o,'cost':0,'recipe':items_l.lw[o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
		for o in items_l.tl:
			items[tier][o] = {'name':tier+'_tl_'+o,'cost':0,'recipe':items_l.tl[o]}
			for t in items[tier][o]['recipe']:
				items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
# Format copper values so they are easier to read
def mFormat(line):
	"""Render a copper amount as a gold/silver/copper string.

	Each denomination is printed right-aligned in a two-character
	field; the gold and silver fields are omitted when the amount is
	too small to reach them (e.g. 12345 -> ' 1g23s45c', 99 -> '99c').
	Negative amounts keep a leading '-'.
	"""
	value = int(line)
	sign = ''
	if value < 0:
		sign = '-'
		value = -value
	digits = str(value)
	n = len(digits)
	pieces = []
	if n > 4:
		pieces.append("%2dg" % int(digits[:n - 4]))
	if n > 3:
		pieces.append('%2ds' % int(digits[n - 4:n - 2]))
	elif n == 3:
		pieces.append('%2ds' % int(digits[0:1]))
	if n == 1:
		pieces.append('%2dc' % value)
	else:
		pieces.append('%2dc' % int(digits[n - 2:]))
	return sign + ''.join(pieces)
def xpreq(level):
	"""XP required to go from crafting level *level* to level + 1.

	Level 1 costs 500 XP; each further level compounds the
	requirement by 1%, floored after every step.
	"""
	needed = 500
	for _ in range(1, level):
		needed = math.floor(needed * 1.01)
	return needed
def xpgain(_level, _type, _min):
	"""XP awarded for one craft performed at level *_level*.

	_type selects the craft kind: 1 = refinement, 2 = component
	part, 3 = finished item.  _min is the level at which the recipe
	became available; the reward decays linearly to zero over *span*
	levels past that point.
	"""
	span, mult = {
		1: (25.0, .3),   # refinement
		2: (25.0, .6),   # part
		3: (40.0, 1.4),  # item
	}[_type]
	# xp_gain(N) = xp_req(N+1) * multiplier * (1.0 - (N - N_min) / span)
	reward = xpreq(_level + 1) * mult * (1.0 - (_level - _min) / span)
	return math.ceil(reward)
# Crafting skill level at which each material tier begins.
base_level = {'t1':0,'t2':75,'t3':150,'t4':225,'t5':300}
# Skill-level offset of each recipe quality within a tier.
mod_level = {'fine1':0,'fine2':25,'master':50,'rare':75}
# Helper class to compute a tier of crafting
class craftTier:
	"""Plans one 75-level tier of crafting at minimum gold cost.

	Greedily picks, for each recipe quality inside the tier, the
	cheapest-per-XP crafts (comparing "craft the insignia" against
	"buy the insignia") until enough XP is accumulated to reach the
	next quality bracket, then expands everything chosen into a
	shopping list (`buy`) and a craft order (`make`/`log`).
	"""
	# takes current level and xp as input
	def __init__(self,xp):
		self.xp = xp          # XP carried in from the previous tier
		self.cost = 0         # accumulated copper cost for this tier
		self.log = []         # human-readable buy/make report lines
	# returns cost and make log for this tier
	def getResult(self):
		return self.cost, self.log, self.xp
	# compute what level would be after crafting items, assume order is refine > p_discovery > parts > discovery
	def compute_level(self,_xp, refine, part, discovery,tier_mult,tlvl):
		"""Simulate XP growth from _xp after the given craft counts.

		XP per craft depends on the current level, so the level is
		re-derived from xp_to_level after every simulated craft.
		tlvl is the recipe's minimum level; tier_mult scales the
		reward (discoveries get tier_mult + 1).
		"""
		level = tlvl
		while xp_to_level[level] < _xp:
			level += 1
		for i in range(0,int(refine)):
			_xp += tier_mult*xpgain(level,1,tlvl)
			while xp_to_level[level] < _xp:
				level += 1
		for i in range(0,part):
			_xp += tier_mult*xpgain(level,2,tlvl)
			while xp_to_level[level] < _xp:
				level += 1
		for i in range(0,discovery):
			_xp += (tier_mult+1)*xpgain(level,3,tlvl)
			while xp_to_level[level] < _xp:
				level += 1
		return _xp
	# calculate the xp per gold given the following items
	# cost || num refines/level, num parts/level1, num pars/level2, num insig/level(discovery is assumed same level)
	def xp_calc(self,refines,parts,plated,base_level,insig_level,mod):
		"""Total XP "weight" of one craft option, used to rank by XP/gold.

		NOTE(review): every term calls xpgain(..., 1, ...) i.e. the
		refinement type, including the parts/insignia/discovery
		terms — parts and discoveries arguably should use types 2
		and 3.  Kept as-is; confirm against the author's intent
		before changing, as it shifts the ranking.
		"""
		weight = 0.0
		weight += xpgain(base_level,1,base_level)*refines
		weight += xpgain(base_level,1,base_level)*parts
		weight += xpgain(base_level,1,base_level+50)*plated*2
		weight += xpgain(base_level,1,insig_level)*mod # insignia
		weight += xpgain(base_level,1,insig_level)*(1+mod) # discovery
		return weight
	# recursively compute the number of refinements in a recipe
	def calcRefine(self,recipe,count,tier):
		"""Count refinement crafts needed to produce `count` of `recipe`.

		Base (bought) materials contribute 0; t1 ingots are special
		because copper refines in batches of 5.
		"""
		if items[tier][recipe]['recipe'] == None:
			return 0.0
		if recipe == 'ingot' and tier == 't1':
			return count/5.0
		if recipe in ['ingot','plank','bolt','leather_section']:
			return count
		r = 0.0
		for item in items[tier][recipe]['recipe']:
			r += self.calcRefine(item,items[tier][recipe]['recipe'][item],tier)*count
		return r
	# compute the costs for our current tier
	# requires flag, tier and base item list
	def generateTier(self,pList, flag, tier):
		"""Choose and expand the cheapest crafts for one whole tier.

		pList is the discipline's finished-item recipe dict; flag is
		'insc' (weapons) or 'insig' (armor).  Fills self.cost,
		self.log and updates the module-level totals.
		"""
		# things to buy
		buy = defaultdict(int)
		# parts to make
		make = defaultdict(int)
		# what recipe qualities we are crafting in this tier
		lvls = ['fine1','fine2','master']
		if tier == 't5':
			lvls.append('rare')
		# dictionary to hold all our costs
		costs = {}
		# Dict to hold craft queue while we compute what to make
		craft_queue = {}
		#figure out the cost lists for the tiers we are working with
		for lvl in lvls:
			scost = {}
			for item in pList:
				# t1 helms/shoulders unlock later than the rest of the tier.
				if not (('helm' in item and tier == 't1' and lvl == 'fine1') or ('shoulder' in item and tier == 't1' and lvl in ['fine1','fine2'])):
					mod = 1.0
					refines = 1 # default for insignia
					parts = 0
					plated = 0
					if 'fine' in lvl:
						if flag == 'insc':
							parts = 1
					elif lvl == 'master':
						mod = 2.0
						if flag == 'insc':
							refines = 5
							plated = 1
					else: # lvl == 'rare':
						mod = 3.25
						if flag == 'insc':
							plated = 2
							refines = 10
					#adjust for copper ore refines
					if tier == 't1' and plated > 0:
						refines = math.ceil(plated*2.6)
					# XP weight of crafting vs. buying the insignia outright.
					make_w = self.xp_calc(refines,parts,plated,base_level[tier],base_level[tier]+mod_level[lvl],mod)
					buy_w = self.xp_calc(0,0,0,base_level[tier],base_level[tier]+mod_level[lvl],mod)
					for insig in items[tier][flag][lvl]:
						mcost = items[tier][item]['cost']+items[tier][flag][lvl][insig]['cost']
						bcost = items[tier][item]['cost']+insigs[tier][flag][lvl][insig]['cost']
						# Keep whichever option yields more XP per copper.
						# NOTE(review): '{craft)' below looks like a typo for
						# '(craft)'; the key is user-visible in the report, so
						# it is preserved byte-for-byte here.
						if make_w/float(mcost) > buy_w/float(bcost):
							scost[tier+'_'+lvl+'_'+insig+'{craft)_'+item] = {'cost':mcost,'part':items[tier][item]['recipe'],'insig':items[tier][flag][lvl][insig]['recipe'],'weight':make_w/float(mcost),'part_name':item,'insig_name':lvl+'_'+insig}
						else:
							scost[tier+'_'+lvl+'_'+insig+'(buy)_'+item] = {'cost':bcost,'part':items[tier][item]['recipe'],'insig':None,'weight':buy_w/float(bcost),'part_name':item,'insig_name':insig}
			costs[lvl] = scost
			# Best XP-per-copper options first.
			craft_queue[lvl] = sorted(costs[lvl], key=lambda k: costs[lvl][k]['weight'], reverse=True)
		# queue for printing make order
		make_q = {}
		cqc = {}
		# copy craft_queue
		for lvl in lvls:
			make_q[lvl] = []
			cqc[lvl] = craft_queue[lvl][:]
		_xp = xp_to_level[base_level[tier]+mod_level['rare']]
		refine = 0.0
		part = 0
		plated = 0
		insc = 0
		discovery = 0
		# fill rare(if needed): pop the best options until the simulated XP
		# clears the 25-level bracket.
		if tier == 't5':
			_xp = xp_to_level[375]
			while self.compute_level(_xp, 0, insc, discovery,3.25,base_level[tier]+mod_level['rare']) < xp_to_level[base_level[tier]+mod_level['rare'] + 25]:
				item = cqc['rare'].pop(0)
				make_q['rare'].append(item)
				for sitem in costs['rare'][item]['part']:
					refine += self.calcRefine(sitem,costs['rare'][item]['part'][sitem],tier)
				discovery += 1
				part += 2
				# insig is None when the buy option won the cost comparison.
				if not costs['rare'][item]['insig'] == None:
					for sitem in costs['rare'][item]['insig']:
						refine += self.calcRefine(sitem,costs['rare'][item]['insig'][sitem],tier)
					if flag == 'insc':
						plated += 2
					else:
						part += 2
					insc += 1
		insc = 0
		discovery = 0
		spart = 0 # shoulders are 50 skill in tier 1
		hpart = 0 # helmets are 25 points in tier 1
		_xp = xp_to_level[base_level[tier]+mod_level['master']]
		# fill master
		while self.compute_level(_xp, 0, spart+insc+plated, discovery,2.0,base_level[tier]+mod_level['master']) < xp_to_level[base_level[tier]+mod_level['master'] + 25]:
			item = cqc['master'].pop(0)
			make_q['master'].append(item)
			# t1 helm/shoulder parts credit a later bracket, so move their
			# counts out of the generic `part` tally.
			if tier == 't1' and 'shoulder' in item:
				spart += 2
				part -= 2
			elif tier == 't1' and 'helm' in item:
				hpart += 2
				part -= 2
			discovery += 1
			part += 2
			for sitem in costs['master'][item]['part']:
				refine += self.calcRefine(sitem,costs['master'][item]['part'][sitem],tier)
			if not costs['master'][item]['insig'] == None:
				for sitem in costs['master'][item]['insig']:
					refine += self.calcRefine(sitem,costs['master'][item]['insig'][sitem],tier)
				if flag == 'insc':
					plated += 1
				else:
					part += 1
				insc += 1
		insc = 0
		discovery = 0
		_xp = xp_to_level[base_level[tier]+mod_level['fine2']]
		# fill fine2
		while self.compute_level(_xp, 0, hpart+insc, discovery,1.0,base_level[tier]+mod_level['fine2']) < xp_to_level[base_level[tier]+mod_level['fine2'] + 25]:
			item = cqc['fine2'].pop(0)
			make_q['fine2'].append(item)
			if tier == 't1' and 'helm' in item:
				hpart += 2
				part -= 2
			discovery += 1
			part += 2
			for sitem in costs['fine2'][item]['part']:
				refine += self.calcRefine(sitem,costs['fine2'][item]['part'][sitem],tier)
			if not costs['fine2'][item]['insig'] == None:
				for sitem in costs['fine2'][item]['insig']:
					refine += self.calcRefine(sitem,costs['fine2'][item]['insig'][sitem],tier)
				part += 1
				insc += 1
		insc = 0
		discovery = 0
		_xp = xp_to_level[base_level[tier]+mod_level['fine1']]
		# fill fine1: all accumulated refines/parts from the later brackets
		# are credited here, since they are performed at the tier entry level.
		while self.compute_level(_xp, math.ceil(refine), part+insc, discovery,1.0,base_level[tier]) < xp_to_level[base_level[tier] + 25]:
			item = cqc['fine1'].pop(0)
			make_q['fine1'].append(item)
			part += 2
			discovery += 1
			for sitem in costs['fine1'][item]['part']:
				refine += self.calcRefine(sitem,costs['fine1'][item]['part'][sitem],tier)
			if not costs['fine1'][item]['insig'] == None:
				for sitem in costs['fine1'][item]['insig']:
					refine += self.calcRefine(sitem,costs['fine1'][item]['insig'][sitem],tier)
				part += 1
				insc += 1
		# start loop
		# recopy queue, empty make_q
		# fill fine1 (assume fine2, master, rare unchanged) : refine > parts/dowels > fine1_insc > fine1_discovery
		# fill fine2 (assume nothing) : fine2_fine1_discovery > fine2_insc > fine2_discovery
		# fill master (assume rare is unchanged) master_fine2_discovery > plated_dowels > master_insc > master_discovery
		# fill rare rare_master_discovery > rare_insc > rare_discovery
		# end loop if same result as last time(check first item in each tier of copied queue)
		t_buff = []
		for ll in lvls:
			t_buff.append('\nLevel: %i' % (base_level[tier]+mod_level[ll]))
			l_queue = []
			# Expand every chosen craft into buys (base materials) and
			# makes (intermediate components) via a worklist.
			for o in sorted(make_q[ll]):
				t_buff.append(str(o))
				self.cost += costs[ll][o]['cost']
				make[costs[ll][o]['part_name']] += 1
				for item in costs[ll][o]['part']:
					if items[tier][item]['recipe'] == None:
						buy[item] += costs[ll][o]['part'][item]
					else:
						make[item] += costs[ll][o]['part'][item]
						l_queue.append((item,costs[ll][o]['part'][item]))
				if costs[ll][o]['insig'] == None:
					buy[ll+'_'+costs[ll][o]['insig_name']] += 1
				else:
					make[costs[ll][o]['insig_name']] += 1
					for item in costs[ll][o]['insig']:
						if items[tier][item]['recipe'] == None:
							buy[item] += costs[ll][o]['insig'][item]
						else:
							make[item] += costs[ll][o]['insig'][item]
							l_queue.append((item,costs[ll][o]['insig'][item]))
			# Drain the worklist, recursing through sub-recipes.
			while l_queue:
				item, count = l_queue.pop()
				if items[tier][item]['recipe'] == None:
					buy[item] += count
				else:
					for sitem in items[tier][item]['recipe']:
						if items[tier][sitem]['recipe'] == None:
							buy[sitem] += count*items[tier][item]['recipe'][sitem]
						else:
							make[sitem] += items[tier][item]['recipe'][sitem]*count
							l_queue.append((sitem,items[tier][item]['recipe'][sitem]*count))
		self.log.append('\n***BUY***\n')
		bcost = 0
		for line in sorted(buy, key=lambda k: buy[k], reverse=True):
			if line in items[tier]:
				total_buy[items[tier][line]['name']] += buy[line]
				self.log.append("%3i - %s (%s per)\n"% (buy[line],items[tier][line]['name'],mFormat(items[tier][line]['cost'])))
				bcost += items[tier][line]['cost']*buy[line]
			else:
				# Bought insignia keys look like '<lvl>_<name>'.
				t = line.split('_')
				total_buy[insigs[tier][flag][t[0]][t[1]]['name']] += buy[line]
				self.log.append("%3i - %s (%s per)\n"% (buy[line],insigs[tier][flag][t[0]][t[1]]['name'],mFormat(insigs[tier][flag][t[0]][t[1]]['cost'])))
				bcost += insigs[tier][flag][t[0]][t[1]]['cost']*buy[line]
		global total_cost
		total_cost += bcost
		self.log.append('Cost for this tier: %s\n' % mFormat(bcost))
		self.log.append('\n***MAKE***'+'\n')
		sub = {}
		for line in sorted(make, key=lambda k: make[k], reverse=True):
			# Crafted insignias (quality-prefixed keys) are reported last,
			# with their per-unit cost.
			if 'fine1_' not in line and 'fine2_' not in line and 'master_' not in line and 'rare_' not in line:
				self.log.append("%3i - %s\n"% (make[line],line))
			else:
				sub[line] = make[line]
		for line in sorted(sub):
			self.log.append("%3i - %s (%s per)\n"% (sub[line],line, mFormat(items[tier][flag][line.split('_')[0]][line.split('_')[1]] ['cost'])))
		for line in t_buff:
			self.log.append(line+'\n')
# Compute the costs for leveling a craft
def computeCraft(pList,fname,flag):
	"""Plan all five tiers of one discipline and write the guide to fname.

	pList is the discipline's finished-item dict; flag is 'insc' or
	'insig'.  XP is threaded from tier to tier through craftTier.
	"""
	with open(fname,'wb') as f:
		f.write('Last updated: '+datetime.datetime.now().strftime('%H:%M:%S %m-%d-%Y')+' PST\n')
		totalcost = 0
		tnum = 0  # NOTE(review): never used; candidate for removal
		xp = 0
		for tier in ['t1','t2','t3','t4','t5']:
			c_tier = craftTier(xp)
			c_tier.generateTier(pList,flag,tier)
			# Carry accumulated xp into the next tier's planner.
			cost, log, xp = c_tier.getResult()
			totalcost += cost
			for line in log:
				f.write(line)
		f.write(fname+':'+mFormat(totalcost))
def main():
	"""Fetch prices, derive costs, and emit one guide file per discipline."""
	# populate the xp chart (cumulative xp needed to reach each level 0..440)
	for i in range(1,441):
		xp_to_level.append(xpreq(i)+xp_to_level[i-1])
	getItemDict()
	appendCosts()
	getInsigDict()
	computeCraft(items_l.wc,"Weapon2.txt",'insc')
	computeCraft(items_l.ht,"Huntsman2.txt",'insc')
	computeCraft(items_l.ac,"Armorcraft2.txt",'insig')
	computeCraft(items_l.lw,"Leatherwork2.txt",'insig')
	computeCraft(items_l.tl,"Tailor2.txt",'insig')
	# Combined shopping list across all disciplines.
	with open("Totals.txt", 'wb') as f:
		for line in sorted(total_buy):
			f.write("%4i - %s \n"% (total_buy[line],line))
		f.write("Total cost: %s" % mFormat(total_cost))
	# Optional FTP upload of the generated guides (disabled).
	# myFtp = FTP(ftp_url)
	# myFtp.login(ftp_user,ftp_pass)
	# for item in ["Weapon2.txt","Huntsman2.txt","Armorcraft2.txt","Leatherwork2.txt","Tailor2.txt","Totals.txt"]:
	# 	with open(item,'rb') as f:
	# 		myFtp.storbinary('STOR '+item,f)
	# myFtp.close()
# Entry point: run the full scrape-and-generate pipeline when executed directly.
if __name__ == '__main__':
	main()
| xanthics/gw2crafting---retired | Crafting.py | Python | mit | 21,884 |
##
# tibrvftmon.py - example TIB/Rendezvous fault tolerant group
# monitor program
#
# rewrite TIBRV example: tibrvftmon.c
#
# LAST MODIFIED: V1.0 2016-12-26 ARIEN [email protected]
#
import sys
import signal
import getopt
from pytibrv.api import *
from pytibrv.status import *
from pytibrv.queue import *
from pytibrv.tport import *
from pytibrv.ft import *
# Module Variables
_running = True        # cleared by the SIGINT handler to stop the dispatch loop
_oldNumActives = 0     # previous active-member count; distinguishes joins from leaves
def signal_proc(signal, frame):
    """SIGINT handler: tell the main dispatch loop to exit."""
    global _running
    _running = False
    # Single print producing the same two output lines as the original
    # (blank line, then the message).  'CRTL-C' typo kept intentionally.
    print('\nCRTL-C PRESSED')
def usage():
    """Print command-line help, then terminate with exit status 1."""
    for line in (
        '',
        'tibrvftmon.py [options]',
        '',
        'options:',
        '    [--service service]      RVD Service',
        '    [--network network]      RVD Network',
        '    [--daemon daemon]        RVD Daemon',
        '',
    ):
        print(line)
    sys.exit(1)
def get_params(argv):
    """Parse the command line; exits via usage() on any bad input.

    Returns (service, network, daemon); options not supplied are None.
    """
    long_opts = ['service=', 'network=', 'daemon=']
    try:
        opts, leftover = getopt.getopt(argv, '', long_opts)
    except getopt.GetoptError:
        usage()

    parsed = {'--service': None, '--network': None, '--daemon': None}
    for opt, arg in opts:
        if opt in parsed:
            parsed[opt] = arg
        else:
            usage()

    # Positional arguments are not accepted.
    if leftover:
        usage()

    return parsed['--service'], parsed['--network'], parsed['--daemon']
def monCB(monitor: tibrvftMonitor, groupName: bytes, numActiveMembers: int, closure):
    """Fault-tolerance monitor callback: report group membership changes.

    Compares against the previous count to say whether a member
    activated or deactivated, then remembers the new count.
    """
    global _oldNumActives
    change = 'one deactivated' if _oldNumActives > numActiveMembers else 'one activated'
    print('Group [{}]: has {} active members (after {})'.format(
        groupName.decode(),
        numActiveMembers,
        change
    ))
    _oldNumActives = numActiveMembers
    return
# MAIN PROGRAM
def main(argv):
    """Open TIB/RV, create a transport, and monitor the
    TIBRVFT_TIME_EXAMPLE fault-tolerant group until Ctrl-C.
    """
    service, network, daemon = get_params(argv[1:])
    progname = argv[0]
    lostInt = 4.8  # heartbeat-loss interval (seconds) for the monitor
    err = tibrv_Open()
    if err != TIBRV_OK:
        # BUG FIX: the original passed a stray leading '' to format(),
        # shifting the arguments so the status text was never shown.
        print('{}: Failed to open TIB/RV: {}'.format(progname, tibrvStatus_GetText(err)))
        sys.exit(1)
    err, tx = tibrvTransport_Create(service, network, daemon)
    if err != TIBRV_OK:
        print('{}: Failed to initialize transport: {}'.format(progname, tibrvStatus_GetText(err)))
        sys.exit(1)
    err, monitor = tibrvftMonitor_Create(TIBRV_DEFAULT_QUEUE, monCB, tx, 'TIBRVFT_TIME_EXAMPLE', lostInt, None)
    if err != TIBRV_OK:
        # BUG FIX: the original passed the format arguments to print()
        # instead of calling str.format() on the message.
        print('{} : Failed to start group monitor - {}'.format(progname, tibrvStatus_GetText(err)))
        # BUG FIX: a stray trailing backslash after this call glued it to
        # the following print statement, which was a syntax error.
        sys.exit(1)
    print('{} : Waiting for group information...'.format(progname))
    # Set Signal Handler for Ctrl-C
    global _running
    signal.signal(signal.SIGINT, signal_proc)
    while _running:
        tibrvQueue_TimedDispatch(TIBRV_DEFAULT_QUEUE, 0.5)
    # CTRL-C PRESSED
    # When the monitor is destroyed the callback fires once more with
    # numActiveMembers = 0; that false alert should be ignored.
    tibrvftMonitor_Destroy(monitor)
    tibrv_Close()
    return
if __name__ == "__main__":
main(sys.argv)
| arienchen/pytibrv | examples/api/tibrvftmon.py | Python | bsd-3-clause | 3,129 |
#!/usr/bin/env python
from .server import MapRouletteServer
from .challenge import MapRouletteChallenge
from .task import MapRouletteTask
from .taskcollection import MapRouletteTaskCollection | mvexel/maproulette-api-wrapper | maproulette/__init__.py | Python | mit | 192 |
from __future__ import print_function
from BinPy import *
# Demo script for the 4025 triple 3-input NOR gate IC.
print ('Usage of IC 4025:\n')
ic = IC_4025()
print ('\nThe Pin configuration is:\n')
# Initial logic level for each pin (pin number -> 0/1).
# Renamed from `input` to avoid shadowing the builtin.
pin_config = {1: 1, 2: 1, 3: 0, 4: 0, 5: 0, 7: 0, 8: 1, 11: 0, 12: 1, 13: 1, 14: 1}
print (pin_config)
print ('\nPin initinalization\n')
print ('\nPowering up the IC - using -- ic.setIC({14: 1, 7: 0}) -- \n')
ic.setIC({14: 1, 7: 0})
print ('\nDraw the IC with the current configuration\n')
ic.drawIC()
print (
    '\nRun the IC with the current configuration using -- print ic.run() -- \n')
print (
    'Note that the ic.run() returns a dict of pin configuration similar to :')
print (ic.run())
print (
    '\nSeting the outputs to the current IC configuration using -- ic.setIC(ic.run()) --\n')
ic.setIC(ic.run())
print ('\nDraw the final configuration\n')
ic.drawIC()
print ('\nConnector Inputs\n')
print ('c = Connector(p[1])\np[1] = c\nic.setIC(p)\n')
# BUG FIX: the original used `p` here without ever defining it, which
# raised NameError.  `p` is the pin-configuration dictionary fed to the IC.
p = pin_config
c = Connector(p[1])
p[1] = c
ic.setIC(p)
print ('Run the IC\n')
print (ic.run())
print ('\nConnector Outputs')
print ('Set the output -- ic.setOutput(8, c)\n')
ic.setOutput(8, c)
print ('Run the IC\n')
print (ic.run())
# -*- coding: utf-8 -*-
from . import print_labels
| lem8r/woodwerk-addons | woodwerk_labeling/wizard/__init__.py | Python | agpl-3.0 | 52 |
"""
Sensor for Fedex packages.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fedex/
"""
from collections import defaultdict
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
ATTR_ATTRIBUTION, CONF_UPDATE_INTERVAL)
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from homeassistant.util import Throttle
from homeassistant.util.dt import now, parse_date
import homeassistant.helpers.config_validation as cv
# Python package Home Assistant installs for this platform.
REQUIREMENTS = ['fedexdeliverymanager==1.0.6']
_LOGGER = logging.getLogger(__name__)
# File (inside the HA config dir) where the fedexdeliverymanager session
# cookies are persisted between restarts.
COOKIE = 'fedexdeliverymanager_cookies.pickle'
DOMAIN = 'fedex'
ICON = 'mdi:package-variant-closed'
# Slugified package status that marks a completed delivery.
STATUS_DELIVERED = 'delivered'
# Platform configuration: credentials are required; update interval
# defaults to 30 minutes.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_UPDATE_INTERVAL, default=timedelta(seconds=1800)):
        vol.All(cv.time_period, cv.positive_timedelta),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Fedex platform."""
    # Imported lazily: the REQUIREMENTS package is only installed at setup.
    import fedexdeliverymanager
    name = config.get(CONF_NAME)
    update_interval = config.get(CONF_UPDATE_INTERVAL)
    try:
        # Session cookies are cached on disk so logins survive restarts.
        cookie = hass.config.path(COOKIE)
        session = fedexdeliverymanager.get_session(
            config.get(CONF_USERNAME), config.get(CONF_PASSWORD),
            cookie_path=cookie)
    except fedexdeliverymanager.FedexError:
        # Returning False tells HA the platform failed to set up.
        _LOGGER.exception("Could not connect to Fedex Delivery Manager")
        return False
    add_entities([FedexSensor(session, name, update_interval)], True)
class FedexSensor(Entity):
    """Fedex Sensor.

    State is the number of packages not yet delivered (plus today's
    deliveries); per-status counts are exposed as attributes.
    """
    def __init__(self, session, name, interval):
        """Initialize the sensor."""
        self._session = session
        self._name = name
        self._attributes = None
        self._state = None
        # Rate-limit refreshes: HA calls self.update, which is the
        # throttled wrapper around _update.
        self.update = Throttle(interval)(self._update)
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name or DOMAIN
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return 'packages'
    def _update(self):
        """Update device state."""
        import fedexdeliverymanager
        status_counts = defaultdict(int)
        for package in fedexdeliverymanager.get_packages(self._session):
            status = slugify(package['primary_status'])
            # Ignore packages delivered before today; today's deliveries
            # remain visible until the day rolls over.
            skip = status == STATUS_DELIVERED and \
                parse_date(package['delivery_date']) < now().date()
            if skip:
                continue
            status_counts[status] += 1
        self._attributes = {
            ATTR_ATTRIBUTION: fedexdeliverymanager.ATTRIBUTION
        }
        self._attributes.update(status_counts)
        self._state = sum(status_counts.values())
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return ICON
| PetePriority/home-assistant | homeassistant/components/sensor/fedex.py | Python | apache-2.0 | 3,452 |
# -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing the QMessageBox wizard plugin.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QDialog
from E5Gui.E5Application import e5App
from E5Gui.E5Action import E5Action
from E5Gui import E5MessageBox
# Start-Of-Header
# Plugin metadata evaluated by eric's plugin manager.
name = "QMessageBox Wizard Plugin"
author = "Detlev Offenbach <[email protected]>"
autoactivate = True          # activate automatically on startup
deactivateable = True        # the user may disable the plugin at runtime
version = "6.0.0"
className = "MessageBoxWizard"   # class the plugin manager instantiates
packageName = "__core__"         # built-in (core) plugin
shortDescription = "Show the QMessageBox wizard."
longDescription = """This plugin shows the QMessageBox wizard."""
pyqtApi = 2
python2Compatible = True
# End-Of-Header
error = ""  # filled with an error message if plugin activation fails
class MessageBoxWizard(QObject):
    """
    Class implementing the QMessageBox wizard plugin.

    On activation it registers a menu action in the 'wizards' menu;
    triggering the action opens a dialog and inserts the generated
    QMessageBox code at the cursor of the active editor.
    """
    def __init__(self, ui):
        """
        Constructor

        @param ui reference to the user interface object (UI.UserInterface)
        """
        super(MessageBoxWizard, self).__init__(ui)
        self.__ui = ui
    def activate(self):
        """
        Public method to activate this plugin.

        @return tuple of None and activation status (boolean)
        """
        # The action must exist before it can be added to the menu.
        self.__initAction()
        self.__initMenu()
        return None, True
    def deactivate(self):
        """
        Public method to deactivate this plugin.
        """
        menu = self.__ui.getMenu("wizards")
        if menu:
            menu.removeAction(self.action)
        self.__ui.removeE5Actions([self.action], 'wizards')
    def __initAction(self):
        """
        Private method to initialize the action.
        """
        self.action = E5Action(
            self.tr('QMessageBox Wizard'),
            self.tr('Q&MessageBox Wizard...'), 0, 0, self,
            'wizards_qmessagebox')
        self.action.setStatusTip(self.tr('QMessageBox Wizard'))
        self.action.setWhatsThis(self.tr(
            """<b>QMessageBox Wizard</b>"""
            """<p>This wizard opens a dialog for entering all the parameters"""
            """ needed to create a QMessageBox. The generated code is"""
            """ inserted at the current cursor position.</p>"""
        ))
        self.action.triggered.connect(self.__handle)
        self.__ui.addE5Actions([self.action], 'wizards')
    def __initMenu(self):
        """
        Private method to add the actions to the right menu.
        """
        menu = self.__ui.getMenu("wizards")
        if menu:
            menu.addAction(self.action)
    def __callForm(self, editor):
        """
        Private method to display a dialog and get the code.

        @param editor reference to the current editor
        @return the generated code (string)
        """
        # Imported here to avoid loading the dialog module until needed.
        from WizardPlugins.MessageBoxWizard.MessageBoxWizardDialog import \
            MessageBoxWizardDialog
        dlg = MessageBoxWizardDialog(None)
        if dlg.exec_() == QDialog.Accepted:
            line, index = editor.getCursorPosition()
            # Derive the indentation (level and tab/space string) at the
            # cursor so the generated code matches the surrounding code.
            indLevel = editor.indentation(line) // editor.indentationWidth()
            if editor.indentationsUseTabs():
                indString = '\t'
            else:
                indString = editor.indentationWidth() * ' '
            return (dlg.getCode(indLevel, indString), True)
        else:
            return (None, False)
    def __handle(self):
        """
        Private method to handle the wizards action.
        """
        editor = e5App().getObject("ViewManager").activeWindow()
        if editor is None:
            E5MessageBox.critical(
                self.__ui,
                self.tr('No current editor'),
                self.tr('Please open or create a file first.'))
        else:
            code, ok = self.__callForm(editor)
            if ok:
                line, index = editor.getCursorPosition()
                # It should be done on this way to allow undo
                editor.beginUndoAction()
                editor.insertAt(code, line, index)
                editor.endUndoAction()
| davy39/eric | Plugins/PluginWizardQMessageBox.py | Python | gpl-3.0 | 4,158 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.