| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| choderalab/assaytools | examples/direct-fluorescence-assay/inputs_p38_singlet.py | Python | lgpl-2.1 | 1,067 | 0.045923 |
import json
import numpy as np
from glob import glob
inputs = {
'xml_file_path' : "./data/single_wavelength_copy",
'file_set' : {'p38' : glob( "./data/single_wavelength_copy/*.xml")},
'section' : '280_480_TOP_120',
'ligand_order' : ['Bosutinib','Bosutinib Isomer','Erlotinib','Gefitinib','Ponatinib','Lapatinib','Saracatinib','Vandetanib'],
'Lstated' : np.array([20.0e-6,14.0e-6,9.82e-6,6.88e-6,4.82e-6,3.38e-6,2.37e-6,1.66e-6,1.16e-6,0.815e-6,0.571e-6,0.4e-6,0.28e-6,0.196e-6,0.138e-6,0.0964e-6,0.0676e-6,0.0474e-6,0.0320e-6,0.0240e-6,0.0160e-6,0.0120e-6,0.008e-6,0.0], np.float64), # ligand concentration, M
'Pstated' : 0.5e-6 * np.ones([24],np.float64), # protein concentration, M
'assay_volume' : 50e-6, # assay volume, L
'well_area' : 0.1369, # well area, cm^2 for 4ti-0203 [http://4ti.co.uk/files/3113/4217/2464/4ti-0201.pdf]
}
inputs['Lstated'] = inputs['Lstated'].tolist()
inputs['Pstated'] = inputs['Pstated'].tolist()
with open('inputs.json', 'w') as fp:
json.dump(inputs, fp)
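A minimal round-trip sketch (not part of the original file): since the arrays are converted to lists before serialization, a consumer would plausibly restore them with np.array after loading. Only the file name 'inputs.json' comes from the code above; the rest is an assumption about downstream use.

import json
import numpy as np

with open('inputs.json') as fp:
    loaded = json.load(fp)

# Assumption: downstream analysis expects ndarrays, as before serialization.
loaded['Lstated'] = np.array(loaded['Lstated'], np.float64)
loaded['Pstated'] = np.array(loaded['Pstated'], np.float64)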
| erickpeirson/mbl-browser | browser/migrations/0010_auto_20160804_1644.py | Python | gpl-3.0 | 13,851 | 0.001877 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('browser', '0009_splitevent'),
]
operations = [
migrations.AddField(
model_name='affiliation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='attendance',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='course',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='coursegroup',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalaffiliation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalattendance',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalcourse',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalcoursegroup',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalinstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalinvestigator',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknowninstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknownlocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknownperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicallocalization',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicallocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalpartof',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='institution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='investigator',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knowninstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knownlocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knownperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='localization',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='location',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='partof',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='person',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='affiliation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='attendance',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='course',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='coursegroup',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalaffiliation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalattendance',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalcourse',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalcoursegroup',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalinstitution',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
),
migrations.AlterField(
model_name='historicalinvestigator',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority accord.'),
| michaelgugino/web_keyer | sqlalchemy/events.py | Python | gpl-3.0 | 31,080 | 0.000611 |
# sqlalchemy/events.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and :class:`.SchemaEvent`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener functions.
| ytsarev/rally | rally/osclients.py | Python | apache-2.0 | 7,886 | 0.000127 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
from ceilometerclient import client as ceilometer
from cinderclient import client as cinder
import glanceclient as glance
from heatclient import client as heat
from keystoneclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone
from neutronclient.neutron import client as neutron
from novaclient import client as nova
from oslo.config import cfg
from rally import exceptions
CONF = cfg.CONF
CONF.register_opts([
cfg.FloatOpt("openstack_client_http_timeout", default=30.0,
help="HTTP timeout for any of OpenStack service in seconds"),
cfg.BoolOpt("https_insecure", default=False,
help="Use SSL for all OpenStack API interfaces"),
cfg.StrOpt("https_cacert", default=None,
help="Path to CA server cetrificate for SSL")
])
# NOTE(boris-42): super dirty hack to make nova python client 2.17 thread safe
nova._adapter_pool = lambda x: nova.adapters.HTTPAdapter()
class Clients(object):
"""This class simplify and unify work with openstack python clients."""
def __init__(self, endpoint):
self.endpoint = endpoint
self.cache = {}
def clear(self):
"""Remove all cached client handles."""
self.cache = {}
def memoize(name):
"""Cache client handles."""
def decorate(func):
def wrapper(self, *args, **kwargs):
key = '{0}{1}{2}'.format(func.__name__,
str(args) if args else '',
str(kwargs) if kwargs else '')
if key in self.cache:
return self.cache[key]
self.cache[key] = func(self, *args, **kwargs)
return self.cache[key]
return wrapper
return decorate
@memoize('keystone')
def keystone(self):
"""Return keystone client."""
new_kw = {
"endpoint": self._change_port(self.endpoint.auth_url, "35357"),
"timeout": CONF.openstack_client_http_timeout,
"insecure": CONF.https_insecure, "cacert": CONF.https_cacert
}
kw = dict(self.endpoint.to_dict().items() + new_kw.items())
client = keystone.Client(**kw)
client.authenticate()
return client
def verified_keystone(self):
"""Ensure keystone endpoints are valid and then authenticate
:returns: Keystone Client
"""
try:
# Ensure that user is admin
client = self.keystone()
roles = client.auth_ref['user']['roles']
if not any('admin' == role['name'] for role in roles):
raise exceptions.InvalidAdminException(
username=self.endpoint.username)
except keystone_exceptions.Unauthorized:
raise exceptions.InvalidEndpointsException()
except keystone_exceptions.AuthorizationFailure:
raise exceptions.HostUnreachableException(
url=self.endpoint.auth_url)
return client
@memoize('nova')
def nova(self, version='2'):
"""Returns nova client."""
client = nova.Client(version,
self.endpoint.username,
self.endpoint.password,
self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
service_type='compute',
http_log_debug=CONF.debug,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('neutron')
def neutron(self, version='2.0'):
"""Returns neutron client."""
client = neutron.Client(version,
username=self.endpoint.username,
password=self.endpoint.password,
tenant_name=self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('glance')
def glance(self, version='1'):
"""Returns glance client."""
kc = self.keystone()
endpoint = kc.service_catalog.get_endpoints()['image'][0]
client = glance.Client(version,
endpoint=endpoint['publicURL'],
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('heat')
def heat(self, version='1'):
"""Returns heat client."""
kc = self.keystone()
endpoint = kc.service_catalog.get_endpoints()['orchestration'][0]
client = heat.Client(version,
endpoint=endpoint['publicURL'],
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('cinder')
def cinder(self, version='1'):
"""Returns cinder client."""
client = cinder.Client(version,
self.endpoint.username,
self.endpoint.password,
self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
service_type='volume',
http_log_debug=CONF.debug,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('ceilometer')
def ceilometer(self, version='1'):
"""Returns ceilometer client."""
client = ceilometer.Client(version,
username=self.endpoint.username,
password=self.endpoint.password,
tenant_name=self.endpoint.tenant_name,
endpoint=self.endpoint.auth_url,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
def _change_port(self, url, new_port):
"""Change the port of a given url.
:param url: URL string
:param new_port: The new port
:returns: URL string
"""
url_obj = urlparse.urlparse(url)
new_url = "%(scheme)s://%(hostname)s:%(port)s%(path)s" % {
"scheme": url_obj.scheme, "hostname": url_obj.hostname,
"port": new_port, "path": url_obj.path}
return new_url
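The memoize helper above caches client handles on each Clients instance, keyed by the wrapped method's name and its arguments. A self-contained sketch of the same pattern (the Example class and its method are illustrative, not part of rally):

def memoize(name):
    """Cache results on self.cache, keyed by function name and arguments."""
    def decorate(func):
        def wrapper(self, *args, **kwargs):
            key = '{0}{1}{2}'.format(func.__name__,
                                     str(args) if args else '',
                                     str(kwargs) if kwargs else '')
            if key not in self.cache:
                self.cache[key] = func(self, *args, **kwargs)
            return self.cache[key]
        return wrapper
    return decorate

class Example(object):
    def __init__(self):
        self.cache = {}

    @memoize('expensive')
    def expensive(self, x):
        print('computing %s' % x)  # runs once per distinct argument
        return x * x

e = Example()
e.expensive(3)  # prints 'computing 3' and caches 9
e.expensive(3)  # returns 9 straight from self.cache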
| alexmilesyounger/ds_basics | s3v2.py | Python | mit | 2,936 | 0.016349 |
from s3v1 import *
def filter_col_by_string(data_sample, field, filter_condition):
filtered_rows = [] # create a new list
col = int(data_sample[0].index(field)) # look up the column index of the field name in the header row
filtered_rows.append(data_sample[0]) # add the header row to the new list
for item in data_sample[1:]:
if item[col] == filter_condition:
filtered_rows.append(item)
return filtered_rows
def filter_col_by_float(data_sample, field, direction, filter_condition):
filtered_rows = []
col = int(data_sample[0].index(field)) # indexes must be integers, so make sure this isn't a float or a string
cond = float(filter_condition)
for row in data_sample[1:]:
element = float(row[col])
if direction == "<":
if element < cond:
filtered_rows.append(row)
elif direction == "<=":
if element <= cond:
filtered_rows.append(row)
elif direction == ">":
if element > cond:
filtered_rows.append(row)
elif direction == ">=":
if element >= cond:
filtered_rows.append(row)
elif direction == "==":
if element == cond:
filtered_rows.append(row)
else:
pass # the pass statement does nothing. It can be used when a statement is required syntactically but the program requires no action.
return filtered_rows
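# Sketch (not in the original file): the if/elif chain above can be
# collapsed with the standard operator module; behavior is the same,
# including returning no rows for an unrecognised direction.
import operator

_OPS = {"<": operator.lt, "<=": operator.le,
        ">": operator.gt, ">=": operator.ge, "==": operator.eq}

def filter_col_by_float_v2(data_sample, field, direction, filter_condition):
    col = data_sample[0].index(field)
    cond = float(filter_condition)
    op = _OPS.get(direction)
    if op is None:
        return []
    return [row for row in data_sample[1:] if op(float(row[col]), cond)]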
under_20_bucks = filter_col_by_float(data_from_csv, 'priceLabel', "<=", 20)
# print("Found {} ties under $20".format(number_of_records(under_20_bucks)))
silk_ties = filter_col_by_string(data_from_csv, "material", "_silk")
wool_ties = filter_col_by_string(data_from_csv, "material", "_wool")
cotton_ties = filter_col_by_string(data_from_csv, "material", "_cotton")
gucci_ties = filter_col_by_string(data_from_csv, "brandName", "Gucci") # this search term is case sensitive: it returned 1 tie for "gucci" and 171 ties for "Gucci". A manual search of the dataset for the lowercase "gucci" tie turned up nothing, so what's going on with that 1 "gucci" tie?
falfkafj_ties = filter_col_by_string(data_from_csv, "brandName", "falfkafj") # this is a test of a non-existent brand, to check whether the lowercase "gucci" result was a global error, i.e. whether every request returns a minimum of one record even if zero exist.
# print("Found {} silk ties".format(number_of_records(silk_ties)))
# print("Found {} wool ties".format(number_of_records(wool_ties)))
# print("Found {} cotton ties".format(number_of_records(cotton_ties)))
# print("Found {} Gucci ties".format(number_of_records(gucci_ties)))
# print("Found {} falfkafj ties".format(number_of_records(falfkafj_ties)))
# print("Found {} falfkafj ties".format(number_of_records(falfkafj_ties[1:])))
# print("Found {} falfkafj ties".format(number_of_records_ignore_header(falfkafj_ties)))
| kurtraschke/pyRFC3339 | pyrfc3339/generator.py | Python | mit | 2,170 | 0 |
import pytz
from pyrfc3339.utils import timezone, timedelta_seconds
def generate(dt, utc=True, accept_naive=False, microseconds=False):
'''
Generate an :RFC:`3339`-formatted timestamp from a
:class:`datetime.datetime`.
>>> from datetime import datetime
>>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc))
'2009-01-01T12:59:59Z'
The timestamp will use UTC unless `utc=False` is specified, in which case
it will use the timezone from the :class:`datetime.datetime`'s
:attr:`tzinfo` parameter.
>>> eastern = pytz.timezone('US/Eastern')
>>> dt = eastern.localize(datetime(2009,1,1,12,59,59))
>>> generate(dt)
'2009-01-01T17:59:59Z'
>>> generate(dt, utc=False)
'2009-01-01T12:59:59-05:00'
Unless `accept_naive=True` is specified, the `datetime` must not be naive.
>>> generate(datetime(2009,1,1,12,59,59,0))
Traceback (most recent call last):
...
ValueError: naive datetime and accept_naive is False
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True)
'2009-01-01T12:59:59Z'
If `accept_naive=True` is specified, the `datetime` is assumed to be UTC.
Attempting to generate a local timestamp from a naive datetime will result
in an error.
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False)
Traceback (most recent call last):
...
ValueError: cannot generate a local timestamp from a naive datetime
'''
if dt.tzinfo is None:
if accept_naive is True:
if utc is True:
dt = dt.replace(tzinfo=pytz.utc)
else:
raise ValueError("cannot generate a local timestamp from " +
"a naive datetime")
else:
raise ValueError("naive datetime and accept_naive is False")
if utc is True:
dt = dt.astimezone(pytz.utc)
timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S')
if microseconds is True:
timestamp += dt.strftime('.%f')
if dt.tzinfo is pytz.utc:
timestamp += 'Z'
else:
timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt)))
return timestamp
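A short usage sketch of the microseconds flag, consistent with the doctests above (not part of the original file):

from datetime import datetime
import pytz

# Microseconds are appended via strftime('.%f') when requested.
dt = datetime(2009, 1, 1, 12, 59, 59, 250000, pytz.utc)
print(generate(dt, microseconds=True))  # 2009-01-01T12:59:59.250000Z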
| gundalow/ansible-modules-extras | windows/win_uri.py | Python | gpl-3.0 | 4,484 | 0.000669 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Corwin Brown <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: win_uri
version_added: "2.1"
short_description: Interacts with webservices.
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms.
options:
url:
description:
- HTTP or HTTPS URL in the form of (http|https)://host.domain:port/path
method:
description:
- The HTTP Method of the request or response.
default: GET
choices:
- GET
- POST
- PUT
- HEAD
- DELETE
- OPTIONS
- PATCH
- TRACE
- CONNECT
- REFRESH
content_type:
description:
- Sets the "Content-Type" header.
body:
description:
- The body of the HTTP request/response to the web service.
headers:
description:
- 'Key Value pairs for headers. Example "Host: www.somesite.com"'
use_basic_parsing:
description:
- This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer engine to parse a webpage. There's an edge case where, if a user hasn't run IE before, this will fail. The only advantage to using the Internet Explorer parser is that you can traverse the DOM in a PowerShell script. That isn't useful for Ansible, so by default we toggle 'UseBasicParsing'. However, you can toggle that off here.
choices:
- True
- False
default: True
author: Corwin Brown (@blakfeld)
"""
EXAMPLES = """
# Send a GET request and store the output:
---
- name: Perform a GET and Store Output
win_uri:
url: http://www.somesite.com/myendpoint
register: http_output
# Set a HOST header to hit an internal webserver:
---
- name: Hit a Specific Host on the Server
win_uri:
url: http://my.internal.server.com
method: GET
headers:
host: "www.somesite.com"
# Do a HEAD request on an endpoint
---
- name: Perform a HEAD on an Endpoint
win_uri:
url: http://www.somesite.com
method: HEAD
# Post a body to an endpoint
---
- name: POST a Body to an Endpoint
win_uri:
url: http://www.somesite.com
method: POST
body: "{ 'some': 'json' }"
"""
RETURN = """
url:
description: The Target URL
returned: always
type: string
sample: "https://www.ansible.com"
method:
description: The HTTP method used.
returned: always
type: string
sample: "GET"
content_type:
description: The "content-type" header used.
returned: always
type: string
sample: "application/json"
use_basic_parsing:
description: The state of the "use_basic_parsing" flag.
returned: always
type: bool
sample: True
body:
description: The content of the body used
returned: when body is specified
type: string
sample: '{"id":1}'
version_added: "2.3"
status_code:
description: The HTTP Status Code of the response.
returned: success
type: int
sample: 200
status_description:
description: A summary of the status.
returned: success
type: string
sample: "OK"
raw_content:
description: The raw content of the HTTP response.
returned: success
type: string
sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
headers:
description: The Headers of the response.
returned: success
type: dict
sample: {"Content-Type": "application/json"}
raw_content_length:
description: The byte size of the response.
returned: success
type: int
sample: 54447
"""
| anaran/olympia | sites/landfill/settings_base.py | Python | bsd-3-clause | 5,402 | 0.002962 |
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['sa_pool_key'] = 'master'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
## Celery
BROKER_URL = private.BROKER_URL
CELERY_ALWAYS_EAGER = True
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTION_ICONS_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': { 'level': logging.DEBUG },
'z.hera': { 'level': logging.INFO },
'z.redis': { 'level': logging.DEBUG },
'z.pool': { 'level': logging.ERROR },
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = ('https://www.google.com/recaptcha/api/challenge?k=%s' % RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'addons-landfill'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = "https://builder-addons-dev.allizom.org/repackage/sdk-versions/"
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_landfill' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons-dev.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = 'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi'
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
AES_KEYS = private.AES_KEYS
| stefanseefeld/synopsis | Synopsis/ASG.py | Python | lgpl-2.1 | 19,516 | 0.015936 |
#
# Copyright (C) 2000 Stefan Seefeld
# Copyright (C) 2000 Stephen Davies
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
"""Abstract Syntax Tree classes.
This file contains classes which encapsulate nodes in the ASG. The base class
is the Declaration class that encapsulates a named declaration. All names used
are scoped tuples.
Also defined in module scope are the constants DEFAULT, PUBLIC, PROTECTED and
PRIVATE.
"""
# Accessibility constants
DEFAULT = 0
PUBLIC = 1
PROTECTED = 2
PRIVATE = 3
def ccmp(a,b):
"""Compares classes of two objects"""
return cmp(type(a),type(b)) or cmp(a.__class__,b.__class__)
class Error:
"""Exception class used by ASG internals."""
def __init__(self, err):
self.err = err
def __repr__(self):
return self.err
class Debugger(type):
"""Wrap the object's 'accept' method, printing out the visitor's type.
Useful for tracing visitors visiting declarations."""
def __init__(cls, name, bases, dict):
accept = dict['accept']
"The original instancemethod."
def accept_wrapper(self, visitor):
"The wrapper. The original 'accept' method is part of its closure."
print '%s accepting %s.%s'%(self.__class__.__name__,
visitor.__module__,
visitor.__class__.__name__)
accept(self, visitor)
setattr(cls, 'accept', accept_wrapper)
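# Usage note (illustrative, not in the original file): a class opts into
# this tracing wrapper by declaring the metaclass, as the commented-out
# line in Declaration below suggests:
#
#     class TracedDeclaration(Declaration):
#         __metaclass__ = Debugger
#         def accept(self, visitor): visitor.visit_declaration(self)
#
# Every accept() call then prints which visitor is visiting which node.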
class TypeId(object):
"""Type-id abstract class."""
def __init__(self, language):
self.language = language
def accept(self, visitor):
"""visitor pattern accept. @see Visitor"""
pass
def __cmp__(self, other):
"Comparison operator"
return cmp(id(self),id(other))
class NamedTypeId(TypeId):
"""Named type abstract class"""
def __init__(self, language, name):
super(NamedTypeId, self).__init__(language)
self.name = name
class BuiltinTypeId(NamedTypeId):
"""Class for builtin type-ids"""
def __init__(self, language, name):
super(BuiltinTypeId, self).__init__(language, name)
def accept(self, visitor): visitor.visit_builtin_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class DependentTypeId(NamedTypeId):
"""Class for template dependent type-ids"""
def __init__(self, language, name):
super(DependentTypeId, self).__init__(language, name)
def accept(self, visitor): visitor.visit_dependent_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class UnknownTypeId(NamedTypeId):
"""Class for not (yet) known type-ids."""
base = TypeId
def __init__(self, language, name):
super(UnknownTypeId, self).__init__(language, name)
self.link = name
def resolve(self, language, name, link):
"""Associate this type-id with an external reference, instead of a declaration."""
self.base.language = language
self.name = name
self.link = link
def accept(self, visitor): visitor.visit_unknown_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class DeclaredTypeId(NamedTypeId):
"""Class for declared types"""
def __init__(self, language, name, declaration):
super(DeclaredTypeId, self).__init__(language, name)
self.declaration = declaration
def accept(self, visitor): visitor.visit_declared_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class TemplateId(DeclaredTypeId):
"""Class for template-ids."""
def __init__(self, language, name, declaration, parameters):
super(TemplateId, self).__init__(language, name, declaration)
self.parameters = parameters
def accept(self, visitor): visitor.visit_template_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.parameters,other.parameters)
def __str__(self):
return "template<%s>%s"%(','.join(str(self.parameters)), str(self.name))
class ModifierTypeId(TypeId):
"""Class for alias types with modifiers (such as 'const', '&', etc.)"""
def __init__(self, language, alias, premod, postmod):
super(ModifierTypeId, self).__init__(language)
self.alias = alias
self.premod = premod
self.postmod = postmod
def accept(self, visitor): visitor.visit_modifier_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return (ccmp(self,other)
or cmp(self.alias,other.alias)
or cmp(self.premod,other.premod)
or cmp(self.postmod,other.postmod))
def __str__(self):
return "%s%s%s"%(''.join(['%s '%s for s in self.premod]),
str(self.alias),
''.join(self.postmod))
class ArrayTypeId(TypeId):
"""A modifier that adds array dimensions to a type-id."""
def __init__(self, language, alias, sizes):
super(ArrayTypeId, self).__init__(language)
self.alias = alias
self.sizes = sizes
def accept(self, visitor): visitor.visit_array_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return (ccmp(self,other)
or cmp(self.alias,other.alias)
or cmp(self.sizes,other.sizes))
def __str__(self):
return "%s%s"%(str(self.alias),
''.join(['[%d]'%s for s in self.sizes]))
class ParametrizedTypeId(TypeId):
"""Class for parametrized type-id instances."""
def __init__(self, language, template, parameters):
super(ParametrizedTypeId, self).__init__(language)
self.template = template
self.parameters = parameters
def accept(self, visitor): visitor.visit_parametrized_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.template,other.template)
def __str__(self):
return "%s<%s>"%('::'.join(self.template.name),
','.join([str(a) for a in self.parameters]))
class FunctionTypeId(TypeId):
"""Class for function (pointer) types."""
def __init__(self, language, return_type, premod, parameters):
super(FunctionTypeId, self).__init__(language)
self.return_type = return_type
self.premod = premod
self.parameters = parameters
def accept(self, visitor): visitor.visit_function_type_id(self)
class Dictionary(dict):
"""Dictionary extends the builtin 'dict' by adding a lookup method to it."""
def lookup(self, name, scopes):
"""locate 'name' in one of the scopes"""
for s in scopes:
scope = list(s)
while len(scope) > 0:
if self.has_key(scope + name):
return self[scope + name]
else: del scope[-1]
if self.has_key(name):
return self[name]
return None
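# Worked example (illustrative, not in the original file): given
# scopes = [('A', 'B')] and name ('x',), lookup checks the key
# ('A', 'B', 'x'), then ('A', 'x'), and finally the unqualified
# ('x',) before returning None.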
def merge(self, dict):
"""merge in a foreign dictionary, overriding already defined types only
if they are of type 'Unknown'."""
for i in dict.keys():
if self.has_key(i):
if isinstance(self[i], UnknownTypeId):
self[i] = dict[i]
else:
pass
else: self[i] = dict[i]
class Declaration(object):
"""Declaration base class. Every declaration has a name, type,
accessibility and annotations. The default accessibility is DEFAULT except for
C++ where the Parser always sets it to one of the other three. """
#__metaclass__ = Debugger
def __init__(self, file,
| yackj/GameAnalysis | gameanalysis/script/sgboot.py | Python | apache-2.0 | 3,682 | 0 |
"""calculate bootstrap bounds on a sample game"""
import argparse
import json
import sys
import numpy as np
from gameanalysis import bootstrap
from gameanalysis import gameio
from gameanalysis import regret
from gameanalysis import scriptutils
CHOICES = {
'regret': (bootstrap.mixture_regret, regret.mixture_regret),
'surplus': (bootstrap.mixture_welfare, regret.mixed_social_welfare),
}
def add_parser(subparsers):
parser = subparsers.add_parser(
'sgboot', help="""Bootstrap on sample games""", description="""Compute
bootstrap statistics using a sample game with data for every profile in
the support of the subgame and potentially deviations. The return value
is a list with an entry for each mixture in order. Each element is a
dictionary mapping percentile to value.""")
parser.add_argument(
'--input', '-i', metavar='<input-file>', default=sys.stdin,
type=argparse.FileType('r'), help="""Input sample game to run bootstrap
on. (default: stdin)""")
parser.add_argument(
'--output', '-o', metavar='<output-file>', default=sys.stdout,
type=argparse.FileType('w'), help="""Output file for script. (default:
stdout)""")
parser.add_argument(
'profiles', metavar='<profile>', nargs='+', help="""File or string with
profiles from input game for which regrets should be calculated. This
file can be a list or a single profile""")
parser.add_argument(
'-t', '--type', default='regret', choices=CHOICES, help="""What to
return. regret - returns the regret of each profile. surplus - returns
the bootstrap surplus of every profile. (default: %(default)s)""")
parser.add_argument(
'--processes', metavar='num-processes', type=int, help="""The number of
processes when constructing bootstrap samples. Default will use all the
cores available.""")
parser.add_argument(
'--percentiles', '-p', metavar='percentile', type=float, nargs='+',
help="""Percentiles to return in [0, 100]. By default all bootstrap
values will be returned sorted.""")
parser.add_argument(
'--num-bootstraps', '-n', metavar='num-bootstraps', default=101,
type=int, help="""The number of bootstrap samples to acquire. More
samples takes longer, but in general the percentiles requested should
be a multiple of this number minus 1, otherwise there will be some
error due to linear interpolation between points. (default:
%(default)s)""")
parser.add_argument(
'--mean', '-m', action='store_true', help="""Also compute the mean
statistic and return it as well. This will be in each dictionary with
the key 'mean'.""")
return parser
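# Note (illustrative, not in the original file): with the default of 101
# bootstrap samples there are 100 intervals between sorted values, so
# integer percentiles land exactly on a sample: percentile p maps to
# index p / 100 * (101 - 1), e.g. p=95 -> index 95, no interpolation.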
def main(args):
game, serial = gameio.read_samplegame(json.load(args.input))
profiles = np.concatenate([serial.from_prof_json(p)[None] for p
in scriptutils.load_profiles(args.profiles)])
bootf, meanf = CHOICES[args.type]
results = bootf(game, profiles, args.num_bootstraps, args.percentiles,
args.processes)
if args.percentiles is None:
args.percentiles = np.linspace(0, 100, args.num_bootstraps)
percentile_strings = [str(p).rstrip('0').rstrip('.')
for p in args.percentiles]
jresults = [{p: v.item() for p, v in zip(percentile_strings, boots)}
for boots in results]
if args.mean:
for jres, mix in zip(jresults, profiles):
jres['mean'] = meanf(game, mix)
json.dump(jresults, args.output)
args.output.write('\n')
| zakuro9715/luka | luka_maya.py | Python | gpl-3.0 | 2,318 | 0.006471 |
import shutil
import os
import base64
from PySide.QtCore import *
from PySide.QtGui import *
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin, MayaQWidgetDockableMixin
from maya import cmds
from luka import Luka
from luka.gui.qt import TakeSnapshotWidget, SnapshotListWidget
__all__ = ['LukaTakeSnapshotUI', 'LukaUI']
def currentScenePath():
return cmds.file(q=True, sceneName=True).replace('/', '\\')
class LukaTakeSnapshotUI(MayaQWidgetBaseMixin, TakeSnapshotWidget):
def __init__(self, luka=None, *args, **kwargs):
if luka is None:
scene = currentScenePath()
luka = Luka(scene, load=True) if len(scene) > 0 else None
super(LukaTakeSnapshotUI, self).__init__(luka=luka, *args, **kwargs)
def take_snapshot(self):
cmds.file(save=True)
super(LukaTakeSnapshotUI, self).take_snapshot()
class LukaUI(MayaQWidgetBaseMixin, SnapshotListWidget):
def __init__(self, *args, **kwargs):
scene = currentScenePath()
self.luka = Luka(scene, load=True) if len(scene) > 0 else None
super(LukaUI, self).__init__(luka=self.luka, *args, **kwargs)
def initUI(self):
super(LukaUI, self).initUI()
self.newSnapshotButton = QPushButton("New Snapshot", self)
self.newSnapshotButton.clicked.connect(self.showTakeSnapshotUI)
self.layout.addWidget(self.newSnapshotButton)
def showTakeSnapshotUI(self):
ui = LukaTakeSnapshotUI(luka=self.luka)
ui.show()
def restore(self, s):
super(LukaUI, self).restore(s)
v = cmds.confirmDialog(
title='Restore snapshot',
message='All changes including SAVED will be lost. Are you sure?',
button=['OK','Cancel'], defaultButton='OK',
cancelButton='Cancel', dismissString='Cancel')
if v != 'OK':
return
cmds.file(cmds.file(q=True, sceneName=True), open=True, force=True)
def remove(self, s):
v = cmds.confirmDialog(
title='Remove snapshot',
message='Are you sure?',
button=['OK','Cancel'], defaultButton='OK',
cancelButton='Cancel', dismissString='Cancel')
if v != 'OK':
return
super(LukaUI, self).remove(s)
| sambayless/monosat | src/monosat/api/python/tests/__init__.py | Python | mit | 102 | 0 |
import unittest
import tests
import tests.test_logic
import tests.test_graph
import tests.test_output
| nerevu/riko | riko/modules/strreplace.py | Python | mit | 6,865 | 0.000146 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.strreplace
~~~~~~~~~~~~~~~~~~~~~~~
Provides functions for string search-and-replace.
You provide the module with the text string to search for, and what to replace
it with. Multiple search-and-replace pairs can be added. You can specify to
replace all occurrences of the search string, just the first occurrence, or the
last occurrence.
Examples:
basic usage::
>>> from riko.modules.strreplace import pipe
>>>
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from functools import reduce
from . import processor
from riko.bado import coroutine, return_value, itertools as ait
OPTS = {
'listize': True, 'ftype': 'text', 'field': 'content', 'extract': 'rule'}
DEFAULTS = {}
logger = gogo.Gogo(__name__, monolog=True).logger
OPS = {
'first': lambda word, rule: word.replace(rule.find, rule.replace, 1),
'last': lambda word, rule: rule.replace.join(word.rsplit(rule.find, 1)),
'every': lambda word, rule: word.replace(rule.find, rule.replace),
}
def reducer(word, rule):
return OPS.get(rule.param, OPS['every'])(word, rule)
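# Illustrative fold (not in the original file): with the rules
# {find: 'Gr', replace: 'M'} and {find: 'e', replace: 'a', param: 'last'},
# reduce(reducer, rules, 'Greetings') applies each rule in order:
# 'Greetings' -> 'Meetings' -> 'Meatings', matching the doctest in pipe().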
@coroutine
def async_parser(word, rules, skip=False, **kwargs):
""" Asynchronously parses the pipe content
Args:
word (str): The string to transform
rules (List[obj]): the parsed rules (Objectify instances).
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: strreplace)
stream (dict): The original item
Returns:
Deferred: twisted.internet.defer.Deferred item
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from meza.fntools import Objectify
>>>
>>> def run(reactor):
... item = {'content': 'hello world'}
... conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
... rule = Objectify(conf['rule'])
... kwargs = {'stream': item, 'conf': conf}
... d = async_parser(item['content'], [rule], **kwargs)
... return d.addCallbacks(print, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
bye world
"""
if skip:
value = kwargs['stream']
else:
value = yield ait.coop_reduce(reducer, rules, word)
return_value(value)
def parser(word, rules, skip=False, **kwargs):
""" Parses the pipe content
Args:
word (str): The string to transform
rules (List[obj]): the parsed rules (Objectify instances).
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: strreplace)
stream (dict): The original item
Returns:
dict: The item
Examples:
>>> from meza.fntools import Objectify
>>>
>>> item = {'content': 'hello world'}
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> rule = Objectify(conf['rule'])
>>> kwargs = {'stream': item, 'conf': conf}
>>> parser(item['content'], [rule], **kwargs) == 'bye world'
True
"""
return kwargs['stream'] if skip else reduce(reducer, rules, word)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A processor module that asynchronously replaces the text of a field of
an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Returns:
Deferred: twisted.internet.defer.Deferred item with replaced content
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['strreplace'])
... conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
... d = async_pipe({'content': 'hello world'}, conf=conf)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
bye world
"""
return async_parser(*args, **kwargs)
@processor(**OPTS)
def pipe(*args, **kwargs):
"""A processor that replaces the text of a field of an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with replaced content
Examples:
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
>>> rules = [
... {'find': 'Gr', 'replace': 'M'},
... {'find': 'e', 'replace': 'a', 'param': 'last'}]
>>> conf = {'rule': rules}
>>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
>>> item = {'title': 'Greetings'}
>>> next(pipe(item, **kwargs))['result'] == 'Meatings'
True
"""
return parser(*args, **kwargs)
| cbrafter/TRB18_GPSVA | codes/mainCode/ParallelSpecialMac.py | Python | mit | 5,186 | 0.005401 |
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import psutil
import subprocess
import time
import numpy as np
import itertools
# from matplotlib import pyplot
from routeGen import routeGen
from sumoConfigGen import sumoConfigGen
from stripXML import stripXML
import multiprocessing as mp
from glob import glob
#os.chdir(os.path.dirname(sys.argv[0]))
sys.path.insert(0, '../sumoAPI')
import GPSControl
import fixedTimeControl
import actuatedControl
import HybridVAControl
import HVA1
import sumoConnect
import readJunctionData
print(sys.path)
import traci
def simulation(x):
try:
assert len(x) == 4
runtime = time.time()
# Define Simulation Params
modelName, tlLogic, CAVratio, run = x
procID = int(mp.current_process().name[-1])
model = './models/{}_{}/'.format(modelName, procID)
simport = 8812 + procID
N = 10000 # Last time to insert vehicle at (10800=3hrs)
stepSize = 0.1
CAVtau = 1.0
configFile = model + modelName + ".sumocfg"
# Configure the Map of controllers to be run
tlControlMap = {'fixedTime': fixedTimeControl.fixedTimeControl,
'VA': actuatedControl.actuatedControl,
'GPSVA': GPSControl.GPSControl,
'HVA1': HVA1.HybridVA1Control,
'HVA': HybridVAControl.HybridVAControl}
tlController = tlControlMap[tlLogic]
exportPath = '/hardmem/results/' + tlLogic + '/' + modelName + '/'
# Check if model copy for this process exists
if not os.path.isdir(model):
shutil.copytree('./models/{}/'.format(modelName), model)
# this is relative to script not cfg file
if not os.path.exists(exportPath):
os.makedirs(exportPath)
#seed = int(sum([ord(x) for x in modelName + tlLogic]) + int(10*CAVratio) + run)
seed = int(run)
vehNr, lastVeh = routeGen(N, CAVratio, CAVtau,
routeFile=model + modelName + '.rou.xml',
seed=seed)
# Edit the output filenames in sumoConfig
sumoConfigGen(modelName, configFile, exportPath,
CAVratio, stepSize, run, simport)
# Connect to model
connector = sumoConnect.sumoConnect(configFile, gui=False, port=simport)
connector.launchSumoAndConnect()
# Get junction data
jd = readJunctionData.readJunctionData(model + modelName + ".jcn.xml")
junctionsList = jd.getJunctionData()
# Add controller models to junctions
controllerList = []
for junction in junctionsList:
controllerList.append(tlController(junction))
# Step simulation while there are vehicles
while traci.simulation.getMinExpectedNumber():
# connector.runSimulationForSeconds(1)
traci.simulationStep()
for controller in controllerList:
controller.process()
# Disconnect from current configuration
connector.disconnect()
# Strip unused data from results file
ext = '{AVR:03d}_{Nrun:03d}.xml'.format(AVR=int(CAVratio*100), Nrun=run)
for filename in ['queuedata', 'tripinfo']:
target = exportPath+filename+ext
stripXML(target)
runtime = time.gmtime(time.time() - runtime)
print('DONE: {}, {}, Run: {:03d}, AVR: {:03d}%, Runtime: {}\n'
.format(modelName, tlLogic, run, int(CAVratio*100),
time.strftime("%H:%M:%S", runtime)))
return True
except:
# Print if an experiment fails and provide repr of params to repeat run
print('***FAILURE*** ' + repr(x))
return False
################################################################################
# MAIN SIMULATION DEFINITION
################################################################################
models = ['simpleT', 'twinT', 'corridor', 'manhattan']
#tlControllers = ['fixedTime', 'VA', 'HVA', 'GPSVA']
tlControllers = ['HVA']
CAVratios = np.linspace(0, 1, 11)
if len(sys.argv) >=3:
runArgs = sys.argv[1:3]
runArgs = [int(arg) for arg in runArgs]
runArgs.sort()
runStart, runEnd = runArgs
else:
runStart, runEnd = [1, 11]
runIDs = np.arange(runStart, runEnd)
configs = []
# Generate all simulation configs for fixed time and VA
#configs += list(itertools.product(models, ['VA'], [0.], runIDs))
# # Generate runs for CAV dependent controllers
configs += list(itertools.product(models[-1:], ['HVA'], [0.0,0.1,0.2], [10]))
print(len(configs))
nproc = 3
print('Starting simulation on {} cores'.format(nproc))
# define work pool
workpool = mp.Pool(processes=nproc)
# Run simulations in parallel
result = workpool.map(simulation, configs, chunksize=1)
# remove spawned model copies
for rmdir in glob('./models/*_*'):
if os.path.isdir(rmdir):
shutil.rmtree(rmdir)
# Inform of failed experiments
if all(result):
print('Simulations complete, no errors')
else:
print('Failed Experiment Runs:')
for i, j in zip(configs, result):
if not j:
print(i)
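# Hedged example (added, not in the original script): one configuration can be
# exercised on its own for debugging. simulation() derives its process ID from
# the pool worker's name, so even a single run should go through a one-worker pool:
#
#     debugpool = mp.Pool(processes=1)
#     ok = debugpool.map(simulation, [('simpleT', 'HVA', 0.5, 1)])[0]
#     print('run OK' if ok else 'run FAILED')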
|
QuinDiesel/CommitSudoku-Project-Game
|
Euromast/menu.py
|
Python
|
mit
| 6,569
| 0.003349
|
import pygame
music_on = 0
# Set up a button class for later usage
class Button:
def __init__(self, x, y, w, h, img):
self.x = x
self.y = y
self.w = w
self.h = h
self.img = img
self.surface = w * h
    def buttonHover(self):
        mouse = pygame.mouse.get_pos()
        # True only while the cursor is inside this button's rectangle
        if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
            return True
        return False
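# Hedged example (added): Button is a plain rectangular hit-box over an image.
# Hypothetical usage inside an event loop:
#
#     surf = pygame.Surface((100, 50))
#     play = Button(100, 100, 100, 50, surf)
#     if play.buttonHover():
#         pass  # e.g. blit the highlighted variant of the image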
def startMusic():
global music_on
if music_on == 0:
music_on = 1
pygame.mixer.music.load('music.ogg')
pygame.mixer.music.play(-1, 0.0)
def start_program():
# Imports
import Rules
import Game
import Instructions
import Music
import Highscore
# Initialize pygame and music
pygame.init()
startMusic()
# Load in the images
startButtonImg = pygame.image.load('img/StartButtonWhite.png')
startButtonGrayImg = pygame.image.load('img/StartButtonGray.png')
quitButtonImg = pygame.image.load('img/QuitButtonWhite.png')
quitButtonGrayImg = pygame.image.load('img/QuitButtonGray.png')
backGroundImg = pygame.image.load('img/BackDrop.png')
buttonInstructionImg = pygame.image.load('img/ButtonInstructionWhite.png')
buttonInstructionGrayImg = pygame.image.load('img/ButtonInstructionGray.png')
buttonGameRulesImg = pygame.image.load('img/GameRulesWhite.png')
buttonGameRulesGrayImg = pygame.image.load('img/GameRulesGray.png')
MusicSettingsButtonImg = pygame.image.load('img/MSettings.png')
MusicSettingsGrayButtonImg = pygame.image.load('img/MSettingsGray.png')
HighscoreButtonImg = pygame.image.load('img/ButtonHighscoreWhite.png')
HighscoreButtonGrayImg = pygame.image.load('img/ButtonHighscoreGray.png')
# Set up a display with title
gameDisplay = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Euromast V1.0')
# Create instances of the button
quitButton = Button(430, 220, 100, 50, quitButtonImg)
quitButtonGray = Button(430, 220, 100, 50, quitButtonGrayImg)
startButton = Button(280, 220, 100, 50, startButtonImg)
    startButtonGray = Button(280, 220, 100, 50, startButtonGrayImg)
gameRulesButton = Button(280, 326, 250, 50, buttonGameRulesImg)
gameRulesButtonGray = Button(280, 326, 250, 50, buttonGameRulesGrayImg)
buttonInstruction = Button(280, 273, 250, 50, buttonInstructionImg)
buttonInstructionGray = Button(280, 273, 250, 50, buttonInstructionGrayImg)
MSettingsButton = Button(10, 10, 100, 50, MusicSettingsButtonImg)
    MSettingsButtonGray = Button(10, 10, 100, 50, MusicSettingsGrayButtonImg)
HighscoreButton = Button(280, 379, 250, 50, HighscoreButtonImg)
HighscoreButtonGray = Button(280, 379, 250, 50, HighscoreButtonGrayImg)
# Initialize game loop
phase = "menu"
loop = True
while loop:
# Check if user wants to quit
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
gameDisplay.fill((0, 0, 0))
if phase == "menu":
gameDisplay.blit(backGroundImg, (0, 0))
# Display the created buttons
gameDisplay.blit(quitButtonGray.img, (quitButton.x, quitButton.y))
gameDisplay.blit(startButtonGray.img, (startButton.x, startButton.y))
gameDisplay.blit(gameRulesButtonGray.img, (gameRulesButton.x, gameRulesButton.y))
gameDisplay.blit(buttonInstructionGray.img, (buttonInstruction.x, buttonInstruction.y))
            gameDisplay.blit(MSettingsButtonGray.img, (MSettingsButton.x, MSettingsButton.y))
gameDisplay.blit(HighscoreButtonGray.img, (HighscoreButton.x, HighscoreButton.y))
# Check if mouse hovers over button
if Button.buttonHover(quitButtonGray):
gameDisplay.blit(quitButton.img, (quitButton.x, quitButton.y))
# Check if the quit button has been pressed for exit functionality
if Button.buttonHover(quitButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pygame.quit()
quit()
elif Button.buttonHover(startButtonGray):
gameDisplay.blit(startButton.img, (startButton.x, startButton.y))
if Button.buttonHover(startButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "game"
elif Button.buttonHover(gameRulesButtonGray):
gameDisplay.blit(gameRulesButton.img, (gameRulesButton.x, gameRulesButton.y))
if Button.buttonHover(gameRulesButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "rules"
elif Button.buttonHover(buttonInstructionGray):
gameDisplay.blit(buttonInstruction.img, (buttonInstruction.x, buttonInstruction.y))
if Button.buttonHover(buttonInstruction):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "instructions"
elif Button.buttonHover(MSettingsButtonGray):
gameDisplay.blit(MSettingsButton.img, (MSettingsButton.x, MSettingsButton.y))
if Button.buttonHover(MSettingsButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "muzieksettings"
# Button Highscore
elif Button.buttonHover(HighscoreButtonGray):
gameDisplay.blit(HighscoreButton.img, (HighscoreButton.x, HighscoreButton.y))
if Button.buttonHover(HighscoreButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "highscore"
elif phase == "game":
Game.startGame()
elif phase == "rules":
Rules.displayRules()
elif phase == "instructions":
Instructions.startInstructions()
elif phase == "muzieksettings":
Music.menu_settings()
elif phase == 'highscore':
Highscore.highscore_tab()
pygame.display.flip()
start_program()
|
leonyeh/CoffeeScript-Hello-World
|
fabfile.py
|
Python
|
apache-2.0
| 1,250
| 0.0256
|
from fabric.api import hosts, run, sudo, local
from fabric.contrib.console import confirm
from fabric.utils import puts,warn
DEV_PROVISIONING_UUID = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
DEV_SIGN = "Mobi Ten"
DEV_APP_NAME = "G
|
ym Mama"
DEV_APP_ID = 'com.mobiten.gym_mama'
TITANIUM_SDK_VERSION = '1.5.1'
IPHONE_SDK_VERSION = '4.2'
DEVICE_FAMILY = 'iphone'
BUILDER = "/Library/Application\ Support/Titanium/mobilesdk/osx/%s/iphone/builder.py" % (TITANIUM_SDK_VERSION)
def coffee():
local("coffee --watch -o Resources/js/ --compile App/*.coffee ", False)
def debug():
local("%s simulator %s ./ %s %s %s" % (BUILDER,IPHONE_SDK_VERSION,DEV_APP_ID,DEV_APP_NAME,DEVICE_FAMILY), False)
def device():
local("%s install %s ./ %s %s %s" % (BUILDER,IPHONE_SDK_VERSION, DEV_APP_ID, DEV_APP_NAME, DEV_PROVISIONING_UUID, DEV_SIGN))
def package():
print "nothing"
def clean():
    if confirm("Clean will delete any files that are ignored by gitignore\nand also any files not yet tracked by git.\nAre you sure you want to continue?", default=False):
warn("Deleting Untracked and Ignore Files, you have been WARNED!")
local("git clean -d -f")
local("mkdir -p build/iphone")
puts("Project is now clean.")
else:
warn("CLEAN IS CANCELLED.")
|
DavidPowell/OpenModes
|
openmodes/constants.py
|
Python
|
gpl-3.0
| 1,192
| 0.001678
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonators
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
"""
Various useful constants for electromagnetism. Most of these are already
defined in scipy.constants, but are duplicated here for convenience.
"""
import numpy as np
pi = np.pi
c = 299792458.0
mu_0 = 4e-7*pi
epsilon_0 = 1.0 / (mu_0*c*c)
eta_0 = mu_0*c
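# Hedged example (added): a quick consistency check of the definitions above;
# the free-space impedance satisfies eta_0 = sqrt(mu_0/epsilon_0) ~= 376.73 ohm:
#
#     assert np.isclose(eta_0, np.sqrt(mu_0 / epsilon_0))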
|
waynechu/PythonProject
|
dns/wiredata.py
|
Python
|
mit
| 3,751
| 0.000267
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2011,2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Wire Data Helper"""
import dns.exception
from ._compat import binary_type, string_types, PY2
# Figure out what constant python passes for an unspecified slice bound.
# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
# but Python uses 2^63 - 1 as the constant. Rather than making pointless
# extra comparisons, duplicating code, or weakening WireData, we just figure
# out what constant Python will use.
class _SliceUnspecifiedBound(binary_type):
def __getitem__(self, key):
return key.stop
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
_unspecified_bound = _SliceUnspecifiedBound()[1:]
class WireData(binary_type):
# WireData is a binary type with stricter slicing
def __getitem__(self, key):
try:
if isinstance(key, slice):
# make sure we are not going outside of valid ranges,
# do stricter control of boundaries than python does
# by default
start = key.start
stop = key.stop
if PY2:
if stop == _unspecified_bound:
# handle the case where the right bound is unspecified
stop = len(self)
if start < 0 or stop < 0:
raise dns.exception.FormError
# If it's not an empty slice, access left and right bounds
# to make sure they're valid
if start != stop:
super(WireData, self).__getitem__(start)
super(WireData, self).__getitem__(stop - 1)
else:
for index in (start, stop):
if index is None:
continue
elif abs(index) > len(self):
raise dns.exception.FormError
return WireData(super(WireData, self).__getitem__(
slice(start, stop)))
return bytearray(self.unwrap())[key]
except IndexError:
raise dns.exception.FormError
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
def __iter__(self):
i = 0
while 1:
try:
yield self[i]
i += 1
except dns.exception.FormError:
raise StopIteration
def unwrap(self):
return binary_type(self)
def maybe_wrap(wire):
if isinstance(wire, WireData):
return wire
elif isinstance(wire, binary_type):
return WireData(wire)
elif isinstance(wire, string_types):
return WireData(wire.encode())
raise ValueError("unhandled type %s" % type(wire))
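# Hedged example (added): unlike plain bytes, WireData turns out-of-range
# slices into FormError instead of clamping them, which the parser relies on:
#
#     wire = maybe_wrap(b'\x03foo\x00')
#     wire[1:4]    # b'foo' as WireData
#     wire[2:100]  # raises dns.exception.FormError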
|
Treggats/DicewarePassphrase
|
docs/conf.py
|
Python
|
gpl-2.0
| 11,767
| 0.006289
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Diceware documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 26 22:19:13 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
devdir = ''
try:
if os.environ['DEVDIR']:
devdir = os.environ['DEVDIR']
except KeyError:
print("Unable to obtain $DEVDIR from the environment")
exit(-1)
sys.path.insert(0, devdir + '/diceware')
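# Hedged example (added): the build expects the checkout location in the
# DEVDIR environment variable, so a typical invocation looks like:
#
#     DEVDIR=$HOME/dev sphinx-build -b html docs docs/_build/html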
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Diceware'
copyright = '2015, Tonko Mulder'
author = 'Tonko Mulder'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dicewaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Diceware.tex', 'Diceware Documentation',
'Tonko Mulder', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/common/config/committers.py
|
Python
|
bsd-3-clause
| 11,526
| 0.001822
|
# Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors have been moved to contributors.json
class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
self.full_name = name
if isinstance(email_or_emails, str):
self.emails = [email_or_emails]
else:
self.emails = email_or_emails
self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
if isinstance(irc_nickname_or_nicknames, str):
self.irc_nicknames = [irc_nickname_or_nicknames]
else:
self.irc_nicknames = irc_nickname_or_nicknames
self.can_commit = False
self.can_review = False
def bugzilla_email(self):
# FIXME: We're assuming the first email is a valid bugzilla email,
# which might not be right.
return self.emails[0]
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
string = search_string.lower()
        if string in self.full_name.lower():
            return True
        if self.irc_nicknames:
            for nickname in self.irc_nicknames:
                if string in nickname.lower():
return True
for email in self.emails:
if string in email:
return True
return False
def matches_glob(self, glob_string):
if fnmatch.fnmatch(self.full_name, glob_string):
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if fnmatch.fnmatch(nickname, glob_string):
return True
for email in self.emails:
if fnmatch.fnmatch(email, glob_string):
return True
return False
class Committer(Contributor):
def __init__(self, name, email_or_emails, irc_nickname=None):
Contributor.__init__(self, name, email_or_emails, irc_nickname)
self.can_commit = True
class Reviewer(Committer):
def __init__(self, name, email_or_emails, irc_nickname=None):
Committer.__init__(self, name, email_or_emails, irc_nickname)
self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
@staticmethod
@memoized
def load_json():
filesystem = FileSystem()
json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
contributors = json.loads(filesystem.read_text_file(json_path))
return {
'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
}
def contributors(self):
return self._contributors
def committers(self):
return self._committers
def reviewers(self):
return self._reviewers
def _name_to_contributor_map(self):
if not len(self._contributors_by_name):
for contributor in self._contributors:
assert(contributor.full_name)
assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
self._contributors_by_name[contributor.full_name.lower()] = contributor
return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
        return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
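# Hedged example (added): typical lookups against the list, assuming
# contributors.json sits next to webkitpy.common.config as loaded above
# (the names below are hypothetical):
#
#     committers = CommitterList()
#     someone = committers.committer_by_name('Some Committer')
#     matches = committers.contributors_by_search_string('darin*')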
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/callback/default.py
|
Python
|
bsd-3-clause
| 13,463
| 0.002823
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
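# Hedged example (added): this callback is the stdout plugin by default; the
# equivalent explicit selection in ansible.cfg would be:
#
#     [defaults]
#     stdout_callback = default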
from ansible import constants as C
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
        if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS):  # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
if self._play.strategy != 'free':
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
        # machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
self._display.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"PLAY"
else:
msg = u"PLAY [%s]" % name
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
        if isinstance(result._task, TaskInclude):
            return
|
jt6562/XX-Net
|
python27/1.0/lib/SimpleHTTPServer.py
|
Python
|
bsd-2-clause
| 7,826
| 0.001278
|
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import sys
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % di
|
splaypat
|
h)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
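# Hedged example (added): serve the current directory and fetch a listing
# (BaseHTTPServer.test reads the port from the command line):
#
#     $ python -m SimpleHTTPServer 8000
#     $ curl http://localhost:8000/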
|
Korkki/djangocms-text-tinymce
|
djangocms_text_tinymce/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 703
| 0.001422
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('cms', '0004_auto_20141112_1610'),
]
operations = [
migrations.CreateModel(
name='Text',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('body', tinymce.models.HTMLField(verbose_name='body')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
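# Hedged example (added): once the app is installed, the initial migration is
# applied with:
#
#     python manage.py migrate djangocms_text_tinymce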
|
stvstnfrd/edx-platform
|
cms/djangoapps/contentstore/tests/test_i18n.py
|
Python
|
agpl-3.0
| 10,854
| 0.002588
|
# -*- coding: utf-8 -*-
"""
Tests for validate Internationalization and Module i18n service.
"""
import gettext
from unittest import skip
import mock
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.utils import translation
from django.utils.translation import get_language
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient
from cms.djangoapps.contentstore.views.preview import _preview_module_system
from openedx.core.lib.edx_six import get_gettext
from xmodule.modulestore.django import ModuleI18nService
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class FakeTranslations(ModuleI18nService):
"""A test GNUTranslations class that takes a map of msg -> translations."""
    def __init__(self, translations):  # pylint: disable=super-init-not-called
self.translations = translations
def ugettext(self, msgid):
"""
Mock override for ugettext translation operation
"""
return self.translations.get(msgid, msgid)
    gettext = ugettext
@staticmethod
def translator(locales_map): # pylint: disable=method-hidden
"""Build mock translator for the given locales.
Returns a mock gettext.translation function that uses
individual TestTranslations to translate in the given locales.
:param locales_map: A map from locale name to a translations map.
{
'es': {'Hi': 'Hola', 'Bye': 'Adios'},
'zh': {'Hi': 'Ni Hao', 'Bye': 'Zaijian'}
}
"""
def _translation(domain, localedir=None, languages=None): # pylint: disable=unused-argument
"""
return gettext.translation for given language
"""
if languages:
language = languages[0]
if language in locales_map:
return FakeTranslations(locales_map[language])
return gettext.NullTranslations()
return _translation
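# Hedged example (added): the fake translator is meant to be patched over
# gettext.translation, mirroring its use in the tests below:
#
#     fake = FakeTranslations.translator({'es': {'Hello': 'Hola'}})
#     with mock.patch('gettext.translation', fake):
#         gettext.translation('text', languages=['es']).ugettext('Hello')  # 'Hola'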
class TestModuleI18nService(ModuleStoreTestCase):
""" Test ModuleI18nService """
def setUp(self):
""" Setting up tests """
super(TestModuleI18nService, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.test_language = 'dummy language'
self.request = mock.Mock()
self.course = CourseFactory.create()
self.field_data = mock.Mock()
self.descriptor = ItemFactory(category="pure", parent=self.course)
self.runtime = _preview_module_system(
self.request,
self.descriptor,
self.field_data,
)
self.addCleanup(translation.deactivate)
def get_module_i18n_service(self, descriptor):
"""
return the module i18n service.
"""
i18n_service = self.runtime.service(descriptor, 'i18n')
self.assertIsNotNone(i18n_service)
self.assertIsInstance(i18n_service, ModuleI18nService)
return i18n_service
def test_django_service_translation_works(self):
"""
Test django translation service works fine.
"""
class wrap_ugettext_with_xyz(object): # pylint: disable=invalid-name
"""
A context manager function that just adds 'XYZ ' to the front
of all strings of the module ugettext function.
"""
def __init__(self, module):
self.module = module
self.old_ugettext = get_gettext(module)
def __enter__(self):
def new_ugettext(*args, **kwargs):
""" custom function """
output = self.old_ugettext(*args, **kwargs)
return "XYZ " + output
self.module.ugettext = new_ugettext
self.module.gettext = new_ugettext
def __exit__(self, _type, _value, _traceback):
self.module.ugettext = self.old_ugettext
self.module.gettext = self.old_ugettext
i18n_service = self.get_module_i18n_service(self.descriptor)
# Activate french, so that if the fr files haven't been loaded, they will be loaded now.
with translation.override("fr"):
french_translation = translation.trans_real._active.value # pylint: disable=protected-access
# wrap the ugettext functions so that 'XYZ ' will prefix each translation
with wrap_ugettext_with_xyz(french_translation):
self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ dummy language')
# Check that the old ugettext has been put back into place
self.assertEqual(i18n_service.ugettext(self.test_language), 'dummy language')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
@mock.patch('django.utils.translation.gettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_django_translator_in_use_with_empty_block(self):
"""
Test: Django default translator should in use if we have an empty block
"""
i18n_service = ModuleI18nService(None)
self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ-TEST-LANGUAGE')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_message_catalog_translations(self):
"""
Test: Message catalog from FakeTranslation should return required translations.
"""
_translator = FakeTranslations.translator(
{
'es': {'Hello': 'es-hello-world'},
'fr': {'Hello': 'fr-hello-world'},
},
)
localedir = '/translations'
translation.activate("es")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(i18n_service.ugettext('Hello'), 'es-hello-world')
translation.activate("ar")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(get_gettext(i18n_service)('Hello'), 'Hello')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'fr-hello-world')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'es-hello-world')
translation.activate("fr")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(i18n_service.ugettext('Hello'), 'fr-hello-world')
def test_i18n_service_callable(self):
"""
Test: i18n service should be callable in studio.
"""
self.assertTrue(callable(self.runtime._services.get('i18n'))) # pylint: disable=protected-access
class InternationalizationTest(ModuleStoreTestCase):
"""
Tests to validate Internationalization.
"""
CREATE_USER = False
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client
can log them in.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
super(InternationalizationTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.uname = 'testuser'
self.email = '[email protected]'
self.password = 'foo'
        # Create the user so the django Test Client can log them in.
|
vincentping/pythis
|
setup.py
|
Python
|
mit
| 1,142
| 0.016637
|
#! /usr/bin/env python
# -*- coding=utf-8 -*-
from distutils.core import setup
setup(
name='pythis',
version='1.4',
description='zen of python in Simplified Chinese',
url='https://github.com/vincentping/pythis',
author='Vincent Ping',
author_email='[email protected]',
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
        #   5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
py_modules=['pythis'],
keywords='zen python chinese',
)
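# Hedged example (added): build a source distribution and install it locally:
#
#     python setup.py sdist
#     pip install dist/pythis-1.4.tar.gz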
#EOF
|
arunchaganty/aeschines
|
django/twit/management/commands/annotate_features.py
|
Python
|
mit
| 1,154
| 0.006066
|
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import sys
import json
from twit.models import User, Tweet, Mention, UserMention
from javanlp.models import Sentence, Sentiment
from javanlp.util import AnnotationException, annotate_document_with_sentiment
class Command(BaseCommand):
"""Annotate tweets and load into database."""
help = __doc__
#def add_arguments(self, parser):
# import argparse
    #    parser.add_argument('--input', type=argparse.FileType('r'), help="Input file containing a json tweet on each line.")
def handle(self, *args, **options):
for tweet in Tweet.objects.all():
if Sentence.objects.filter(doc_id = tweet.id).exists(): continue
try:
with transaction.atomic():
                    for sentence, sentiment in annotate_document_with_sentiment(tweet.id, tweet.text):
sentence.save()
sentiment.sentence = sentence
sentiment.save()
except AnnotationException:
pass # Couldn't annotate this sentence...
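# Hedged example (added): as a Django management command this runs from the
# project root (the command name comes from the module's file name):
#
#     python manage.py annotate_features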
|
rrafiringa/is210-week-04-warmup
|
task_01.py
|
Python
|
mpl-2.0
| 759
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module provides a function that knows what you mean"""
def know_what_i_mean(wink, numwink=2):
""" Prints "Know what I mean?" with a variable number of winks.
Args:
wink (mixed): Represents a wink.
numwink (int): Wink multiplier. Defaults to 2.
Returns:
        str: Arguments are concatenated in a sentence.
Examples:
        >>> know_what_i_mean('wink')
        'Know what I mean? winkwink, nudge nudge'
>>> know_what_i_mean('wink', 3)
'Know what I mean? winkwinkwink, nudge nudge nudge'
"""
    winks = (wink * numwink).strip()
nudges = ('nudge ' * numwink).strip()
retstr = 'Know what I mean? {}, {}'.format(winks, nudges)
return retstr
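# Hedged example (added): the docstring examples above double as doctests and
# can be checked with:
#
#     python -m doctest task_01.py -v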
|
sushi-irc/tekka
|
signal_handler.py
|
Python
|
bsd-2-clause
| 38,295
| 0.032208
|
# coding:UTF-8
"""
Copyright (c) 2009-2010 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gtk
import gobject
import logging
import string
import time as mtime
from dbus import UInt64
from gettext import gettext as _
from tekka import com
from tekka import config
from tekka import signals
from tekka import gui
from tekka.lib import contrast
from tekka.com import sushi, parse_from
from tekka.signals import connect_signal
from tekka.lib import key_dialog
from tekka.lib import dcc_dialog
from tekka.lib import inline_dialog
from tekka.helper import code
from tekka.helper import color
from tekka.helper import markup
from tekka.typecheck import types
init = False
def setup():
sushi.g_connect("maki-connected", maki_connected_cb)
sushi.g_connect("maki-disconnected", maki_disconnected_cb)
def maki_connected_cb(sushi):
global init
if init == False:
# Message-Signals
connect_signal("message", userMessage_cb)
connect_signal("notice", userNotice_cb)
connect_signal("action", userAction_c
|
b)
connect_signal("away_message", userAwayMessage_cb)
connect_signal("ctcp", userCTCP_cb)
connect_signal("error", userError_cb)
# action signals
connect_signal("part", userPart_cb)
connect_signal("join", userJoin_cb)
connect_signal("names", userNames_cb)
connect_signal("quit", userQuit_cb)
connect_signal("kick", userKick_cb)
connect_signal("nick", userNick_cb)
connect_signal("user_away", userAway_cb)
connect_signal("mode", userMode_cb)
connect_signal("oper", userOper_cb)
# Server-Signals
connect_signal("connect", serverConnect_cb)
connect_signal("connected", serverConnected_cb)
connect_signal("motd", serverMOTD_cb)
connect_signal("dcc_send", dcc_send_cb)
# Channel-Signals
connect_signal("topic", channelTopic_cb)
connect_signal("banlist", channelBanlist_cb)
# Maki signals
connect_signal("shutdown", makiShutdown_cb)
init = True
_add_servers()
def maki_disconnected_cb(sushi):
pass
@types (server = basestring)
def _setup_server(server):
tab = gui.tabs.create_server(server)
gui.tabs.add_tab(None, tab,
update_shortcuts = config.get_bool("tekka","server_shortcuts"))
return tab
def _add_servers():
""" Adds all servers to tekka which are reported by maki. """
# in case we're reconnecting, clear all stuff
gui.widgets.get_object("tab_store").clear()
for server in sushi.servers():
tab = _setup_server(server)
tab.connected = True
_add_channels(tab)
try:
toSwitch = gui.tabs.get_all_tabs()[1]
except IndexError:
return
else:
gui.tabs.switch_to_path(toSwitch.path)
def _add_channels(server_tab):
"""
	Adds all channels to tekka which are reported by maki.
"""
channels = sushi.channels(server_tab.name)
for channel in channels:
add = False
nicks, prefixes = sushi.channel_nicks(server_tab.name, channel)
tab = gui.tabs.search_tab(server_tab.name, channel)
if not tab:
tab = gui.tabs.create_channel(server_tab, channel)
add = True
tab.nickList.clear()
tab.nickList.add_nicks(nicks, prefixes)
for nick in nicks:
# FIXME inefficient → nicks, prefixes, aways = …?
tab.nickList.set_away(nick, sushi.user_away(server_tab.name, nick))
tab.topic = sushi.channel_topic(server_tab.name, channel)
tab.topicsetter = ""
if tab.is_active():
gui.set_topic(markup.markup_escape(tab.topic))
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
# TODO: handle topic setter
tab.joined = True
tab.connected = True
if add:
gui.tabs.add_tab(server_tab, tab, update_shortcuts = False)
tab.print_last_log()
topic = sushi.channel_topic(server_tab.name, channel)
_report_topic(mtime.time(), server_tab.name, channel, topic)
gui.shortcuts.assign_numeric_tab_shortcuts(gui.tabs.get_all_tabs())
def isHighlighted (server_tab, text):
def has_highlight(text, needle):
punctuation = string.punctuation + " \n\t"
needle = needle.lower()
ln = len(needle)
for line in text.split("\n"):
line = line.lower()
i = line.find(needle)
if i >= 0:
if (line[i-1:i] in punctuation
and line[ln+i:ln+i+1] in punctuation):
return True
return False
highlightwords = config.get_list("chatting", "highlight_words", [])
highlightwords.append(server_tab.nick)
for word in highlightwords:
if has_highlight(text, word):
return True
return False
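# Hedged example (added): isHighlighted() only fires when the needle sits on
# punctuation/whitespace boundaries; e.g. with the nick "nemo":
#
#     isHighlighted(server_tab, "hey nemo: ping")  # True  (bounded by ' ' and ':')
#     isHighlighted(server_tab, "anemone")         # False (no word boundary)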
def action_nick_color(nick):
""" return the nick color if color_action_nicks is activated,
otherwise return the default text color
"""
if config.get_bool("colors", "color_action_nicks"):
return color.get_nick_color(nick)
return color.get_color_by_key("text_action")
@types (server = basestring, name = basestring)
def _createTab (server, name):
""" check if tab exists, create it if not, return the tab """
server_tab = gui.tabs.search_tab(server)
if not server_tab:
raise Exception("No server tab in _createTab(%s, %s)" % (server,name))
tab = gui.tabs.search_tab(server, name)
if not tab:
if name[0] in server_tab.support_chantypes:
tab = gui.tabs.create_channel(server_tab, name)
else:
tab = gui.tabs.create_query(server_tab, name)
tab.connected = True
gui.tabs.add_tab(server_tab, tab)
tab.print_last_log()
if tab.name != name:
# the name of the tab differs from the
# real nick, correct this.
tab.name = name
return tab
def _getPrefix(server, channel, nick):
tab = gui.tabs.search_tab(server, channel)
if tab and tab.is_channel():
return tab.nickList.get_prefix(nick)
else:
return ""
@types (tab = gui.tabs.TekkaTab, what = basestring, own = bool)
def _hide_output(tab, what, own = False):
""" Returns bool.
Check if the message type determined by "what"
shall be hidden or not.
tab should be a TekkaServer, -Channel or -Query
"""
if type(tab) == gui.tabs.TekkaChannel:
cat = "channel_%s_%s" % (
tab.server.name.lower(),
tab.name.lower())
elif type(tab) == gui.tabs.TekkaQuery:
cat = "query_%s_%s" % (
tab.server.name.lower(),
tab.name.lower())
else:
return False
hide = what in config.get_list(cat, "hide", [])
hideOwn = what in config.get_list(cat, "hide_own", [])
return ((hide and not own)
or (own and hideOwn)
or (hide and own and not hideOwn))
@types (servertab = gui.tabs.TekkaServer, tab = gui.tabs.TekkaTab,
what = basestring, own = bool)
def _show_output_exclusive(servertab, tab, what, own = False):
""" Returns bool.
Determine if the message identified by -what- shall
be shown in tab -tab- or not.
-servertab- is not used at the moment.
"""
return not _hide_output(tab, what, own = own)
""" Server callbacks """
def serverConnect_cb(time, server):
"""
maki is connecting to a server.
"""
gui.mgmt.set_useable(True)
tab = gui.tabs.search_tab(server)
if not tab:
tab = _setup_server(server)
if tab.connected:
tab.connected = False
channels = gui.tabs.get_all_tabs(servers = [server])[1:]
if channels:
for channelTab in channels:
if channelTab.is_chan
|
eusoubrasileiro/fatiando_seismic
|
cookbook/gravmag_magdir_dipolemagdir.py
|
Python
|
bsd-3-clause
| 2,519
| 0
|
"""
GravMag: Use the DipoleMagDir class to estimate the magnetization direction
of dipoles with known centers
"""
import numpy
from fatiando import mesher, gridder
from fatiando.utils import ang2vec, vec2ang, contaminate
from fatiando.gravmag import sphere
from fatiando.vis import mpl
from fatiando.gravmag.magdir import DipoleMagDir
from fatiando.constants import CM
# Make noise-corrupted synthetic data
inc, dec = -10.0, -15.0 # inclination and declination of the Geomagnetic Field
model = [mesher.Sphere(3000, 3000, 1000, 1000,
{'magnetization': ang2vec(6.0, -20.0, -10.0)}),
mesher.Sphere(7000, 7000, 1000, 1000,
{'magnetization': ang2vec(10.0, 3.0, -67.0)})]
area = (0, 10000, 0, 10000)
x, y, z = gridder.scatter(area, 1000, z=-150, seed=0)
tf = contaminate(sphere.tf(x, y, z, model, inc, dec), 5.0, seed=0)
# Give the centers of the dipoles
centers = [[3000, 3000, 1000], [7000, 7000, 1000]]
# Estimate the magnetization vectors
solver = DipoleMagDir(x, y, z, tf, inc, dec, centers).fit()
# Print the estimated and true dipole moments, inclinations and declinations
print 'Estimated magnetization (intensity, inclination, declination)'
for e in solver.estimate_:
print e
# Plot the fit and the normalized histogram of the residuals
mpl.figure(figsize=(14, 5))
mpl.subplot(1, 2, 1)
mpl.title("Total Field Anomaly (nT)", fontsize=14)
mpl.axis('scaled')
nlevels = mpl.contour(y, x, tf, (50, 50), 15, interp=True, color='r',
label='Observed', linewidth=2.0)
mpl.contour(y, x, solver.predicted(), (50, 50), nlevels, interp=True,
color='b', label='Predicted', style='dashed', linewidth=2.0)
mpl.legend(loc='upper left', shadow=True, prop={'size': 13})
mpl.xlabel('East y (m)', fontsize=14)
mpl.ylabel('North x (m)', fontsize=14)
mpl.subplot(1, 2, 2)
residuals_mean = numpy.mean(solver.residuals())
residuals_std = numpy.std(solver.residuals())
# Each residual is subtracted from the mean and the resulting
# difference is divided by the standard deviation
s = (solver.residuals() - residuals_mean) / residuals_std
mpl.hist(s, bins=21, range=None, normed=True, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None)
mpl.xlim(-4, 4)
mpl.title("mean = %.3f std = %.3f" % (residuals_mean, residuals_std),
fontsize=14)
mpl.ylabel("P(z)", fontsize=14)
mpl.xlabel("z", fontsize=14)
mpl.show()
|
shmish/core-assess
|
core/urls.py
|
Python
|
mpl-2.0
| 549
| 0.014572
|
from django.conf.urls import url
from . import views
from core.views import list_evidence_view, create_evidence_view, list_submissions_view
app_name = 'core'
urlpatterns = [
# ex: /core/
url(r'^$', views.index, name='index'),
url(r'^(?P<corecomp_id>[0-9]+)/$', views.detail, name='detail'),
url(r'^submitted', views.submitted, name='submitted'),
    url(r'^evidence_form/$', create_evidence_view, name='evidence'),
url(r'^CES/$', list_evidence_view, name='CES'),
url(r'^submissions/$', list_submissions_view, name='submissions')
]
|
ekiro/chaps
|
tests/test_instance_scope.py
|
Python
|
mit
| 436
| 0
|
from haps.scopes.instance import InstanceScope
def test_get_object(some_class):
    some_instance = InstanceScope().get_object(some_class)
assert isinstance(some_instance, some_class)
def test_get_multiple_objects(some_class):
scope = InstanceScope()
objects = {scope.get_object(some_class) for _ in range(100)}
assert all(isinstance(o, some_class) for o in objects)
assert len({id(o) for o in objects}) == 100
|
arsgeografica/kinderstadt-registry
|
setup.py
|
Python
|
gpl-3.0
| 1,548
| 0.000646
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import versioneer
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
        errno = pytest.main(self.pytest_args + ' test')
sys.exit(errno)
cmd_classes = versioneer.get_cmdclass()
cmd_classes['test'] = PyTest
setup(
name="kinderstadt-registry",
version=versioneer.get_version(),
cmdclass=cmd_classes,
packages=find_packages(),
install_requires=[
'alembic==0.7.6',
'click==4.0',
        'fake-factory==0.5.2',
'Flask-Migrate==1.4.0',
'Flask-SQLAlchemy==2.0',
'Flask-WTF==0.11',
'Flask==0.10.1',
'path.py==7.3',
'pgcli==0.17.0',
'python-stdnum==1.1',
'SQLAlchemy-Searchable==0.9.3',
'SQLAlchemy-Utils==0.30.12',
],
extras_require={
'devel': [
'ansible',
'autopep8',
'flake8',
'ipython',
],
},
tests_require=[
'pytest',
'testing.postgresql'
],
entry_points={
'console_scripts': [
'registry=registry.cli:main'
]
}
)
|
dedichan/ChemDB
|
principal.py
|
Python
|
gpl-3.0
| 18,412
| 0.01423
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/victorgarric/Documents/INVENTAIRE/principal.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QApplication, QPushButton, QLineEdit, QLabel, QMenuBar, QStatusBar, QMessageBox, QProgressDialog, QFileDialog
import display
import cursor
import listing
import excel
import delete
import manual
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(500, 262)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.button_find_id = QPushButton(self.centralwidget)
self.button_find_id.setGeometry(QtCore.QRect(370, 10, 110, 32))
self.button_find_id.setObjectName(_fromUtf8("button_find_id"))
self.button_find_name = QPushButton(self.centralwidget)
self.button_find_name.setGeometry(QtCore.QRect(370, 50, 110, 32))
self.button_find_name.setObjectName(_fromUtf8("button_find_name"))
self.button_find_cas = QPushButton(self.centralwidget)
self.button_find_cas.setGeometry(QtCore.QRect(370, 90, 110, 32))
self.button_find_cas.setObjectName(_fromUtf8("button_find_cas"))
self.button_find_vpid = QPushButton(self.centralwidget)
self.button_find_vpid.setGeometry(QtCore.QRect(370, 130, 110, 32))
self.button_find_vpid.setObjectName(_fromUtf8("button_find_cas"))
self.button_add = QPushButton(self.centralwidget)
self.button_add.setGeometry(QtCore.QRect(150, 180, 110, 32))
self.button_add.setObjectName(_fromUtf8("button_add"))
self.button_stop = QPushButton(self.centralwidget)
self.button_stop.setGeometry(QtCore.QRect(150, 210, 110, 32))
self.button_stop.setObjectName(_fromUtf8("button_stop"))
self.button_invent = QPushButton(self.centralwidget)
self.button_invent.setGeometry(QtCore.QRect(20, 180, 120, 32))
self.button_invent.setObjectName(_fromUtf8("button_invent"))
self.button_invent_2 = QPushButton(self.centralwidget)
self.button_invent_2.setGeometry(QtCore.QRect(20, 210, 120, 32))
self.button_invent_2.setObjectName(_fromUtf8("button_invent_2"))
self.button_delete = QPushButton(self.centralwidget)
self.button_delete.setGeometry(QtCore.QRect(260, 210, 120, 32))
self.button_delete.setObjectName(_fromUtf8("button_delete"))
self.button_manual = QPushButton(self.centralwidget)
self.button_manual.setGeometry(QtCore.QRect(260, 180, 120, 32))
self.button_manual.setObjectName(_fromUtf8("button_delete"))
self.button_repop = QPushButton(self.centralwidget)
self.button_repop.setGeometry(QtCore.QRect(380, 195, 110, 32))
self.button_repop.setObjectName(_fromUtf8("button_repop"))
self.line_id = QLineEdit(self.centralwidget)
self.line_id.setGeometry(QtCore.QRect(90, 10, 251, 21))
self.line_id.setObjectName(_fromUtf8("line_id"))
self.line_name = QLineEdit(self.centralwidget)
self.line_name.setGeometry(QtCore.QRect(90, 50, 251, 21))
self.line_name.setObjectName(_fromUtf8("line_name"))
self.line_cas = QLineEdit(self.centralwidget)
self.line_cas.setGeometry(QtCore.QRect(90, 90, 251, 21))
self.line_cas.setObjectName(_fromUtf8("line_cas"))
self.line_vpid = QLineEdit(self.centralwidget)
self.line_vpid.setGeometry(QtCore.QRect(90, 130, 251, 21))
self.line_vpid.setObjectName(_fromUtf8("line_cas"))
self.label_id = QLabel(self.centralwidget)
self.label_id.setGeometry(QtCore.QRect(10, 10, 56, 13))
self.label_id.setObjectName(_fromUtf8("label_id"))
self.label_name = QLabel(self.centralwidget)
self.label_name.setGeometry(QtCore.QRect(10, 50, 56, 13))
self.label_name.setObjectName(_fromUtf8("label_name"))
self.label_cas = QLabel(self.centralwidget)
self.label_cas.setGeometry(QtCore.QRect(10, 90, 56, 13))
self.label_cas.setObjectName(_fromUtf8("label_cas"))
self.label_vpid = QLabel(self.centralwidget)
self.label_vpid.setGeometry(QtCore.QRect(10, 130, 56, 13))
self.label_vpid.setObjectName(_fromUtf8("label_cas"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#connection
#self.trigger=QtCore.pyqtSignal()
#self.trigger.connect(self.button_add, QtCore.SIGNAL("released()"), self.new)
#self.connect(self.button_stop, QtCore.SIGNAL("released()"), self.quit)
#self.connect(self.button_find_id, QtCore.SIGNAL("released()"), self.find_id)
#self.connect(self.button_find_name, QtCore.SIGNAL("released()"), self.find_name)
#self.connect(self.button_find_vpid, QtCore.SIGNAL("released()"), self.find_vpid)
#self.connect(self.button_find_cas, QtCore.SIGNAL("released()"), self.find_cas)
#self.connect(self.button_invent, QtCore.SIGNAL("released()"), self.invent)
#self.connect(self.button_invent_2, QtCore.SIGNAL("released()"), self.invent_2)
#self.connect(self.button_delete, QtCore.SIGNAL("released()"), self.delete)
#self.connect(self.button_manual, QtCore.SIGNAL("released()"), self.manu)
#self.connect(self.button_repop, QtCore.SIGNAL("released()"), self.repop)
self.button_stop.clicked.connect(self.quit)
self.button_add.clicked.connect(self.new)
self.button_find_id.clicked.connect(self.find_id)
self.button_find_name.clicked.connect(self.find_name)
self.button_find_vpid.clicked.connect(self.find_vpid)
self.button_find_cas.clicked.connect(self.find_cas)
self.button_invent.clicked.connect(self.invent)
self.button_invent_2.clicked.connect(self.invent_2)
self.button_delete.clicked.connect(self.delete)
self.button_manual.clicked.connect(self.manu)
self.button_repop.clicked.connect(self.repop)
def invent(self) :
prog=QProgressDialog("Compiling inventory...","Cancel",0,100,self)
prog.open()
allconn=cursor.connection()
        curs=allconn[0]
data=allconn[1]
curs.execute("""SELECT * FROM "main"."chem" WHERE "id" > 0 """)
store=curs.fetchall()
a=excel.makeinvent(store)
a.begin()
internal=0
if prog.wasCanceled() :
return None
while internal != 100 :
try :
internal=(a.returnid()/len(store))*100
except :
internal=100
prog.setValue(internal)
if prog.wasCanceled() :
return None
b=a.returnbook()
try :
fname=QFileDialog.getSaveFileName(self, 'Save File', '/','Excel File (*.xls)')[0]
b.save(fname)
            QMessageBox.information(self, "Info", "Inventory was saved successfully.")
if prog.wasCanceled() :
return None
except :
            QMessageBox.information(self, "Info", "Inventory was not saved.")
def invent_2 (self) :
|
datapythonista/pandas
|
pandas/core/ops/mask_ops.py
|
Python
|
bsd-3-clause
| 5,124
| 0.000195
|
"""
Ops for masked arrays.
"""
from typing import (
Optional,
Union,
)
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
def kleene_or(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
    Boolean ``or`` using Kleene logic.
Values are NA where we have ``NA | NA`` or ``NA | False``.
``NA | True`` is considered True.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical or, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe because
# A | B == B | A
if left_mask is None:
return kleene_or(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="or")
if right is libmissing.NA:
result = left.copy()
else:
result = left | right
if right_mask is not None:
        # output is unknown where (False | NA), (NA | False), (NA | NA)
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (
(left_false & right_mask)
| (right_false & left_mask)
| (left_mask & right_mask)
)
else:
if right is True:
mask = np.zeros_like(left_mask)
elif right is libmissing.NA:
mask = (~left & ~left_mask) | left_mask
else:
# False
mask = left_mask.copy()
return result, mask
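# Sketch of the scalar-NA case (assumed inputs, not part of the module):
# kleene_or(np.array([True, False]), libmissing.NA,
#           np.array([False, False]), None)
# gives result [True, False] and mask [False, True]: True | NA is True,
# while False | NA stays unknown.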
def kleene_xor(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``xor`` using Kleene logic.
This is the same as ``or``, with the following adjustments
* True, True -> False
* True, NA -> NA
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical xor, and the new mask.
"""
if left_mask is None:
return kleene_xor(right, left, right_mask, left_mask)
raise_for_nan(right, method="xor")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
# error: Incompatible types in assignment (expression has type
# "Union[bool, Any]", variable has type "ndarray")
result = left ^ right # type: ignore[assignment]
if right_mask is None:
if right is libmissing.NA:
mask = np.ones_like(left_mask)
else:
mask = left_mask.copy()
else:
mask = left_mask | right_mask
return result, mask
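# Sketch (assumed inputs): kleene_xor(np.array([True]), libmissing.NA,
# np.array([False]), None) yields result [False] with mask [True],
# i.e. True ^ NA is NA because the unknown operand can flip the outcome.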
def kleene_and(
left: Union[bool, libmissing.NAType, np.ndarray],
right: Union[bool, libmissing.NAType, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``and`` using Kleene logic.
Values are ``NA`` for ``NA & NA`` or ``True & NA``.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
        The result of the logical and, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe because
    # A & B == B & A.
if left_mask is None:
return kleene_and(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="and")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
result = left & right
if right_mask is None:
# Scalar `right`
if right is libmissing.NA:
mask = (left & ~left_mask) | left_mask
else:
mask = left_mask.copy()
if right is False:
# unmask everything
mask[:] = False
else:
# unmask where either left or right is False
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (left_mask & ~right_false) | (right_mask & ~left_false)
return result, mask
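# Sketch (assumed inputs): kleene_and(np.array([True, False]), libmissing.NA,
# np.array([False, False]), None) gives result [False, False] and mask
# [True, False]: True & NA is NA, but False & NA is definitely False.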
def raise_for_nan(value, method: str):
if lib.is_float(value) and np.isnan(value):
raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
|
andrewsmedina/django
|
django/db/models/sql/aggregates.py
|
Python
|
bsd-3-clause
| 4,116
| 0.001944
|
"""
Classes to represent the default SQL aggregate functions
"""
import copy
from django.db.models.fields import IntegerField, FloatField
# Fake fields used to identify aggregate types in data-conversion operations.
ordinal_aggregate_field = IntegerField()
computed_aggregate_field = FloatField()
class Aggregate(object):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
* is_computed, a boolean indicating if this output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
def relabeled_clone(self, change_map):
clone = copy.copy(self)
if isinstance(self.col, (list, tuple)):
clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
return clone
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL with parameters."
params = []
if hasattr(self.col, 'as_sql'):
            field_name, params = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
            field_name = self.col
substitutions = {
'function': self.sql_function,
'field': field_name
}
substitutions.update(self.extra)
return self.sql_template % substitutions, params
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
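# Illustrative rendering (hypothetical column): Count(('books', 'id'),
# distinct=True).as_sql(qn, connection) substitutes into sql_template as
# roughly COUNT(DISTINCT "books"."id"), since __init__ maps distinct=True
# to the 'DISTINCT ' extra.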
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
|
froyobin/ironic
|
ironic/drivers/modules/drac/common.py
|
Python
|
apache-2.0
| 4,782
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common functionalities shared between different DRAC modules.
"""
from oslo.utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers.modules.drac import client as drac_client
pywsman = importutils.try_import('pywsman')
REQUIRED_PROPERTIES = {
'drac_host': _('IP address or hostname of the DRAC card. Required.'),
'drac_username': _('username used for authentication. Required.'),
'drac_password': _('password used for authentication. Required.')
}
OPTIONAL_PROPERTIES = {
'drac_port': _('port used for WS-Man endpoint; default is 443. Optional.'),
'drac_path': _('path used for WS-Man endpoint; default is "/wsman". '
'Optional.'),
'drac_protocol': _('protocol used for WS-Man endpoint; one of http, https;'
' default is "https". Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
# ReturnValue constants
RET_SUCCESS = '0'
RET_ERROR = '2'
RET_CREATED = '4096'
def parse_driver_info(node):
"""Parse a node's driver_info values.
Parses the driver_info of the node, reads default values
and returns a dict containing the combination of both.
:param node: an ironic node object.
:returns: a dict containing information from driver_info
and default values.
:raises: InvalidParameterValue if some mandatory information
is missing on the node or on invalid inputs.
"""
driver_info = node.driver_info
parsed_driver_info = {}
error_msgs = []
for param in REQUIRED_PROPERTIES:
try:
parsed_driver_info[param] = str(driver_info[param])
except KeyError:
error_msgs.append(_("'%s' not supplied to DracDriver.") % param)
except UnicodeEncodeError:
error_msgs.append(_("'%s' contains non-ASCII symbol.") % param)
parsed_driver_info['drac_port'] = driver_info.get('drac_port', 443)
try:
parsed_driver_info['drac_path'] = str(driver_info.get('drac_path',
'/wsman'))
except UnicodeEncodeError:
error_msgs.append(_("'drac_path' contains non-ASCII symbol."))
try:
parsed_driver_info['drac_protocol'] = str(
driver_info.get('drac_protocol', 'https'))
except UnicodeEncodeError:
error_msgs.append(_("'drac_protocol' contains non-ASCII symbol."))
try:
parsed_driver_info['drac_port'] = int(parsed_driver_info['drac_port'])
except ValueError:
error_msgs.append(_("'drac_port' is not an integer value."))
if error_msgs:
msg = (_('The following errors were encountered while parsing '
'driver_info:\n%s') % '\n'.join(error_msgs))
raise exception.InvalidParameterValue(msg)
return parsed_driver_info
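# Hedged example (made-up node): a driver_info of
# {'drac_host': '10.0.0.5', 'drac_username': 'root', 'drac_password': 'pw'}
# parses to those keys plus the defaults drac_port=443, drac_path='/wsman'
# and drac_protocol='https'; a missing required key raises
# InvalidParameterValue instead.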
def get_wsman_client(node):
"""Return a DRAC client object.
Given an ironic node object, this method gives back a
Client object which is a wrapper for pywsman.Client.
:param node: an ironic node object.
:returns: a Client object.
:raises: InvalidParameterValue if some mandatory information
is missing on the node or on invalid inputs.
"""
driver_info = parse_driver_info(node)
client = drac_client.Client(**driver_info)
return client
def find_xml(doc, item, namespace, find_all=False):
"""Find the first or all elements in a ElementTree object.
:param doc: the element tree object.
:param item: the element name.
:param namespace: the namespace of the element.
:param find_all: Boolean value, if True find all elements, if False
                     find only the first one. Defaults to False.
:returns: if find_all is False the element object will be returned
if found, None if not found. If find_all is True a list of
element objects will be returned or an empty list if no
elements were found.
"""
    query = ('.//{%(namespace)s}%(item)s' % {'namespace': namespace,
'item': item})
if find_all:
return doc.findall(query)
return doc.find(query)
|
jinzekid/codehub
|
python/lyutil/ly_proxy_test.py
|
Python
|
gpl-3.0
| 3,046
| 0.009321
|
# Author: Jason Lu
import urllib.request
from bs4 import BeautifulSoup
import time
req_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'en-us',
'Connection':'keep-alive',
'Referer':'http://www.baidu.com/'
}
req_timeout = 5
testUrl = "http://www.baidu.com/"
testStr = "wahaha"
file1 = open('proxy.txt' , 'w')
# url = ""
# req = urllib2.Request(url,None,req_header)
# jsondatas = urllib2.urlopen(req,None,req_timeout).read()
# cookies = urllib2.HTTPCookieProcessor()
# Keep the login session alive by handling cookies
import http.cookiejar
# Create a CookieJar object with http.cookiejar.CookieJar()
cjar = http.cookiejar.CookieJar()
cookies = urllib.request.HTTPCookieProcessor(cjar)
checked_num = 0
grasp_num = 0
for page in range(1, 3):
# req = urllib2.Request('http://www.xici.net.co/nn/' + str(page), None, req_header)
# html_doc = urllib2.urlopen(req, None, req_timeout).read()
req = urllib.request.Request('http://www.xici.net.co/nn/' + str(page))
req.add_header('User-Agent',
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1")
html_doc = urllib.request.urlopen(req).read().decode('utf-8')
# html_doc = urllib2.urlopen('http://www.xici.net.co/nn/' + str(page)).read()
soup = BeautifulSoup(html_doc)
trs = soup.find('table', id='ip_list').find_all('tr')
print(trs)
for tr in trs[1:]:
tds = tr.find_all('td')
ip = tds[1].text.strip()
port = tds[2].text.strip()
protocol = tds[5].text.strip()
if protocol == 'HTTP' or protocol == 'HTTPS':
#of.write('%s=%s:%s\n' % (protocol, ip, port))
            print('%s=%s:%s' % (protocol, ip, port))
grasp_num +=1
proxyHandler = urllib.request.ProxyHandler({"http": r'http://%s:%s' % (ip, port)})
opener = urllib.request.build_opener(cookies, proxyHandler)
opener.addheaders = [('User-Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
t1 = time.time()
try:
req = opener.open(testUrl, timeout=req_timeout)
result = req.read()
timeused = time.time() - t1
pos = result.find(testStr)
if pos > 1:
file1.write(protocol+"\t"+ip+"\t"+port+"\n")
checked_num+=1
print(checked_num, grasp_num)
else:
continue
except Exception as e:
print(str(e))
continue
file1.close()
print(checked_num,grasp_num)
|
bcsaller/layercake
|
tests/utils.py
|
Python
|
apache-2.0
| 671
| 0.00149
|
from contextlib import contextmanager
import pkg_resources
import os
def local_stream(name):
return pkg_resources.resource_stream(__name__, name)
def local_file(name):
    return pkg_resources.resource_filename(__name__, name)
@contextmanager
def Environ(**kwargs):
    orig = os.environ.copy()
replace = set(kwargs.keys()) & set(orig.keys())
removes = set(kwargs.keys()) - set(orig.keys())
try:
os.environ.update(kwargs)
yield
finally:
for r in removes:
os.environ.pop(r)
for r in replace:
os.environ[r] = orig[r]
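# Usage sketch (hypothetical variable name):
# with Environ(LAYER_PATH='/tmp/layers'):
#     assert os.environ['LAYER_PATH'] == '/tmp/layers'
# on exit the variable is removed (or restored to its previous value).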
class O(dict):
def __getattr__(self, key):
return self[key]
|
adusca/treeherder
|
treeherder/model/fields.py
|
Python
|
mpl-2.0
| 1,086
| 0
|
from django.db import models
# Django doesn't support big auto fields out of the box, see
# https://code.djangoproject.com/ticket/14286.
# This is a stripped down version of the BoundedBigAutoField from Sentry.
class BigAutoField(models.AutoField):
description = "Big Integer"
def db_type(self, connection):
engine = connection.settings_dict['ENGINE']
if 'mysql' in engine:
return "bigint AUTO_INCREMENT"
elif 'postgres' in engine:
return "bigserial"
else:
            raise NotImplementedError
def get_related_db_type(self, connection):
return models.BigIntegerField().db_type(connection)
def get_internal_type(self):
return "BigIntegerField"
class FlexibleForeignKey(models.ForeignKey):
def db_type(self, connection):
# This is required to support BigAutoField
        rel_field = self.related_field
if hasattr(rel_field, 'get_related_db_type'):
return rel_field.get_related_db_type(connection)
return super(FlexibleForeignKey, self).db_type(connection)
|
bronycub/sugarcub
|
users/validators.py
|
Python
|
gpl-3.0
| 1,069
| 0.008419
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
from django.core import validators
@deconstructible
class SkypeValidator:
message = _('Enter a valid URL.')
code = 'invalid'
def __call__(self, value):
        if not value.startswith('skype:'):
raise ValidationError(self.message, code=self.code)
@deconstructible
class UrlValidator:
message = _('Enter a valid URL.')
code = 'invalid'
validators = [validators.URLValidator(), validators.EmailValidator(), SkypeValidator()]
def __call__(self, value):
        def apply_validator(value):
            def _apply_validator(validator):
                try:
                    validator(value)
                    return True
                except ValidationError:
                    return False
            return _apply_validator
        # the value is accepted if at least one validator passes
        if not any(map(apply_validator(value), self.validators)):
            raise ValidationError(self.message, code=self.code)
|
PythonScientists/Shape
|
main/module/about/views.py
|
Python
|
apache-2.0
| 173
| 0.017341
|
# -*-coding:utf-8-*-
from . import about_blueprint
from flask import render_template
@about_blueprint.route("/")
def about_index():
return render_template("about.html")
|
heracek/django-nonrel
|
django/core/cache/__init__.py
|
Python
|
bsd-3-clause
| 6,144
| 0.001302
|
"""
Caching framework.
This package defines set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
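# Worked example (illustrative URI):
# parse_backend_uri('memcached://127.0.0.1:11211/?timeout=30')
# returns ('memcached', '127.0.0.1:11211', {'timeout': '30'}) -- the
# trailing slash is stripped and the query string becomes the params dict.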
if not settings.CACHES:
import warnings
warnings.warn(
"settings.CACHE_* is deprecated; use settings.CACHES instead.",
PendingDeprecationWarning
)
# Mapping for new-style cache backend api
backend_classes = {
'memcached': 'memcached.CacheClass',
'locmem': 'locmem.LocMemCache',
'file': 'filebased.FileBasedCache',
'db': 'db.DatabaseCache',
'dummy': 'dummy.DummyCache',
}
engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
if engine in backend_classes:
engine = 'django.core.cache.backends.%s' % backend_classes[engine]
defaults = {
'BACKEND': engine,
'LOCATION': host,
}
defaults.update(params)
settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
        args = conf.copy()
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
# Trying to import the given backend, in case it's a dotted path
        mod_path, cls_name = backend.rsplit('.', 1)
try:
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
|
Oscarbralo/TopBlogCoder
|
SRMs/SRM180PY/250/250.py
|
Python
|
mit
| 326
| 0.003067
|
class DinkyFish:
    def monthsUntilCrowded(self, tankVolume, maleNum, femaleNum):
months = 0
while maleNum + femaleNum <= (tankVolume * 2):
minFishes = min(maleNum, femaleNum)
maleNum += minFishes
            femaleNum += minFishes
months += 1
return months
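# Quick trace (made-up inputs): monthsUntilCrowded(2, 1, 1) allows up to
# 4 fish; the population goes 2 -> 4 -> 8, exceeding capacity after the
# second month, so the method returns 2.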
|
gnss-lab/gnss-tec
|
tests/conftest.py
|
Python
|
mit
| 21,163
| 0.002755
|
import datetime
from io import StringIO
from pytest import fixture
from gnss_tec import ObsFileV2, ObsFileV3
RNX_V2 = """\
2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE
teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE
ASPA MARKER NAME
50503S006 MARKER NUMBER
Giovanni Sella NGS OBSERVER / AGENCY
4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS
30517456 TRM55971.00 NONE ANT # / TYPE
-6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ
0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N
1 1 WAVELENGTH FACT L1/2
11 L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV
S2 S5 # / TYPES OF OBSERV
    30.0000                                                 INTERVAL
18 LEAP SECONDS
2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS
END OF HEADER
4 5
 ASPA (COGO code)                                           COMMENT
0.000 (antenna height) COMMENT
-14.32609534 (latitude) COMMENT
-170.72243361 (longitude) COMMENT
0053.667 (elevation) COMMENT
17 7 6 0 0 0.0000000 0 18G18R15G31G03R06G16G01R09G25G22R05G29-0.000392832
R16G26R04G10G32G14
129609926.497 6 100994793.77642 24663965.641
24663974.148 38.600 17.800
120505665.941 6 93726662.377 6 22550992.016 22550991.051
22550998.707 41.700 39.400
113401304.102 8 88364763.776 7 21579566.188
21579571.359 21579571.531 50.300 46.200
132701874.619 5 103404140.724 5 25252336.969
25252347.414 33.700 34.400
119263436.899 6 92760508.769 5 22349925.250 22349924.051
22349927.602 38.100 35.100
116184238.344 7 90533145.56945 22109098.484
22109105.234 45.600 33.200
129470789.804 6 100886299.783 6 24637455.992
24637464.797 24637466.082 37.100 37.300
114931261.449 7 89391042.915 7 21522933.477 21522934.391
21522939.465 45.900 43.900
131228058.513 6 102255791.926 6 24971881.508
24971889.785 24971890.309 38.400 36.300
119420387.410 7 93054887.93344 22724945.750
22724949.512 43.200 29.400
104095002.622 7 80962839.312 7 19473125.563 19473125.184
19473131.082 43.900 42.200
131232157.556 6 102258880.431 5 24972645.516
24972654.613 24972654.199 38.300 34.800
106080541.169 7 82507163.624 7 19858497.734 19858498.063
19858503.371 44.000 42.800
108649979.923 8 84662364.399 8 20675386.594
20675395.574 20675395.805 48.400 51.100
112909742.180 8 87818759.471 7 21085104.797 21085103.715
21085108.438 48.100 44.700
115661530.779 8 90125872.381 7 22009648.641
22009657.211 22009657.441 48.500 47.600
115505192.609 7 90004072.298 7 21979890.539
21979899.461 21979899.281 47.500 47.600
113491920.675 7 88435293.67545 21596788.523
21596794.160 46.100 32.700
17 7 6 0 1 0.0000000 0 18 18R15G31G03R06G16G01R09G25G22R05G29
R16G26R04G10G32G14
129714491.092 6 101076272.53043 24683863.789
24683872.414 39.200 18.600
120613774.752 7 93810746.963 6 22571222.727 22571222.703
22571230.711 42.500 39.700
113438416.847 8 88393682.795 7 21586628.695
21586633.398 21586633.336 50.300 46.600
132599072.037 5 103324034.869 6 25232775.227
25232785.262 25232781.449 34.600 36.400
119149217.493 6 92671671.486 5 22328518.555 22328518.293
22328522.430 38.300 35.000
116099973.097 7 90467484.36845 22093063.586
22093069.574 45.900 33.100
129470125.015 6 100885781.713 6 24637328.750
24637339.078 24637340.129 36.200 37.000
114869248.525 7 89342810.692 7 21511321.695 21511321.555
21511325.922 46.600 44.500
131324730.690 6 102331120.877 6 24990277.883
24990285.867 24990286.273 38.900 37.100
119340545.428 7 92992673.42545 22709753.359
22709755.480 46.100 31.200
104062372.020 7 80937459.929 7 19467020.781 19467021.227
19467027.590 44.100 42.700
131219712.462 6 102249182.977 6 24970277.469
24970285.688 24970286.094 39.900 36.900
106112572.378 7 82532076.791 7 19864493.438 19864493.176
19864498.133 43.700 42.700
108609118.768 8 84630524.539 8 20667611.063
20667619.516 20667619.746 48.500 51.000
112981641.858 7 87874681.372 7 21098530.055 21098530.383
21098535.574 47.800 45.400
115746528.568 8 90192104.390 7 22025823.547
22025831.473 22025832.172 49.600 47.100
115506300.717 8 90004935.735 7 21980103.695
21980111.211 21980110.855 48.800 47.200
113479270.250 7 88425436.16745 21594381.758
21594386.398 45.500 32.700
"""
RNX_HEADER_V2 = """\
2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE
teqc 2016Apr1 BKG Frankfurt 20170707 00:23:29UTCPGM / RUN BY / DATE
ADIS MARKER NAME
31502M001 MARKER NUMBER
NTRIPS05-769322-52 ADDIS ABABA UNIVERSITY OBSERVER / AGENCY
MT300102915 JPS LEGACY 2.6.1 JAN,10,2008 REC # / TYPE / VERS
0220173805 TRM29659.00 NONE ANT # / TYPE
4913652.8072 3945922.6351 995383.2858 APPROX POSITION XYZ
0.0010 0.0000 0.0000 ANTENNA: DELTA H/E/N
1 1 WAVELENGTH FACT L1/2
21 L1 P1 C1 L2 P2 D1 D2
|
red-hood/calendarserver
|
txweb2/test/test_http_headers.py
|
Python
|
apache-2.0
| 32,998
| 0.004273
|
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{txweb2.http_headers}.
"""
from twisted.trial import unittest
import random
import time
from txweb2 import http_headers
from txweb2.http_headers import Cookie, HeaderHandler, quoteString, generateKeyValues
from twisted.python import util
class parsedvalue:
"""Marker class"""
def __init__(self, raw):
self.raw = raw
def __eq__(self, other):
return isinstance(other, parsedvalue) and other.raw == self.raw
class HeadersAPITest(unittest.TestCase):
"""Make sure the public API exists and works."""
def testRaw(self):
rawvalue = ("value1", "value2")
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.setRawHeaders("test", rawvalue)
self.assertEquals(h.hasHeader("test"), True)
self.assertEquals(h.getRawHeaders("test"), rawvalue)
self.assertEquals(list(h.getAllRawHeaders()), [('Test', rawvalue)])
self.assertEquals(h.getRawHeaders("foobar"), None)
h.removeHeader("test")
self.assertEquals(h.getRawHeaders("test"), None)
def testParsed(self):
parsed = parsedvalue(("value1", "value2"))
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.setHeader("test", parsed)
self.assertEquals(h.hasHeader("test"), True)
self.assertEquals(h.getHeader("test"), parsed)
self.assertEquals(h.getHeader("foobar"), None)
h.removeHeader("test")
self.assertEquals(h.getHeader("test"), None)
def testParsedAndRaw(self):
def parse(raw):
return parsedvalue(raw)
def generate(parsed):
return parsed.raw
rawvalue = ("value1", "value2")
rawvalue2 = ("value3", "value4")
handler = HeaderHandler(parsers={'test': (parse,)},
generators={'test': (generate,)})
h = http_headers.Headers(handler=handler)
h.setRawHeaders("test", rawvalue)
self.assertEquals(h.getHeader("test"), parsedvalue(rawvalue))
h.setHeader("test", parsedvalue(rawvalue2))
self.assertEquals(h.getRawHeaders("test"), rawvalue2)
# Check the initializers
h = http_headers.Headers(rawHeaders={"test": rawvalue},
handler=handler)
self.assertEquals(h.getHeader("test"), parsedvalue(rawvalue))
h = http_headers.Headers({"test": parsedvalue(rawvalue2)},
handler=handler)
self.assertEquals(h.getRawHeaders("test"), rawvalue2)
def testImmutable(self):
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.makeImmutable()
self.assertRaises(AttributeError, h.setRawHeaders, "test", [1])
self.assertRaises(AttributeError, h.setHeader, "test", 1)
self.assertRaises(AttributeError, h.removeHeader, "test")
class TokenizerTest(unittest.TestCase):
"""Test header list parsing functions."""
def testParse(self):
parser = lambda val: list(http_headers.tokenize([val, ]))
Token = http_headers.Token
tests = (('foo,bar', ['foo', Token(','), 'bar']),
('FOO,BAR', ['foo', Token(','), 'bar']),
(' \t foo \t bar \t , \t baz ', ['foo', Token(' '), 'bar', Token(','), 'baz']),
('()<>@,;:\\/[]?={}', [Token('('), Token(')'), Token('<'), Token('>'), Token('@'), Token(','), Token(';'), Token(':'), Token('\\'), Token('/'), Token('['), Token(']'), Token('?'), Token('='), Token('{'), Token('}')]),
(' "foo" ', ['foo']),
('"FOO(),\\"BAR,"', ['FOO(),"BAR,']))
raiseTests = ('"open quote', '"ending \\', "control character: \x127", "\x00", "\x1f")
for test, result in tests:
self.assertEquals(parser(test), result)
for test in raiseTests:
self.assertRaises(ValueError, parser, test)
def testGenerate(self):
pass
def testRoundtrip(self):
pass
def atSpecifiedTime(when, func):
def inner(*a, **kw):
orig = time.time
time.time = lambda: when
try:
return func(*a, **kw)
finally:
time.time = orig
return util.mergeFunctionMetadata(func, inner)
def parseHeader(name, val):
head = http_headers.Headers(handler=http_headers.DefaultHTTPHandler)
head.setRawHeaders(name, val)
return head.getHeader(name)
parseHeader = atSpecifiedTime(999999990, parseHeader) # Sun, 09 Sep 2001 01:46:30 GMT
def generateHeader(name, val):
head = http_headers.Headers(handler=http_headers.DefaultHTTPHandler)
head.setHeader(name, val)
return head.getRawHeaders(name)
generateHeader = atSpecifiedTime(999999990, generateHeader) # Sun, 09 Sep 2001 01:46:30 GMT
class HeaderParsingTestBase(unittest.TestCase):
def runRoundtripTest(self, headername, table):
"""
Perform some assertions about the behavior of parsing and
generating HTTP headers. Specifically: parse an HTTP header
value, assert that the parsed form contains all the available
information with the correct structure; generate the HTTP
header value from the parsed form, assert that it contains
certain literal strings; finally, re-parse the generated HTTP
header value and assert that the resulting structured data is
the same as the first-pass parsed form.
@type headername: C{str}
@param headername: The name of the HTTP header L{table} contains values for.
@type table: A sequence of tuples describing inputs to and
outputs from header parsing and generation. The tuples may be
either 2 or 3 elements long. In either case: the first
element is a string representing an HTTP-format header value;
the second element is a dictionary mapping names of parameters
to values of those parameters (the parsed form of the header).
If there is a third element, it is a list of strings which
must occur exactly in the HTTP header value
string which is re-generated from the parsed form.
"""
        for row in table:
if len(row) == 2:
rawHeaderInput, parsedHeaderData = row
requiredGeneratedElements = []
elif len(row) == 3:
                rawHeaderInput, parsedHeaderData, requiredGeneratedElements = row
assert isinstance(requiredGeneratedElements, list)
# parser
parsed = parseHeader(headername, [rawHeaderInput, ])
self.assertEquals(parsed, parsedHeaderData)
regeneratedHeaderValue = generateHeader(headername, parsed)
if requiredGeneratedElements:
# generator
for regeneratedElement in regeneratedHeaderValue:
reqEle = requiredGeneratedElements[regeneratedHeaderValue.index(regeneratedElement)]
elementIndex = regeneratedElement.find(reqEle)
self.assertNotEqual(
elementIndex, -1,
"%r did not appear in generated HTTP header %r: %r" % (reqEle,
headername,
regeneratedElement))
# parser/generator
reparsed = parseHeader(headername, regeneratedHeaderValue)
self.assertEquals(parsed, reparsed)
def invalidParseTest(self, headername, values):
for val in values:
parsed = parseHeader(headername, val)
self.assertEquals(parsed, None)
class GeneralHeaderParsingTests(HeaderParsingTestBase):
def testCacheControl(self):
table = (
("no-cache",
{'no-cache': None}),
("no-cache, no-store, max-age=5, max-stale=3, min-fresh=5, no-transform, only-if-cached, blahblah-extension-thingy",
{'no-cache': None,
'no-store': None,
|
lsbardel/flow
|
flow/conf/__init__.py
|
Python
|
bsd-3-clause
| 1,191
| 0.012594
|
import os
from jflow.utils.importlib import import_module
from jflow.conf import global_settings
#If django is installed used the django setting object
try:
from django.conf import settings as django_settings
except:
django_settings = None
ENVIRONMENT_VARIABLE = "JF
|
LOW_SETTINGS_MODULE"
class Settings(object):
pass
def fill(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
            if not hasattr(self,setting):
setattr(self, setting, getattr(global_settings, setting))
return self
def get_settings():
settings_module = os.environ.get(ENVIRONMENT_VARIABLE,None)
if settings_module:
try:
mod = import_module(settings_module)
except ImportError, e:
raise ImportError("Could not import settings '%s': %s" % (settings_module, e))
else:
mod = None
sett = django_settings
if not sett:
sett = Settings()
return fill(sett,mod)
settings = get_settings()
|
UltronAI/Deep-Learning
|
Pattern-Recognition/hw2-Feature-Selection/skfeature/utility/entropy_estimators.py
|
Python
|
mit
| 7,335
| 0.00259
|
# Written by Greg Ver Steeg (http://www.isi.edu/~gregv/npeet.html)
import scipy.spatial as ss
from scipy.special import digamma
from math import log
import numpy.random as nr
import numpy as np
import random
# continuous estimators
def entropy(x, k=3, base=2):
"""
The classic K-L k-nearest neighbor continuous entropy estimator x should be a list of vectors,
e.g. x = [[1.3],[3.7],[5.1],[2.4]] if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x)-1, "Set k smaller than num. samples - 1"
d = len(x[0])
N = len(x)
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
tree = ss.cKDTree(x)
nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in x]
const = digamma(N)-digamma(k) + d*log(2)
return (const + d*np.mean(map(log, nn)))/log(base)
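# Usage sketch (synthetic data, result is approximate):
# x = [[v] for v in nr.randn(1000)]
# entropy(x, k=3, base=2)  # close to 0.5*log2(2*pi*e) ~ 2.05 bits
# for a standard normal sample.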
def mi(x, y, k=3, base=2):
"""
Mutual information of x and y; x, y should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
points = zip2(x, y)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))
return (-a-b+c+d)/log(base)
def cmi(x, y, z, k=3, base=2):
"""
Mutual information of x and y, conditioned on z; x, y, z should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
z = [list(p + intens * nr.rand(len(z[0]))) for p in z]
points = zip2(x, y, z)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(zip2(x, z), dvec), avgdigamma(zip2(y, z), dvec), avgdigamma(z, dvec), digamma(k)
return (-a-b+c+d)/log(base)
def kldiv(x, xp, k=3, base=2):
"""
    KL Divergence between p and q for x~p(x), xp~q(x); x, xp should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    assert k <= len(xp) - 1, "Set k smaller than num. samples - 1"
assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
d = len(x[0])
n = len(x)
m = len(xp)
const = log(m) - log(n-1)
tree = ss.cKDTree(x)
treep = ss.cKDTree(xp)
nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in x]
nnp = [treep.query(point, k, p=float('inf'))[0][k-1] for point in x]
return (const + d*np.mean(map(log, nnp))-d*np.mean(map(log, nn)))/log(base)
# Discrete estimators
def entropyd(sx, base=2):
"""
Discrete entropy estimator given a list of samples which can be any hashable object
"""
return entropyfromprobs(hist(sx), base=base)
def midd(x, y):
"""
Discrete mutual information estimator given a list of samples which can be any hashable object
"""
return -entropyd(list(zip(x, y)))+entropyd(x)+entropyd(y)
def cmidd(x, y, z):
"""
Discrete mutual information estimator given a list of samples which can be any hashable object
"""
return entropyd(list(zip(y, z)))+entropyd(list(zip(x, z)))-entropyd(list(zip(x, y, z)))-entropyd(z)
def hist(sx):
# Histogram from list of samples
d = dict()
for s in sx:
d[s] = d.get(s, 0) + 1
return map(lambda z: float(z)/len(sx), d.values())
def entropyfromprobs(probs, base=2):
# Turn a normalized list of probabilities of discrete outcomes into entropy (base 2)
return -sum(map(elog, probs))/log(base)
def elog(x):
# for entropy, 0 log 0 = 0. but we get an error for putting log 0
if x <= 0. or x >= 1.:
return 0
else:
return x*log(x)
# Mixed estimators
def micd(x, y, k=3, base=2, warning=True):
""" If x is continuous and y is discrete, compute mutual information
"""
overallentropy = entropy(x, k, base)
n = len(y)
word_dict = dict()
for sample in y:
word_dict[sample] = word_dict.get(sample, 0) + 1./n
yvals = list(set(word_dict.keys()))
mi = overallentropy
for yval in yvals:
xgiveny = [x[i] for i in range(n) if y[i] == yval]
if k <= len(xgiveny) - 1:
mi -= word_dict[yval]*entropy(xgiveny, k, base)
else:
if warning:
print("Warning, after conditioning, on y={0} insufficient data. Assuming maximal entropy in this case.".format(yval))
mi -= word_dict[yval]*overallentropy
return mi # units already applied
# Utility functions
def vectorize(scalarlist):
"""
Turn a list of scalars into a list of one-d vectors
"""
return [(x,) for x in scalarlist]
def shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):
"""
Shuffle test
Repeatedly shuffle the x-values and then estimate measure(x,y,[z]).
    Returns the mean and conf. interval ('ci=0.95' default) over 'ns' runs; 'measure' could be mi, cmi,
e.g. Keyword arguments can be passed. Mutual information and CMI should have a mean near zero.
"""
xp = x[:] # A copy that we can shuffle
outputs = []
for i in range(ns):
random.shuffle(xp)
if z:
outputs.append(measure(xp, y, z, **kwargs))
else:
outputs.append(measure(xp, y, **kwargs))
outputs.sort()
return np.mean(outputs), (outputs[int((1.-ci)/2*ns)], outputs[int((1.+ci)/2*ns)])
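# e.g. shuffle_test(mi, x, y, ns=200, ci=0.95) returns (mean, (lo, hi));
# for independent x and y the confidence interval should bracket zero
# (illustrative call, with x and y as lists of vectors).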
# Internal functions
def avgdigamma(points, dvec):
# This part finds number of neighbors in some radius in the marginal space
# returns expectation value of <psi(nx)>
N = len(points)
tree = ss.cKDTree(points)
avg = 0.
for i in range(N):
dist = dvec[i]
# subtlety, we don't include the boundary point,
# but we are implicitly adding 1 to kraskov def bc center point is included
num_points = len(tree.query_ball_point(points[i], dist-1e-15, p=float('inf')))
avg += digamma(num_points)/N
return avg
def zip2(*args):
# zip2(x,y) takes the lists of vectors and makes it a list of vectors in a joint space
# E.g. zip2([[1],[2],[3]],[[4],[5],[6]]) = [[1,4],[2,5],[3,6]]
return [sum(sublist, []) for sublist in zip(*args)]
|
KlubJagiellonski/pola-backend
|
pola/product/migrations/0017_remove_product_companies.py
|
Python
|
bsd-3-clause
| 329
| 0
|
# Generated by Django 3.1.7 on 2021-09-27 16:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0016_product_company'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='companies',
),
]
|
openqt/algorithms
|
projecteuler/ac/old/pe029_distinct_powers.py
|
Python
|
gpl-3.0
| 823
| 0
|
#!/usr/bin/env python
# coding=utf-8
"""
Distinct powers
Problem 29
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get
the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100 and
2 ≤ b ≤ 100?
"""
from __future__ import print_function
def power_combinations(a, b):
for i in range(2, a):
for j in range(2, b):
yield i ** j
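# Sanity check against the statement above: len(set(power_combinations(6, 6)))
# covers 2 <= a, b <= 5 and yields the 15 distinct terms listed.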
if __name__ == '__main__':
print(len(set(power_combinations(101, 101)))) # 9183
|
sgerhart/ansible
|
lib/ansible/modules/cloud/pubnub/pubnub_blocks.py
|
Python
|
mit
| 23,797
| 0.00021
|
#!/usr/bin/python
#
# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
# Frameworks
# Copyright (C) 2016 PubNub Inc.
# http://www.pubnub.com/
# http://www.pubnub.com/terms
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pubnub_blocks
version_added: '2.2'
short_description: PubNub blocks management module.
description:
- "This module allows Ansible to interface with the PubNub BLOCKS
infrastructure by providing the following operations: create / remove,
start / stop and rename for blocks and create / modify / remove for event
handlers"
author:
- PubNub <[email protected]> (@pubnub)
- Sergey Mamontov <[email protected]> (@parfeon)
requirements:
- "python >= 2.7"
- "pubnub_blocks_client >= 1.0"
options:
email:
description:
    - Email of the account for which a new session should be started.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
password:
description:
    - Password matching the account to which the specified C(email) belongs.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
cache:
description: >
      If a single play uses the blocks management module several times, it is
      preferable to enable 'caching' by making the previous module share its
      gathered artifacts and passing them to this parameter.
required: false
default: {}
account:
description:
- "Name of PubNub account for from which C(application) will be used to
manage blocks."
- "User\'s account will be used if value not set or empty."
required: false
version_added: '2.4'
application:
description:
- "Name of target PubNub application for which blocks configuration on
specific C(keyset) will be done."
required: true
keyset:
description:
- Name of application's keys set which is bound to managed blocks.
required: true
state:
description:
- "Intended block state after event handlers creation / update process
will be completed."
required: false
default: 'started'
choices: ['started', 'stopped', 'present', 'absent']
name:
description:
- Name of managed block which will be later visible on admin.pubnub.com.
required: true
description:
description:
- "Short block description which will be later visible on
admin.pubnub.com. Used only if block doesn\'t exists and won\'t change
description for existing block."
required: false
default: 'New block'
event_handlers:
description:
- "List of event handlers which should be updated for specified block
C(name)."
- "Each entry for new event handler should contain: C(name), C(src),
C(channels), C(event). C(name) used as event handler name which can be
used later to make changes to it."
- C(src) is full path to file with event handler code.
- "C(channels) is name of channel from which event handler is waiting
for events."
- "C(event) is type of event which is able to trigger event handler:
I(js-before-publish), I(js-after-publish), I(js-after-presence)."
- "Each entry for existing handlers should contain C(name) (so target
handler can be identified). Rest parameters (C(src), C(channels) and
C(event)) can be added if changes required for them."
- "It is possible to rename event handler by adding C(changes) key to
event handler payload and pass dictionary, which will contain single key
C(name), where new name should be passed."
- "To remove particular event handler it is possible to set C(state) for
it to C(absent) and it will be removed."
required: false
default: []
changes:
description:
- "List of fields which should be changed by block itself (doesn't
affect any event handlers)."
- "Possible options for change is: C(name)."
required: false
default: {}
validate_certs:
description:
- "This key allow to try skip certificates check when performing REST API
calls. Sometimes host may have issues with certificates on it and this
will cause problems to call PubNub REST API."
- If check should be ignored C(False) should be passed to this parameter.
required: false
default: true
'''
EXAMPLES = '''
# Event handler create example.
- name: Create single event handler
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
src: '{{ path_to_handler_source }}'
name: '{{ handler_name }}'
event: 'js-before-publish'
channels: '{{ handler_channel }}'
# Change event handler trigger event type.
- name: Change event handler 'even
|
t'
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
name: '{{ handler_name }}'
event: 'js-after-publish'
# Stop block and event handlers.
- name: Sto
|
pping block
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
    state: stopped
# Multiple module calls with cached result passing
- name: Create '{{ block_name }}' block
register: module_cache
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_1_source }}'
name: '{{ event_handler_1_name }}'
channels: '{{ event_handler_1_channel }}'
event: 'js-before-publish'
- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_2_source }}'
name: '{{ event_handler_2_name }}'
channels: '{{ event_handler_2_channel }}'
event: 'js-before-publish'
- name: Start '{{ block_name }}' block
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: started
'''
RETURN = '''
module_cache:
description: "Cached account information. In case if with single play module
used few times it is better to pass cached data to next module calls to speed
up process."
type: dict
returned: always
'''
import copy
import os
try:
# Import PubNub BLOCKS client.
from pubnub_blocks_client import User, Account, Owner, Application, Keyset
from pubnub_blocks_client import Block, EventHandler
from pubnub_blocks_client import exceptions
HAS_PUBNUB_BLOCKS_CLIENT = True
except ImportError:
HAS_PUBNUB_BLOCKS_CLIENT = False
User = None
Account = None
Owner = None
Application = None
Keyset = None
Block = None
EventHandler = None
exceptions = None
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def pubnub_user(module):
"""Create and configure user model if it possible.
:type module: AnsibleModule
:param module: Reference on module which contain module
|
CMLL/taiga-back
|
taiga/projects/userstories/migrations/0010_remove_userstory_watchers.py
|
Python
|
agpl-3.0
| 1,220
| 0.004098
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connecti
|
on
from django.db import models, migrations
from django.contrib.contenttypes.models import
|
ContentType
from django.contrib.contenttypes.management import update_all_contenttypes
def create_notifications(apps, schema_editor):
update_all_contenttypes(verbosity=0)
sql="""
INSERT INTO notifications_watched (object_id, created_date, content_type_id, user_id, project_id)
SELECT userstory_id AS object_id, now() AS created_date, {content_type_id} AS content_type_id, user_id, project_id
FROM userstories_userstory_watchers INNER JOIN userstories_userstory ON userstories_userstory_watchers.userstory_id = userstories_userstory.id""".format(content_type_id=ContentType.objects.get(model='userstory').id)
cursor = connection.cursor()
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0004_watched'),
('userstories', '0009_remove_userstory_is_archived'),
]
operations = [
migrations.RunPython(create_notifications),
migrations.RemoveField(
model_name='userstory',
name='watchers',
),
]
|
bunjiboys/security_monkey
|
security_monkey/common/sts_connect.py
|
Python
|
apache-2.0
| 3,363
| 0.000892
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.common.sts_connect
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.datastore import Account
import botocore.session
import boto3
import boto
def connect(account_name, connection_type, **args):
"""
Examples of use:
ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
ec2 = sts_connect.connect(environment, 'ec2')
where environment is ( test, prod, dev )
s3 = sts_connect.connect(environment, 's3')
ses = sts_connect.connect(environment, 'ses')
:param account: Account to connect with (i.e. test, prod, dev)
:raises Exception: RDS Region not valid
AWS Tech not supported.
:returns: STS Connection Object for given tech
:note: To use this method a SecurityMonkey role must be created
in the target account with full read only privileges.
"""
account = Account.query.filter(Account.name == account_name).first()
sts = boto.connect_sts()
role_name = 'SecurityMonkey'
if account.role_name and account.role_name != '':
role_name = account.role_name
role = sts.assume_role('arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')
if connection_type == 'botocore':
botocore_session = botocore.session.get_session()
botocore_session.set_credentials(
role.credentials.access_key,
role.credentials.secret_key,
token=role.credentials.session_token
)
return botocore_session
region = 'us-east-1'
if 'region' in args:
region = args.pop('region')
if hasattr(region, 'name'):
region = region.name
if 'boto3' in connection_type:
# Should be called in this format: boto3.iam.client
_, tech, api = connection_type.split('.')
session = boto3.Session(
aws_access_key_id=role.credentials.access_key,
aws_secret_access_key=role.credentials.secret_key,
aws_session_token=role.credentials.session_token,
region_name=region
)
if api == 'resource':
return session.resource(tech)
return session.client(tech)
module = __import__("boto.{}".format(connection_type))
for subm in connection_type.split('.'):
module = getattr(module, subm)
return modu
|
le.connect_to_region(
region,
aws_access_key_id=role.credentials.access_key,
aws_secret_access_key=role.credentials.secret_key,
|
security_token=role.credentials.session_token
)
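# Illustrative usage of the 'boto3.<tech>.<api>' connection_type handled above
# (the account name 'prod' and the region below are hypothetical):
#
#   iam = connect('prod', 'boto3.iam.client')
#   s3 = connect('prod', 'boto3.s3.resource', region='us-west-2')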
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/hbase_decommission.py
|
Python
|
apache-2.0
| 2,306
| 0.009974
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def hbase_decommission(env):
import params
env.set_params(params)
kinit_cmd_decommission = params.kinit_cmd_decommission
File(params.region_drainer,
content=StaticFile("draining_servers.rb"),
mode=0755
)
  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
    hosts = params.hbase_excluded_hosts.split(",")
  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
    hosts = params.hbase_included_hosts.split(",")
  else:
    hosts = []  # avoid a NameError below when neither host list is configured
if params.hbase_drain_only:
for host in hosts:
if host:
regiondrainer_cmd = format(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {regio
|
n_drainer} remove {host}")
Execute(regiondrainer_cmd,
user=params.hbase_user,
logoutput=True
)
pass
pass
else:
for host in hosts:
if host:
regiondrainer_cmd = format(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
regionmover_cmd = for
|
mat(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
Execute(regiondrainer_cmd,
user=params.hbase_user,
logoutput=True
)
Execute(regionmover_cmd,
user=params.hbase_user,
logoutput=True
)
pass
pass
pass
pass
|
sylverspace/craft_game
|
app.py
|
Python
|
mit
| 10,026
| 0.033513
|
from flask import Flask, render_template, redirect, url_for, request, session, flash, jsonify, g
import sqlite3 as sql
import os
import random
import json
from flask_sqlalchemy import SQLAlchemy
import ast #To change string list to a python list
import collections # To count duplicates in inventory list using Counter()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///RTSDB.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = os.urandom(24)
db = SQLAlchemy(app)
class Data(db.Model):
__tablename__ = "data"
id = db.Column('id', db.Integer, primary_key=True)
user = db.Column("user", db.String(20))
password = db.Column("password", db.String(20))
saltWater = db.Column("saltwater", db.String(20))
freshWater = db.Column("freshWater", db.String(20))
elixir = db.Column("elixir", db.String(20))
pearl = db.Column("pearl", db.String(20))
iron = db.Column("iron", db.String(20))
coal = db.Column("coal", db.String(20))
titanium = db.Column("titanium", db.String(20))
diamond = db.Column("diamond", db.String(20))
def __init__(self, id, user, password, saltWater, freshWater, elixir, pearl, iron, coal, titanium, diamond):
self.id = id
self.user = user
self.password = password
self.saltWater = saltWater
self.freshWater = freshWater
self.elixir = elixir
self.pearl = pearl
self.iron = iron
self.coal = coal
self.titanium = titanium
self.diamond = diamond
#def _
|
_repr__(self):
# return '{}'.format(self.id)
class Recipe(db.Model):
num_of_rec = 0
__tablen
|
ame__ = "recipes"
id = db.Column('id', db.Integer, primary_key=True)
name = db.Column("name", db.String(20))
type = db.Column("type", db.String(20))
result = db.Column("result", db.String(20))
prereq = db.Column("prereq", db.String(20))
ing_1 = db.Column("ing_1", db.String(20))
ing_qty_1 = db.Column("ing_qty_1", db.Integer)
ing_2 = db.Column("ing_2", db.String(20))
ing_qty_2 = db.Column("ing_qty_2", db.Integer)
desc = db.Column("desc", db.String(20))
    def __init__(self, id, name, type, result, prereq, ing_1, ing_qty_1, ing_2, ing_qty_2, desc):
self.id = id
self.name = name
self.type = type
self.result = result
self.prereq = prereq
self.ing_1 = ing_1
self.ing_qty_1 = ing_qty_1
self.ing_2 = ing_2
self.ing_qty_2 = ing_qty_2
self.desc = desc
Recipe.num_of_rec += 1
def __repr__(self):
return '{}'.format(self.result)
'''
findtest = "saltWater"
userStuff = Data.query.filter_by(user="admin").first()
value = getattr(userStuff, findtest)
print (value)
temp = int(value)
temp += 1
value = str(temp)
print (value)
userStuff.saltWater = value
db.session.commit()
#newinfo = ExempleDB(7, 'sixth user', '123456', '25')
#db.session.add(newinfo)
#db.session.commit()
#update_this = ExempleDB.query.filter_by(id=6).first
#update_this.user = "New_user"
#db.session.commit()
'''
@app.route("/test")
def test():
resultList = Data.query.all()
return render_template('test.html', resultList=resultList)
@app.before_request
def before_request():
g.user = None
if 'user' in session:
g.user = session['user']
@app.route('/getsession')
def getsession():
if 'user' in session:
return session['user']
return 'Not logged in!'
@app.route("/logout")
def logout():
session.pop('user', None)
flash('You were just logged out!')
return redirect(url_for('index'))
@app.route("/", methods=['GET', 'POST'])
def index():
session.pop('user', None)
error = None
if request.method == 'POST':
session['user'] = request.form['username']
enteredPassword = request.form['password']
con = sql.connect("RTSDB.db")
cur = con.execute("SELECT user, password from data")
resultList = [row[0] for row in cur.fetchall()]
if session['user'] in resultList:
return redirect(url_for('map'))
else:
print "Failed"
flash('Invalid Credentials!')
return render_template("index.html", title="RTS FLASK PROJECT", **globals())
#Map_and_resources___________________________________________________________
@app.route("/map")
def map():
if g.user:
resources = Data.query.filter_by(user = g.user).all()
return render_template("map.html", resources=resources)
return redirect(url_for('index'))
@app.route('/gather/<clickedRegion>', methods=['POST'])
def gather(clickedRegion):
def gatherChances():
finding = ""
randomResult = random.random()
if randomResult <= 0.005:
if clickedRegion == "water":
finding = "pearl" #Legendary
if clickedRegion == "mine":
finding = "diamond" #Legendary
elif randomResult > 0.005 and randomResult <= 0.05:
if clickedRegion == "water":
finding = "elixir" #Epic
if clickedRegion == "mine":
finding = "titanium" #Epic
elif randomResult > 0.05 and randomResult <= 0.20:
if clickedRegion == "water":
finding = "freshWater" #Rare
if clickedRegion == "mine":
finding = "coal" #Rare
else:
if clickedRegion == "water":
finding = "saltWater" #Common
if clickedRegion == "mine":
finding = "iron" #Common
#print randomResult
#print finding
return finding
finding = gatherChances()
#Get value from database to show them in HTML
con = sql.connect("RTSDB.db")
cur = con.execute("SELECT " + finding + " from data WHERE user='"+g.user+"'")
for row in cur:
print row
#Update the value by one (for now)
newval = int(row[0]) + 1
newval = str(newval)
#Insert new values
#Data.update().where(user=g.user).with_entities(finding).values(newval)
#db.session.commit()
cur.execute("UPDATE data SET " + finding + " = " + newval + " WHERE user='" + g.user +"'")
con.commit()
con.close()
return json.dumps({"finding":finding, "newval":newval})
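# Hedged sketch (not wired into the routes above): user-derived values are
# safer bound as SQL parameters than concatenated into the statement. A column
# name such as `finding` cannot be bound, so it is checked against a whitelist
# first. The helper name below is hypothetical.
ALLOWED_RESOURCES = frozenset(["saltWater", "freshWater", "elixir", "pearl",
                               "iron", "coal", "titanium", "diamond"])
def safe_increment(con, finding, user):
    if finding not in ALLOWED_RESOURCES:
        raise ValueError("unknown resource: %s" % finding)
    # The user value is bound as a parameter; the column name is whitelisted.
    con.execute("UPDATE data SET " + finding + " = " + finding + " + 1"
                " WHERE user=?", (user,))
    con.commit()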
#Inventory____________________________________________________
@app.route("/inventory")
def inventory():
if g.user:
con = sql.connect("RTSDB.db")
#Showing ressources
resources = Data.query.filter_by(user = g.user).all()
#Getting current inventory
cur = con.execute("SELECT items from inventory WHERE user='" + g.user +"'").fetchone()
#First row of the list (all items)
x = cur[0]
#Stinged list as real list
currInv = ast.literal_eval(x)
counter = collections.Counter(currInv)
print counter
return render_template("inventory.html", resources=resources, currInv=currInv, counter=counter)
return redirect(url_for('index'))
#Crafting______________________________________________________
@app.route("/craft")
def craft():
if g.user:
#Get all recipes from Table recipes
resources = Data.query.filter_by(user = g.user).all()
recipes = Recipe.query.all()
return render_template("craft.html", recipes=recipes, resources=resources)
return redirect(url_for('index'))
'''
@app.route("/showComponent/<clickedComponent>")
def showComponent(clickedComponent):
recipe = Data.query.filter_by(name = clickedComponent).all()
return recipe
'''
@app.route("/craftProcess/<item>", methods=['POST'])
def craftProcess(item):
con = sql.connect("RTSDB.db")
#Getting FIRST required mats
cur = con.execute("SELECT ing_1 from recipes WHERE result='"+item+"'").fetchone()
ing_1 = cur[0]
cur = con.execute("SELECT ing_qty_1 from recipes WHERE result='"+item+"'").fetchone()
ing_qty_1 = cur[0]
#Getting SECOND required mats
cur = con.execute("SELECT ing_2 from recipes WHERE result='"+item+"'").fetchone()
ing_2 = cur[0]
cur = con.execute("SELECT ing_qty_2 from recipes WHERE result='"+item+"'").fetchone()
ing_qty_2 = cur[0]
#Getting FIRST concerned ressource and removing
cur = con.execute("SELECT " + ing_1 + " from data WHERE user='" + g.user +"'").fetchone()
oldVal = cur[0]
newVal1 = int(oldVal) - ing_qty_1
#Getting SECOND concerned ressource and removing
cur = con.execute("SELECT " + ing_2 + " from data WHERE user='" + g.user +"'").fetchone()
oldVal = cur[0]
    newVal2 = int(oldVal) - ing_qty_2
#Updating resources
con.execute("UPDATE data SET "
|
CKPalk/SeattleCrime_DM
|
DataMining/Stats/coord_bounds.py
|
Python
|
mit
| 801
| 0.087391
|
''' Work of Cameron P
|
alk '''
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
trainin
|
g_data = pd.read_csv( csv_filepath )
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
	training_data = training_data.dropna()  # dropna returns a new frame; keep it
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
|
mrocklin/streams
|
streamz/sources.py
|
Python
|
bsd-3-clause
| 25,985
| 0.001347
|
from glob import glob
import os
import time
import tornado.ioloop
from tornado import gen
from .core import Stream, convert_interval, RefCounter
def PeriodicCallback(callback, callback_time, asynchronous=False, **kwargs):
source = Stream(asynchronous=asynchronous)
def _():
result = callback()
source._emit(result)
pc = tornado.ioloop.PeriodicCallback(_, callback_time, **kwargs)
pc.start()
return source
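# Usage sketch (illustrative; requires a running tornado IOLoop):
#
# >>> source = PeriodicCallback(time.time, 100)  # emit current time every 100 ms  # doctest: +SKIP
# >>> source.sink(print)  # doctest: +SKIP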
def sink_to_file(filename, upstream, mode='w', prefix='', suffix='\n', flush=False):
file = open(filename, mode=mode)
def write(text):
file.write(prefix + text + suffix)
if flush:
file.flush()
upstream.sink(write)
return file
class Source(Stream):
_graphviz_shape = 'doubleoctagon'
def __init__(self, **kwargs):
self.stopped = True
super(Source, self).__init__(**kwargs)
def stop(self): # pragma: no cover
# fallback stop method - for poll functions with while not self.stopped
if not self.stopped:
self.stopped = True
@Stream.register_api(staticmethod)
class from_textfile(Source):
""" Stream data from a text file
Parameters
----------
f: file or string
Source of the data. If string, will be opened.
poll_interval: Number
Interval to poll file for new data in seconds
delimiter: str
Character(s) to use to split the data into parts
start: bool
Whether to start running immediately; otherwise call stream.start()
explicitly.
from_end: bool
Whether to begin streaming from the end of the file (i.e., only emit
lines appended after the stream starts).
Examples
--------
>>> source = Stream.from_textfile('myfile.json') # doctest: +SKIP
>>> source.map(json.loads).pluck('value').sum().sink(print) # doctest: +SKIP
>>> source.start() # doctest: +SKIP
Returns
-------
Stream
"""
def __init__(self, f, poll_interval=0.100, delimiter='\n', start=False,
from_end=False, **kwargs):
if isinstance(f, str):
f = open(f)
self.file = f
self.from_end = from_end
self.delimiter = delimiter
self.poll_interval = poll_interval
super(from_textfile, self).__init__(ensure_io_loop=True, **kwargs)
self.stopped = True
self.started = False
if start:
self.start()
def start(self):
self.stopped = False
self.started = False
self.loop.add_callback(self.do_poll)
@gen.coroutine
def do_poll(self):
buffer = ''
if self.from_end:
# this only happens when we are ready to read
self.file.seek(0, 2)
while not self.stopped:
self.started = True
line = self.file.read()
if line:
buffer = buffer + line
if self.delimiter in buffer:
parts = buffer.split(self.delimiter)
buffer = parts.pop(-1)
for part in parts:
yield self._emit(part + self.delimiter)
else:
yield gen.sleep(self.poll_interval)
@Stream.register_api(staticmethod)
class filenames(Source):
""" Stream over filenames in a directory
Parameters
----------
path: string
Directory path or globstring over which to search for files
poll_interval: Number
Seconds between checking path
start: bool (False)
Whether to start running immediately; otherwise call stream.start()
explicitly.
Examples
--------
>>> source = Stream.filenames('path/to/dir') # doctest: +SKIP
>>> source = Stream.filenames('path/to/*.csv', poll_interval=0.500) # doctest: +SKIP
"""
def __init__(self, path, poll_interval=0.100, start=False, **kwargs):
if '*' not in path:
if os.path.isdir(path):
if not path.endswith(os.path.sep):
path = path + '/'
path = path + '*'
self.path = path
self.seen = set()
self.poll_interval = poll_interval
self.stopped = True
super(filenames, self).__init__(ensure_io_loop=True)
if start:
self.start()
def start(self):
self.stopped = False
self.loop.add_callback(self.do_poll)
@gen.coroutine
def do_poll(self):
while True:
filenames = set(glob(self.path))
new = filenames - self.seen
for fn in sorted(new):
self.seen.add(fn)
yield self._emit(fn)
yield gen.sleep(self.poll_interval) # TODO: remove poll if delayed
if self.stopped:
break
@Stream.register_api(staticmethod)
class from_tcp(Source):
"""
Creates events by reading from a socket using tornado TCPServer
The stream of incoming bytes is split on a given delimiter, and the parts
become the emitted events.
Parameters
----------
port : int
The port to open and listen on. It only gets opened
|
when the source
is started, and closed upon ``stop()``
delimit
|
er : bytes
The incoming data will be split on this value. The resulting events
will still have the delimiter at the end.
start : bool
Whether to immediately initiate the source. You probably want to
set up downstream nodes first.
server_kwargs : dict or None
If given, additional arguments to pass to TCPServer
Examples
--------
>>> source = Source.from_tcp(4567) # doctest: +SKIP
"""
def __init__(self, port, delimiter=b'\n', start=False,
server_kwargs=None):
super(from_tcp, self).__init__(ensure_io_loop=True)
self.stopped = True
self.server_kwargs = server_kwargs or {}
self.port = port
self.server = None
self.delimiter = delimiter
if start: # pragma: no cover
self.start()
@gen.coroutine
def _start_server(self):
from tornado.tcpserver import TCPServer
from tornado.iostream import StreamClosedError
class EmitServer(TCPServer):
source = self
@gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
data = yield stream.read_until(self.source.delimiter)
yield self.source._emit(data)
except StreamClosedError:
break
self.server = EmitServer(**self.server_kwargs)
self.server.listen(self.port)
def start(self):
if self.stopped:
self.loop.add_callback(self._start_server)
self.stopped = False
def stop(self):
if not self.stopped:
self.server.stop()
self.server = None
self.stopped = True
@Stream.register_api(staticmethod)
class from_http_server(Source):
"""Listen for HTTP POSTs on given port
Each connection will emit one event, containing the body data of
the request
Parameters
----------
port : int
The port to listen on
path : str
Specific path to listen on. Can be regex, but content is not used.
start : bool
Whether to immediately startup the server. Usually you want to connect
downstream nodes first, and then call ``.start()``.
server_kwargs : dict or None
If given, set of further parameters to pass on to HTTPServer
Examples
--------
>>> source = Source.from_http_server(4567) # doctest: +SKIP
"""
def __init__(self, port, path='/.*', start=False, server_kwargs=None):
self.port = port
self.path = path
self.server_kwargs = server_kwargs or {}
super(from_http_server, self).__init__(ensure_io_loop=True)
self.stopped = True
self.server = None
if start: # pragma: no cover
self.start()
def _start_server(self):
from tornado.web import Application, RequestHandler
from
|
psycofdj/xtd
|
xtd/network/server/application.py
|
Python
|
gpl-3.0
| 5,010
| 0.020958
|
# -*- coding: utf-8
#------------------------------------------------------------------#
__author__ = "Xavier MARCELET <[email protected]>"
#------------------------------------------------------------------#
import json
import sys
import cherrypy
from xtd.core import logger, config
from xtd.core.config import checkers
from xtd.core.application import Application
from .log import LogPage
from .counter import CounterPage
from .config import ConfigPage
from .param import ParamPage
from .manager import ServerManager
#------------------------------------------------------------------#
class ServerApplication(Application):
def __init__(self, p_name = sys.argv[0]):
super(ServerApplication, self).__init__(p_name)
self.config().register_section("http", "Server Settings", [{
"name" : "listen",
"default" : "tcp://localhost:8080",
"description" : "bind server to given socket",
"checks" : config.checkers.is_socket(p_schemes=["tcp", "unix"])
},{
"name" : "threads",
"default" : 10,
"description" : "allocate VAL number of work threads",
"checks" : config.checkers.is_int(p_min=1)
},{
"name" : "daemonize",
"default" : False,
"description" : "daemonize process at startup"
},{
"name" : "pid-file",
"default" : "/tmp/%s.pid",
"description" : "daemon pid file"
},{
"name" : "admin-password",
"default" : None,
"valued" : True,
"description" : "Administrator password for write access to admin web interfaces"
},{
"name" : "tls",
"default" : False,
"description" : "Enable TLS of http server",
"checks" : checkers.is_bool()
},{
"name" : "tlscacert",
"default" : None,
"valued" : True,
"description" : "TLS CA-Certificate file"
},{
"name" : "tlscert",
"default" : None,
"valued" : True,
"description" : "TLS Certificate file"
},{
"name" : "tlskey",
"default" : None,
"valued" : True,
"description" : "TLS key file"
}])
def _initialize_server(self):
l_password = config.get("http", "admin-password")
l_socket = config.get("http", "listen")
l_threads = config.get("http", "threads")
l_credentials = None
if l_password:
l_credentials = { "admin" : l_password }
ServerManager.initialize(__name__)
l_tls = config.get("http", "tls")
l_cacert = config.get("http", "tlscacert")
l_cert = config.get("http", "tlscert")
l_key = config.get("http", "tlskey")
ServerManager.listen(l_socket, l_threads, l_tls, l_cacert, l_cert, l_key)
ServerManager.mount(self, "/", {}, __name__)
ServerManager.mount(ConfigPage(), "/admin/config", {}, __name__)
ServerManager.mount(CounterPage(), "/admin/counter", {}, __name__)
l_paramPage = ParamPage(l_credentials)
ServerManager.mount(l_paramPage, "/admin/params", {
"/write" : {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': l_paramPage.check_password
}
}, __name__)
l_logPage = LogPage(l_credentials)
ServerManager.mount(l
|
_logPage, "/admin/log", {
"/write" : {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': l_logPage.check_password
}
}, __name__)
ServerManager.subscribe("exit", super().stop, 100)
@cherrypy.expose
@cherrypy.tools.json_out()
#pylint: disable=unused-argument
def default(self, *p_args, **p_kwds):
l_reqinfo = {
|
"method" : cherrypy.request.method,
"path" : cherrypy.request.path_info,
"params" : cherrypy.request.params,
"headers" : cherrypy.request.headers
}
logger.error(self.m_name, "unhandled request : %s", json.dumps(l_reqinfo))
cherrypy.response.status = 500
return {
"error" : "unhandled request",
"request" : l_reqinfo
}
@staticmethod
def _check_config():
l_useTLS = config.get("http", "tls")
if l_useTLS:
l_values = [ "tlscacert", "tlscert", "tlskey" ]
for c_key in l_values:
l_val = config.get("http", c_key)
config.set("http", c_key, checkers.is_file("http", c_key, l_val, p_read=True))
def initialize(self):
super(ServerApplication, self).initialize()
self._check_config()
self._initialize_server()
def start(self):
super(ServerApplication, self).start()
ServerManager.start()
def join(self):
ServerManager.join()
super(ServerApplication, self).join()
def stop(self):
super(ServerApplication, self).stop()
ServerManager.stop()
def process(self):
return 0, False
|
murffer/DetectorSim
|
MCNPXRPMModels/WrappedCylinders/CylinderMCNPX.py
|
Python
|
apache-2.0
| 23,537
| 0.016145
|
#!/usr/bin/env python
"""
MCNPX Model for Cylindrical RPM8
"""
import sys
sys.path.append('../MCNPTools/')
sys.path.append('../')
from MCNPMaterial import Materials
import subprocess
import math
import mctal
import numpy as np
import itertools
import os
class CylinderRPM(object):
# Material Dictionaries
cellForStr = '{:5d} {:d} -{:4.3f} {:d} -{:d} u={:d}\n'
surfForStr = '{:5d} cz {:5.3f}\n'
tranForStr = '*tr{:d} {:4.3f} {:4.3f} 0.000\n'
geoParam={'RPM8Size':12.7,'DetectorThickness':0.01,'DetectorSpacing':0.8,
'CylinderLightGuideRadius':0.5,'CylinderRadius':2.5}
def __init__(self,inp='INP.mcnp'):
""" Wrapped Cylinder MCNPX Model of RPM8
Keywords:
inp -- desired name of the input deck
"""
# Material dictionary for the moderator, light guide, and detector
self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
self.materia
|
l['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HPDE
# Cell and Surface Inital Numbering
self.CellStartNum = 600
|
self.SurfaceStartNum = 600
self.ZeroSurfaceNum = 500
self.UniverseNum = 200
self.surfGeo = None
self.inp = inp
        self.name = 'OUT_' + self.inp.replace('.mcnp', '') + '.'
self.setMaterial(0.1,'PS')
def __str__(self):
s = '\tMCNPX Model of Wrapped Cylinder\n'
s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
return s
def getInteractionRate(self):
""" Returns the interaction rate """
m = mctal.MCTAL(self.name+'.m')
t = m.tallies[4]
# Returing the total
return t.data[-1],t.errors[-1]
def setMaterial(self,massFraction,polymer):
"""
Sets the detector material
"""
M = Materials()
num = self.material['Detector']['mt']
if polymer == 'PS':
self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
elif polymer == 'PEN':
self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
else:
raise ValueError('Polymer {} is not in the material database'.format(polymer))
def createSurfaceGeo(self):
"""
Creates a dictionary of surface positions and cylinders
"""
self.surfGeo = dict()
r = self.geoParam['CylinderLightGuideRadius']
self.surfGeo[r] = 'LightGuide'
#self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
r += self.geoParam['DetectorThickness']
self.surfGeo[r] = 'Detector'
r += self.geoParam['DetectorSpacing']
if (r < self.geoParam['CylinderRadius']):
self.surfGeo[r] = 'LightGuide'
return self.surfGeo
def calculateDetectorArea(self):
"""
Calculates the area used in a detector
"""
area = 0.0
r = self.geoParam['CylinderLightGuideRadius']
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
area -= math.pow(r,2)
r += self.geoParam['DetectorThickness']
area += math.pow(r,2)
r += self.geoParam['DetectorSpacing']
return math.pi*area
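    # Worked example (illustrative, using the default geoParam values above):
    # the first Detector shell spans r = 0.5 to 0.51 cm and contributes
    # pi*(0.51**2 - 0.5**2) ~= 0.0317 cm^2; the loop sums this annular
    # (r_outer**2 - r_inner**2) term for every Detector shell, then scales by pi.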
def createDetectorCylinder(self,uNum=1):
"""
Creates a detector cylinder
Returns an ntuple of s,c,detectorCells
s - the surface string
c - the cell string
detectorCells - a list of the numbers corresponding to the detectors cells
"""
cellsCreated = 0
sNum = self.SurfaceStartNum
cNum = self.CellStartNum
detectorCells = list()
s = '{:5d} rcc 0 0 0 0 0 217.7 {}\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])
c = ''
keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))
for key in keyList:
sPrev = sNum
sNum += 1
cNum += 1
s += self.surfForStr.format(sNum,key)
m = self.material[self.surfGeo[key]]
if cNum == self.CellStartNum+1:
c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)
else:
c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)
# List of cells for the detector
            if self.surfGeo[key] == 'Detector':  # string equality, not identity
detectorCells.append(cNum)
cellsCreated += 1
# Last cell up to universe boundary
m = self.material['Moderator']
c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)
cellsCreated += 1
return s,c,detectorCells,cellsCreated
def runModel(self):
"""
Runs the Model by submission to Tourqe / Maui
"""
qsub= subprocess.check_output('which qsub',shell=True).strip()
cmd = '#!/bin/bash\n'
cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
job = cmd.format('Job_RPMCylinder',self.inp,self.name)
with open('qsub','w') as o:
o.write(job)
subprocess.call(qsub+' qsub',shell=True)
subprocess.call('rm qsub',shell=True)
def createInputDeck(self,cylinderPositions,inp=None,name=None):
""" createInputDeck
Creates an input deck of the given geometry
"""
self.inp = inp
self.name = name
if not inp:
self.inp = 'INP_Cylinder.mcnp'
if not name:
self.name = 'OUT_Cylinder.'
oFile = self.inp
# Problem Constants
cellString = 'c ------------------------- Source ----------------------------------------\n'
cellString += '70 5 -15.1 -70 $ 252Cf source \n'
cellString += '71 406 -11.34 -71 70 $ Lead around source\n'
cellString += '72 456 -0.93 -72 71 $ Poly around source\n'
surfString = 'c ########################### Surface Cards ##############################\n'
surfString += 'c ------------------- Encasing Bounds (Size of RPM8) ---------------------\n'
surfString += '500 rpp 0 12.7 -15.25 15.25 0 217.7 \n'
# Add in other cells here
numCells = 4 # 3 Source, 1 RPM8 Encasing
##################################################################
# Add in Detector Cells and Surfaces #
##################################################################
universeNum = 1
(s,c,detectorCells,cellsCreated) = self.createDetectorCylinder(universeNum)
surfString += s
cellString += 'c ------------------- Detector Cylinder Universe ------------------------\n'
cellString += c
transNum = 1
uCellNum = self.UniverseNum
transString = ''
cellString += 'c ----------------------- Detector Universe ----------------------------\n'
for pos in cylinderPositions:
transString += self.tranForStr.format(transNum,pos[0],pos[1])
cellString += '{:5d} 0 -{:d} trcl={:d} fill={:d}\n'.format(uCellNum,self.SurfaceStartNum,transNum,universeNum)
transNum +=1
uCellNum +=1
# Adding the PMMA Moderator Block
m = self.material['Moderator']
cellString += 'c ------------------------- HDPE Moderator -----------------------------\n'
cellString += '{:5d} {:d} -{:4.3f} -{:d} '.format(500,m['mt'],m['rho'],self.ZeroSurfaceNum)
cellString += ''.join('#{:d}
|
dansackett/Todooo
|
todooo/validators.py
|
Python
|
mit
| 1,462
| 0.001368
|
import errors
def validate_num_arguments_eq(num_args):
"""Validate that the number of supplied args is equal to some number"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) != num_args:
raise errors.InvalidArgumentError
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def validate_num_arguments_lt(num_args):
"""Validate that the number of supplied args is less than to some number"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) > num_args:
raise errors.InvalidArgumentError
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def validate_num_arguments_gt(num_args):
"""Validate that the number of supplied args is greater than to some numbe
|
r"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) < num_args:
raise errors.InvalidArgumentE
|
rror
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def parse_index(lst, id):
"""Validate an index to the list is within range and a digit and return it"""
if not id.isdigit():
raise errors.ExpectedItemError
idx = int(id) - 1
if idx > len(lst) - 1 or idx < 0:
raise errors.InvalidItemError
return idx
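# Usage sketch (the class and method below are hypothetical; they only
# illustrate the (self, args) calling convention the decorators expect):
class _ExampleCommand(object):
    @validate_num_arguments_eq(2)
    def move(self, args):
        # valid when called as cmd.move(['item-id', 'new-position'])
        pass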
|
xypron/mini-dinstall
|
minidinstall/ChangeFile.py
|
Python
|
gpl-2.0
| 4,579
| 0.005241
|
# ChangeFile
# A class which represents a Debian change file.
# Copyright 20
|
02 Colin Walters <[email protected]>
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as p
|
ublished by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, re, sys, string, stat
import threading, Queue
import logging
from minidinstall import DpkgControl, SignedFile
from minidinstall import misc
class ChangeFileException(Exception):
def __init__(self, value):
self._value = value
def __str__(self):
return `self._value`
class ChangeFile(DpkgControl.DpkgParagraph):
md5_re = r'^(?P<md5>[0-9a-f]{32})[ \t]+(?P<size>\d+)[ \t]+(?P<section>[-/a-zA-Z0-9]+)[ \t]+(?P<priority>[-a-zA-Z0-9]+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
sha1_re = r'^(?P<sha1>[0-9a-f]{40})[ \t]+(?P<size>\d+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
sha256_re = r'^(?P<sha256>[0-9a-f]{64})[ \t]+(?P<size>\d+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
def __init__(self):
DpkgControl.DpkgParagraph.__init__(self)
self._logger = logging.getLogger("mini-dinstall")
self._file = ''
def load_from_file(self, filename):
self._file = filename
f = SignedFile.SignedFile(open(self._file))
self.load(f)
f.close()
def getFiles(self):
return self._get_checksum_from_changes()['md5']
def _get_checksum_from_changes(self):
""" extract checksums and size from changes file """
output = {}
hashes = { 'md5': ['files', re.compile(self.md5_re)],
'sha1': ['checksums-sha1', re.compile(self.sha1_re)],
'sha256': ['checksums-sha256', re.compile(self.sha256_re)]
}
hashes_checked = hashes.copy()
try:
self['files']
except KeyError:
return []
for hash in hashes:
try:
self[hashes[hash][0]]
except KeyError:
self._logger.warn("Can't find %s checksum in changes file '%s'" % (hash, os.path.basename(self._file)))
hashes_checked.pop(hash)
for hash in hashes_checked:
output[hash] = []
for line in self[hashes[hash][0]]:
if line == '':
continue
match = hashes[hash][1].match(line)
if (match is None):
raise ChangeFileException("Couldn't parse file entry \"%s\" in Files field of .changes" % (line,))
output[hash].append([match.group(hash), match.group('size'), match.group('file') ])
return output
def verify(self, sourcedir):
""" verify size and hash values from changes file """
checksum = self._get_checksum_from_changes()
for hash in checksum.keys():
for (hashsum, size, filename) in checksum[hash]:
self._verify_file_integrity(os.path.join(sourcedir, filename), int(size), hash, hashsum)
def _verify_file_integrity(self, filename, expected_size, hash, expected_hashsum):
""" check uploaded file integrity """
self._logger.debug('Checking integrity of %s' % (filename,))
try:
statbuf = os.stat(filename)
if not stat.S_ISREG(statbuf[stat.ST_MODE]):
raise ChangeFileException("%s is not a regular file" % (filename,))
size = statbuf[stat.ST_SIZE]
except OSError, e:
raise ChangeFileException("Can't stat %s: %s" % (filename,e.strerror))
if size != expected_size:
raise ChangeFileException("File size for %s does not match that specified in .dsc" % (filename,))
if (misc.get_file_sum(self, hash, filename) != expected_hashsum):
raise ChangeFileException("%ssum for %s does not match that specified in .dsc" % (hash, filename,))
self._logger.debug('Verified %ssum %s and size %s for %s' % (hash, expected_hashsum, expected_size, filename))
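# Illustrative parse of a single 'Files' entry with md5_re (all field values
# below are hypothetical; `re` is imported at the top of this module):
#
#   >>> m = re.match(ChangeFile.md5_re,
#   ...     'd41d8cd98f00b204e9800998ecf8427e 0 admin/utils optional foo_1.0.dsc')
#   >>> m.group('md5'), m.group('size'), m.group('file')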
# vim:ts=4:sw=4:et:
|
uclouvain/osis
|
reference/admin.py
|
Python
|
agpl-3.0
| 2,104
| 0.000476
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# se
|
e http://
|
www.gnu.org/licenses/.
#
##############################################################################
from django.contrib import admin
from reference.models import *
admin.site.register(continent.Continent,
continent.ContinentAdmin)
admin.site.register(currency.Currency,
currency.CurrencyAdmin)
admin.site.register(country.Country,
country.CountryAdmin)
admin.site.register(decree.Decree,
decree.DecreeAdmin)
admin.site.register(domain.Domain,
domain.DomainAdmin)
admin.site.register(domain_isced.DomainIsced,
domain_isced.DomainIscedAdmin)
admin.site.register(language.Language,
language.LanguageAdmin)
admin.site.register(zipcode.ZipCode,
zipcode.ZipCodeAdmin)
admin.site.register(high_school.HighSchool,
high_school.HighSchoolAdmin)
|
Affirm/moto
|
moto/dynamodb2/comparisons.py
|
Python
|
apache-2.0
| 15,123
| 0.00238
|
from __future__ import unicode_literals
import re
import six
# TODO add tests for all of these
EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa
NE_FUNCTION = lambda item_value, test_value: item_value != test_value # flake8: noqa
LE_FUNCTION = lambda item_value, test_value: item_value <= test_value # flake8: noqa
LT_FUNCTION = lambda item_value, test_value: item_value < test_value # flake8: noqa
GE_FUNCTION = lambda item_value, test_value: item_value >= test_value # flake8: noqa
GT_FUNCTION = lambda item_value, test_value: item_value > test_value # flake8: noqa
COMPARISON_FUNCS = {
'EQ': EQ_FUNCTION,
'=': EQ_FUNCTION,
'NE': NE_FUNCTION,
'!=': NE_FUNCTION,
'LE': LE_FUNCTION,
'<=': LE_FUNCTION,
'LT': LT_FUNCTION,
'<': LT_FUNCTION,
'GE': GE_FUNCTION,
'>=': GE_FUNCTION,
'GT': GT_FUNCTION,
'>': GT_FUNCTION,
'NULL': lambda item_value: item_value is None,
'NOT_NULL': lambda item_value: item_value is not None,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, *test_values: item_value in test_values,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
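# Usage sketch (operands are hypothetical): the comparison names come straight
# from the DynamoDB wire format, so lookups read naturally:
#
#   >>> get_comparison_func('BETWEEN')(5, 4, 6)
#   True
#   >>> get_comparison_func('BEGINS_WITH')('moto-rocks', 'moto')
#   True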
class RecursionStopIteration(StopIteration):
pass
def get_filter_expression(expr, names, values):
# Examples
# expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)'
# expr = 'Id > 5 AND Subs < 7'
if names is None:
names = {}
if values is None:
values = {}
# Do substitutions
for key, value in names.items():
expr = expr.replace(key, value)
# Store correct types of values for use later
values_map = {}
for key, value in values.items():
if 'N' in value:
values_map[key] = float(value['N'])
elif 'BOOL' in value:
values_map[key] = value['BOOL']
elif 'S' in val
|
ue:
values_map[key] = value['S']
elif 'NS' in value:
values_map[key] = tuple(value['NS'])
elif 'SS' in value:
values_map[key] = tuple(value['SS'])
elif 'L' in value:
values_map[key] = tuple(value['L'])
else:
raise NotImple
|
mentedError()
    # Remove all spaces; to be fair, we could just skip them in the next step.
# The number of known options is really small so we can do a fair bit of cheating
expr = list(expr.strip())
# DodgyTokenisation stage 1
def is_value(val):
return val not in ('<', '>', '=', '(', ')')
def contains_keyword(val):
for kw in ('BETWEEN', 'IN', 'AND', 'OR', 'NOT'):
if kw in val:
return kw
return None
def is_function(val):
return val in ('attribute_exists', 'attribute_not_exists', 'attribute_type', 'begins_with', 'contains', 'size')
# Does the main part of splitting between sections of characters
tokens = []
stack = ''
while len(expr) > 0:
current_char = expr.pop(0)
if current_char == ' ':
if len(stack) > 0:
tokens.append(stack)
stack = ''
elif current_char == ',': # Split params ,
if len(stack) > 0:
tokens.append(stack)
stack = ''
elif is_value(current_char):
stack += current_char
kw = contains_keyword(stack)
if kw is not None:
# We have a kw in the stack, could be AND or something like 5AND
tmp = stack.replace(kw, '')
if len(tmp) > 0:
tokens.append(tmp)
tokens.append(kw)
stack = ''
else:
if len(stack) > 0:
tokens.append(stack)
tokens.append(current_char)
stack = ''
if len(stack) > 0:
tokens.append(stack)
def is_op(val):
return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT')
# DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier.
def handle_token(token, tokens2, token_iterator):
# ok so this essentially groups up some tokens to make later parsing easier,
# when it encounters brackets it will recurse and then unrecurse when RecursionStopIteration is raised.
if token == ')':
raise RecursionStopIteration() # Should be recursive so this should work
elif token == '(':
temp_list = []
try:
while True:
next_token = six.next(token_iterator)
handle_token(next_token, temp_list, token_iterator)
except RecursionStopIteration:
pass # Continue
except StopIteration:
                raise ValueError('Malformed filter expression, type1')
            # Sigh, we only want to group a tuple if it doesn't contain operators
if any([is_op(item) for item in temp_list]):
# Its an expression
tokens2.append('(')
tokens2.extend(temp_list)
tokens2.append(')')
else:
tokens2.append(tuple(temp_list))
elif token == 'BETWEEN':
field = tokens2.pop()
# if values map contains a number, it would be a float
# so we need to int() it anyway
op1 = six.next(token_iterator)
op1 = int(values_map.get(op1, op1))
and_op = six.next(token_iterator)
assert and_op == 'AND'
op2 = six.next(token_iterator)
op2 = int(values_map.get(op2, op2))
tokens2.append(['between', field, op1, op2])
elif is_function(token):
function_list = [token]
lbracket = six.next(token_iterator)
assert lbracket == '('
next_token = six.next(token_iterator)
while next_token != ')':
function_list.append(next_token)
next_token = six.next(token_iterator)
tokens2.append(function_list)
else:
# Convert tokens back to real types
if token in values_map:
token = values_map[token]
# Need to join >= <= <>
if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')):
tokens2.append(tokens2.pop() + token)
else:
tokens2.append(token)
tokens2 = []
token_iterator = iter(tokens)
for token in token_iterator:
handle_token(token, tokens2, token_iterator)
# Start of the Shunting-Yard algorithm. <-- Proper beast algorithm!
def is_number(val):
return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT')
OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 100, ')': 100}
def shunting_yard(token_list):
output = []
op_stack = []
# Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no
# ambiguity on which order operators are applied.
while len(token_list) > 0:
token = token_list.pop(0)
if token == '(':
op_stack.append(token)
elif token == ')':
while len(op_stack) > 0 and op_stack[-1] != '(':
output.append(op_stack.pop())
lbracket = op_stack.pop()
assert lbracket == '('
elif is_number(token):
output.append(token)
else:
# Must be operator kw
|
kfieldho/SMQTK
|
python/smqtk/representation/code_index/memory.py
|
Python
|
bsd-3-clause
| 5,235
| 0.000764
|
import cPickle
import os.path as osp
from smqtk.representation.code_index import CodeIndex
from smqtk.utils import SimpleTimer
__author__ = "[email protected]"
class MemoryCodeIndex (CodeIndex):
"""
Local RAM memory based index with an optional file cache
"""
@classmethod
def is_usable(cls):
"""
No outside dependencies.
:rtype: bool
"""
return True
def __init__(self, file_cache=None):
"""
Initialize a new in-memory code index, or reload one from a cache.
:param file_cache: Optional path to a file path, loading an existing
index if the file already exists. Either way, providing a path to
this enabled file caching when descriptors are added to this index.
This cache file is a pickle serialization.
:type file_cache: None | str
"""
super(MemoryCodeIndex, self).__init__()
self._num_descr = 0
self._file_cache = file_cache
# Mapping of code to a dictionary mapping descrUUID->Descriptor
#: :type: dict[collections.Hashable, dict[collections.Hashable, smqtk.representation.DescriptorElement]]
self._table = {}
if file_cache and osp.isfile(file_cache):
with open(file_cache) as f:
self._log.debug("Loading cached code index table from file: %s",
file_cache)
#: :type: dict[collections.Hashable, dict[collections.Hashable, smqtk.representation.DescriptorElement]]
self._table = cPickle.load(f)
self._log.debug("Counting indexed descriptors")
# Find the number of descriptors in the table
self._num_descr = sum(len(d) for d in self._table.itervalues())
self._log.debug("Done loading cached table")
def cache_table(self):
if self._file_cache:
with SimpleTimer("Caching memory table", self._log.debug):
with open(self._file_cache, 'wb') as f:
cPickle.dump(self._table, f)
def get_config(self):
return {
"file_cache": self._file_cache
}
def count(self):
"""
:return: Number of descriptor elements stored in this index. This is not
necessarily the number of codes stored in the index.
:rtype: int
"""
return self._num_descr
def clear(self):
"""
Clear this code index's table entries.
"""
self._table = {}
self.cache_table()
def codes(self):
"""
:return: Set of code integers currently used in this code index.
:rtype: set[int]
"""
return set(self._table)
def iter_codes(self):
"""
Iterate over code contained in this index in an arbitrary order.
:return: Generator that yields integer code keys
:rtype: collections.Iterator[int|long]
"""
for k in self._table:
yield k
def add_descriptor(self, code, descriptor, no_cache=False):
"""
Add a descriptor to this index given a matching small-code
:param code: bit-hash of the given descriptor in integer form
:type code: int
:param descriptor: Descriptor to index
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This option should not be modified from its default by
normal use. Used internally.
:type no_cache: bool
"""
code_map = self._table.setdefault(code, {})
if descriptor.uuid() not in code_map:
self._num_descr += 1
code_map[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
def add_many_descriptors(self, code_descriptor_pairs):
"""
Add multiple code/descriptor pairs.
:param code_descriptor_pairs: Iterable of integer code and paired
descriptor tuples to add to this index.
:type code_descriptor_pairs:
collections.Iterable[(int, s
|
mqtk.representation.DescriptorElement)]
"""
for c, d in code_descriptor_pairs:
self.add_descriptor(c, d, True)
self.cache_table()
def get_descriptors(self, code_or_codes):
"""
Get iterable of descriptors associated to this code or iterable of
codes. This may return an empty iterable.
:param code_or_codes: An intege
|
r or iterable of integer bit-codes.
:type code_or_codes: collections.Iterable[int] | int
:return: Iterable of descriptors
:rtype: collections.Iterable[smqtk.representation.DescriptorElement]
"""
if hasattr(code_or_codes, '__iter__'):
# noinspection PyTypeChecker
# -> I literally just checked for __iter__
for c in code_or_codes:
for v in self._table.get(c, {}).values():
yield v
else: # assuming int
for v in self._table.get(code_or_codes, {}).itervalues():
yield v
CODE_INDEX_CLASS = MemoryCodeIndex
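# Usage sketch (illustrative; d1 and d2 stand for DescriptorElement instances
# and are not part of this file):
#
#   index = MemoryCodeIndex(file_cache='/tmp/code_index.pickle')
#   index.add_many_descriptors([(0b1011, d1), (0b0100, d2)])
#   index.count()                          # -> 2 descriptors (not codes)
#   matches = list(index.get_descriptors(0b1011))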
|
projectweekend/Links-API
|
links/issues/mixins.py
|
Python
|
mit
| 849
| 0
|
from issues.models import ReportedLink, ReportedUser
from issues.serializers import ReportedLinkSerializer, ReportedUserSerializer
class ReportedLinkAPI(object):
serializer_class = ReportedLinkSerializer
def get_queryset(self):
return ReportedLink.objects.all()
class ReportedLinkSelfAPI(object):
def get_queryset(self):
return ReportedLink.objects.filter(reporter=self.request.user)
def pre_save(self, obj):
obj.reporter = self.request.user
class ReportedUserAPI(object):
serializer_class = ReportedUserSerializer
def get_queryset(self):
return ReportedUser.objects.all()
class ReportedUserSelfAPI(object):
def get_queryset(self):
return ReportedUser.objects.filter(reporter=self.request.user)
def pre_save(self, obj):
obj.reporter = self.request.user
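# Usage sketch (hypothetical combination with Django REST Framework generic
# views; not part of the original file):
#
#   class ReportedLinkListView(ReportedLinkSelfAPI, ReportedLinkAPI,
#                              generics.ListCreateAPIView):
#       pass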
|
LaoZhongGu/kbengine
|
kbe/src/lib/python/PCbuild/build_ssl.py
|
Python
|
lgpl-3.0
| 9,198
| 0.002827
|
# Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Fetch SSL directory from VC properties
def get_ssl_dir():
propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.vsprops'))
with open(propfile) as f:
m = re.search('openssl-([^"]+)"', f.read())
return "..\..\openssl-"+m.group(1)
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
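# For example (illustrative), fix_makefile() rewrites a CFLAG line such as
#   CFLAG= /MD /W3 /Ox
# into
#   CFLAG= /MD /W3 /Ox -DOPENSSL_NO_RC5 -DOPENSSL_NO_MDC2 -DOPENSSL_NO_IDEA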
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
        m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
        #os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = get_ssl_dir()
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild makefile when we do the roll over from the 32 to the 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
|
wichert/rest_toolkit
|
tests/test_resource.py
|
Python
|
bsd-2-clause
| 3,615
| 0
|
import pytest
from webtest import TestApp
from pyramid.config import Configurator
from pyramid.testing import DummyRequest
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
def make_app(config):
return TestApp(config.make_wsgi_app())
@pytest.mark.parametrize('method', ['delete', 'get', 'post', 'patch', 'put'])
def test_unallowed_method_added(method):
config = Configurator()
config.scan('resource_only')
app = make_app(config)
getattr(app, method)('/', status=405)
def test_default_options_method():
config = Configurator()
config.scan('resource_only')
app = make_app(config)
response = app.options('/')
assert response.headers['Access-Control-Allow-Methods'] == 'OPTIONS'
def test_request_add_get_view():
config = Configurator()
config.scan('resource_get')
app = make_app(config)
app.get('/')
def test_request_default_to_json_renderer():
config = Configurator()
config.scan('resource_get')
app = make_app(config)
r = app.get('/')
assert r.content_type == 'application/json'
assert r.json == {'message': 'hello'}
def test_request_override_renderer():
config = Configurator()
config.scan('resource_get_renderer')
app = make_app(config)
r = app.get('/')
assert r.content_type == 'text/plain'
assert r.unicode_body == 'hello'
def test_add_controller():
config = Configurator()
config.scan('controller')
app = make_app(config)
app.post('/engage')
def test_nested_controller():
# Test for https://github.com/wichert/rest_toolkit/issues/12
config = Configurator()
config.scan('controller')
app = make_app(config)
app.post('/resource/engage')
def test_controller_default_to_json_renderer():
config = Configurator()
config.scan('controller')
app = make_app(config)
r = app.post('/engage')
assert r.content_type == 'application/json'
assert r.json == {'message': 'Ai ai captain'}
def test_set_controller_method():
    config = Configurator()
config.scan('controller')
app = make_app(config)
r = app.get('/engage')
assert r.json == {'message': 'Warp engine offline'}
@pytest.mark.parametrize('method', ['delete', 'get', 'patch', 'put'])
def test_controller_invalid_method(method):
config = Configurator()
config.scan('controller')
app = make_app(config)
getattr(app, method)('/', status=405)
def test_default_get_view():
config = Configurator()
config.scan('resource_abc')
app = make_app(config)
r = app.get('/')
assert r.json == {'message': 'Hello, world'}
def test_override_default_view():
config = Configurator()
config.scan('resource_abc_override')
app = make_app(config)
r = app.get('/')
assert r.json == {'message': 'Welcome'}
def test_set_resource_route_name():
config = Configurator()
config.scan('resource_route_name')
config.make_wsgi_app()
request = DummyRequest()
request.registry = config.registry
assert request.route_path('user', id=15) == '/users/15'
def test_secured_default_view_not_allowed():
config = Configurator()
config.set_authentication_policy(AuthTktAuthenticationPolicy('seekrit'))
config.set_authorization_policy(ACLAuthorizationPolicy())
config.scan('resource_abc')
app = make_app(config)
app.get('/secure', status=403)
def test_secured_default_view_allowed():
config = Configurator()
config.testing_securitypolicy(1)
config.scan('resource_abc')
app = make_app(config)
app.get('/secure')
|
PearsonIOKI/compose-forum
|
askbot/utils/twitter.py
|
Python
|
gpl-3.0
| 479
| 0.002088
|
import urllib
from askbot.deps.django_authopenid.util import OAuthConnection
class Twitter(OAuthConnection):
def __init__(self):
super(Twitter, self).__init__('twitter')
self.tweet_url = 'https://api.twitter.com/1.1/statuses/update.json'
def tweet(self, text, access_token=None):
client = self.get_client(access_token)
        body = urllib.urlencode({'status': text})
return self.send_request(client, self.tweet_url, 'POST', body=body)
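# Usage sketch (illustrative; `token` stands for a previously obtained OAuth
# access token and is not part of the original file):
#
#   Twitter().tweet("hello world", access_token=token)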
|
bruderstein/PythonScript
|
PythonLib/min/argparse.py
|
Python
|
gpl-2.0
| 97,984
| 0.000235
|
# Author: Steven J. Bethard <[email protected]>.
# New maintainer as of 29 August 2019: Raymond Hettinger <[email protected]>
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'BooleanOptionalAction',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'MetavarTypeHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import os as _os
import re as _re
import sys as _sys
from gettext import gettext as _, ngettext
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
star_args = {}
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
if name.isidentifier():
arg_strings.append('%s=%r' % (name, value))
else:
star_args[name] = value
if star_args:
arg_strings.append('**%s' % repr(star_args))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return list(self.__dict__.items())
def _get_args(self):
return []
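# Illustrative example (not part of the original module): a Namespace(foo=1,
# bar='x') instance, whose class subclasses _AttributeHolder, reprs as
# "Namespace(foo=1, bar='x')", since _get_kwargs() yields the __dict__ items.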
def _copy_items(items):
if items is None:
return []
# The copy module is used only in the 'append' and 'append_const'
# actions, and it is needed only when the default value isn't a list.
# Delay its import for speeding up the common case.
if type(items) is list:
return items[:]
import copy
return copy.copy(items)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
import shutil
width = shutil.get_terminal_size().columns
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = min(max_help_position,
max(width - 20, indent_increment * 2))
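        # Worked example (illustrative): with width=80 and the defaults
        # above, help is aligned at column min(24, max(80 - 20, 2 * 2)),
        # i.e. min(24, 60) == 24.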
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII)
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
        self._add_item(section.format_help, [])
        self._current_section = section
|
devs4v/devs4v-information-retrieval15
|
project/lucene/pylucene-4.9.0-0/jcc/helpers/linux.py
|
Python
|
mit
| 7,260
| 0.003306
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, os.path, re
import distutils, setuptools
from setuptools import dist, extension
from setuptools.command import build_ext
from setuptools.extension import Library as _Library
def patch_st_dir(patch_version, st_egg, jccdir):
return '''
Shared mode is disabled, setuptools patch.43.%s must be applied to enable it
or the NO_SHARED environment variable must be set to turn off this error.
sudo patch -d %s -Nup0 < %s/jcc/patches/patch.43.%s
See %s/INSTALL for more information about shared mode.
''' %(patch_version, st_egg, jccdir, patch_version, jccdir)
def patch_st_zip(patch_version, st_egg, jccdir):
return '''
Shared mode is disabled, setuptools patch.43.%s must be applied to enable it
or the NO_SHARED environment variable must be set to turn off this error.
mkdir tmp
cd tmp
unzip -q %s
patch -Nup0 < %s/jcc/patches/patch.43.%s
sudo zip %s -f
cd ..
rm -rf tmp
See %s/INSTALL for more information about shared mode.
''' %(patch_version, st_egg, jccdir, patch_version, st_egg, jccdir)
def patch_setuptools(with_setuptools):
with_setuptools_c7 = ('00000000', '00000006', '*c', '00000007', '*final')
with_setuptools_c11 = ('00000000', '00000006', '*c', '00000011', '*final')
with_distribute_1 = ('00000000', '00000006', '00000001', '*final')
try:
from setuptools.command.build_ext import sh_link_shared_object
enable_shared = True # jcc/patches/patch.43 was applied
except ImportError:
jccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
st_egg = os.path.dirname(setuptools.__path__[0])
if with_setuptools_c7 <= with_setuptools <= with_setuptools_c11 or with_distribute_1 <= with_setuptools:
# Old setuptools 0.6c7-10 series
# New distribute 0.6.1+ series
if with_setuptools < with_setuptools_c11 and not hasattr(dist, 'check_packages'):
# Old setuptools 0.6c7-10 series missing check_packages()
dist.check_packages = check_packages
setuptools.Library = LinuxLibrary
extension.Library = LinuxLibrary
build_ext.build_ext = LinuxBuildExt
if build_ext.use_stubs:
# Build shared libraries.
global sh_link_shared_object # Fix UnboundLocalError
build_ext.link_shared_object = sh_link_shared_object
else:
# Build static libraries every where else (unless forced)
build_ext.libtype = 'static'
build_ext.link_shared_object = st_link_shared_object
print >>sys.stderr, "Applied shared mode monkey patch to:", setuptools
return True # monkey patch was applied
elif with_setuptools < with_setuptools_c11: # old 0.6c7-10 series
patch_version = '0.6c7'
elif with_setuptools >= with_distribute_1: # new 0.6.1 and up fork
patch_version = '0.6c7' # compatible with 0.6c7
else:
patch_version = '0.6c11' # old 0.6c11+ series
if os.path.isdir(st_egg):
raise NotImplementedError, patch_st_dir(patch_version, st_egg,
jccdir)
else:
raise NotImplementedError, patch_st_zip(patch_version, st_egg,
jccdir)
return enable_shared
class LinuxLibrary(_Library):
def __init__(self, *args, **kwds):
self.force_shared = kwds.pop('force_shared', False)
extension.Extension.__init__(self, *args, **kwds)
class LinuxBuildExt(build_ext.build_ext):
def get_ext_filename(self, fullname):
filename = build_ext._build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext, _Library):
if ext.force_shared and not build_ext.use_stubs:
libtype = 'shared'
else:
libtype = build_ext.libtype
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif build_ext.use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def build_extension(self, ext):
_compiler = self.compiler
try:
force_shared = False
if isinstance(ext, _Library):
self.compiler = self.shlib_compiler
force_shared = ext.force_shared and not build_ext.use_stubs
if force_shared:
self.compiler.link_shared_object = sh_link_shared_object.__get__(self.compiler)
build_ext._build_ext.build_extension(self, ext)
if ext._needs_stub:
self.write_stub(self.get_finalized_command('build_py').build_lib, ext)
finally:
if force_shared:
self.compiler.link_shared_object = build_ext.link_shared_object.__get__(self.compiler)
self.compiler = _compiler
def sh_link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
self.link(self.SHARED_LIBRARY, objects, output_libname, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang)
def st_link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
    self.create_static_lib(objects, basename, output_dir, debug, target_lang)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only"
".-separated package names in setup.py", pkgname
)
|
LeunamBk/translatorPy
|
globalHotkeys.py
|
Python
|
gpl-3.0
| 950
| 0.009474
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
try:
from PyQt4.QtGui import QApplication, QKeySequence
except ImportError:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QKeySequence
from pygs import QxtGlobalShortcut
def hotkeyBinding():
SHORTCUT_SHOW = "Ctrl+Alt+S" # Ctrl maps to Command on Mac OS X
SHORTCUT_EXIT = "Ctrl+Alt+F" # again, Ctrl maps to Command on Mac OS X
def show_activated():
print("Shortcut Activated!")
app = QApplication([])
shortcut_show = QxtGlobalShortcut()
    shortcut_show.setShortcut(QKeySequence(SHORTCUT_SHOW))
shortcut_show.activated.connect(show_activated)
shortcut_exit = QxtGlobalShortcut()
shortcut_exit.setShortcut(QKeySequence(SHORTCUT_EXIT))
shortcut_exit.activated.connect(app.exit)
return_code = app.exec_()
del shortcut_show
del shortcut_exit
sys.exit(return_code)
|
mmilkin/cchaser
|
chaser/tests/test_controller.py
|
Python
|
mit
| 4,019
| 0.000498
|
import unittest
from mock import patch, call
from chaser.controller import MotorController, LEFT_KEY, RIGHT_KEY, UP_KEY, DOWN_KEY, MotorInputError
class ControllerTestCase(unittest.TestCase):
@patch('chaser.controller.gpio')
def test_init(self, io):
io.OUT = True
calls = [call(4, True), call(17, True), call(24, True), call(25, True)]
MotorController()
io.setup.assert_has_calls(calls)
@patch('chaser.controller.gpio')
def test_shut_down(self, io):
controller = MotorController()
io.reset_mock()
controller.shut_down()
        calls = [call(4, False), call(17, False), call(24, False), call(25, False)]
io.output.assert_has_calls(calls)
@patch('chaser.controller.gpio')
def test_left(self, io):
controller = MotorController()
controller.left()
calls = [call(24, True), call(25, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'left')
    @patch('chaser.controller.gpio')
def test_left_stop(self, io):
controller = MotorController()
controller.turn_keys.add(LEFT_KEY)
controller.left()
calls = [call(24, False), call(25, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_right(self, io):
controller = MotorController()
controller.right()
calls = [call(25, True), call(24, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'right')
@patch('chaser.controller.gpio')
def test_right_stop(self, io):
controller = MotorController()
controller.turn_keys.add(RIGHT_KEY)
controller.right()
calls = [call(25, False), call(24, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_forward(self, io):
controller = MotorController()
controller.forward()
calls = [call(4, True), call(17, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'forward')
@patch('chaser.controller.gpio')
def test_forward_stop(self, io):
controller = MotorController()
controller.progress_keys.add(UP_KEY)
controller.forward()
calls = [call(4, False), call(17, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_reverse(self, io):
controller = MotorController()
controller.reverse()
calls = [call(17, True), call(4, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'backwards')
@patch('chaser.controller.gpio')
def test_reverse_stop(self, io):
controller = MotorController()
controller.progress_keys.add(DOWN_KEY)
controller.reverse()
calls = [call(17, False), call(4, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_motor(self, io):
controller = MotorController()
controller.motor(UP_KEY)
calls = [call(4, True), call(17, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(DOWN_KEY)
calls = [call(17, True), call(4, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(RIGHT_KEY)
calls = [call(25, True), call(24, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(LEFT_KEY)
calls = [call(24, True), call(25, False)]
io.output.assert_has_calls(calls)
def test_motor_bad_key(self):
controller = MotorController()
with self.assertRaises(MotorInputError):
controller.motor('other')
|
google/pikov
|
python/pikov/properties.py
|
Python
|
apache-2.0
| 2,766
| 0
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Property classes for building wrapper classes for Pikov nodes.
We want to wrap our semantic graph with Python classes. This allows us to
interact with Python objects to modify the guid_map.
These classes encode the core types used in the semantic graph. When classes
use these properties, the guid_map is updated with the correct serialization
of the property.
"""
from .core import Int64Node, StringNode
class AbstractSemanticGraphProperty(object):
def __init__(self, label):
self._label = label
def from_node(self, obj, value):
raise NotImplementedError()
def to_node(self, value):
raise NotImplementedError()
def __get__(self, obj, type=None):
return self.from_node(obj, obj[self._label])
def __set__(self, obj, value):
obj[self._label] = self.to_node(value)
class UnspecifiedProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
        return obj._graph.get_value(obj, self._label)
def to_node(self, value):
        # Value should already be a Node.
return value
class GuidProperty(AbstractSemanticGraphProperty):
def __init__(self, label, cls):
super().__init__(label)
self._cls = cls
def from_node(self, obj, value):
if value is None:
return None
return self._cls(obj._graph, guid=value.guid)
def to_node(self, value):
        # Value should already be a GuidNode.
return value
def make_guid_property(wrapped):
def __init__(self, label):
GuidProperty.__init__(self, label, wrapped)
return type(
wrapped.__name__ + "Property",
(GuidProperty,),
{
"__init__": __init__,
}
)
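# Usage sketch (hypothetical wrapper classes; not part of the original file):
#
#   FrameProperty = make_guid_property(Frame)   # Frame wraps a graph node
#
#   class Clip(SomeNodeWrapper):
#       frame = FrameProperty("clip.frame")     # label is illustrative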
class ScalarProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
if value is None:
return None
return value.value
class Int64Property(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return Int64Node(value)
class StringProperty(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return StringNode(value)
|
DiegoQueiroz/scriptLattes
|
scriptLattes/producoesBibliograficas/outroTipoDeProducaoBibliografica.py
|
Python
|
gpl-2.0
| 3,771
| 0.032388
|
#!/usr/bin/python
# encoding: utf-8
# filename: outroTipoDeProducaoBibliografica.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco e Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation (FSF); either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from scriptLattes import *
from geradorDePaginasWeb import *
import re
class OutroTipoDeProducaoBibliografica:
    item = None # raw data
idMembro = None
relevante = None
autores = None
titulo = None
ano = None
    natureza = None # type of production
chave = None
def __init__(self, idMembro, partesDoItem='', relevante=''):
self.idMembro = set([])
self.idMembro.add(idMembro)
        if not partesDoItem=='':
            # partesDoItem[0]: number (NOT USED)
            # partesDoItem[1]: description of the book (RAW DATA)
self.relevante = relevante
self.item = partesDoItem[1]
            # Split the item into its constituent parts
partes = self.item.partition(" . ")
self.autores = partes[0].strip()
partes = partes[2]
aux = re.findall(u' \((.*?)\)\.$', partes)
if len(aux)>0:
self.natureza = aux[-1]
partes = partes.rpartition(" (")
partes = partes[0]
else:
self.natureza = ''
aux = re.findall(u' ((?:19|20)\d\d)\\b', partes)
if len(aux)>0:
self.ano = aux[-1] #.strip().rstrip(".").rstrip(",")
partes = partes.rpartition(" ")
partes = partes[0]
else:
self.ano = ''
self.titulo = partes.strip().rstrip(".").rstrip(",")
            self.chave = self.autores # comparison key between the objects
else:
self.relevante = ''
self.autores = ''
self.titulo = ''
self.ano = ''
self.natureza = ''
def compararCom(self, objeto):
if self.idMembro.isdisjoint(objeto.idMembro) and compararCadeias(self.titulo, objeto.titulo):
            # The member IDs are grouped together.
            # This part is important for building the collaboration GRAPH
self.idMembro.update(objeto.idMembro)
if len(self.autores)<len(objeto.autores):
self.autores = objeto.autores
if len(self.titulo)<len(objeto.titulo):
self.titulo = objeto.titulo
if len(self.natureza)<len(objeto.natureza):
self.natureza = objeto.natureza
return self
        else: # not similar
return None
def html(self, listaDeMembros):
s = self.autores + '. <b>' + self.titulo + '</b>. '
s+= str(self.ano) + '. ' if str(self.ano).isdigit() else '. '
s+= self.natureza if not self.natureza=='' else ''
s+= menuHTMLdeBuscaPB(self.titulo)
return s
# ------------------------------------------------------------------------ #
def __str__(self):
s = "\n[OUTRO TIPO DE PRODUCAO BIBLIOGRAFICA] \n"
s += "+ID-MEMBRO : " + str(self.idMembro) + "\n"
s += "+RELEVANTE : " + str(self.relevante) + "\n"
s += "+AUTORES : " + self.autores.encode('utf8','replace') + "\n"
s += "+TITULO : " + self.titulo.encode('utf8','replace') + "\n"
s += "+ANO : " + str(self.ano) + "\n"
s += "+NATUREZA : " + self.natureza.encode('utf8','replace') + "\n"
s += "+item : " + self.item.encode('utf8','replace') + "\n"
return s
|
ElliotTheRobot/LILACS-mycroft-core
|
mycroft/skills/LILACS_knowledge/services/wikidata/__init__.py
|
Python
|
gpl-3.0
| 2,400
| 0.002917
|
from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
dict = {}
node_data = {}
# get knowledge about
            # TODO exception handling for errors
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
|
bollwyvl/jademagic
|
jademagic.py
|
Python
|
bsd-3-clause
| 1,775
| 0
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from IPython import get_ipython
from IPython.display import (
display,
Javascript,
HTML,
)
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics,
magics_class,
cell_magic,
)
import pyjade
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
@magics_class
class JadeMagics(Magics):
"""
Write and load HTML with Jade in the IPython Notebook.
Example:
%%jade
ul
li: some text!
"""
def __init__(self, shell):
super(JadeMagics, self).__init__(shell)
@cell_magic
@magic_arguments.magic_arguments()
    @magic_arguments.argument(
"var_name",
default=None,
nargs="?",
help="""Name of local variable to set to parsed value"""
)
def jade(self, line, cell):
line = line.strip()
args = magic_arguments.parse_argstring(self.jade, line)
display(Javascript(
"""
require(
[
"notebook/js/codecell",
"codemirror/mode/jade/jade"
],
function(cc){
cc.CodeCell.options_default.highlight_modes.magic_jade = {
reg: ["^%%jade"]
}
}
);
"""))
try:
val = pyjade.simple_convert(cell)
except Exception as err:
print(err)
return
if args.var_name is not None:
get_ipython().user_ns[args.var_name] = val
else:
return HTML(val)
def load_ipython_extension(ip):
ip = get_ipython()
ip.register_magics(JadeMagics)
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/test/test_sf.py
|
Python
|
gpl-2.0
| 460
| 0.041304
|
# This is to see that the total memory usage doesn't increase with time
# i.e. no leakage / link between consecutive usages of hsp.
# This will run for ever, to be monitored by the printout and some external monitor.
def t():
from guppy import hsp
while 1:
import guppy.heapy.UniSet
import gc
reload( guppy.heapy.UniSet )
hp = hsp()
x = None
x = hp.heap()
print x
gc.collect()
        print x[0]
print x[1]
print x[2]
gc.collect()
print x&dict
|
ThisLife/aws-management-suite
|
amslib/network/route53.py
|
Python
|
mit
| 46,031
| 0.004627
|
import boto
from amslib.core.manager import BaseManager
from amslib.instance.instance import InstanceManager
import argparse
from errors import *
import pprint
import time
pp = pprint.PrettyPrinter(indent=4)
# Custom HealthCheck object to add support for failure threshold...seems to have been missed in boto
class HealthCheck(object):
"""An individual health check"""
POSTXMLBody = """
<HealthCheckConfig>
<IPAddress>%(ip_addr)s</IPAddress>
<Port>%(port)s</Port>
<Type>%(type)s</Type>
%(resource_path)s
%(fqdn_part)s
%(string_match_part)s
%(request_interval)s
%(failure_threshold)s
</HealthCheckConfig>
"""
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
XMLRequestFailurePart = """<FailureThreshold>%(failure_threshold)d</FailureThreshold>"""
valid_request_intervals = (10, 30)
valid_failure_thresholds = range(1, 11) # valid values are integers 1-10
def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
"""
HealthCheck object
:type ip_addr: str
:param ip_addr: IP Address
:type port: int
:param port: Port to check
:type hc_type: str
        :param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
:type resource_path: str
:param resource_path: Path to check
:type fqdn: str
:param fqdn: domain name of the endpoint to check
:type string_match: str
:param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
:type request_interval: int
:param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
:type failure_threshold: int
        :param failure_threshold: The number of consecutive health check failures required before Amazon Route 53 marks the resource as down.
"""
self.ip_addr = ip_addr
self.port = port
self.hc_type = hc_type
self.resource_path = resource_path
self.fqdn = fqdn
self.string_match = string_match
if failure_threshold in self.valid_failure_thresholds:
self.failure_threshold = failure_threshold
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_failure_thresholds))
if request_interval in self.valid_request_intervals:
self.request_interval = request_interval
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_request_intervals))
def to_xml(self):
params = {
'ip_addr': self.ip_addr,
'port': self.port,
'type': self.hc_type,
'resource_path': "",
'fqdn_part': "",
'string_match_part': "",
'request_interval': (self.XMLRequestIntervalPart %
{'request_interval': self.request_interval}),
'failure_threshold': (self.XMLRequestFailurePart %
{'failure_threshold': self.failure_threshold}),
}
if self.fqdn is not None:
params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
if self.string_match is not None:
params['string_match_part'] = self.XMLStringMatchPart % {'string_match' : self.string_match}
if self.resource_path is not None:
params['resource_path'] = self.XMLResourcePathPart % {'resource_path' : self.resource_path}
return self.POSTXMLBody % params
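# Usage sketch (illustrative values; not part of the original module):
#
#   hc = HealthCheck('203.0.113.10', 80, 'HTTP', '/health',
#                    fqdn='example.com', request_interval=10,
#                    failure_threshold=3)
#   xml_body = hc.to_xml()  # body for a Route 53 CreateHealthCheck request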
# custom version of the boto.route53.record.Record module to add support for failover resource records and to fix the missing health check field on a response
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
    EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
valid_failover_roles = ['PRIMARY', 'SECONDARY']
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover_role=None):
self.name = name
        self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover_role = None
if failover_role in self.valid_failover_roles or failover_role is None:
self.failover_role = failover_role
else:
raise AttributeError(
"Valid values for failover_role are: %s" %
",".join(self.valid_failover_roles))
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health }
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
            body = self.ResourceRecordsBody % {'ttl': self.ttl, 'records': records}
|
mancoast/CPythonPyc_test
|
cpython/266_test_zipimport_support.py
|
Python
|
gpl-3.0
| 10,482
| 0.000859
|
# This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import unittest
import test.test_support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import pdb
import warnings
verbose = test.test_support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import test_doctest, sample_doctest
from test.test_importhooks import ImportHooksBaseTestCase
from test.test_cmd_line_script import temp_dir, _run_python, \
_spawn_python, _kill_python, \
_make_test_script, \
_compile_test_script, \
_make_test_zip, _make_test_pkg
def _run_object_doctest(obj, module):
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = test.test_support.get_original_stdout()
try:
finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
runner = doctest.DocTestRunner(verbose=verbose)
# Use the object's fully qualified name if it has one
# Otherwise, use the module's name
try:
name = "%s.%s" % (obj.__module__, obj.__name__)
except AttributeError:
name = module.__name__
for example in finder.find(obj, name, module):
runner.run(example)
f, t = runner.failures, runner.tries
if f:
raise test.test_support.TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
class ZipSupportTests(ImportHooksBaseTestCase):
# We use the ImportHooksBaseTestCase to restore
# the state of the import related information
# in the sys module after each test
# We also clear the linecache and zipimport cache
# just to avoid any bogus errors due to name reuse in the tests
def setUp(self):
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def test_inspect_getsource_issue4223(self):
test_src = "def foo(): pass\n"
with temp_dir() as d:
init_name = _make_test_script(d, '__init__', test_src)
name_in_zip = os.path.join('zip_pkg',
os.path.basename(init_name))
zip_name, run_name = _make_test_zip(d, 'test_zip',
init_name, name_in_zip)
os.remove(init_name)
sys.path.insert(0, zip_name)
import zip_pkg
self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
def test_doctest_issue4197(self):
# To avoid having to keep two copies of the doctest module's
# unit tests in sync, this test works by taking the source of
# test_doctest itself, rewriting it a bit to cope with a new
# location, and then throwing it in a zip file to make sure
# everything still works correctly
test_src = inspect.getsource(test_doctest)
test_src = test_src.replace(
"from test import test_doctest",
"import test_zipped_doctest as test_doctest")
test_src = test_src.replace("test.test_doctest",
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
sample_src = inspect.getsource(sample_doctest)
sample_src = sample_src.replace("test.test_doctest",
"test_zipped_doctest")
with temp_dir() as d:
script_name = _make_test_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = _make_test_zip(d, 'test_zip',
script_name)
z = zipfile.ZipFile(zip_name, 'a')
z.writestr("sample_zipped_doctest.py", sample_src)
z.close()
if verbose:
zip_file = zipfile.ZipFile(zip_name, 'r')
print 'Contents of %r:' % zip_name
zip_file.printdir()
zip_file.close()
os.remove(script_name)
sys.path.insert(0, zip_name)
import test_zipped_doctest
# Some of the doc tests depend on the colocated text files
# which aren't available to the zipped version (the doctest
# module currently requires real filenames for non-embedded
# tests). So we're forced to be selective about which tests
# to run.
# doctest could really use some APIs which take a text
# string or a file object instead of a filename...
known_good_tests = [
test_zipped_doctest.SampleClass,
test_zipped_doctest.SampleClass.NestedClass,
test_zipped_doctest.SampleClass.NestedClass.__init__,
test_zipped_doctest.SampleClass.__init__,
test_zipped_doctest.SampleClass.a_classmethod,
test_zipped_doctest.SampleClass.a_property,
test_zipped_doctest.SampleClass.a_staticmethod,
test_zipped_doctest.SampleClass.double,
test_zipped_doctest.SampleClass.get,
test_zipped_doctest.SampleNewStyleClass,
test_zipped_doctest.SampleNewStyleClass.__init__,
test_zipped_doctest.SampleNewStyleClass.double,
test_zipped_doctest.SampleNewStyleClass.get,
test_zipped_doctest.old_test1,
test_zipped_doctest.old_test2,
test_zipped_doctest.old_test3,
test_zipped_doctest.old_test4,
test_zipped_doctest.sample_func,
test_zipped_doctest.test_DocTest,
test_zipped_doctest.test_DocTestParser,
test_zipped_doctest.test_DocTestRunner.basics,
test_zipped_doctest.test_DocTestRunner.exceptions,
test_zipped_doctest.test_DocTestRunner.option_directives,
test_zipped_doctest.test_DocTestRunner.optionflags,
test_zipped_doctest.test_DocTestRunner.verbose_flag,
test_zipped_doctest.test_Example,
test_zipped_doctest.test_debug,
test_zipped_doctest.test_pdb_set_trace,
test_zipped_doctest.test_pdb_set_trace_nested,
test_zipped_doctest.test_testsource,
test_zipped_doctest.test_trailing_space_in_test,
test_zipped_doctest.test_DocTestSuite,
test_zipped_doctest.test_DocTestFinder,
]
# These remaining tests are the ones which need access
# to the data files, so we don't run them
fail_due_to_missing_data_files = [
test_zipped_doctest.test_DocFileSuite,
test_zipped_doctest.test_testfile,
test_zipped_doctest.test_unittest_reportflags,
]
# Needed for test_DocTestParser and test_debug
deprecations = [
# Ignore all warnings about the use of class Tester in this module.
("class Tester is deprecated", DeprecationWarning)]
if sys.py3kwarning:
deprecations += [
("backquote not supported", SyntaxWarning),
|
ldemattos/openComtradeViewer
|
src/pyComtrade.py
|
Python
|
gpl-3.0
| 15,076
| 0.022221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (c) 2013 Miguel Moreto <http://sites.google.com/site/miguelmoreto/>
#This file is part of pyComtrade.
#
# pyComtrade is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# pyComtrade is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyComtrade. If not, see <http://www.gnu.org/licenses/>.
# ====================================================================
# pyComtrade: A Python class for reading and writing IEEE
# Comtrade files based on the 1999 standard
#
#
# OBS: - The field names are equal to the Comtrade 1999 standard;
#
# Developed by Miguel Moreto
# Brazil - 2013
#
#
__version__ = "$Revision$" # SVN revision.
__date__ = "$Date$" # Date of the last SVN revision.
import os
import numpy
import struct
import traceback
class ComtradeRecord:
"""
    A Python class for reading and writing IEEE Comtrade files.
This is the main class of pyComtrade.
"""
def __init__(self,filename):
"""
pyComtrade constructor:
Prints a message.
Clear the variables
Check if filename exists.
If so, read the CFG file.
filename: string with the path for the .cfg file.
"""
self.filename = filename
self.filehandler = 0
# Station name, identification and revision year:
self.station_name = ''
self.rec_dev_id = ''
self.rev_year = 0000
# Number and type of channels:
self.TT = 0
self.A = 0 # Number of analog channels.
self.D = 0 # Number of digital channels.
# Analog channel information:
self.An = []
self.Ach_id = []
self.Aph = []
self.Accbm = []
self.uu = []
self.a = []
self.b = []
self.skew = []
self.min = []
self.max = []
self.primary = []
self.secondary = []
self.PS = []
# Digital channel information:
self.Dn = []
self.Dch_id = []
self.Dph = []
self.Dccbm = []
self.y = []
# Line frequency:
self.lf = 0
# Sampling rate information:
self.nrates = 0
self.samp = []
self.endsamp = []
# Date/time stamps:
# defined by: [dd,mm,yyyy,hh,mm,ss.ssssss]
self.start = [00,00,0000,00,00,0.0]
self.trigger = [00,00,0000,00,00,0.0]
# Data file type:
self.ft = ''
# Time stamp multiplication factor:
self.timemult = 0.0
self.DatFileContent = ''
print 'pyComtrade instance created!'
def clear(self):
"""
Clear the internal (private) variables of the class.
"""
self.filename = ''
self.filehandler = 0
# Station name, identification and revision year:
self.station_name = ''
self.rec_dev_id = ''
self.rev_year = 0000
# Number and type of channels:
self.TT = 0
self.A = 0 # Number of analog channels.
self.D = 0 # Number of digital channels.
# Analog channel information:
self.An = []
self.Ach_id = []
self.Aph = []
self.Accbm = []
self.uu = []
self.a = []
self.b = []
self.skew = []
self.min = []
self.max = []
self.primary = []
self.secondary = []
self.PS = []
# Digital channel information:
self.Dn = []
self.Dch_id = []
self.Dph = []
self.Dccbm = []
self.y = []
# Line frequency:
self.lf = 0
# Sampling rate information:
self.nrates = 0
self.samp = []
self.endsamp = []
# Date/time stamps:
# defined by: [dd,mm,yyyy,hh,mm,ss.ssssss]
self.start = [00,00,0000,00,00,0.0]
self.trigger = [00,00,0000,00,00,0.0]
# Data file type:
self.ft = ''
# Time stamp multiplication factor:
self.timemult = 0.0
self.DatFileContent = ''
def ReadCFG(self):
"""
Reads the Comtrade header file (.cfg).
"""
try:
self.filehandler = open(self.filename,'r')
# Processing first line:
line = self.filehandler.readline()
templist = line.split(',')
self.station_name = templist[0]
self.rec_dev_id = templist[1]
if len(templist) > 2:
self.rev_year = int(templist[2])
# Processing second line:
line = self.filehandler.readline().rstrip() # Read line and remove spaces and new line characters.
templist = line.split(',')
self.TT = int(templist[0])
self.A = int(templist[1].strip('A'))
self.D = int(templist[2].strip('D'))
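            # e.g. (illustrative) a second CFG line of "12,6A,6D" yields
            # TT=12 total channels, A=6 analog and D=6 digital.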
# Processing analog channel lines:
for i in range(self.A): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.An.append(int(templist[0]))
self.Ach_id.append(templist[1])
self.Aph.append(templist[2])
self.Accbm.append(templist[3])
self.uu.append(templist[4])
self.a.append(float(templist[5]))
self.b.append(float(templist[6]))
self.skew.append(float(templist[7]))
self.min.append(int(templist[8]))
self.max.append(int(templist[9]))
if len(templist) > 10:
self.primary.append(float(templist[10]))
if len(templist) > 11:
self.secondary.append(float(templist[11]))
if len(templist) > 12:
self.PS.append(templist[12])
# Processing digital channel lines:
for i in range(self.D): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.Dn.append(int(templist[0]))
self.Dch_id.append(templist[1])
self.Dph.append(templist[2])
if len(templist) > 3:
self.Dccbm.append(templist[3])
if len(templist) > 4:
self.y.append(int(templist[4]))
# Read line frequency:
            self.lf = int(float(self.filehandler.readline()))
# Read sampling rates:
self.nrates = int(self.filehandler.readline()) # nrates.
for i in range(self.nrates): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.samp.append(int(float(templist[0])))
self.endsamp.append(int(float(templist[1])))
# Read start date and time ([dd,mm,yyyy,hh,mm,ss.ssssss]):
line = self.filehandler.readline()
templist = line.split('/')
self.start[0] = int(templist[0]) # day.
self.start[1] = int(templist[1]) # month.
templist = templist[2].split(',')
self.start[2] = int(templist[0]) # year.
templist = templist[1].split(':')
self.start[3] = int(templist[0]) # hours.
self.start[4] = int(templist[1]) # minutes.
self.start[5] = float(templist[2]) # seconds.
# Read trigger date and time ([dd,mm,yyyy,hh,mm,ss.ssssss]):
line = self.filehandler.readline()
templist = line.split('/')
self.trigger[0] = int(templist[0]) # day.
self.trigger[1] = int(templist[1]) # month.
templist = templist[2].split(',')
self.trigger[2] = int(templist[0]) # year.
templist = templist[1].split(':')
self.trigger[3] = int(templist[0]) # hours.
self.trigger[4] = int(templist[1]) # minutes.
            self.trigger[5] = float(templist[2]) # seconds.
|
eternalfame/django-russian_fields
|
russian_fields/__init__.py
|
Python
|
mit
| 56
| 0.035714
|
__version__ = "0
|
.1.3"
get_version = lambda: __
|
version__
|
Ebag333/Pyfa
|
eos/effects/highspeedmanuveringcapacitorneedmultiplierpostpercentcapacitorneedlocationshipmodulesrequiringhighspeedmanuvering.py
|
Python
|
gpl-3.0
| 568
| 0.003521
|
# highSpeedManuveringCapacitorNeedMultiplierPostPercentCapacitorNeedLocationShipModulesRequiringHighSpeedManuvering
#
# Used by:
# Implants named like: Eifyr and Co. 'Rogue' High Speed Maneuvering HS (6 of 6)
# Skill: High Speed Maneuvering
type = "passive"
def handler(fit, container, context):
    level = container.level if "skill" in context else 1
    fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("High Speed Maneuvering"),
                                  "capacitorNeed", container.getModifiedItemAttr("capacitorNeedMultiplier") * level)
|
dmitry-sobolev/ansible
|
lib/ansible/module_utils/vyos.py
|
Python
|
gpl-3.0
| 4,383
| 0.004335
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list
from ansible.module_utils.connection import exec_command
_DEVICE_CONFIGS = {}
vyos_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in vyos_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
if provider:
for param in ('password',):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_config(module, target='commands'):
cmd = ' '.join(['show configuration', target])
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=err)
cfg = str(out).strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def run_commands(module, commands, check_rc=True):
responses = list()
for cmd in to_list(commands):
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=err, rc=rc)
responses.append(out)
return responses
def load_config(module, commands, commit=False, comment=None):
rc, out, err = exec_command(module, 'configure')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', output=err)
for cmd in to_list(commands):
rc, out, err = exec_command(module, cmd)
if rc != 0:
# discard any changes in case of failure
exec_command(module, 'exit discard')
module.fail_json(msg='configuration failed')
diff = None
if module._diff:
rc, out, err = exec_command(module, 'compare')
if not out.startswith('No changes'):
rc, out, err = exec_command(module, 'show')
diff = str(out).strip()
if commit:
cmd = 'commit'
if comment:
cmd += ' comment "%s"' % comment
exec_command(module, cmd)
if not commit:
exec_command(module, 'exit discard')
else:
exec_command(module, 'exit')
if diff:
return diff
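# Hedged usage sketch (editor's addition): a VyOS module built on these
# helpers might be wired up roughly as below; the AnsibleModule call and the
# 'set' command are illustrative assumptions, not part of this file.
#
#     module = AnsibleModule(argument_spec=vyos_argument_spec)
#     warnings = []
#     check_args(module, warnings)
#     current = get_config(module)  # cached per command in _DEVICE_CONFIGS
#     diff = load_config(module, ['set system host-name vyos01'], commit=True)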
|
shsingh/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 20,952
| 0.002816
|
# Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2016, Toshio Kuratomi <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import re
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionLoader, get_collection_name_from_path, set_collection_playbook_paths
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we dont
# have other secrets setup, then automatically add a password prompt as well.
# prompts cant/shouldnt work without a tty, so dont add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
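    # Hedged examples (editor's addition) for the two helpers above:
    # split_vault_id('dev@~/.vault_pass') returns ('dev', '~/.vault_pass')
    # and split_vault_id('prompt') returns (None, 'prompt'). With the default
    # identity (typically 'default'), build_vault_ids([], ['pw.txt']) yields
    # ['default@pw.txt']; a prompt slug is appended only when no other
    # secrets are configured and auto_prompt is enabled.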
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# a empty or invalid password from th
|
davidam/python-examples
|
nlp/nltk/howtos/generate.py
|
Python
|
gpl-3.0
| 537
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
grammar = CFG.fromstring(demo_grammar)
print(grammar)
for sentence in generate(grammar, n=10):
print(' '.join(sentence))
for sentence in generate(grammar, depth=4):
print(' '.join(sentence))
print(len(list(generate(grammar, depth=3))))
print(len(list(generate(grammar, depth=4))))
print(len(list(generate(grammar, depth=5))))
print(len(list(generate(grammar, depth=6))))
print(len(list(generate(grammar))))
|
katakumpo/pipetools
|
pipetools/debug.py
|
Python
|
mit
| 672
| 0
|
from itertools import imap, chain
def set_name(name, f):
try:
f.__pipetools__name__ = name
except (AttributeError, UnicodeEncodeError):
pass
return f
def get_name(f):
from pipetools.main import Pipe
pipetools_name = getattr(f, '__pipetools__name__', None)
if pipetools_name:
        return pipetools_name() if callable(pipetools_name) else pipetools_name
if isinstance(f, Pipe):
return repr(f)
return f.__name__ if hasattr(f, '__name__') else repr(f)
def repr_args(*args, **kwargs):
return ', '.join(chain(
imap('{0!r}'.format, args),
        imap('{0[0]}={0[1]!r}'.format, kwargs.iteritems())))
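# Hedged examples (editor's addition): repr_args(1, 'a', key=2) returns
# "1, 'a', key=2", and get_name() falls back to a callable's __name__ or its
# repr() when no __pipetools__name__ was set. Note the Python 2 idioms
# (imap, iteritems) used throughout this module.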
|
paulbellamy/Rhythmbox-iPod-Plugin
|
plugins/jamendo/jamendo/__init__.py
|
Python
|
gpl-2.0
| 4,882
| 0.020893
|
# -*- coding: utf-8 -*-
# __init__.py
#
# Copyright (C) 2007 - Guillaume Desmottes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Parts from "Magnatune Rhythmbox plugin" (stolen from rhythmbox's __init__.py)
# Copyright (C), 2006 Adam Zimmerman <[email protected]>
import rhythmdb, rb
import gobject
import gtk
from JamendoSource import JamendoSource
from JamendoConfigureDialog import JamendoConfigureDialog
popup_ui = """
<ui>
<popup name="JamendoSourceViewPopup">
<menuitem name="AddToQueueLibraryPopup" action="AddToQueue"/>
<menuitem name="JamendoDownloadAlbum" action="JamendoDownloadAlbum"/>
<menuitem name="JamendoDonateArtist" action="JamendoDonateArtist"/>
<separator/>
<menuitem name="BrowseGenreLibraryPopup" action="BrowserSrcChooseGenre"/>
<menuitem name="BrowseArtistLibraryPopup" action="BrowserSrcChooseArtist"/>
<menuitem name="BrowseAlbumLibraryPopup" action="BrowserSrcChooseAlbum"/>
<separator/>
<menuitem name="PropertiesLibraryPopup" action="MusicProperties"/>
</popup>
</ui>
"""
class Jamendo(rb.Plugin):
#
# Core methods
#
def __init__(self):
rb.Plugin.__init__(self)
    def activate(self, shell):
self.db = shell.get_property("db")
self.entry_type = self.db.entry_register_type("JamendoEntryType")
# allow changes which don't do anything
        self.entry_type.can_sync_metadata = True
self.entry_type.sync_metadata = None
group = rb.rb_source_group_get_by_name ("stores")
if not group:
group = rb.rb_source_group_register ("stores",
_("Stores"),
rb.SOURCE_GROUP_CATEGORY_FIXED)
theme = gtk.icon_theme_get_default()
rb.append_plugin_source_path(theme, "/icons/")
width, height = gtk.icon_size_lookup(gtk.ICON_SIZE_LARGE_TOOLBAR)
icon = rb.try_load_icon(theme, "jamendo", width, 0)
self.source = gobject.new (JamendoSource,
shell=shell,
entry_type=self.entry_type,
plugin=self,
icon=icon,
source_group=group)
shell.register_entry_type_for_source(self.source, self.entry_type)
shell.append_source(self.source, None) # Add the source to the list
# Add button
manager = shell.get_player().get_property('ui-manager')
action = gtk.Action('JamendoDownloadAlbum', _('_Download Album'),
_("Download this album using BitTorrent"),
'gtk-save')
action.connect('activate', lambda a: shell.get_property("selected-source").download_album())
self.action_group = gtk.ActionGroup('JamendoPluginActions')
self.action_group.add_action(action)
# Add Button for Donate
action = gtk.Action('JamendoDonateArtist', _('_Donate to Artist'),
_("Donate Money to this Artist"),
'gtk-jump-to')
action.connect('activate', lambda a: shell.get_property("selected-source").launch_donate())
self.action_group.add_action(action)
manager.insert_action_group(self.action_group, 0)
self.ui_id = manager.add_ui_from_string(popup_ui)
manager.ensure_update()
self.pec_id = shell.get_player().connect('playing-song-changed', self.playing_entry_changed)
def deactivate(self, shell):
manager = shell.get_player().get_property('ui-manager')
manager.remove_ui (self.ui_id)
manager.remove_action_group(self.action_group)
self.action_group = None
shell.get_player().disconnect (self.pec_id)
self.db.entry_delete_by_type(self.entry_type)
self.db.commit()
self.db = None
self.entry_type = None
self.source.delete_thyself()
self.source = None
def create_configure_dialog(self, dialog=None):
if not dialog:
builder_file = self.find_file("jamendo-prefs.ui")
dialog = JamendoConfigureDialog (builder_file).get_dialog()
dialog.present()
return dialog
def playing_entry_changed (self, sp, entry):
self.source.playing_entry_changed (entry)
|
ilpianista/ansible
|
lib/ansible/playbook/task.py
|
Python
|
gpl-3.0
| 20,951
| 0.002148
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.plugins.loader import lookup_loader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.loop_control import LoopControl
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.utils.collection_loader import AnsibleCollectionLoader
from ansible.utils.display import Display
from ansible.utils.sentinel import Sentinel
__all__ = ['Task']
display = Display()
class Task(Base, Conditional, Taggable, CollectionSearch):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
# NOTE: ONLY set defaults on task attributes that are not inheritable,
# inheritance is only triggered if the 'current value' is None,
# default can be set at play/top level object and inheritance will take it's course.
_args = FieldAttribute(isa='dict', default=dict)
_action = FieldAttribute(isa='string')
_async_val = FieldAttribute(isa='int', default=0, alias='async')
_changed_when = FieldAttribute(isa='list', default=list)
_delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
_failed_when = FieldAttribute(isa='list', default=list)
_loop = FieldAttribute()
_loop_control = FieldAttribute(isa='class', class_type=LoopControl, inherit=False)
_notify = FieldAttribute(isa='list')
_poll = FieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
_register = FieldAttribute(isa='string', static=True)
_retries = FieldAttribute(isa='int', default=3)
_until = FieldAttribute(isa='list', default=list)
# deprecated, used to be loop and loop_args but loop has been repurposed
_loop_with = FieldAttribute(isa='string', private=True, inherit=False)
def __init__(self, block=None, role=None, task_include=None):
        ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
self._role = role
self._parent = None
if task_include:
self._parent = task_include
else:
self._parent = block
super(Task, self).__init__()
def get_path(self):
''' return the absolute path of the task with its line number '''
path = ""
if hasattr(self, '_ds') and hasattr(self._ds, '_data_source') and hasattr(self._ds, '_line_number'):
path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
elif hasattr(self._parent._play, '_ds') and hasattr(self._parent._play._ds, '_data_source') and hasattr(self._parent._play._ds, '_line_number'):
path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
return path
def get_name(self, include_role_fqcn=True):
''' return the name of the task '''
if self._role:
role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
if self._role and self.name and role_name not in self.name:
return "%s : %s" % (role_name, self.name)
elif self.name:
return self.name
else:
if self._role:
return "%s : %s" % (role_name, self.action)
else:
return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
return ""
elif isinstance(ds, string_types):
return ds
elif isinstance(ds, dict):
buf = ""
for (k, v) in iteritems(ds):
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k, v)
buf = buf.strip()
return buf
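    # Hedged example (editor's addition): _merge_kv({'src': 'a', 'dest': 'b'})
    # returns 'src=a dest=b' (subject to dict ordering); keys starting with
    # an underscore are skipped.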
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Task(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def __repr__(self):
''' returns a human readable representation of the task '''
if self.get_name() == 'meta':
return "TASK: meta (%s)" % self.args['_raw_params']
else:
return "TASK: %s" % self.get_name()
def _preprocess_with_loop(self, ds, new_ds, k, v):
''' take a lookup plugin name and store it correctly '''
loop_name = k.replace("with_", "")
if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
if v is None:
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop_with'] = loop_name
new_ds['loop'] = v
        # display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead", version="2.10")
def preprocess_data(self, ds):
'''
        tasks are especially complex arguments so they need pre-processing.
        keep it short.
'''
if not isinstance(ds, dict):
            raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure suitable for the
# attributes of the task class
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
default_collection = AnsibleCollectionLoader().default_collection
# use the parent value if our ds doesn't define it
collections_list = ds.get('collections', self.collections)
if collections_list is None:
collections_list = []
if isinstance(collections_list, string_types):
collections_list = [collections_list]
if default_collection and not self._role: # FIXME: and not a collections role
if collections_list:
if default_collection not in collections_list:
collections_list.insert(0, default_collection)
else:
collections_list = [default_collection]
if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not
|
pythonprobr/pythonpro-website
|
pythonpro/analytics/context_processors.py
|
Python
|
agpl-3.0
| 197
| 0
|
from django.conf import settings
def posthog_configurations(request):
return {
'POSTHOG_API_KEY': settings.POSTHOG_API_KEY,
'POSTHOG_API_URL': settings.POSTHOG_API_URL,
}
|
tectronics/nyctos
|
src/data.res/scripts/npcs/retiredminer.py
|
Python
|
gpl-2.0
| 1,428
| 0.008403
|
import parole
from parole.colornames import colors
from parole.display import interpolateRGB
import pygame, random
import sim_creatures, main, random
from util import *
description = \
"""
This guy should really look into another line of work.
"""
nagLines = [
'*sigh*',
"It's not been the same 'round 'ere.",
"Ain't been no work since the mines... changed.",
"We been in
|
for some rough times.",
"I pray they don't get to the wells.",
]
class NPCClass(sim_creatures.NPC):
def __init__(self):
sim_creatures.NPC.__init__(
self,
'retired miner', # name
parole.map.AsciiTile('@', colors['Gray']), # symbol, color
11, # str
8, # dex
11, # con
11, # per
10, # spd
1, # level
description=description,
)
parole.info('New NPC: retiredminer.')
main.schedule.listen('enter tile', self)
def listen(self, event):
super(NPCClass, self).listen(event)
if random.random() < 0.9:
return
if not visible(self):
return
if event.id == 'enter tile':
eObj, ePos, eMap = event.args
if eMap is self.parentTile.map and eObj is main.player:
self.say(random.choice(nagLines))
#========================================
thingClass = NPCClass
|
ProgDan/maratona
|
SPOJ/UJ.py
|
Python
|
gpl-3.0
| 129
| 0.124031
|
while 1:
arr=input().split(' ')
k=arr[0]
n=arr[1]
    if k=='0' and n=='0':
        break
    ans=int(int(k)**int(n))
print (int(ans))
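# Hedged note (editor's addition): for the input line "2 10" this prints
# 1024; Python's arbitrary-precision integers keep k**n exact even for large
# exponents, which appears to be the point of this SPOJ problem.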
|
nirmeshk/oh-mainline
|
vendor/packages/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py
|
Python
|
agpl-3.0
| 4,860
| 0.003086
|
"""Ensure credentials are preserved through the authorization.
The Authorization Code Grant will need to preserve state as well as redirect
uri and the Implicit Grant will need to preserve state.
"""
from __future__ import absolute_import, unicode_literals
import json
import mock
from .test_utils import get_query_credentials, get_fragment_credentials
from ....unittest import TestCase
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2 import WebApplicationServer, MobileApplicationServer
from oauthlib.oauth2.rfc6749 import errors
class PreservationTest(TestCase):
DEFAULT_REDIRECT_URI = 'http://i.b./path'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI
self.validator.authenticate_client.side_effect = self.set_client
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
def set_state(self, state):
def set_request_state(client_id, code, client, request):
request.state = state
return True
return set_request_state
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_state_preservation(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
        h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_state('xyz')
        _, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['state'], 'xyz')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')
def test_redirect_uri_preservation(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
redirect_uri = 'http://i.b/path'
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + '&response_type=code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
# confirm_redirect_uri should return false if the redirect uri
# was given in the authorization but not in the token request.
self.validator.confirm_redirect_uri.return_value = False
code = get_query_credentials(h['Location'])['code'][0]
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['error'], 'access_denied')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + '&response_type=token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
def test_invalid_redirect_uri(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
self.validator.validate_redirect_uri.return_value = False
# authorization grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
def test_default_uri(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
self.validator.get_default_redirect_uri.return_value = None
# authorization grant
self.assertRaises(errors.MissingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MissingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
|
Bulochkin/tensorflow_pack
|
tensorflow/python/kernel_tests/fft_ops_test.py
|
Python
|
apache-2.0
| 18,811
| 0.010207
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
def _use_eigen_kernels(self):
use_eigen_kernels = False # Eigen kernels are default
if test.is_gpu_available(cuda_only=True):
use_eigen_kernels = False
return use_eigen_kernels
def _fft_kernel_label_map(self):
"""Returns a generator overriding kernel selection.
This is used to force testing of the eigen kernels, even
when they are not the default registered kernels.
Returns:
A generator in which to wrap every test.
"""
if self._use_eigen_kernels():
d = dict([(op, "eigen")
for op in [
"FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D",
"IRFFT", "IRFFT2D", "IRFFT3D", "RFFT", "RFFT2D", "RFFT3D"
]])
return ops.get_default_graph()._kernel_label_map(d)
else:
return ops.get_default_graph()._kernel_label_map({})
def _Compare(self, x, rank, fft_length=None, use_placeholder=False):
self._CompareForward(x, rank, fft_length, use_placeholder)
self._CompareBackward(x, rank, fft_length, use_placeholder)
def _CompareForward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _CompareBackward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npIFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfIFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _checkGradComplex(self, func, x, y, result_is_complex=True):
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
# func is a forward or inverse, real or complex, batched or unbatched FFT
# function with a complex input.
z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [list(x.shape), list(y.shape)],
loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
def _checkGradReal(self, func, x):
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
# func is a forward RFFT function (batched or unbatched).
z = func(inx)
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
x_jacob_t, x_jacob_n = test.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=True):
return self._tfFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=True):
return self._tfIFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.fft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.ifft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return spectral_ops.fft
elif rank == 2:
return spectral_ops.fft2d
elif rank == 3:
return spectral_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return spectral_ops.ifft
elif rank == 2:
return spectral_ops.ifft2d
elif rank == 3:
return spectral_ops.ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
with self._fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
  def testBasic(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64), rank)
def testBasicPlaceholder(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64),
rank,
use_placeholder=True)
def testRandom(self):
with self._fft_kernel_label_map():
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(gen((4,) * dims), rank)
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) /
|
Flyingfox646/flyingfox
|
src/chunks/migrations/0004_faq_fairplay.py
|
Python
|
mit
| 5,970
| 0.004275
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
faq_en = '''
<p>
<b>Why do the statistics on the site not coincide with the statistics in the game?</b>
</p>
<p>
The statistics collection algorithms of IL2 stats differ from the in-game statistics. As a consequence, these statistics will not coincide with the game's.
</p>
<br>
<p>
<b>How is the rating calculated?</b>
</p>
<p>
1) calculate how many points the player earns per life - score / (deaths + captures) = SD<br>
2) calculate how many points the player earns per hour - score / flight time = SH<br>
3) calculate the rating by the formula: (SD * SH * score) / 1000
</p>
<br>
<p>
<b>Why is my profile not displayed in the table of players?</b>
</p>
<p>
The statistics exclude inactive players from the overall rating. By default, players inactive for more than 7 days do not participate in the rating.
</p>
<br>
<p>
<b>I landed at the airfield, but the sortie status says the landing was not on an airfield. Why?</b>
</p>
<p>
Landings only count on an active airfield. Usually the active airfield is the one where you can respawn.
</p>
<br>
<p>
<b>What is the Fairplay Index?</b>
</p>
<p>
The Fairplay Index is an indicator of the correct behavior of the player, and it affects the score. The maximum value of 100% indicates that the player does not violate the rules; such a player receives 100% of the score and all bonuses. If the index is less than 100%, the player gets the percentage of the score corresponding to the current index. Also, in this case, the player does not receive any bonuses.<br>
Violations reducing the index:<br>
Disconnection -10%<br>
Shooting down a friendly aircraft -10%<br>
Destroying a friendly ground target -5%<br>
The index recovers by 5% per flying hour if the player does not violate the rules.<br>
The idea was borrowed from the Bellum War project.
</p>
<br>
'''
faq_ru = '''
<p>
<b>Почему статистика на сайте не совпадает со статистикой внутри игры?</b>
</p>
<p>
Алгоритмы сбора статистики IL2 stats отличаются от статистики в игре. Как следствие данная статистика не будет совпадать с игровой.
</p>
<br>
<p>
<b>Как рассчитывается рейтинг?</b>
</p>
<p>
Рейтинг пилота рассчитывается на основе заработанных пилотом очков, среднего количества очков за жизнь и за час. Такой способ расчета рейтинга учитывает не только количественные, но и качественные показатели пилота, а так же сводит в единую систему оценки пилотов разных специализаций.<br>
Как именно рассчитывается рейтинг:<br>
1) вычисляем сколько игрок зарабатывает очков за одну жизнь - очки / (смерти + плен) = ОС<br>
2) вычисляем сколько игрок зарабатывает очков за один час налета - очки / налет часов = ОЧ<br>
3) вычисляем рейтинг по формуле: (ОС * ОЧ * очки) / 1000
</p>
<br>
<p>
<b>Почему мой профиль не отображается в общей таблице игроков?</b>
</p>
<p>
В статистике включена опция которая исключает неактивных игроков из общего рейтинга. По умолчанию игроки неактивные более 7 дней - не участвуют в рейтинге.
</p>
<br>
<p>
<b>Я приземлился на аэродром, но в статусе вылета указана посадка в поле. Почему?</b>
</p>
<p>
Посадка засчитывается только на активный аэродром. Как правило активный аэродром это тот на котором вы можете начать вылет.
</p>
<br>
<p>
<b>Что такое "Индекс честной игры"?</b>
</p>
<p>
Индекс честной игры (Fairplay) это показатель правильного поведения игрока, влияющий на получаемые им очки. Максимальное значение - 100% говорит о том, что игрок не нарушает правила, такой игрок получает 100% очков и все полагающиеся ему бонусы. Если индекс меньше 100%, то игрок получает не всю сумму заработанных очков, а лишь процент от них, соответствующий текущему индексу честной игры. Так же, в этом случае, игрок не получает ни каких бонусов.<br>
Нарушения уменьшающие индекс честной игры:<br>
Дисконнект -10%<br>
Уничтожение союзного самолета -10%<br>
Уничтожение союзной техники -5%<br>
Индекс восстанавливается по 5% за час налета, при условии игры без нарушений.<br>
Идея заимствована из проекта Bellum War.
</p>
<br>
'''
def default_chunks(apps, schema_editor):
Chunk = apps.get_model('chunks', 'Chunk')
faq = Chunk.objects.get_or_create(key='faq')[0]
faq.title_en = 'FAQ'
faq.title_ru = 'FAQ'
faq.content_en = faq_en
faq.content_ru = faq_ru
faq.save()
class Migration(migrations.Migration):
dependencies = [
('chunks', '0003_auto_20151107_2007'),
]
operations = [
migrations.RunPython(default_chunks),
]
|
ofer43211/unisubs
|
apps/teams/migrations/0057_auto__chg_field_project_slug__chg_field_task_assignee.py
|
Python
|
agpl-3.0
| 23,628
| 0.008507
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Project.slug'
db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True))
# Changing field 'Task.assignee'
db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True))
def backwards(self, orm):
# Changing field 'Project.slug'
db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50))
# Changing field 'Task.assignee'
        db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teams.TeamMember'], null=True, blank=True))
    models = {
        'auth.customuser': {
            'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'follow_new_video': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.invite': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Invite'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'assignee': ('djan
|
Alex-Ian-Hamilton/solarbextrapolation
|
solarbextrapolation/preprocessors/base.py
|
Python
|
mit
| 2,867
| 0.006278
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:18:58 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
import pickle
import time
from datetime import datetime
from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
# Internal imports
#from solarbextrapolation.utilities import *
class Preprocessors(object):
"""
A common class for all 2D pre-processing routines, tools used to pre-process
the 2D sunpy map data for use in extrapolations.
    Usage can include basic filters for noise/contrast or algorithms to
    compensate for extrapolator assumptions, such as the force-free
    assumption made by many extrapolations, which isn't true in the
    photosphere where magnetogram observations are generally taken.
Parameters
----------
map_data : `sunpy.map.GenericMap`
The sunpy map containing the data to be processed.
filepath : `string`
The optional filepath for automatic saving of preprocessed results.
notes : `string`
User specified notes that will be added to the metadata.
"""
def __init__(self, map_data, **kwargs):
"""
Method for creating a preprocessor object, using a sunpy map.
"""
# Add some type checking, we want a map object, check for .unit attribute.
self.map_input = map_data
self.routine = kwargs.get('preprocessor_routine', type(self))
self.meta = self.map_input.meta
self.meta['preprocessor_notes'] = kwargs.get('notes', '')
self.meta['preprocessor_routine'] = self.routine
self.filepath = kwargs.get('filepath', None)
def _preprocessor(self, **kwargs):
"""
        Method running the preprocessing routine and returning a sunpy map.
        For traceability this should add entries into the metadata that
include any parameters used for the given run.
"""
map_output = sunpy.map.Map(self.map_input.data, self.meta)
return map_output
def preprocess(self, **kwargs):
"""
Method to be called to run the preprocessor.
Times the process and saves output where applicable.
"""
dt_start = datetime.now()
tim_start = time.time()
map_output = self._preprocessor()
tim_duration = time.time() - tim_start
map_output.meta['preprocessor_start_time'] = dt_start.isoformat()
map_output.meta['preprocessor_duration'] = tim_duration
if self.filepath:
map_output.save(self.filepath)
return map_output
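# Hedged sketch (editor's addition): a minimal concrete preprocessor would
# override _preprocessor; the class name, clip limits, and mag_map below are
# illustrative assumptions only.
#
#     import numpy as np
#
#     class ClipPreprocessor(Preprocessors):
#         def _preprocessor(self, **kwargs):
#             data = np.clip(self.map_input.data, -1000.0, 1000.0)
#             return sunpy.map.Map(data, self.meta)
#
#     processed = ClipPreprocessor(mag_map).preprocess()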
|
fizz-ml/pytorch-aux-reward-rl
|
model_generator.py
|
Python
|
mit
| 671
| 0.011923
|
import os
import sys
import getopt
import json
def main(argv):
"""Specify input to generator with:
    -s : save path
-f : model_def folder
"""
opts, args = getopt.getopt(argv,"s:f:")
save_location = "models/ddpg_models/"
model_def_folder = ""
print(opts)
for opt, arg in opts:
if opt == "-s":
save_location = arg
elif opt == "-f":
model_def_folder = arg
json_data = open(os.path.join(model_def_folder,'config.json')).read()
    config_dict = json.loads(json_data)
print(config_dict)
exec('')
os.system("script2.py 1")
if __name__ == "__main__":
main(sys.argv[1:])
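# Hedged usage (editor's addition): invoked from a shell as, for example,
#   python model_generator.py -f my_model_def/ -s models/ddpg_models/
# where the -f folder must contain a config.json file; the folder name is an
# illustrative assumption.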
|
ieuan1630-cmis/ieuan1630-cmis-cs2
|
cs2quiz1.py
|
Python
|
cc0-1.0
| 2,934
| 0.02454
|
#40/40
#Part 1: Terminology (15 points) --> 15/15
#1 1pt) What is the symbol "=" used for?
#to assign and store values to and in variables
# 1pt
#
#2 3pts) Write a technical definition for 'function'
#a named sequence of calculations which takes input and returns output
# 3pts
#
#3 1pt) What does the keyword "return" do?
#it gives back the output or result of the function
# 1pt
#
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: integer ex: 1, 2
# 2: floating point ex: 1.2, 1.3
# 3: string ex: "hi", "hello"
# 4: boolean ex: True, False
# 5: tuple ex: ("HEllo", 3), ("Bob", 10, "fat")
# 5pts
#
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
#a function definition does not result in any output being presented; it simply defines a set of calculations which are run if and only if they are called by a function call
# 2pts
#
#
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1:input (the program takes some input values, most often from the user)
# 2:processing (the program does something with those input values to for instance calculate something)
# 3:output (the program returns the product of its labours (processing), often as something printed)
# 3pts
#
#Part 2: Programming (25 points) --> 25/25
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi (a = pi(r)^2) so r = sqrt(a/pi)
import math
#1 pt for header line
#3 pt for correct formula
#1 pt for return value
#1 pt for parameter name
#1 pt for function name
def circarea_to_diameter(circarea):
    return 2 * (math.sqrt(circarea/math.pi)) #finds radius and multiplies by 2 to get diameter
def sum_three(x, y, z): #takes three values and adds them
return x + y + z
#1pt for header line
#1pt for parameter names
#1pt for return value
#1pt for correct output format
#3pt for correct use of format function
def output(d1, d2, d3, total):
return """
Circle Diameter
C1 {}
C2 {}
C3 {}
Totals {}
""".format(d1, d2, d3, total)
#1pt header line
#1pt getting input
#1pt converting input
#1pt for calling output function
#2pt for correct diameter formula
#1pt for variable names
def main():
#input
C1 = raw_input("Area of C1: ")
C2 = raw_input("Area of C2: ")
C3 = raw_input("Area of C3: ")
#processing
d1 = circarea_to_diameter(float(C1))
d2 = circarea_to_diameter(float(C2))
d3 = circarea_to_diameter(float(C3))
total = sum_three(d1, d2, d3)
#output
print output(d1, d2, d3, total)
#1pt for calling main
main()
#1pt explanatory comments
#1pt code format
|
g2p/xtraceback
|
xtraceback/lexer.py
|
Python
|
mit
| 2,860
| 0.006643
|
try:
from pygments.lexer import bygroups, include, using
from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
from pygments.token import Text, Name, Number, Generic, String, Operator
except ImportError: # pragma: no cover
# this is for nose coverage which does a recursive import on the package
pass
else:
BASE_NAME = r"[a-zA-Z_][a-zA-Z0-9_]*"
class XPythonLexer(PythonLexer):
tokens = PythonLexer.tokens.copy()
tokens["classname"] = [
("'?[a-zA-Z_][a-zA-Z0-9_.]*'?", Name.Class, "#pop")
]
        # Marker __repr__
        ref = r"(<ref offset)(=)(\-\d+)( ?)((?:name)?)(=?)((?:%s)?)(>?)" % BASE_NAME
tokens["root"].insert(0, (ref, bygroups(Name.Builtin, Name.Operator,
Number, Text, Name.Builtin,
Name.Operator, Name.Variable,
Name.Builtin)))
class PythonXTracebackLexer(PythonTracebackLexer):
        tokens = {
            "root": [
include("entry"),
include("exception"),
(r"^.*\n", Generic.Error),
],
"entry": [
(r"^Traceback \(most recent call last\):\n",
Generic.Error,
"frame"),
# file - path is colored differently if under working directory
(r'^( File )((?:"[./<][^"]+")?)((?:"[^"]+")?)' \
'(, line )(\d+)((?:, in )?)(.*)(\n)',
bygroups(Generic.Error, Name.Builtin, Operator.Word,
Generic.Error, Number, Generic.Error, Name.Function,
Text),
"frame"),
],
"exception": [
(r"^(AssertionError: )(.+\n)", bygroups(Generic.Error,
using(XPythonLexer))),
(r"^(%s:?)(.+\n)" % BASE_NAME, bygroups(Generic.Error, String)),
],
"frame": [
include("entry"),
include("exception"),
# line of python code
(r"^((?:-+>)?)( +)(\d+)(.+\n)",
bygroups(Generic.Error, Text, Number, using(XPythonLexer))),
# variable continuation
(r"^([ ]+)('[^']+')(: )(.*)([,}]?\n)",
bygroups(Text, String, Name.Operator, using(XPythonLexer), Text)),
# variable
(r"^([ ]+)((?:g:)?)(\**%s)( = )(.+\n)" % BASE_NAME,
bygroups(Text, Name.Builtin, Name.Variable, Name.Operator,
using(XPythonLexer))),
# plain python
(r"^( )(.+)(\n)",
bygroups(Text, using(XPythonLexer), Text)),
],
}
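# --- Hedged usage sketch (not part of the original module) ---
# Renders a captured traceback with the lexer defined above; TerminalFormatter
# is a stock pygments formatter, and the traceback text is illustrative.
#
# from pygments import highlight
# from pygments.formatters import TerminalFormatter
# tb_text = ('Traceback (most recent call last):\n'
#            '  File "example.py", line 1, in <module>\n'
#            'ValueError: boom\n')
# print(highlight(tb_text, PythonXTracebackLexer(), TerminalFormatter()))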
|
alex/flanker
|
flanker/mime/message/headers/encodedword.py
|
Python
|
apache-2.0
| 3,690
| 0.000815
|
# coding:utf-8
import logging
import regex as re
import email.quoprimime
import email.base64mime
from base64 import b64encode
from flanker.mime.message import charsets, errors
log = logging.getLogger(__name__)
#deal with unfolding
foldingWhiteSpace = re.compile(r"(\n\r?|\r\n?)(\s*)")
def unfold(value):
"""
Unfolding is accomplished by simply removing any CRLF
that is immediately followed by WSP. Each header field should be
treated in its unfolded form for further syntactic and semantic
evaluation.
"""
return re.sub(foldingWhiteSpace, r"\2", value)
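# worked example: a folded header collapses onto one line, keeping the
# whitespace that followed the line break:
# >>> unfold(u"Subject: a very\r\n long subject")
# u'Subject: a very long subject'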
def decode(header):
return mime_to_unicode(header)
def mime_to_unicode(header):
"""
Takes a header value and returns a fully decoded unicode string.
    It differs from standard Python's email.header.decode_header() because:
- it is higher level, i.e. returns a unicode string instead of
an array of tuples
- it accepts Unicode and non-ASCII strings as well
    >>> mime_to_unicode("=?UTF-8?B?UmVbMl06INCX0LXQvNC70Y/QutC4?=")
    u"Re[2]: Земляки"
    >>> mime_to_unicode("hello")
    u"hello"
"""
try:
header = unfold(header)
decoded = [] # decoded parts
while header:
match = encodedWord.search(header)
if match:
start = match.start()
if start != 0:
# decodes unencoded ascii part to unicode
                    value = charsets.convert_to_unicode(ascii, header[0:start])
if value.strip():
decoded.append(value)
# decode a header =?...?= of encoding
charset, value = decode_part(
match.group('charset').lower(),
match.group('encoding').lower(),
match.group('encoded'))
decoded.append(charsets.convert_to_unicode(charset, value))
header = header[match.end():]
else:
# no match? append the remainder
# of the string to the list of chunks
decoded.append(charsets.convert_to_unicode(ascii, header))
break
return u"".join(decoded)
except Exception:
try:
log.warning(
u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
b64encode(header)))
except Exception:
log.exception("Failed to log exception")
return header
ascii = 'ascii'
#this spec refers to
#http://tools.ietf.org/html/rfc2047
encodedWord = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def decode_part(charset, encoding, value):
"""
Attempts to decode part, understands
'q' - quoted encoding
'b' - base64 mime encoding
Returns (charset, decoded-string)
"""
if encoding == 'q':
return (charset, email.quoprimime.header_decode(str(value)))
elif encoding == 'b':
# Postel's law: add missing padding
paderr = len(value) % 4
if paderr:
value += '==='[:4 - paderr]
return (charset, email.base64mime.decode(value))
elif not encoding:
return (charset, value)
else:
raise errors.DecodingError(
"Unknown encoding: {0}".format(encoding))
|
congminghaoxue/learn_python
|
change_pic_size_by_cut.py
|
Python
|
apache-2.0
| 1,474
| 0.003634
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# function: change a picture's size by cropping
import os
import os.path
import sys
import argparse
from PIL import Image
def CutImage(filein, fileout, width, height, type):
    '''
    Crop a width*height picture starting from the top-left corner.
    filein: input image path
    fileout: output image path
    width: output image width
    height: output image height
    type: output image type (png, gif, jpeg...)
    '''
img = Image.open(filein)
    out = img.crop((0, 0, width, height))  # box is (left, upper, right, lower); starts at the top-left corner
out.save(fileout, type)
if __name__ == "__main__":
argc = len(sys.argv)
cmdargs = str(sys.argv)
parser = argparse.ArgumentParser(description="Tool for change the picture to custom size")
parser.add_argument('-f', '--file', required=True, help='the file path of the input file')
    parser.add_argument('-H', '--height', type=int, required=True, help='height of the output file')
parser.add_argument('-W', '--width', type=int, required=True, help='width of the output file')
    parser.add_argument('-T', '--type', required=False, help='the type of the output file: jpeg, gif, png, etc')
args = parser.parse_args()
filein = args.file
width = args.width
height = args.height
f, e = os.path.splitext(filein)
if args.type is None:
type = 'png'
else:
type = args.type
fileout = f + "_" + str(width) + "_" + str(height) + '.' + type
CutImage(filein, fileout, width, height, type)
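# Example invocation (file name is an illustrative assumption): crops
# photo.jpg to 200x100 and writes photo_200_100.png alongside it.
#
#     python change_pic_size_by_cut.py -f photo.jpg -W 200 -H 100 -T png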
|