| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
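Each row pairs a source file's metadata with a fill-in-the-middle split of its text. A minimal sketch, assuming rows arrive as plain dicts keyed by the columns above (the example values are illustrative), showing how prefix, middle and suffix recombine into the original file:

```python
# Minimal sketch: recombine a fill-in-the-middle row into the original file text.
# Assumes each row is a dict with the columns listed above; values are illustrative.
def reassemble(row: dict) -> str:
    """Concatenate prefix, middle and suffix back into the full source file."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "SunPower/pvfactors",
    "path": "pvfactors/tests/test_geometry/test_timeseries.py",
    "language": "Python",
    "license": "bsd-3-clause",
    "prefix": "import os\n",
    "middle": "import pandas as pd\n",
    "suffix": "import numpy as np\n",
}
print(reassemble(example_row))
```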
SunPower/pvfactors
|
pvfactors/tests/test_geometry/test_timeseries.py
|
Python
|
bsd-3-clause
| 13,582
| 0
|
import os
from pvfactors.geometry.timeseries import TsPointCoords, TsLineCoords
from pvfactors.geometry.pvrow import TsPVRow
from pvfactors.geometry.pvground import TsGround, TsGroundElement
import pandas as pd
import numpy as np
from pvfactors.geometry.pvrow import PVRow
from pvfactors.geometry.base import \
BaseSide, PVSegment, PVSurface, ShadeCollection
from pvfactors.config import MIN_X_GROUND, MAX_X_GROUND
def test_ts_pvrow():
"""Test timeseries pv row creation and shading cases.
Note that shading must always be zero when pv rows are flat"""
xy_center = (0, 2)
width = 2.
df_inputs = pd.DataFrame({
'rotation_vec': [20., -30., 0.],
'shaded_length_front': [1.3, 0., 1.9],
'shaded_length_back': [0, 0.3, 0.6]})
cut = {'front': 3, 'back': 4}
ts_pvrow = TsPVRow.from_raw_inputs(
xy_center, width, df_inputs.rotation_vec,
cut, df_inputs.shaded_length_front,
df_inputs.shaded_length_back)
# check segment index
assert len(ts_pvrow.front.list_segments) == 3
assert [s.index for s in ts_pvrow.front.list_segments] == [0, 1, 2]
# Check timeseries length of front and back segments
for seg in ts_pvrow.front.list_segments:
np.testing.assert_allclose(width / cut['front'], seg.length)
for seg in ts_pvrow.back.list_segments:
np.testing.assert_allclose(width / cut['back'], seg.length)
# Check shaded length on either sides of pv rows
expected_front_shading = np.where(df_inputs.rotation_vec,
df_inputs.shaded_length_front, 0.)
expected_back_shading = np.where(df_inputs.rotation_vec,
df_inputs.shaded_length_back, 0.)
np.testing.assert_allclose(expected_front_shading,
ts_pvrow.front.shaded_length)
np.testing.assert_allclose(expected_back_shading,
ts_pvrow.back.shaded_length)
def test_plot_ts_pvrow():
is_ci = os.environ.get('CI', False)
if not is_ci:
import matplotlib.pyplot as plt
# Create a PV row
xy_center = (0, 2)
width = 2.
df_inputs = pd.DataFrame({
'rotation_vec': [20., -30., 0.],
'shaded_length_front': [1.3, 0., 1.9],
'shaded_length_back': [0, 0.3, 0.6]})
cut = {'front': 3, 'back': 4}
ts_pvrow = TsPVRow.from_raw_inputs(
xy_center, width, df_inputs.rotation_vec,
cut, df_inputs.shaded_length_front,
df_inputs.shaded_length_back)
# Plot it at ts 0
f, ax = plt.subplots()
ts_pvrow.plot_at_idx(0, ax)
plt.show()
# Plot it at ts 1
f, ax = plt.subplots()
ts_pvrow.plot_at_idx(1, ax)
plt.show()
# Plot it at ts 2: flat case
f, ax = plt.subplots()
ts_pvrow.plot_at_idx(2, ax)
plt.show()
def test_ts_pvrow_to_geometry():
"""Check that the geometries are created correctly"""
xy_center = (0, 2)
width = 2.
df_inputs = pd.DataFrame({
'rotation_vec': [20., -30., 0.],
'shaded_length_front': [1.3, 0., 1.9],
'shaded_length_back': [0, 0.3, 0.6]})
cut = {'front': 3, 'back': 4}
param_names = ['test1', 'test2']
ts_pvrow = TsPVRow.from_raw_inputs(
xy_center, width, df_inputs.rotation_vec,
cut, df_inputs.shaded_length_front,
df_inputs.shaded_length_back, param_names=param_names)
pvrow = ts_pvrow.at(0)
# Check classes of geometries
assert isinstance(pvrow, PVRow)
assert isinstance(pvrow.front, BaseSide)
assert isinstance(pvrow.back, BaseSide)
assert isinstance(pvrow.front.list_segments[0], PVSegment)
assert isinstance(pvrow.back.list_segments[0].illum_collection,
ShadeCollection)
assert isinstance(pvrow.front.list_segments[1].illum_collection
.list_surfaces[0], PVSurface)
# Check some values
np.testing.assert_allclose(pvrow.front.shaded_length, 1.3)
front_surface = (pvrow.front.list_segments[1].illum_collection
.list_surfaces[0])
    back_surface = (pvrow.back.list_segments[1].illum_collection
                    .list_surfaces[0])
n_vector_front = front_surface.n_vector
n_vector_back = back_surface.n_vector
expected_n_vec_front = np.array([-0.68404029, 1.87938524])
np.testing.assert_allclose(n_vector_front, expected_n_vec_front)
np.testing.assert_allclose(n_vector_back, - expected_n_vec_front)
assert front_surface.param_names == param_names
assert back_surface.param_names == param_names
def test_ts_ground_from_ts_pvrow():
"""Check that ground geometries are created correctly from ts pvrow"""
# Create a ts pv row
xy_center = (0, 2)
width = 2.
df_inputs = pd.DataFrame({
'rotation_vec': [20., -90., 0.],
'shaded_length_front': [1.3, 0., 1.9],
'shaded_length_back': [0, 0.3, 0.6]})
cut = {'front': 3, 'back': 4}
param_names = ['test1', 'test2']
ts_pvrow = TsPVRow.from_raw_inputs(
xy_center, width, df_inputs.rotation_vec,
cut, df_inputs.shaded_length_front,
df_inputs.shaded_length_back, param_names=param_names)
# Create ground from it
alpha_vec = np.deg2rad([80., 90., 70.])
ts_ground = TsGround.from_ts_pvrows_and_angles(
[ts_pvrow], alpha_vec, df_inputs.rotation_vec, param_names=param_names)
assert len(ts_ground.shadow_elements) == 1
# Check at specific times
ground_0 = ts_ground.at(0)
assert ground_0.n_surfaces == 4
assert ground_0.list_segments[0].shaded_collection.n_surfaces == 1
ground_1 = ts_ground.at(1) # vertical, sun above
assert ground_1.n_surfaces == 2 # only 2 illuminated surfaces
assert ground_1.list_segments[0].shaded_collection.n_surfaces == 0
assert ground_1.shaded_length == 0 # no shadow (since shadow length 0ish)
np.testing.assert_allclose(ground_0.shaded_length, 1.7587704831436)
np.testing.assert_allclose(ts_ground.at(2).shaded_length, width) # flat
# Check that all have surface params
for surf in ground_0.all_surfaces:
assert surf.param_names == param_names
def test_ts_ground_overlap():
shadow_coords = np.array([
[[[0, 0], [0, 0]], [[2, 1], [0, 0]]],
[[[1, 2], [0, 0]], [[5, 5], [0, 0]]]
])
overlap = [True, False]
# Test without overlap
ts_ground = TsGround.from_ordered_shadows_coords(shadow_coords)
np.testing.assert_allclose(ts_ground.shadow_elements[0].b2.x, [2, 1])
# Test with overlap
ts_ground = TsGround.from_ordered_shadows_coords(shadow_coords,
flag_overlap=overlap)
np.testing.assert_allclose(ts_ground.shadow_elements[0].b2.x, [1, 1])
def test_ts_ground_to_geometry():
# There should be an overlap
shadow_coords = np.array([
[[[0, 0], [0, 0]], [[2, 1], [0, 0]]],
[[[1, 2], [0, 0]], [[5, 5], [0, 0]]]
])
overlap = [True, False]
cut_point_coords = [TsPointCoords.from_array(np.array([[2, 2], [0, 0]]))]
# Test with overlap
ts_ground = TsGround.from_ordered_shadows_coords(
shadow_coords, flag_overlap=overlap, cut_point_coords=cut_point_coords)
# Run some checks for index 0
pvground = ts_ground.at(0, merge_if_flag_overlap=False,
with_cut_points=False)
assert pvground.n_surfaces == 4
assert pvground.list_segments[0].illum_collection.n_surfaces == 2
assert pvground.list_segments[0].shaded_collection.n_surfaces == 2
assert pvground.list_segments[0].shaded_collection.length == 5
np.testing.assert_allclose(pvground.shaded_length, 5)
# Run some checks for index 1
pvground = ts_ground.at(1, with_cut_points=False)
assert pvground.n_surfaces == 5
assert pvground.list_segments[0].illum_collection.n_surfaces == 3
assert pvground.list_segments[0].shaded_collection.n_surfaces == 2
assert pvground.list_segments[0].shaded_collection.length == 4
np.testing.assert_allclose(pvground.shaded_length, 4)
# Run some checks for index 0, when mer
|
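The expected-shading logic in the first test above relies on `np.where` treating a zero rotation (flat rows) as False, which is why shading is always zero in the flat case. A small stand-alone illustration with the same inputs (not part of the original test file):

```python
import numpy as np

rotation_vec = np.array([20., -30., 0.])
shaded_length_front = np.array([1.3, 0., 1.9])

# A zero rotation (flat pv rows) masks the shading to zero, as the test asserts.
expected = np.where(rotation_vec, shaded_length_front, 0.)
print(expected)  # prints [1.3 0.  0. ]
```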
azaghal/ansible
|
test/units/plugins/connection/test_local.py
|
Python
|
gpl-3.0
| 1,355
| 0.001476
|
#
# (c) 2020 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import pytest
from units.compat import unittest
from ansible.plugins.connection import local
from ansible.playbook.play_context import PlayContext
class TestLocalConnectionClass(unittest.TestCase):
def test_local_connection_module(self):
play_context = PlayContext()
play_context.prompt = (
'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
)
in_stream = StringIO()
self.assertIsInstance(local.Connection(play_context, in_stream), local.Connection)
|
louistin/thinkstation
|
a_byte_of_python/unit_15_standard_library/compress_test.py
|
Python
|
mit
| 207
| 0
|
#!/usr/bin/python
# _*_ coding: utf-8 _*_
import zlib
s = b'witch which has which witches wrist watch'
print len(s)
t = zlib.compress(s)
print len(t)
print t
print zlib.decompress(t)
print zlib.crc32(s)
|
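The snippet above uses Python 2 print statements; a Python 3 rendering of the same zlib round-trip (illustrative, not part of the original file):

```python
import zlib

s = b'witch which has which witches wrist watch'
print(len(s))                     # length of the raw bytes
t = zlib.compress(s)              # DEFLATE-compress the payload
print(len(t))                     # compressed length (shorter for repetitive text)
print(zlib.decompress(t) == s)    # round-trip check
print(zlib.crc32(s))              # CRC-32 checksum of the original data
```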
CalthorpeAnalytics/urbanfootprint
|
footprint/main/management/commands/create_datadump.py
|
Python
|
gpl-3.0
| 4,014
| 0.005232
|
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
import pwd
import shlex
import subprocess
from optparse import make_option
import os
from distutils import spawn
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from footprint.utils.postgres_utils import build_postgres_conn_string, postgres_env_password_loaded
class Command(BaseCommand):
args = '<destination_folder> (optional - if not specified use settings.py option)'
help = 'Creates a data dump'
# I hate having to use optparse. We should be using argparse.
# When https://code.djangoproject.com/ticket/19973 gets fixed, we can
# use the new way of parsing (which will likely use argparse instead).
# In the meantime we'll stick with the documented way of doing this
option_list = BaseCommand.option_list + (
make_option('--destination-folder',
action='store',
type='string',
dest='destination_folder',
default=getattr(settings, 'CALTHORPE_DATA_DUMP_LOCATION', ''),
help='output folder for daily dump'),
)
def handle(self, *args, **options):
rsync = spawn.find_executable('rsync')
if rsync is None:
raise CommandError('rsync not found')
pg_dump = spawn.find_executable('pg_dump')
if pg_dump is None:
raise CommandError('pg_dump not found')
if options['destination_folder'] == '':
raise CommandError('--destination-folder not specified in command line nor settings.py')
# make sure destination folder exists
if not os.path.exists(options['destination_folder']):
try:
os.makedirs(options['destination_folder'])
except Exception, e:
raise Exception("Cannot create directory with user %s. Exception %s" % (
pwd.getpwuid(os.getuid())[0],
e.message))
pg_output_file_name = os.path.join(options['destination_folder'], 'pg_dump.dmp')
media_output_copy_folder = os.path.join(options['destination_folder'], 'media')
# make sure destination daily media folder also exists
if not os.path.exists(media_output_copy_folder):
os.makedirs(media_output_copy_folder)
#################
#rsync folder
rsync += ' -rapthzvO {extra} {src} {dest}'.format(extra=settings.CALTHORPE_DAILY_DUMP_RSYNC_EXTRA_PARAMS,
src=settings.MEDIA_ROOT,
dest=media_output_copy_folder)
self.stdout.write(rsync + '\n')
output = self.exec_cmd(rsync)
self.stdout.write(output)
#################
#do database dump
print settings.DATABASES['default']
with postgres_env_password_loaded():
pg_dump += ' {pg_conn_string} -Fc -f {output_file_name}'.format(
pg_conn_string=build_postgres_conn_string(settings.DATABASES['default']),
output_file_name=pg_output_file_name)
output = self.exec_cmd(pg_dump)
self.stdout.write(output)
self.stdout.write('Wrote ' + pg_output_file_name + '\n')
def exec_cmd(self, cmd):
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise CommandError('Error Executing "{cmd}\n{output}\n"'.format(cmd=cmd, output=out))
return out
|
rahulunair/nova
|
nova/api/openstack/compute/limits.py
|
Python
|
apache-2.0
| 3,590
| 0
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.api_version_request \
import MAX_IMAGE_META_PROXY_API_VERSION
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack.api_version_request \
    import MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION
from nova.api.openstack.api_version_request \
import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION
from nova.api.openstack.compute.schemas import limits
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.api import validation
from nova.policies import limits as limits_policies
from nova import quota
QUOTAS = quota.QUOTAS
# This is a list of limits which needs to filter out from the API response.
# This is due to the deprecation of network related proxy APIs, the related
# limit should be removed from the API also.
FILTERED_LIMITS_2_36 = ['floating_ips', 'security_groups',
'security_group_rules']
FILTERED_LIMITS_2_57 = list(FILTERED_LIMITS_2_36)
FILTERED_LIMITS_2_57.extend(['injected_files', 'injected_file_content_bytes'])
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.expected_errors(())
@validation.query_schema(limits.limits_query_schema)
def index(self, req):
return self._index(req)
@wsgi.Controller.api_version(MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, # noqa
MAX_IMAGE_META_PROXY_API_VERSION) # noqa
@wsgi.expected_errors(())
@validation.query_schema(limits.limits_query_schema)
def index(self, req):
return self._index(req, FILTERED_LIMITS_2_36)
@wsgi.Controller.api_version( # noqa
MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION, '2.56') # noqa
@wsgi.expected_errors(())
@validation.query_schema(limits.limits_query_schema)
def index(self, req):
return self._index(req, FILTERED_LIMITS_2_36, max_image_meta=False)
@wsgi.Controller.api_version('2.57') # noqa
@wsgi.expected_errors(())
@validation.query_schema(limits.limits_query_schema_275, '2.75')
@validation.query_schema(limits.limits_query_schema, '2.57', '2.74')
def index(self, req):
return self._index(req, FILTERED_LIMITS_2_57, max_image_meta=False)
def _index(self, req, filtered_limits=None, max_image_meta=True):
"""Return all global limit information."""
context = req.environ['nova.context']
context.can(limits_policies.BASE_POLICY_NAME)
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=True)
builder = limits_views.ViewBuilder()
return builder.build(req, quotas, filtered_limits=filtered_limits,
max_image_meta=max_image_meta)
|
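The comment above explains that network-proxy limits are dropped from the response at newer microversions; a hedged illustration of that filtering step (the real pruning happens inside limits_views.ViewBuilder, which is not shown here, so the dict and names below are illustrative):

```python
# Illustrative only: how a filtered-limits list conceptually prunes a quota dict.
quotas = {
    'instances': 10,
    'floating_ips': 10,          # filtered out at microversion >= 2.36
    'security_groups': 10,       # filtered out at microversion >= 2.36
    'injected_files': 5,         # additionally filtered at microversion >= 2.57
}
FILTERED_LIMITS_2_36 = ['floating_ips', 'security_groups', 'security_group_rules']

visible = {k: v for k, v in quotas.items() if k not in FILTERED_LIMITS_2_36}
print(visible)  # {'instances': 10, 'injected_files': 5}
```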
GenericStudent/home-assistant
|
homeassistant/components/openweathermap/config_flow.py
|
Python
|
apache-2.0
| 4,415
| 0.00068
|
"""Config flow for OpenWeatherMap."""
from pyowm import OWM
from pyowm.exceptions.api_call_error import APICallError
from pyowm.exceptions.api_response_error import UnauthorizedError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MODE,
CONF_NAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_LANGUAGE,
DEFAULT_FORECAST_MODE,
DEFAULT_LANGUAGE,
DEFAULT_NAME,
FORECAST_MODES,
LANGUAGES,
)
from .const import DOMAIN # pylint:disable=unused-import
SCHEMA = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): str,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_MODE, default=DEFAULT_FORECAST_MODE): vol.In(FORECAST_MODES),
vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): vol.In(LANGUAGES),
}
)
class OpenWeatherMapConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for OpenWeatherMap."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OpenWeatherMapOptionsFlow(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
latitude = user_input[CONF_LATITUDE]
longitude = user_input[CONF_LONGITUDE]
await self.async_set_unique_id(f"{latitude}-{longitude}")
self._abort_if_unique_id_configured()
try:
api_online = await _is_owm_api_online(
self.hass, user_input[CONF_API_KEY]
)
if not api_online:
errors["base"] = "invalid_api_key"
except UnauthorizedError:
errors["base"] = "invalid_api_key"
except APICallError:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
return self.async_show_form(step_id="user", data_schema=SCHEMA, errors=errors)
async def async_step_import(self, import_input=None):
"""Set the config entry up from yaml."""
config = import_input.copy()
if CONF_NAME not in config:
config[CONF_NAME] = DEFAULT_NAME
if CONF_LATITUDE not in config:
config[CONF_LATITUDE] = self.hass.config.latitude
if CONF_LONGITUDE not in config:
config[CONF_LONGITUDE] = self.hass.config.longitude
if CONF_MODE not in config:
config[CONF_MODE] = DEFAULT_FORECAST_MODE
if CONF_LANGUAGE not in config:
            config[CONF_LANGUAGE] = DEFAULT_LANGUAGE
return await self.async_step_user(config)
class OpenWeatherMapOptionsFlow(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=self._get_options_schema(),
)
def _get_options_schema(self):
return vol.Schema(
{
vol.Optional(
CONF_MODE,
default=self.config_entry.options.get(
CONF_MODE, DEFAULT_FORECAST_MODE
),
): vol.In(FORECAST_MODES),
vol.Optional(
CONF_LANGUAGE,
default=self.config_entry.options.get(
CONF_LANGUAGE, DEFAULT_LANGUAGE
),
): vol.In(LANGUAGES),
}
)
async def _is_owm_api_online(hass, api_key):
owm = OWM(api_key)
return await hass.async_add_executor_job(owm.is_API_online)
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_network_interfaces_operations.py
|
Python
|
mit
| 64,200
| 0.005109
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
|
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cl
|
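The begin_delete method above returns an AsyncLROPoller; a hedged usage sketch (client construction is omitted, the resource names are illustrative, and the network_interfaces attribute is assumed from the usual NetworkManagementClient layout):

```python
# Hypothetical usage sketch for the async begin_delete operation shown above.
async def delete_nic(client) -> None:
    # begin_delete returns an AsyncLROPoller; awaiting .result() waits for the
    # long-running operation to finish (or raises HttpResponseError on failure).
    poller = await client.network_interfaces.begin_delete(
        resource_group_name="example-rg",        # illustrative name
        network_interface_name="example-nic",    # illustrative name
    )
    await poller.result()
```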
Digilent/u-boot-digilent
|
tools/binman/etype/x86_reset16.py
|
Python
|
gpl-2.0
| 1,018
| 0.000982
|
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Entry-type module for the 16-bit x86 reset code for U-Boot
#
from binman.entry import Entry
from binman.etype.blob import Entry_blob
class Entry_x86_reset16(Entry_blob):
"""x86 16-bit reset code for U-Boot
Properties / Entry arguments:
- filename: Filename of u-boot-x86-reset16.bin (default
'u-boot-x86-reset16.bin')
x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
must be placed at a particular address. This entry holds that code. It is
    typically placed at offset CONFIG_RESET_VEC_LOC. The code is responsible
for jumping to the x86-start16 code, which continues execution.
For 64-bit U-Boot, the 'x86_reset16_spl' entry type is used instead.
"""
def __init__(self, section, etype, node):
super().__init__(section, etype, node)
def GetDefaultFilename(self):
return 'u-boot-x86-reset16.bin'
|
xahhy/Django-vod
|
vodmanagement/models.py
|
Python
|
lgpl-3.0
| 14,356
| 0.002854
|
import logging
import os
import datetime
import six
import humanfriendly
from pathlib import Path
from django.db import models
from django.utils.html import format_html
from django.utils.encoding import uri_to_iri
from django.core.management import call_command
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_init, post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from django.core.files import File
from sortedm2m.fields import SortedManyToManyField
from uuslug import uuslug
# from moviepy.editor import VideoFileClip # get video duration
from .my_storage import VodStorage
from admin_resumable.fields import (
ModelAdminResumableFileField, ModelAdminResumableImageField,
ModelAdminResumableMultiFileField, ModelAdminResumableRestoreFileField
)
from xpinyin import Pinyin # for pinyin search
if six.PY3:
from django.utils.encoding import smart_str
else:
from django.utils.encoding import smart_unicode as smart_str
"""
Copy data in XXX model:
>>>
from vodmanagement.models import *
objs=Vod.objects.all()
for i in range(0,10):
newobj=objs[0]
newobj.pk=None
newobj.save()
>>>
This script will create 10 copies of objs[0] in the database
"""
class UserPermission(models.Model):
user = models.OneToOneField(User)
permission = models.CharField(max_length=100, blank=True, null=True)
end_date = models.DateTimeField(blank=True, null=True)
def __str__(self):
return str(self.user)
def has_permision(self):
delta = self.end_date.date() - datetime.date.today()
print(delta.days)
if delta.days >= 0:
return True
return False
class VodManager(models.Manager):
def active(self, *args, **kwargs):
return super(VodManager, self) # .filter(draft=False).filter(publish__lte=timezone.now())
def upload_location(instance, filename):
# filebase, extension = filename.split(".")
# return "%s/%s.%s" %(instance.id, instance.id, extension)
VodModel = instance.__class__
print('save')
    if VodModel.objects.count() != 0:
new_id = VodModel.objects.order_by("id").last().id - 1
else:
new_id = 0
"""
instance.__class__ gets the model Post. We must use this method because the model is defined below.
Then create a queryset ordered by the "id"s of each object,
Then we get the last object in the queryset with `.last()`
Which will give us the most recently created Model instance
    We add 1 to it, so we get what should be the same id as the post we are creating.
|
"""
print('save image')
return "%s/%s" % (new_id, filename)
def upload_image_location(instance, filename):
VodModel = instance.__class__
    if VodModel.objects.count() != 0:
new_id = VodModel.objects.order_by("id").last().id + 1
else:
new_id = 0
folder = instance.save_path
if folder == "default":
category = instance.category.name
else:
        category = instance.category.name + '_' + folder
return "%s/images/%s/%s" % (category, new_id, filename)
def upload_record_image_location(instance, filename):
return "%s/images/%s" % (settings.RECORD_MEDIA_FOLDER, filename)
def default_description(instance):
default = instance.title
print(default)
return 'The %s description' % default
# Create your models here.
def default_filedir():
return settings.MEDIA_ROOT
# ---------------------------------------------------------------------
# if leave path blank,it will save it as the default file dir:settings.MEDIA_ROOT
class FileDirectory(models.Model):
path = models.CharField(max_length=512, default=default_filedir, blank=True)
class Meta:
verbose_name = '视频上传路径'
verbose_name_plural = '视频上传路径管理'
def __str__(self):
return self.path
def save(self, *args, **kwargs):
if self.path is None or self.path == "":
self.path = default_filedir()
super(FileDirectory, self).save(*args, **kwargs)
# ---------------------------------------------------------------------
# Two selections only:Common,Special purpose
TYPES = (
('common', 'Common'),
('special', 'Special purpose'),
)
VIDEO_QUALITY = [
('SD', '标清'),
('HD', '高清'),
('FHD', '超清'),
]
SAVE_PATH = (
('', settings.LOCAL_MEDIA_ROOT),
)
class VideoRegion(models.Model):
name = models.CharField(max_length=200, verbose_name='地区', unique=True)
class Meta:
verbose_name = '视频地区管理'
verbose_name_plural = '视频地区'
def __str__(self):
return self.name
class VideoCategory(models.Model):
name = models.CharField(max_length=128, verbose_name='分类名称')
type = models.CharField(max_length=128, choices=TYPES, default='common', verbose_name='类型')
isSecret = models.BooleanField(default=False, verbose_name='是否加密')
level = models.IntegerField(null=False, blank=False, default=1, choices=((1, '一级分类'), (2, '二级分类')),
verbose_name='分类等级')
subset = models.ManyToManyField('self', blank=True, verbose_name='分类关系')
class Meta:
verbose_name = '视频分类'
verbose_name_plural = '视频分类管理'
def __str__(self):
base_name = self.name + str(' (level %d)' % (self.level))
if self.subset.first() and self.level == 2:
return '--'.join([self.subset.first().name, base_name])
else:
return base_name
def save(self, *args, **kwargs):
super(VideoCategory, self).save(*args, **kwargs)
def colored_level(self):
color_code = 'red' if self.level == 1 else 'green'
return format_html(
'<span style="color:{};">{}</span>',
color_code,
self.get_level_display()
)
colored_level.short_description = '分级'
# ---------------------------------------------------------------------
class MultipleUpload(models.Model):
files = ModelAdminResumableMultiFileField(null=True, blank=True, storage=VodStorage(), verbose_name='文件')
save_path = models.CharField(max_length=128, blank=False, null=True, verbose_name='保存路径')
category = models.ForeignKey(VideoCategory, null=True, verbose_name='分类')
class Meta:
verbose_name = '批量上传'
verbose_name_plural = '批量上传管理'
# ---------------------------------------------------------------------
# TODO(hhy): Please Leave This Model Here. It Will Be Use In The Future.
# class VideoTag(models.Model):
# name = models.CharField(max_length=200, null=False, blank=False)
#
# def __str__(self):
# return self.name
class Restore(models.Model):
txt_file = models.FileField(blank=True, null=True, verbose_name='备份配置文件')
zip_file = ModelAdminResumableRestoreFileField(null=True, blank=True, storage=VodStorage(), verbose_name='压缩包')
save_path = models.CharField(max_length=128, blank=False, null=True) # ,default=FileDirectory.objects.first())
class Meta:
verbose_name = '视频导入'
verbose_name_plural = '视频导入'
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
result = super(Restore, self).save()
file_path = self.txt_file.path
call_command('loaddata', file_path)
return result
class Vod(models.Model):
title = models.CharField(max_length=120, verbose_name='标题')
# image = models.ImageField(upload_to=upload_image_location, null=True, blank=True)
# video = models.FileField(null=True,blank=True,storage=VodStorage())
image = ModelAdminResumableImageField(null=True, blank=True, storage=VodStorage(), max_length=1000,
verbose_name='缩略图')
video = ModelAdminResumableFileField(null=True, blank=True, storage=VodStorage(), max_length=1000,
verbose_name='视频')
duration = models.CharField(max_length=50, blank=True, null=True, verbose_name='时长')
local_video = models.FilePathField(path=settings.LOCAL_MEDIA_ROOT, blank=True, recursive=True)
definition = mod
|
pymedusa/SickRage
|
medusa/server/api/v2/auth.py
|
Python
|
gpl-3.0
| 2,962
| 0.001013
|
# coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AuthHandler(BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
"""Request JWT."""
username = app.WEB_USERNAME
password = app.WEB_PASSWORD
# If the user hasn't set a username and/or password just let them login
if not username.strip() or not password.strip():
return self._login()
if not self.request.body:
return self._failed_login(error='No Credentials Provided')
if self.request.headers['content-type'] != 'application/json':
return self._failed_login(error='Incorrect content-type')
request_body = json_decode(self.request.body)
submitted_username = request_body.get('username')
submitted_password = request_body.get('password')
submitted_exp = request_body.get('exp', 86400)
if username != submitted_username or password != submitted_password:
            return self._failed_login(error='Invalid credentials')
return self._login(submitted_exp)
def _login(self, exp=86400):
self.set_header('Content-Type', 'application/json')
if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
notifiers.notify_login(self.request.remote_ip)
log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
        time_now = int(time.time())
return self._ok(data={
'token': jwt.encode({
'iss': 'Medusa ' + text_type(app.APP_VERSION),
'iat': time_now,
# @TODO: The jti should be saved so we can revoke tokens
'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
}, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
})
def _failed_login(self, error=None):
log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
'user': app.WEB_USERNAME,
'ip': self.request.remote_ip
})
return self._unauthorized(error=error)
|
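The token produced in _login above is an HS256-signed JWT; a hedged sketch of how such a token could be verified with the same PyJWT library (the function name and parameters here are illustrative):

```python
import jwt  # PyJWT, the same library used in AuthHandler._login above


def verify_api_token(token: str, encryption_secret: str) -> dict:
    """Decode an API v2 token and return its claims.

    Raises jwt exceptions on a bad signature or an expired token; the exp claim
    is checked automatically, and iss/iat/jti/username/apiKey are the claims set
    by AuthHandler._login.
    """
    return jwt.decode(token, encryption_secret, algorithms=['HS256'])
```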
optimizely/tcollector
|
tcollector.py
|
Python
|
lgpl-3.0
| 53,880
| 0.001188
|
#!/usr/bin/python
# This file is part of tcollector.
# Copyright (C) 2010 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
# tcollector.py
#
"""Simple manager for collection scripts that run and gather data.
The tcollector gathers the data and sends it to the TSD for storage."""
#
# by Mark Smith <[email protected]>.
#
import atexit
import errno
import fcntl
import logging
import os
import random
import re
import signal
import socket
import subprocess
import sys
import threading
import time
from logging.handlers import RotatingFileHandler
from Queue import Queue
from Queue import Empty
from Queue import Full
from optparse import OptionParser
# global variables.
COLLECTORS = {}
GENERATION = 0
DEFAULT_LOG = '/var/log/tcollector.log'
LOG = logging.getLogger('tcollector')
ALIVE = True
# If the SenderThread catches more than this many consecutive uncaught
# exceptions, something is not right and tcollector will shutdown.
# Hopefully some kind of supervising daemon will then restart it.
MAX_UNCAUGHT_EXCEPTIONS = 100
DEFAULT_PORT = 4242
MAX_REASONABLE_TIMESTAMP = 2209212000 # Good until Tue 3 Jan 14:00:00 GMT 2040
# How long to wait for datapoints before assuming
# a collector is dead and restarting it
ALLOWED_INACTIVITY_TIME = 600 # seconds
MAX_SENDQ_SIZE = 10000
MAX_READQ_SIZE = 100000
def register_collector(collector):
"""Register a collector with the COLLECTORS global"""
assert isinstance(collector, Collector), "collector=%r" % (collector,)
# store it in the global list and initiate a kill for anybody with the
# same name that happens to still be hanging around
if collector.name in COLLECTORS:
col = COLLECTORS[collector.name]
if col.proc is not None:
LOG.error('%s still has a process (pid=%d) and is being reset,'
' terminating', col.name, col.proc.pid)
col.shutdown()
COLLECTORS[collector.name] = collector
class ReaderQueue(Queue):
"""A Queue for the reader thread"""
def nput(self, value):
"""A nonblocking put, that simply logs and discards the value when the
queue is full, and returns false if we dropped."""
try:
self.put(value, False)
except Full:
LOG.error("DROPPED LINE: %s", value)
return False
return True
class Collector(object):
"""A Collector is a script that is run that gathers some data
and prints it out in standard TSD format on STDOUT. This
class maintains all of the state information for a given
collector and gives us utility methods for working with
it."""
def __init__(self, colname, interval, filename, mtime=0, lastspawn=0):
"""Construct a new Collector."""
self.name = colname
self.interval = interval
self.filename = filename
self.lastspawn = lastspawn
self.proc = None
self.nextkill = 0
self.killstate = 0
self.dead = False
self.mtime = mtime
self.generation = GENERATION
self.buffer = ""
self.datalines = []
# Maps (metric, tags) to (value, repeated, line, timestamp) where:
# value: Last value seen.
# repeated: boolean, whether the last value was seen more than once.
# line: The last line that was read from that collector.
# timestamp: Time at which we saw the value for the first time.
# This dict is used to keep track of and remove duplicate values.
# Since it might grow unbounded (in case we see many different
# combinations of metrics and tags) someone needs to regularly call
# evict_old_keys() to remove old entries.
self.values = {}
self.lines_sent = 0
self.lines_received = 0
self.lines_invalid = 0
self.last_datapoint = int(time.time())
def read(self):
"""Read bytes from our subprocess and store them in our temporary
line storage buffer. This needs to be non-blocking."""
# we have to use a buffer because sometimes the collectors
# will write out a bunch of data points at one time and we
# get some weird sized chunk. This read call is non-blocking.
# now read stderr for log messages, we could buffer here but since
# we're just logging the messages, I don't care to
try:
out = self.proc.stderr.read()
if out:
LOG.debug('reading %s got %d bytes on stderr',
self.name, len(out))
for line in out.splitlines():
LOG.warning('%s: %s', self.name, line)
except IOError, (err, msg):
if err != errno.EAGAIN:
raise
except:
LOG.exception('uncaught exception in stderr read')
# we have to use a buffer because sometimes the collectors will write
# out a bunch of data points at one time and we get some weird sized
# chunk. This read call is non-blocking.
try:
self.buffer += self.proc.stdout.read()
if len(self.buffer):
LOG.debug('reading %s, buffer now %d bytes',
self.name, len(self.buffer))
except IOError, (err, msg):
if err != errno.EAGAIN:
raise
except:
# sometimes the process goes away in another thread and we don't
# have it anymore, so log an error and bail
LOG.exception('uncaught exception in stdout read')
            return
# iterate for each line we have
while self.buffer:
idx = self.buffer.find('\n')
if idx == -1:
break
# one full line is now found and we can pull it out of the buffer
line = self.buffer[0:idx].strip()
if line:
self.datalines.append(line)
                self.last_datapoint = int(time.time())
self.buffer = self.buffer[idx+1:]
def collect(self):
"""Reads input from the collector and returns the lines up to whomever
is calling us. This is a generator that returns a line as it
becomes available."""
while self.proc is not None:
self.read()
if not len(self.datalines):
return
while len(self.datalines):
yield self.datalines.pop(0)
def shutdown(self):
"""Cleanly shut down the collector"""
if not self.proc:
return
try:
if self.proc.poll() is None:
kill(self.proc)
for attempt in range(5):
if self.proc.poll() is not None:
return
LOG.info('Waiting %ds for PID %d (%s) to exit...'
% (5 - attempt, self.proc.pid, self.name))
time.sleep(1)
kill(self.proc, signal.SIGKILL)
self.proc.wait()
except:
# we really don't want to die as we're trying to exit gracefully
LOG.exception('ignoring uncaught exception while shutting down')
def evict_old_keys(self, cut_off):
"""Remove old entries from the cache used to detect duplicate values.
Args:
cut_off: A UNIX timestamp. Any value that's older than this will be
removed from the cache.
"""
for key in self.values.keys():
time = self.values[key][3]
if t
|
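The suffix above cuts off inside evict_old_keys. Based on the comment in __init__ (the cached tuple stores the timestamp at index 3), a hedged reconstruction of what such an eviction loop typically looks like, not necessarily the author's exact code:

```python
def evict_old_keys(self, cut_off):
    """Remove cached (metric, tags) entries whose timestamp is older than cut_off.

    Sketch only: the stored tuple is (value, repeated, line, timestamp), so the
    timestamp sits at index 3, matching the comment in Collector.__init__ above.
    """
    for key in list(self.values.keys()):   # copy keys so we can delete while iterating
        timestamp = self.values[key][3]
        if timestamp < cut_off:
            del self.values[key]
```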
kevinhughes27/TensorKart
|
record.py
|
Python
|
mit
| 6,952
| 0.008631
|
#!/usr/bin/env python
import numpy as np
import os
import shutil
import mss
import matplotlib
matplotlib.use('TkAgg')
from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigCanvas
from PIL import ImageTk, Image
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
else:
# Python 2 specific definitions
import Tkinter as tk
import ttk
import tkMessageBox
from utils import Screenshot, XboxController
IMAGE_SIZE = (320, 240)
IDLE_SAMPLE_RATE = 1500
SAMPLE_RATE = 200
IMAGE_TYPE = ".png"
class MainWindow():
""" Main frame of the application
"""
def __init__(self):
self.root = tk.Tk()
self.sct = mss.mss()
self.root.title('Data Acquisition')
self.root.geometry("660x325")
self.root.resizable(False, False)
# Init controller
self.controller = XboxController()
# Create GUI
self.create_main_panel()
# Timer
self.rate = IDLE_SAMPLE_RATE
self.sample_rate = SAMPLE_RATE
self.idle_rate = IDLE_SAMPLE_RATE
self.recording = False
self.t = 0
self.pause_timer = False
self.on_timer()
self.root.mainloop()
def create_main_panel(self):
# Panels
top_half = tk.Frame(self.root)
top_half.pack(side=tk.TOP, expand=True, padx=5, pady=5)
message = tk.Label(self.root, text="(Note: UI updates are disabled while recording)")
message.pack(side=tk.TOP, padx=5)
bottom_half = tk.Frame(self.root)
bottom_half.pack(side=tk.LEFT, padx=5, pady=10)
# Images
self.img_panel = tk.Label(top_half, image=ImageTk.PhotoImage("RGB", size=IMAGE_SIZE)) # Placeholder
self.img_panel.pack(side = tk.LEFT, expand=False, padx=5)
# Joystick
self.init_plot()
self.PlotCanvas = FigCanvas(figure=self.fig, master=top_half)
self.PlotCanvas.get_tk_widget().pack(side=tk.RIGHT, expand=False, padx=5)
# Recording
textframe = tk.Frame(bottom_half, width=332, height=15, padx=5)
textframe.pack(side=tk.LEFT)
textframe.pack_propagate(0)
self.outputDirStrVar = tk.StringVar()
self.txt_outputDir = tk.Entry(textframe, textvariable=self.outputDirStrVar, width=100)
self.txt_outputDir.pack(side=tk.LEFT)
self.outputDirStrVar.set("samples/" + datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
self.record_button = ttk.Button(bottom_half, text="Record", command=self.on_btn_record)
self.record_button.pack(side = tk.LEFT, padx=5)
def init_plot(self):
self.plotMem = 50 # how much data to keep on the plot
self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot
self.fig = Figure(figsize=(4,3), dpi=80) # 320,240
self.axes = self.fig.add_subplot(111)
def on_timer(self):
self.poll()
# stop drawing if recording to avoid slow downs
if self.recording == False:
self.draw()
if not self.pause_timer:
self.root.after(self.rate, self.on_timer)
def poll(self):
self.img = self.take_screenshot()
self.controller_data = self.controller.read()
self.update_plot()
if self.recording == True:
self.save_data()
self.t += 1
def take_screenshot(self):
# Get raw pixels from the screen
sct_img = self.sct.grab({ "top": Screenshot.OFFSET_Y,
"left": Screenshot.OFFSET_X,
"width": Screenshot.SRC_W,
"height": Screenshot.SRC_H})
# Create the Image
return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
def update_plot(self):
self.plotData.append(self.controller_data) # adds to the end of the list
self.plotData.pop(0) # remove the first item in the list, ie the oldest
def save_data(self):
image_file = self.outputDir+'/'+'img_'+str(self.t)+IMAGE_TYPE
self.img.save(image_file)
# write csv line
self.outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' )
def draw(self):
# Image
self.img.thumbnail(IMAGE_SIZE, Image.ANTIALIAS) # Resize
self.img_panel.img = ImageTk.PhotoImage(self.img)
self.img_panel['image'] = self.img_panel.img
# Joystick
x = np.asarray(self.plotData)
self.axes.clear()
self.axes.plot(range(0,self.plotMem), x[:,0], 'r')
self.axes.plot(range(0,self.plotMem), x[:,1], 'b')
self.axes.plot(range(0,self.plotMem), x[:,2], 'g')
self.axes.plot(range(0,self.plotMem), x[:,3], 'k')
self.axes.plot(range(0,self.plotMem), x[:,4], 'y')
self.PlotCanvas.draw()
def on_btn_record(self):
# pause timer
self.pause_timer = True
if self.recording:
self.recording = False
else:
self.start_recording()
if self.recording:
self.t = 0 # Reset our counter for the new recording
self.record_button["text"] = "Stop"
self.rate = self.sample_rate
# make / open outfile
self.outfile = open(self.outputDir+'/'+'data.csv', 'a')
else:
self.record_button["text"] = "Record"
self.rate = self.idle_rate
self.outfile.close()
# un pause timer
self.pause_timer = False
self.on_timer()
def start_recording(self):
should_record = True
# check that a dir has been specified
if not self.outputDirStrVar.get():
tkMessageBox.showerror(title='Error', message='Specify the Output Directory', parent=self.root)
should_record = False
else: # a directory was specified
self.outputDir = self.outputDirStrVar.get()
            # check if path exists - i.e. may be saving over data
if os.path.exists(self.outputDir):
# overwrite the data, yes/no?
                if tkMessageBox.askyesno(title='Warning!', message='Output Directory Exists - Overwrite Data?', parent=self.root):
# delete & re-make the dir:
shutil.rmtree(self.outputDir)
os.mkdir(self.outputDir)
# answer was 'no', so do not overwrite the data
else:
should_record = False
self.txt_outputDir.focus_set()
# directory doesn't exist, so make one
else:
os.mkdir(self.outputDir)
self.recording = should_record
if __name__ == '__main__':
app = MainWindow()
|
seankelly/buildbot
|
master/buildbot/scripts/trycmd.py
|
Python
|
gpl-2.0
| 903
| 0
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
def trycmd(config):
from buildbot.clients import tryclient
    t = tryclient.Try(config)
t.run()
return 0
|
ppb/ppb-vector
|
tests/test_member_access.py
|
Python
|
artistic-2.0
| 444
| 0
|
from hypothesis import given
from ppb_vector import Vector
from utils import floats, vectors
@given(x=floats(), y=floats())
def test_class_member_access(x: float, y: float):
v = Vector(x, y)
assert v.x == x
assert v.y == y
@given(v=vectors())
def test_index_access(v: Vector):
assert v[0] == v.x
assert v[1] == v.y
@given(v=vectors())
def test_key_access(v: Vector):
assert v["x"] == v.x
    assert v["y"] == v.y
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/sparse/linalg/isolve/lsmr.py
|
Python
|
mit
| 15,126
| 0.000463
|
"""
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong [email protected]
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders [email protected]
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsmr']
from numpy import zeros, infty, atleast_1d
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False, x0=None):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. B is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : array_like, shape (m,)
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``.
x0 : array_like, shape (n,), optional
Initial guess of x, if None zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
http://arxiv.org/abs/1006.0758
.. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsmr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
0
>>> x
array([ 0., 0.])
The stopping code `istop=0` returned indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> normr
4.440892098500627e-16
As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
limits. The given solution `[1., -1.]` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the remaining difference of left and right side of the solved
equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> normr
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `normr`
contains the minimal distance that was found.
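As a further, hedged illustration (not part of the original example set), a
previously computed solution can be passed back in through ``x0`` to
warm-start the solver:
>>> x, istop, itn, normr = lsmr(A, b, x0=x)[:4]
With a good initial guess the reported ``itn`` is typically no larger than
when starting from zero.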
"""
A = aslinearoperator(A)
b = atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm A''r'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# stores the num of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print('Th
|
lecaoquochung/ddnb.django
|
tests/staticfiles_tests/storage.py
|
Python
|
bsd-3-clause
| 660
| 0
|
from datetime import datetime
from django.core.files import storage
from django.contrib.staticfiles.storage import CachedStaticFilesStorage
class DummyStorage(storage.Storage):
"""
A storage class that does implement modified_time() but raises
NotImplementedError when calling
"""
def _save(self, name, content):
return 'dummy'
def delete(self, name):
pass
def exists(self, name):
pass
def modified_time(self, name):
return datetime.date(1970, 1, 1)
class SimpleCachedStaticFilesStorage(CachedStaticFilesStorage):
def file_hash(self, name, content=None):
return 'deploy12345'
|
diagonalwalnut/Experience
|
lrs/util/req_validate.py
|
Python
|
apache-2.0
| 32,206
| 0.006334
|
import json
import urllib2
from isodate.isodatetime import parse_datetime
from isodate.isoerror import ISO8601Error
from django.conf import settings
from util import validate_uuid, convert_to_dict, get_agent_ifp
from Authorization import auth
from StatementValidator import StatementValidator
from ..models import Statement, Agent, Activity
from ..exceptions import ParamConflict, ParamError, Forbidden, NotFound, BadRequest, IDNotFoundError
def check_for_existing_statementId(stmtID):
return Statement.objects.filter(statement_id=stmtID).exists()
def check_for_no_other_params_supplied(query_dict):
supplied = True
if len(query_dict) <= 1:
supplied = False
return supplied
# Extra agent validation for state and profile
def validate_oauth_state_or_profile_agent(req_dict, endpoint):
ag = req_dict['params']['agent']
token = req_dict['auth']['oauth_token']
scopes = token.scope_to_list()
if not 'all' in scopes:
if not isinstance(ag, dict):
ag = json.loads(ag)
try:
agent = Agent.objects.get(**ag)
except Agent.DoesNotExist:
err_msg = "Agent in %s cannot be found to match user in authorization" % endpoint
raise NotFound(err_msg)
if not agent in req_dict['auth']['authority'].member.all():
err_msg = "Authorization doesn't match agent in %s" % endpoint
raise Forbidden(err_msg)
def validate_void_statement(void_id):
# Retrieve statement, check if the verb is 'voided' - if not then set the voided flag to true else return error
# since you cannot unvoid a statement and should just reissue the statement under a new ID.
try:
stmt = Statement.objects.get(statement_id=void_id)
except Statement.DoesNotExist:
err_msg = "Statement with ID %s does not exist" % void_id
raise IDNotFoundError(err_msg)
if stmt.voided:
err_msg = "Statement with ID: %s is already voided, cannot unvoid. Please re-issue the statement under a new ID." % void_id
raise Forbidden(err_msg)
def server_validate_statement_object(stmt_object, auth):
if stmt_object['objectType'] == 'StatementRef' and not check_for_existing_statementId(stmt_object['id']):
err_msg = "No statement with ID %s was found" % stmt_object['id']
raise IDNotFoundError(err_msg)
def validate_stmt_authority(stmt, auth, auth_validated):
# If not validated yet - validate auth first since it supersedes any auth in stmt
if not auth_validated:
if auth['authority']:
if auth['authority'].objectType == 'Group' and not auth['authority'].oauth_identifier:
err_msg = "Statements cannot have a non-Oauth group as the authority"
raise ParamError(err_msg)
else:
return True
# If no auth then validate authority in stmt if there is one
else:
if 'authority' in stmt:
# If they try using a non-oauth group that already exists-throw error
if stmt['authority']['objectType'] == 'Group':
contains_account = len([x for m in stmt['authority']['member'] for x in m.keys() if 'account' in x]) > 0
if contains_account:
for agent in stmt['authority']['member']:
if 'account' in agent:
if not 'oauth' in agent['account']['homePage'].lower():
err_msg = "Statements cannot have a non-Oauth group as the authority"
raise ParamError(err_msg)
# No members contain an account so that means it's not an Oauth group
else:
err_msg = "Statements cannot have a non-Oauth group as the authority"
raise ParamError(err_msg)
else:
return True
else:
return True
# Retrieve JSON data from ID
def get_act_def_data(act_data):
act_url_data = {}
# See if id resolves
try:
req = urllib2.Request(act_data['id'])
req.add_header('Accept', 'application/json, */*')
act_resp = urllib2.urlopen(req, timeout=settings.ACTIVITY_ID_RESOLVE_TIMEOUT)
except Exception:
# Doesn't resolve-hopefully data is in payload
pass
else:
# If it resolves then try parsing JSON from it
try:
act_url_data = json.loads(act_resp.read())
except Exception:
# Resolves but no data to retrieve - this is OK
pass
# If there was data from the URL and a definition in the received JSON already
if act_url_data and 'definition' in act_data:
act_data['definition'] = dict(act_url_data.items() + act_data['definition'].items())
# If there was data from the URL and no definition in the JSON
elif act_url_data and not 'definition' in act_data:
act_data['definition'] = act_url_data
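# Hedged aside (not in the original file): because act_data['definition'] items
# come second in the dict() merge above, keys supplied in the statement payload
# override any values fetched from the resolved activity ID.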
def server_validation(stmt_set, auth, payload_sha2s):
auth_validated = False
if type(stmt_set) is list:
for stmt in stmt_set:
server_validation(stmt, auth, payload_sha2s)
else:
if 'id' in stmt_set:
statement_id = stmt_set['id']
if check_for_existing_statementId(statement_id):
err_msg = "A statement with ID %s already exists" % statement_id
raise ParamConflict(err_msg)
server_validate_statement_object(stmt_set['object'], auth)
if stmt_set['verb']['id'] == 'http://adlnet.gov/expapi/verbs/voided':
validate_void_statement(stmt_set['object']['id'])
if not 'objectType' in stmt_set['object'] or stmt_set['object']['objectType'] == 'Activity':
get_act_def_data(stmt_set['object'])
try:
validator = StatementValidator()
validator.validate_activity(stmt_set['object'])
except Exception, e:
raise BadRequest(e.message)
except ParamError, e:
raise ParamError(e.message)
auth_validated = validate_stmt_authority(stmt_set, auth, auth_validated)
if 'attachments' in stmt_set:
attachment_data = stmt_set['attachments']
validate_attachments(attachment_data, payload_sha2s)
@auth
def statements_post(req_dict):
if req_dict['params'].keys():
raise ParamError("The post statements request contained unexpected parameters: %s" % ", ".join(req_dict['params'].keys()))
if isinstance(req_dict['body'], basestring):
req_dict['body'] = convert_to_dict(req_dict['body'])
try:
validator = StatementValidator(req_dict['body'])
validator.validate()
except Exception, e:
raise BadRequest(e.message)
except ParamError, e:
raise ParamError(e.message)
server_validation(req_dict['body'], req_dict['auth'], req_dict.get('payload_sha2s', None))
return req_dict
@auth
def statements_more_get(req_dict):
if not 'more_id' in req_dict:
err_msg = "Missing more_id while trying to hit /more endpoint"
raise ParamError(err_msg)
return req_dict
def validate_statementId(req_dict):
if 'statementId' in req_dict['params'] and 'voidedStatementId' in req_dict['params']:
err_msg = "Cannot have both statementId and voidedStatementId in a GET request"
raise ParamError(err_msg)
elif 'statementId' in req_dict['params']:
statementId = req_dict['params']['statementId']
voided = False
else:
statementId = req_dict['params']['voidedStatementId']
voided = True
not_allowed = ["agent", "verb", "activity", "registration",
"related_activities", "related_agents", "since",
"until", "limit", "ascending"]
bad_keys = set(not_allowed) & set(req_dict['params'].keys())
if bad_keys:
err_msg = "Cannot have %s in a GET request only 'format' and/or 'attachments' are allowed with 'statementId' and 'voidedStatem
|
avsm/xen-unstable
|
tools/python/xen/xend/XendConstants.py
|
Python
|
gpl-2.0
| 3,953
| 0.012396
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 XenSource Ltd.
#============================================================================
from xen.xend.XendAPIConstants import *
from xen.util import auxbin
#
# Shutdown codes and reasons.
#
DOMAIN_POWEROFF = 0
DOMAIN_REBOOT = 1
DOMAIN_SUSPEND = 2
DOMAIN_CRASH = 3
DOMAIN_HALT = 4
DOMAIN_SHUTDOWN_REASONS = {
DOMAIN_POWEROFF: "poweroff",
DOMAIN_REBOOT : "reboot",
DOMAIN_SUSPEND : "suspend",
DOMAIN_CRASH : "crash",
DOMAIN_HALT : "halt"
}
REVERSE_DOMAIN_SHUTDOWN_REASONS = \
dict([(y, x) for x, y in DOMAIN_SHUTDOWN_REASONS.items()])
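# Hedged illustration (not in the original source): the reverse mapping lets a
# reason string be turned back into its numeric code, e.g.
# REVERSE_DOMAIN_SHUTDOWN_REASONS["reboot"] == DOMAIN_REBOOT == 1.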
HVM_PARAM_CALLBACK_IRQ = 0
HVM_PARAM_STORE_PFN = 1
HVM_PARAM_STORE_EVTCHN = 2
HVM_PARAM_PAE_ENABLED = 4
HVM_PARAM_IOREQ_PFN = 5
HVM_PARAM_BUFIOREQ_PFN = 6
HVM_PARAM_NVRAM_FD = 7 # ia64
HVM_PARAM_VHPT_SIZE = 8 # ia64
HVM_PARAM_BUFPIOREQ_PFN = 9 # ia64
HVM_PARAM_VIRIDIAN = 9 # x86
HVM_PARAM_TIMER_MODE = 10
HVM_PARAM_HPET_ENABLED = 11
HVM_PARAM_ACPI_S_STATE = 14
HVM_PARAM_VPT_ALIGN = 16
restart_modes = [
"restart",
"destroy",
"preserve",
"rename-restart",
"coredump-destroy",
"coredump-restart"
]
DOM_STATES = [
'halted',
'paused',
'running',
'suspended',
'shutdown',
'crashed',
'unknown',
]
DOM_STATE_HALTED = XEN_API_VM_POWER_STATE_HALTED
DOM_STATE_PAUSED = XEN_API_VM_POWER_STATE_PAUSED
DOM_STATE_RUNNING = XEN_API_VM_POWER_STATE_RUNNING
DOM_STATE_SUSPENDED = XEN_API_VM_POWER_STATE_SUSPENDED
DOM_STATE_SHUTDOWN = XEN_API_VM_POWER_STATE_SHUTTINGDOWN
DOM_STATE_CRASHED = XEN_API_VM_POWER_STATE_CRASHED
DOM_STATE_UNKNOWN = XEN_API_VM_POWER_STATE_UNKNOWN
DOM_STATES_OLD = [
'running',
'blocked',
'paused',
'shutdown',
'crashed',
'dying'
]
SHUTDOWN_TIMEOUT = (60.0 * 5)
"""Minimum time between domain restarts in seconds."""
MINIMUM_RESTART_TIME = 60
RESTART_IN_PROGRESS = 'xend/restart_in_progress'
DUMPCORE_IN_PROGRESS = 'xend/dumpcore_in_progress'
LAST_SHUTDOWN_REASON = 'xend/last_shutdown_reason'
TRIGGER_NMI = 0
TRIGGER_RESET = 1
TRIGGER_INIT = 2
TRIGGER_POWER = 3
TRIGGER_S3RESUME = 4
TRIGGER_TYPE = {
"nmi" : TRIGGER_NMI,
"reset" : TRIGGER_RESET,
"init" : TRIGGER_INIT,
"s3resume": TRIGGER_S3RESUME,
"power": TRIGGER_POWER
}
#
# Device migration stages (eg. XendDomainInfo, XendCheckpoint, server.tpmif)
#
DEV_MIGRATE_TEST = 0
DEV_MIGRATE_STEP1 = 1
DEV_MIGRATE_STEP2 = 2
DEV_MIGRATE_STEP3 = 3
#
# VTPM-related constants
#
VTPM_DELETE_SCRIPT = auxbin.scripts_dir() + '/vtpm-delete'
#
# Xenstore Constants
#
XS_VMROOT = "/vm/"
NR_PCI_FUNC = 8
NR_PCI_DEV = 32
NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
AUTO_PHP_SLOT = 0x100
#
# tmem
#
TMEM_CONTROL = 0
TMEM_NEW_POOL = 1
TMEM_DESTROY_POOL = 2
TMEM_NEW_PAGE = 3
TMEM_PUT_PAGE = 4
TMEM_GET_PAGE = 5
TMEM_FLUSH_PAGE = 6
TMEM_FLUSH_OBJECT = 7
TMEM_READ = 8
TMEM_WRITE = 9
TMEM_XCHG = 10
TMEMC_THAW = 0
TMEMC_FREEZE = 1
TMEMC_FLUSH = 2
TMEMC_DESTROY = 3
TMEMC_LIST = 4
TMEMC_SET_WEIGHT = 5
TMEMC_SET_CAP = 6
TMEMC_SET_COMPRESS = 7
|
slashk/prowl.alfredworkflow
|
prowl_alfred.py
|
Python
|
apache-2.0
| 1,835
| 0.00545
|
# Copyright 2013 Ken Pepple <[email protected]>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import alfred_utils as utils
import requests
PROWL_URL = "https://api.prowlapp.com/publicapi/"
DEFAULT_PRIORITY = 0
VALID_PRIORITIES = [-2, -1, 0, 1, 2]
def get_api_key():
return utils.get_config('apikey')
def get_priority_key():
try:
p = utils.get_config('priority')
if p not in VALID_PRIORITIES:
p = DEFAULT_PRIORITY
except:
p = DEFAULT_PRIORITY
return p
def verify_apikey(apikey):
parameters = {'apikey': apikey}
r = requests.post(PROWL_URL + "verify", params=parameters)
return r.ok
def save_api_key(apikey):
utils.save_config('apikey',apikey)
def send_prowl(description, application="Alfred", event="event", priority=0):
try:
apikey = get_api_key()
except:
print "No APIKEY. Please configure by holding down the cmd key and pasting in prowl APIKEY."
raise Exception("No APIKEY. Please configure by holding down the cmd key and pasting in prowl APIKEY.")
parameters = {'apikey': apikey, 'event': event, 'application': application,
'priority': priority, 'description': description}
r = requests.post(PROWL_URL + "add", params=parameters)
return r.ok
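# Hedged usage sketch (not part of the original workflow script); the key and
# message below are placeholders, not real values:
#
# save_api_key("YOUR-PROWL-APIKEY")
# send_prowl("Build finished", application="Alfred", event="ci", priority=1)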
|
Vaidyanath/tempest
|
tempest/api/object_storage/test_container_sync_middleware.py
|
Python
|
apache-2.0
| 1,985
| 0
|
# Copyright(c)2015 NTT corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import test_container_sync
from tempest import config
from tempest import test
CONF = config.CONF
# This test can be quite long to run due to its
# dependency on container-sync process running interval.
# You can obviously reduce the container-sync interval in the
# container-server configuration.
class ContainerSyncMiddlewareTest(test_container_sync.ContainerSyncTest):
@classmethod
def resource_setup(cls):
super(ContainerSyncMiddlewareTest, cls).resource_setup()
# Set container-sync-realms.conf info
cls.realm_name = CONF.object_storage.realm_name
cls.key = 'sync_key'
cls.cluster_name = CONF.object_storage.cluster_name
@test.attr(type='slow')
@test.requires_ext(extension='container_sync', service='object')
def test_container_synchronization(self):
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
account_name = cont_client.base_url.split('/')[-1]
headers = {'X-Container-Sync-Key': "%s" % (self.key),
'X-Container-Sync-To': "//%s/%s/%s/%s" %
(self.realm_name, self.cluster_name,
str(account_name), str(cont))}
return headers
self._test_container_synchronization(make_headers)
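# Hedged illustration (not in the original test): with a hypothetical realm
# "realm1", cluster "clusterA", account "AUTH_test" and container "cont2",
# make_headers() above would produce:
# {'X-Container-Sync-Key': 'sync_key',
# 'X-Container-Sync-To': '//realm1/clusterA/AUTH_test/cont2'}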
|
saukrIppl/seahub
|
thirdpart/openpyxl-2.3.0-py2.7.egg/openpyxl/formatting/__init__.py
|
Python
|
apache-2.0
| 144
| 0
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from .formatting import ConditionalFormatting
from .rule import Rule
|
anchore/anchore-engine
|
tests/unit/anchore_engine/auth/test_common.py
|
Python
|
apache-2.0
| 3,612
| 0.001661
|
import json
import time
import pytest
from anchore_engine.auth.common import (
get_creds_by_registry,
get_docker_registry_userpw,
registry_record_matches,
)
_test_username = "tonystark"
_test_password = "potts"
_test_registry_meta = {
"authorizationToken": "{}:{}".format(_test_username, _test_password)
}
_record_ecr = {
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
}
_record_not_ecr = {
"registry_type": "other-registry",
"registry_user": _test_username,
"registry_pass": _test_password,
}
_record_ecr_inactive = {
"registry": "docker.io",
"record_state_key": "inactive",
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
"registry_verify": True,
}
_record_ecr_unavailable = {
"registry": "docker.io",
"record_state_key": "inactive",
"record_state_val": time.time(), # note: technically this could yield nondeterministic results
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
"registry_verify": True,
}
@pytest.mark.parametrize("registry_record", [_record_ecr, _record_not_ecr])
def test_get_docker_registry_userpw(registry_record):
result = get_docker_registry_userpw(registry_record)
assert result == (_test_username, _test_password)
def test_get_docker_registry_userpw_bad_json():
record_ecr_bad_json = {
"registry_type": "awsecr",
"registry_meta": "this-is-not-valid-json!}",
}
with pytest.raises(Exception):
get_docker_registry_userpw(record_ecr_bad_json)
@pytest.mark.parametrize(
"registry,repository,registry_creds,expected",
[
("docker.io", "library/node", None, (None, None, None)),
(
"docker.io",
"library/node",
[_record_ecr_inactive],
(_test_username, _test_password, True),
),
],
)
def test_get_creds_by_registry(registry, repository, registry_creds, expected):
result = get_creds_by_registry(registry, repository, registry_creds)
assert result == expected
def test_get_creds_by_registry_unavailable():
with pytest.raises(Exception):
get_creds_by_registry("docker.io", "library/node", [_record_ecr_unavailable])
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io/library/centos", "docker.io", "library/centos"),
("docker.io", "docker.io", "centos"),
("docker.io", "docker.io", "myuser/myrepo"),
],
)
def test_registry_record_matches_exact(registry_record_str, registry, repository):
assert registry_record_matches(registry_record_str, registry, repository)
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io/library/*", "docker.io", "library/centos"),
("docker.io/*", "docker.io", "library/centos"),
("gcr.io/myproject/*", "gcr.io", "myproject/myuser/myrepo"),
],
)
def test_registry_record_matches_wildcard(registry_record_str, registry, repository):
assert registry_record_matches(registry_record_str, registry, repository)
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io", "gcr.io", "myproject/myuser"),
("docker.io/*", "gcr.io", "myproject/myuser"),
("docker.io/library/*", "docker.io", "myuser/myrepo"),
("docker.io/myuser/myrepo", "docker.io", "myuser/myrepo2"),
],
)
def test_registry_record_matches_non(registry_record_str, registry, repository):
assert not registry_record_matches(registry_record_str, registry, repository)
|
simondlevy/ISCPP
|
Chapter08/sumton.py
|
Python
|
gpl-3.0
| 926
| 0.006479
|
#!/usr/bin/env python3
"""
sumton.py : compute the sum of 0 through N
Copyright (C) Simon D. Levy 2016
This file is part of ISCPP.
ISCPP is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http:#www.gnu.org/licenses/>.
"""
def sumToN(n):
res = 0
for k in range(0,n+1):
res = res + k
return res
if __name__ == "__main__":
"""
Example
"""
print(sumToN(5))
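# Hedged aside (not in the original file): the loop above agrees with the
# closed form sum_{k=0}^{n} k = n*(n+1)/2, so the value printed for sumToN(5)
# is 5*6/2 = 15.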
|
sevein/archivematica
|
src/MCPClient/lib/clientScripts/generateDIPFromAIPGenerateDIP.py
|
Python
|
agpl-3.0
| 2,681
| 0.002611
|
#!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
|
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
from __future__ import print_function
import os
import sys
import shutil
import django
django.setup()
# dashboard
from main.models import Job, SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from databaseFunctions import createSIP
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.generateDIPFromAIPGenerateDIP")
# COPY THE METS FILE
# Move the DIP Directory
fauxUUID = sys.argv[1]
unitPath = sys.argv[2]
date = sys.argv[3]
basename = os.path.basename(unitPath[:-1])
uuidLen = 36
originalSIPName = basename[:-(uuidLen+1)*2]
originalSIPUUID = basename[:-(uuidLen+1)][-uuidLen:]
METSPath = os.path.join(unitPath, "metadata/submissionDocumentation/data/", "METS.%s.xml" % (originalSIPUUID))
if not os.path.isfile(METSPath):
print("Mets file not found: ", METSPath, file=sys.stderr)
exit(-1)
# move mets to DIP
src = METSPath
dst = os.path.join(unitPath, "DIP", os.path.basename(METSPath))
shutil.move(src, dst)
# Move DIP
src = os.path.join(unitPath, "DIP")
dst = os.path.join("/var/archivematica/sharedDirectory/watchedDirectories/uploadDIP/", originalSIPName + "-" + originalSIPUUID)
shutil.move(src, dst)
try:
SIP.objects.get(uuid=originalSIPUUID)
except SIP.DoesNotExist:
# otherwise doesn't appear in dashboard
createSIP(unitPath, UUID=originalSIPUUID)
Job.objects.create(jobtype="Hack to make DIP Jobs appear",
directory=unitPath,
sip_id=originalSIPUUID,
currentstep="Completed successfully",
unittype="unitSIP",
microservicegroup="Upload DIP")
|
ESS-LLP/erpnext
|
erpnext/projects/doctype/task/task.py
|
Python
|
gpl-3.0
| 12,175
| 0.026201
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe import _, throw
from frappe.desk.form.assign_to import clear, close_all_assignments
from frappe.model.mapper import get_mapped_doc
from frappe.utils import add_days, cstr, date_diff, get_link_to_form, getdate, today, flt
from frappe.utils.nestedset import NestedSet
class CircularReferenceError(frappe.ValidationError): pass
class EndDateCannotBeGreaterThanProjectEndDateError(frappe.ValidationError): pass
class Task(NestedSet):
nsm_parent_field = 'parent_task'
def get_feed(self):
return '{0}: {1}'.format(_(self.status), self.subject)
def get_customer_details(self):
cust = frappe.db.sql("select customer_name from `tabCustomer` where name=%s", self.customer)
if cust:
ret = {'customer_name': cust and cust[0][0] or ''}
return ret
def validate(self):
self.validate_dates()
self.validate_parent_expected_end_date()
self.validate_parent_project_dates()
self.validate_progress()
self.validate_status()
self.update_depends_on()
self.validate_dependencies_for_template_task()
def validate_dates(self):
if self.exp_start_date and self.exp_end_date and getdate(self.exp_start_date) > getdate(self.exp_end_date):
frappe.throw(_("{0} can not be greater than {1}").format(frappe.bold("Expected Start Date"), \
frappe.bold("Expected End Date")))
if self.act_start_date and self.act_end_date and getdate(self.act_start_date) > getdate(self.act_end_date):
frappe.throw(_("{0} can not be greater than {1}").format(frappe.bold("Actual Start Date"), \
frappe.bold("Actual End Date")))
def validate_parent_expected_end_date(self):
if self.parent_task:
parent_exp_end_date = frappe.db.get_value("Task", self.parent_task, "exp_end_date")
if parent_exp_end_date and getdate(self.get("exp_end_date")) > getdate(parent_exp_end_date):
frappe.throw(_("Expected End Date should be less than or equal to parent task's Expected End Date {0}.").format(getdate(parent_exp_end_date)))
def validate_parent_project_dates(self):
if not self.project or frappe.flags.in_test:
return
expected_end_date = frappe.db.get_value("Project", self.project, "expected_end_date")
if expected_end_date:
validate_project_dates(getdate(expected_end_date), self, "exp_start_date", "exp_end_date", "Expected")
validate_project_dates(getdate(expected_end_date), self, "act_start_date", "act_end_date", "Actual")
def validate_status(self):
if self.is_template and self.status != "Template":
self.status = "Template"
if self.status!=self.get_db_value("status") and self.status == "Completed":
for d in self.depends_on:
if frappe.db.get_value("Task", d.task, "status") not in ("Completed", "Cancelled"):
frappe.throw(_("Cannot complete task {0} as its dependant task {1} are not ccompleted / cancelled.").format(frappe.bold(self.name), frappe.bold(d.task)))
close_all_assignments(self.doctype, self.name)
def validate_progress(self):
if flt(self.progress or 0) > 100:
frappe.throw(_("Progress % for a task cannot be more than 100."))
if flt(self.progress) == 100:
self.status = 'Completed'
if self.status == 'Completed':
self.progress = 100
def validate_dependencies_for_template_task(self):
if self.is_template:
self.validate_parent_template_task()
self.validate_depends_on_tasks()
def validate_parent_template_task(self):
if self.parent_task:
if not frappe.db.get_value("Task", self.parent_task, "is_template"):
parent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(self.parent_task)
frappe.throw(_("Parent Task {0} is not a Template Task").format(parent_task_format
|
))
def validate_depends_on_tasks(self):
if self.depends_on:
for task in self.depends_on:
if not frappe.db.get_value("Task", task.task, "is_template"):
dependent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(task.task)
frappe.throw(_("Dependent Task {0} is not a Template Task").format(dependent_task_format))
def update_depends_on(self):
depends_on_tasks = self.depends_on_tasks or ""
for d in self.depends_on:
if d.task and d.task not in depends_on_tasks:
depends_on_tasks += d.task + ","
self.depends_on_tasks = depends_on_tasks
def update_nsm_model(self):
frappe.utils.nestedset.update_nsm(self)
def on_update(self):
self.update_nsm_model()
self.check_recursion()
self.reschedule_dependent_tasks()
self.update_project()
self.unassign_todo()
self.populate_depends_on()
def unassign_todo(self):
if self.status == "Completed":
close_all_assignments(self.doctype, self.name)
if self.status == "Cancelled":
clear(self.doctype, self.name)
def update_total_expense_claim(self):
self.total_expense_claim = frappe.db.sql("""select sum(total_sanctioned_amount) from `tabExpense Claim`
where project = %s and task = %s and docstatus=1""",(self.project, self.name))[0][0]
def update_time_and_costing(self):
tl = frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date,
sum(billing_amount) as total_billing_amount, sum(costing_amount) as total_costing_amount,
sum(hours) as time from `tabTimesheet Detail` where task = %s and docstatus=1"""
,self.name, as_dict=1)[0]
if self.status == "Open":
self.status = "Working"
self.total_costing_amount= tl.total_costing_amount
self.total_billing_amount= tl.total_billing_amount
self.actual_time= tl.time
self.act_start_date= tl.start_date
self.act_end_date= tl.end_date
def update_project(self):
if self.project and not self.flags.from_project:
frappe.get_cached_doc("Project", self.project).update_project()
def check_recursion(self):
if self.flags.ignore_recursion_check: return
check_list = [['task', 'parent'], ['parent', 'task']]
for d in check_list:
task_list, count = [self.name], 0
while (len(task_list) > count ):
tasks = frappe.db.sql(" select %s from `tabTask Depends On` where %s = %s " %
(d[0], d[1], '%s'), cstr(task_list[count]))
count = count + 1
for b in tasks:
if b[0] == self.name:
frappe.throw(_("Circular Reference Error"), CircularReferenceError)
if b[0]:
task_list.append(b[0])
if count == 15:
break
def reschedule_dependent_tasks(self):
end_date = self.exp_end_date or self.act_end_date
if end_date:
for task_name in frappe.db.sql("""
select name from `tabTask` as parent
where parent.project = %(project)s
and parent.name in (
select parent from `tabTask Depends On` as child
where child.task = %(task)s and child.project = %(project)s)
""", {'project': self.project, 'task':self.name }, as_dict=1):
task = frappe.get_doc("Task", task_name.name)
if task.exp_start_date and task.exp_end_date and task.exp_start_date < getdate(end_date) and task.status == "Open":
task_duration = date_diff(task.exp_end_date, task.exp_start_date)
task.exp_start_date = add_days(end_date, 1)
task.exp_end_date = add_days(task.exp_start_date, task_duration)
task.flags.ignore_recursion_check = True
task.save()
def has_webform_permission(self):
project_user = frappe.db.get_value("Project User", {"parent": self.project, "user":frappe.session.user} , "user")
if project_user:
return True
def populate_depends_on(self):
if self.parent_task:
parent = frappe.get_doc('Task', self.parent_task)
if self.name not in [row.task for row in parent.depends_on]:
parent.append("depends_on", {
"doctype": "Task Depends On",
"task": self.name,
"subject": self.subject
})
parent.save()
def on_trash(self):
if check_if_child_exists(self.name):
throw(_("Child Task exists for this Task. You can not delete this Task."))
self.update_nsm_model()
def after_delete(self):
self.update_project()
def update_status(self):
if self.status not in ('Cancelled', 'Completed') and self.exp_end_date:
from datetime import datetime
if self.exp_end_date < datetime.now().date():
self.db_set('status', 'Overdue', update_modified=Fa
|
belangeo/pyo
|
pyo/examples/sequencing/01_starttime_duration.py
|
Python
|
lgpl-3.0
| 1,118
| 0.002683
|
#!/usr/bin/env python
# encoding: utf-8
"""
Show how to use `dur` and `delay` parameters of play() and out()
methods to sequence events over time.
"""
from pyo import *
import random
s = Server(duplex=0).boot()
num = 70
freqs = [random.uniform(100, 1000) for i in range(num)]
start1 = [i * 0.5 for i in range(num)]
fade1 = Fader([1] * num, 1, 5, mul=0.03).play(dur=5, delay=start1)
a = SineLoop(freqs, feedback=0.05, mul=fade1).out(dur=5, delay=start1)
start2 = 30
dur2 = 40
snds = [
"../snds/alum1.wav",
"../snds/alum2.w
|
av",
"../snds/alum3.wav",
"../snds/alum4.wav",
]
tabs = SndTable(snds)
fade2 = Fader(0.05, 10, dur2, mul=0.7).play(dur=dur2, delay=start2)
b = Beat(time=0.125, w1=[90, 30, 30, 20], w2=[30, 90, 50, 40], w3=[0, 30, 30, 40], poly=1).play(dur=dur2, delay=start2)
out = TrigEnv(b, tabs, b["dur"], mul=b["amp"] * fade2).out(dur=dur2, delay=start2)
start3 = 45
dur3 = 30
fade3 = Fader(15, 15, dur3, mul=0.02).play(dur=dur3, delay=start3)
fm = FM(carrier=[149, 100, 151, 50] * 3, ratio=[0.2499, 0.501, 0.75003], index=10, mul=fade3).out(
dur=dur3, delay=start3
)
s.gui(locals())
|
emotrix/Emotrix
|
emotrix/HeadsetTester.py
|
Python
|
bsd-2-clause
| 529
| 0
|
# -*- coding: utf-8 -*-
from Headset import Headset
import logging
import time
puerto = 'COM3'
headset = Headset(logging.INFO)
try:
headset.connect(puerto, 115200)
except Exception, e:
raise e
print "Is conected? " + str(headset.isConnected())
print "-----------------------------------------"
headset.startReading(persist_data=True)
time.sleep(5)
headset.stopReading()
headset.closePort()
|
print "-----------------------------------------"
print "Is conected? " + str(headset.isConnected())
print headset.getStatus()
|
KevinHoo/new-file-pro
|
commands/NewFileBase.py
|
Python
|
gpl-3.0
| 3,250
| 0.029231
|
import sublime
import sublime_plugin
import re
import os
import datetime
TMLP_DIR = 'templates'
KEY_SYNTAX = 'syntax'
KEY_FILE_EXT = 'extension'
IS_GTE_ST3 = int(sublime.version()) >= 3000
PACKAGE_NAME = 'new-file-pro'
PACKAGES_PATH = sublime.packages_path()
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
class NewFileBase(sublime_plugin.WindowCommand):
def __init__(self, window):
super(NewFileBase, self).__init__(window)
def appendFileExtension(self, name, t):
tmp = name.split('.')
length = len(tmp)
s_ext = tmp[length - 1]
exts = {'css': 'css', 'html': 'html', 'js': 'js', 'json': 'json', 'php': 'php', 'php-class': 'php', 'php-interface': 'php', 'xml':'xml', 'python': 'python', 'ruby': 'ruby'}
try:
t_ext = exts[t]
if (s_ext == t_ext and length == 1) or s_ext != t_ext:
return name + '.' + t_ext
except KeyError:
pass
return name;
def appendPHPExtension(self, name):
t = name.split('.')
length = len(t)
ext = t[length - 1]
if ext != "php":
return name + '.php'
return name;
def get_code(self, type='text' ):
code = ''
file_name = "%s.tmpl" % type
isIOError = False
if IS_GTE_ST3:
tmpl_dir = 'Packages/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
user_tmpl_dir = 'Packages/User/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
else:
tmpl_dir = os.path.join(PACKAGES_PATH, PACKAGE_NAME, TMLP_DIR)
user_tmpl_dir = os.path.join(PACKAGES_PATH, 'User', PACKAGE_NAME, TMLP_DIR)
self.user_tmpl_path = os.path.join(user_tmpl_dir, file_name)
self.tmpl_path = os.path.join(tmpl_dir, file_name)
if IS_GTE_ST3:
try:
code = sublime.load_resource(self.user_tmpl_path)
except IOError:
try:
code = sublime.load_resource(self.tmpl_path)
except IOError:
isIOError = True
else:
if os.path.isfile(self.user_tmpl_path):
code = self.open_file(self.user_tmpl_path)
elif os.path.isfile(self.tmpl_path):
code = self.open_file(self.tmpl_path)
else:
isIOError = True
if isIOError:
sublime.message_dialog('[Warning] No such file: ' + self.tmpl_path + ' or ' + self.user_tmpl_path)
return self.format_tag(code)
def format_tag(self, code):
win = sublime.active_window()
code = code.replace('\r', '') # replace \r\n -> \n
# format
settings = self.get_settings()
format = settings.get('date_format', '%Y-%m-%d')
date = datetime.datetime.now().strftime(format)
if not IS_GTE_ST3:
code = code.decode('utf8') # for st2 && Chinese characters
code = code.replace('${date}', date)
attr = settings.get('attr', {})
for key in attr:
code = code.replace('${%s}' % key, attr.get(key, ''))
if settings.get('enable_project_variables', False) and hasattr(win, 'extract_variables'):
variables = win.extract_variables()
for key in ['project_base_name', 'project_path', 'platform']:
code = code.replace('${%s}' % key, variables.get(key, ''))
code = re.sub(r"(?<!\\)\${(?!\d)", '\${', code)
return code
def open_file(self, path, mode='r'):
fp = open(path, mode)
code = fp.read()
fp.close()
return code
def get_settings(self, type=None):
settings = sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
if not type:
return settings
opts = settings.get(type, [])
return opts
|
scaramallion/pynetdicom
|
pynetdicom/apps/movescu/movescu.py
|
Python
|
mit
| 11,001
| 0.000364
|
#!/usr/bin/env python
"""A QR Move SCU application.
For sending Query/Retrieve (QR) C-MOVE requests to a QR Move SCP.
"""
import argparse
import sys
from pynetdicom import (
AE,
evt,
QueryRetrievePresentationContexts,
AllStoragePresentationContexts,
)
from pynetdicom.apps.common import setup_logging, create_dataset, handle_store
from pynetdicom._globals import ALL_TRANSFER_SYNTAXES, DEFAULT_MAX_LENGTH
from pynetdicom.pdu_primitives import SOPClassExtendedNegotiation
from pynetdicom.sop_class import (
PatientRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelMove,
PatientStudyOnlyQueryRetrieveInformationModelMove,
)
__version__ = "0.4.0"
def _setup_argparser():
"""Setup the command line arguments"""
# Description
parser = argparse.ArgumentParser(
description=(
"The movescu application implements a Service Class User (SCU) "
"for the Query/Retrieve (QR) Service Class and (optionally) a "
"Storage SCP for the Storage Service Class. movescu supports "
"retrieve functionality using the C-MOVE message. It sends query "
"keys to an SCP and waits for a response. It will accept "
"associations for the purpose of receiving images sent as a "
"result of the C-MOVE request. movescu can initiate the transfer "
"of images to a third party or can retrieve images to itself "
"(note: the use of the term 'move' is a misnomer, the C-MOVE "
"operation performs a SOP Instance copy only)"
),
usage="movescu [options] addr port",
)
# Parameters
req_opts = parser.add_argument_group("Parameters")
req_opts.add_argument(
"addr", help="TCP/IP address or hostname of DICOM peer", type=str
)
req_opts.add_argument("port", help="TCP/IP port number of peer", type=int)
# General Options
gen_opts = parser.add_argument_group("General Options")
gen_opts.add_argument(
"--version", help="print version information and exit", action="store_true"
)
output = gen_opts.add_mutually_exclusive_group()
output.add_argument(
"-q",
"--quiet",
help="quiet mode, print no warnings and errors",
action="store_const",
dest="log_type",
const="q",
)
output.add_argument(
"-v",
"--verbose",
help="verbose mode, print processing details",
action="store_const",
dest="log_type",
const="v",
)
output.add_argument(
"-d",
"--debug",
help="debug mode, print debug information",
action="store_const",
dest="log_type",
const="d",
)
gen_opts.add_argument(
"-ll",
"--log-level",
metavar="[l]",
help=("use level l for the logger (critical, error, warn, info, debug)"),
type=str,
choices=["critical", "error", "warn", "info", "debug"],
)
parser.set_defaults(log_type="v")
# Network Options
net_opts = parser.add_argument_group("Network Options")
net_opts.add_argument(
"-aet",
"--calling-aet",
metavar="[a]etitle",
help="set my calling AE title (default: MOVESCU)",
type=str,
default="MOVESCU",
|
)
net_opts.add_argument(
"-aec",
"--called-aet",
metavar="[a]etitle",
help="set called AE title of peer (default: ANY-SCP)",
type=str,
default="ANY-SCP",
)
net_opts.add_argument(
"-aem",
"--move-aet",
metavar="[a]etitle",
help="set move destination AE title (
|
default: STORESCP)",
type=str,
default="STORESCP",
)
net_opts.add_argument(
"-ta",
"--acse-timeout",
metavar="[s]econds",
help="timeout for ACSE messages (default: 30 s)",
type=float,
default=30,
)
net_opts.add_argument(
"-td",
"--dimse-timeout",
metavar="[s]econds",
help="timeout for DIMSE messages (default: 30 s)",
type=float,
default=30,
)
net_opts.add_argument(
"-tn",
"--network-timeout",
metavar="[s]econds",
help="timeout for the network (default: 30 s)",
type=float,
default=30,
)
net_opts.add_argument(
"-pdu",
"--max-pdu",
metavar="[n]umber of bytes",
help=(
f"set max receive pdu to n bytes (0 for unlimited, "
f"default: {DEFAULT_MAX_LENGTH})"
),
type=int,
default=DEFAULT_MAX_LENGTH,
)
# Query information model choices
qr_group = parser.add_argument_group("Query Information Model Options")
qr_model = qr_group.add_mutually_exclusive_group()
qr_model.add_argument(
"-P",
"--patient",
help="use patient root information model (default)",
action="store_true",
)
qr_model.add_argument(
"-S", "--study", help="use study root information model", action="store_true"
)
qr_model.add_argument(
"-O",
"--psonly",
help="use patient/study only information model",
action="store_true",
)
# Query Options
qr_query = parser.add_argument_group("Query Options")
qr_query.add_argument(
"-k",
"--keyword",
metavar="[k]eyword: (gggg,eeee)=str, keyword=str",
help=(
"add or override a query element using either an element tag as "
"(group,element) or the element's keyword (such as PatientName)"
),
type=str,
action="append",
)
qr_query.add_argument(
"-f",
"--file",
metavar="path to [f]ile",
help=(
"use a DICOM file as the query dataset, if "
"used with -k then the elements will be added to or overwrite "
"those present in the file"
),
type=str,
)
# Store SCP options
store_group = parser.add_argument_group("Storage SCP Options")
store_group.add_argument(
"--store",
help="start a Storage SCP that can be used as the move destination",
action="store_true",
default=False,
)
store_group.add_argument(
"--store-port",
metavar="[p]ort",
help="the port number to use for the Storage SCP",
type=int,
default=11113,
)
store_group.add_argument(
"--store-aet",
metavar="[a]etitle",
help="the AE title to use for the Storage SCP",
type=str,
default="STORESCP",
)
# Extended Negotiation Options
ext_neg = parser.add_argument_group("Extended Negotiation Options")
ext_neg.add_argument(
"--relational-retrieval",
help="request the use of relational retrieval",
action="store_true",
)
ext_neg.add_argument(
"--enhanced-conversion",
help="request the use of enhanced multi-frame image conversion",
action="store_true",
)
# Output Options
out_opts = parser.add_argument_group("Output Options")
out_opts.add_argument(
"-od",
"--output-directory",
metavar="[d]irectory",
help="write received objects to directory d",
type=str,
)
out_opts.add_argument(
"--ignore", help="receive data but don't store it", action="store_true"
)
ns = parser.parse_args()
if ns.version:
pass
elif not bool(ns.file) and not bool(ns.keyword):
parser.error("-f and/or -k must be specified")
return ns
def main(args=None):
"""Run the application."""
if args is not None:
sys.argv = args
args = _setup_argparser()
if args.version:
print(f"movescu.py v{__version__}")
sys.exit()
APP_LOGGER = setup_logging(args, "movescu")
APP_LOGGER.debug(f"movescu.py v{__version__}")
APP_LOGGER.debug("")
# Create query (identifier) dataset
try:
# If you're looking at this to see how QR Move works then `identifier`
# is a pydicom Dataset instance with your query keys, e.g.:
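# (A hedged sketch, not the file's original continuation; the identifiers and
# values below are made up for illustration.)
#
# from pydicom.dataset import Dataset
# identifier = Dataset()
# identifier.QueryRetrieveLevel = "PATIENT"
# identifier.PatientID = "1234567"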
|
veratulips/OMOOC2py
|
_src/exCodesHardWay/ex41.py
|
Python
|
mit
| 4,337
| 0.027669
|
from sys import exit
from random import randint
def death():
quips = ["You died. You kinda suck at this.",
"Your mom would be proud. If she were smarter.",
"Such a luser.",
"I have a small puppy that's better at this."]
print quips[randint(0,len(quips)-1)]
exit(1)
def princess_lives_here():
print "You see a beatiful Princess with a shiny crown."
print "She offers you some cake."
eat_it = raw_input("> ")
if eat_it == "eat it":
print "You explode like a pinata full of frogs."
print "The Princess cackles and eats the frogs. Yum!"
return 'death'
elif eat_it == "do not eat it":
print "She throws the cake at you and it curs off your head."
print "The last thing you see is her munching on your torso. Yum!"
return 'death'
elif eat_it == "make her eat it":
print "The Princess screams as you cram the cake in her mouth."
print "Then she smiles and cries and thanks you for saving her."
print "She points to a tiny door and says, 'The Koi needs cake too.'"
print "She gives you the very last bit of cake and ahoves you in."
return 'gold_koi_pond'
else:
print "The princess looks at you confused and just points at the cake."
return 'princess_lives_here'
def gold_koi_pond():
print "There is a garden with a koi pond in the center."
print "You walk close and see a massive fin pole out."
print "You peek in and a creepy looking huge Koi stares at you."
print "It opens its mouth waiting for food."
feed_it = raw_input("> ")
if feed_it == "feed it":
print "The Koi jumps up and rather than eating the cake, eats your arm."
print "You fall in and the Koi shruge than eats you."
print "You are then pooped out sometime later."
return 'death'
elif feed_it == "do not feed it":
print "The Koi grimaces, then thrashes around for a second."
print "It rushes to the other end of the pond, braces against the wall..."
print "then it *lunges* out of the water, up in the air and over your"
print "entire body, cake and all."
print "You are then poped out a week later."
return 'death'
elif feed_it == "throw it in":
print "The Koi wiggles, then leaps into the air to eat the cake."
print "You can see it's happy, it then grunts, thrashes..."
print "and finally rolls over and poops a magic diamond into the air"
print "at your feet."
return 'bear_with_sword'
else:
print "The Koi gets annoyed and wiggles a bit."
return 'gold_koi_pond'
def bear_with_sword():
print "Puzzled, you are about to pick up the fish poop diamond when"
print "a bear bearing a load bearing sw
|
ord walks in."
print '"Hey! That\' my diamond! Where\'d you get that!?"'
print "It holds its paw out and looks at you."
give_it = raw_input("> ")
if give_it == "give it":
print "The bear swipes at your hand to grab the diamond and"
print "rips your hand off in the process. It then looks at"
print 'your bloody stump and says, "Oh crap, sorry about that."'
print "It tries to put your hand back on, but you collapse."
print "The last thing you see is the bear shrug and eat you."
return 'death'
elif give_it == "say_no":
print "The bear looks shocked. Nobody ever told a bear"
print "with a broadsword 'no'. It asks, "
print '"Is it because it\'s not a katana? I could go get one!"'
print "It then runs off and now you notice a big iron gate."
print '"Where the hell did that come from?" You say.'
return 'big_iron_gate'
else:
print "The bear look puzzled as to why you'd do that."
return "bear_with_sword"
def big_iron_gate():
print "You walk up to the big iron gate and see there's a handle."
open_it = raw_input("> ")
if open_it == 'open it':
print "You open it and you are free!"
print "There are mountains. And berries! And..."
print "Oh, but then the bear comes with his katana and stabs you."
print '"Who\'s laughing now!? Love this katana."'
return 'death'
else:
print "That doesn't seem sensible. I mean, the door's right there."
return 'big_iron_gate'
ROOMS = {
'death':death,
'princess_lives_here':princess_lives_here,
'gold_koi_pond':gold_koi_pond,
'big_iron_gate':big_iron_gate,
'bear_with_sword':bear_with_sword
}
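# Hedged aside (not in the original exercise): ROOMS maps the string each room
# function returns to the next room function, so runner() below is effectively
# a small finite-state machine driven by those return values.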
def runner(map,start):
next = start
while True:
room = map[next]
print "\n------------"
next = room()
runner(ROOMS,'princess_lives_here')
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
|
Python
|
apache-2.0
| 37,342
| 0.001767
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SpecialistPoolServiceGrpcTransport
from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
class SpecialistPoolServiceClientMeta(type):
"""Metaclass for the SpecialistPoolService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[SpecialistPoolServiceTransport]]
_transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
_transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[SpecialistPoolServiceTransport]:
"""Returns an ap
|
propriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta):
"""A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
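# Hedged illustration (not in the source): per the docstring above,
# _get_default_mtls_endpoint("aiplatform.googleapis.com") returns
# "aiplatform.mtls.googleapis.com".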
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> SpecialistPoolServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SpecialistPoolServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str:
"""Returns a fully-qualified specialist_pool string."""
return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
project=project, location=location, specialist_pool=specialist_pool,
)
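# Hedged illustration (not in the source): with the hypothetical arguments
# ("my-project", "us-central1", "pool-1") this returns
# "projects/my-project/locations/us-central1/specialistPools/pool-1".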
@staticmethod
def parse_specialist_pool_path(path: str) -> Dict[str, str]:
"""Parses a specialist_pool path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folde
|
acsone/bank-statement-import
|
account_bank_statement_import_ofx/__openerp__.py
|
Python
|
agpl-3.0
| 534
| 0
|
# -*- coding: utf-8 -*-
{
'name': 'Import OFX Bank Statement',
'category': 'Banking addons',
'version': '8.0.1.0.1',
'license': 'AGPL-3',
'author': 'OpenERP SA,'
'Odoo Community Association (OCA)',
'website': 'https://github.com/OCA/bank-statement-import',
'depends': [
'account_bank_statement_import'
],
'demo': [
'demo/demo_data.xml',
],
'external_dependencies': {
'python': ['ofxparse'],
},
'auto_install': False,
'installable': True,
}
|
snegovick/bcam
|
bcam/events.py
|
Python
|
gpl-3.0
| 31,621
| 0.004744
|
from __future__ import absolute_import, division, print_function
import pygtk
pygtk.require('2.0')
import gtk, gobject, cairo
import time
import sys
import imp
import os
from bcam.loader_dxf import DXFLoader
from bcam.loader_excellon import ExcellonLoader
from bcam.tool_operation import TOResult
from bcam.tool_op_drill import TODrill
from bcam.tool_op_exact_follow import TOExactFollow
from bcam.tool_op_offset_follow import TOOffsetFollow
from bcam.tool_op_pocketing import TOPocketing
from bcam.calc_utils import AABB, OverlapEnum
from bcam.path import Path
from bcam.project import project
from bcam.generalized_setting import TOSTypes
from logging import debug, info, warning, error, critical
from bcam.util import dbgfname
from bcam.singleton import Singleton
from bcam.state import State
class EVEnum(object):
load_click = "load_click"
save_click = "save_click"
load_file = "load_file"
save_file = "save_file"
load_project_click = "load_project_click"
save_project_click = "save_project_click"
save_project_as_click = "save_project_as_click"
load_project = "load_project"
save_project = "save_project"
new_project_click = "new_project_click"
quit_click = "quit_click"
screen_left_press = "screen_left_press"
screen_left_release = "screen_left_release"
pointer_motion = "pointer_motion"
drill_tool_click = "drill_tool_click"
deselect_all = "deselect_all"
shift_press = "shift_press"
shift_release = "shift_release"
ctrl_press = "ctrl_press"
ctrl_release = "ctrl_release"
update_paths_list = "update_paths_list"
update_tool_operations_list = "update_tool_operations_list"
path_list_selection_changed = "path_list_selection_changed"
tool_operations_list_selection_changed = "tool_operations_list_selection_changed"
exact_follow_tool_click = "exact_follow_tool_click"
offset_follow_tool_click = "offset_follow_tool_click"
pocket_tool_click = "pocket_tool_click"
update_settings = "update_settings"
tool_operation_up_click = "tool_operation_up_click"
tool_operation_down_click = "tool_operation_down_click"
scroll_up = "scroll_up"
scroll_down = "scroll_down"
hscroll = "hscroll"
vscroll = "vscroll"
tool_paths_check_button_click = "tool_paths_check_button_click"
paths_check_button_click = "paths_check_button_click"
path_delete_button_click = "path_delete_button_click"
tool_operation_delete_button_click = "tool_operation_delete_button_click"
update_progress = "update_progress"
undo_click = "undo_click"
redo_click = "redo_click"
main_start = "main_start"
pause = "pause"
class EventProcessor(object):
ee = EVEnum()
event_list = []
selected_elements = []
selected_path = None
selected_tool_operation = None
left_press_start = None
pointer_position = None
shift_pressed = False
ctrl_pressed = False
def __init__(self):
Singleton.ee = self.ee
Singleton.ep = self
self.started = False
self.events = {
self.ee.load_click: [self.load_click],
self.ee.save_click: [self.save_click],
self.ee.load_file: [self.load_file],
self.ee.save_file: [self.save_file],
self.ee.load_project_click: [self.load_project_click],
self.ee.save_project_click: [self.save_project_click],
self.ee.save_project_as_click: [self.save_project_as_click],
self.ee.load_project: [self.load_project],
self.ee.save_project: [self.save_project],
self.ee.new_project_click: [self.new_project_click],
self.ee.quit_click: [self.quit_click],
self.ee.screen_left_press: [self.screen_left_press],
self.ee.screen_left_release: [self.screen_left_release],
self.ee.pointer_motion: [self.pointer_motion],
self.ee.drill_tool_click: [self.drill_tool_click],
self.ee.deselect_all: [self.deselect_all],
self.ee.shift_press: [self.shift_press],
self.ee.shift_release: [self.shift_release],
self.ee.ctrl_press: [self.ctrl_press],
self.ee.ctrl_release: [self.ctrl_release]
|
,
self.ee.update_paths_list: [self.update_paths_list],
self.ee.path_list_selection_changed: [self.path_list_selection_changed],
self.ee.exact_follow_tool_click: [self.exact_follow_tool_click],
self.ee.offset_follow
|
_tool_click: [self.offset_follow_tool_click],
self.ee.pocket_tool_click: [self.pocket_tool_click],
self.ee.update_tool_operations_list: [self.update_tool_operations_list],
self.ee.tool_operations_list_selection_changed: [self.tool_operations_list_selection_changed],
self.ee.update_settings: [self.update_settings],
self.ee.tool_operation_up_click: [self.tool_operation_up_click],
self.ee.tool_operation_down_click: [self.tool_operation_down_click],
self.ee.scroll_up: [self.scroll_up],
self.ee.scroll_down: [self.scroll_down],
self.ee.hscroll: [self.hscroll],
self.ee.vscroll: [self.vscroll],
self.ee.tool_paths_check_button_click: [self.tool_paths_check_button_click],
self.ee.paths_check_button_click: [self.paths_check_button_click],
self.ee.path_delete_button_click: [self.path_delete_button_click],
self.ee.tool_operation_delete_button_click: [self.tool_operation_delete_button_click],
self.ee.update_progress: [self.update_progress],
self.ee.undo_click: [self.undo_click],
self.ee.redo_click: [self.redo_click],
self.ee.main_start: [self.main_start],
self.ee.pause: [self.pause],
}
def reset(self):
self.selected_elements = []
self.selected_path = None
self.selected_tool_operation = None
self.left_press_start = None
def append_event_processor(self, event, proc):
self.events[event].append(proc)
def prepend_event_processor(self, event, proc):
self.events[event].insert(0, proc)
def set_event(self, event, proc_list):
self.events[event] = proc_list
def push_event(self, event, *args):
self.event_list.append((event, args))
def process(self):
if self.started == False:
self.push_event(self.ee.main_start, None)
self.started = True
event_list = self.event_list[:]
self.event_list = []
for e, args in event_list:
if e in self.events:
for p in self.events[e]:
r = p(args)
if (r == False):
break
else:
dbgfname()
warning(" Unknown event:"+str(e)+" args: "+str(args))
warning(" Please report")
def load_click(self, args):
mimes = [("Drawings (*.dxf)", "Application/dxf", "*.dxf"),
("Drill files (*.drl)", "Application/drl", "*.drl")]
result = self.mw.mk_file_dialog("Open ...", mimes)
if result!=None:
self.push_event(self.ee.load_file, result)
def save_click(self, args):
mimes = [("GCode (*.ngc)", "Application/ngc", "*.ngc")]
result = self.mw.mk_file_save_dialog("Save ...", mimes)
if result!=None:
self.push_event(self.ee.save_file, result)
def load_project_click(self, args):
mimes = [("BCam projects (*.bcam)", "Application/bcam", "*.bcam")]
result = self.mw.mk_file_dialog("Open project ...", mimes)
if result!=None:
self.push_event(self.ee.load_project, result)
def save_project_click(self, args):
dbgfname()
debug(" save project clicked")
if (project.get_path() != None):
self.save_project((project.get_path(), ))
else:
mimes = [("BCam project (*.bcam)", "Application/bcam", "*.bcam")]
result = self.mw.mk_file_save_dialog("Save project ...", mimes)
if result!=None:
self.save_project((result, ))
def save_p
|
uwosh/uwosh.emergency.client
|
uwosh/emergency/client/interfaces.py
|
Python
|
gpl-2.0
| 228
| 0.008772
|
from zope.interface import Interface, Attribute
from zope import schema
from uwosh.emergency.clien
|
t.config import mf as _
class IUWOs
|
hEmergencyClientLayer(Interface):
"""Marker interface that defines a browser layer
"""
|
airbnb/kafka
|
tests/kafkatest/services/monitor/jmx.py
|
Python
|
apache-2.0
| 5,768
| 0.003814
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.utils.util import wait_until
class JmxMixin(object):
"""This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.
    A couple of things worth noting:

- this is not a service in its own right.
- we assume the service using JmxMixin also uses KafkaPathResolverMixin
"""
def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None):
self.jmx_object_names = jmx_object_names
self.jmx_attributes = jmx_attributes or []
self.jmx_port = 9192
self.started = [False] * num_nodes
self.jmx_stats = [{} for x in range(num_nodes)]
self.maximum_jmx_value = {} # map from object_attribute_name to maximum value observed over time
self.average_jmx_value = {} # map from object_attribute_name to average value observed over time
self.jmx_tool_log = "/mnt/jmx_tool.log"
self.jmx_tool_err_log = "/mnt/jmx_tool.err.log"
def clean_node(self, node):
node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % self.jmx_tool_log, allow_fail=False)
def start_jmx_tool(self, idx, node):
if self.jmx_object_names is None:
self.logger.debug("%s: Not starting jmx tool because no jmx objects are defined" % node.account)
return
if self.started[idx-1]:
self.logger.debug("%s: jmx tool has been started already on this node" % node.account)
return
cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
for jmx_object_name in self.jmx_object_names:
cmd += " --object-name %s" % jmx_object_name
for jmx_attribute in self.jmx_attributes:
cmd += " --attributes %s" % jmx_attribute
cmd += " 1>> %s" % self.jmx_tool_log
cmd += " 2>> %s
|
&" % self.jmx_tool_
|
err_log
self.logger.debug("%s: Start JmxTool %d command: %s" % (node.account, idx, cmd))
node.account.ssh(cmd, allow_fail=False)
wait_until(lambda: self._jmx_has_output(node), timeout_sec=10, backoff_sec=.5, err_msg="%s: Jmx tool took too long to start" % node.account)
self.started[idx-1] = True
def _jmx_has_output(self, node):
"""Helper used as a proxy to determine whether jmx is running by that jmx_tool_log contains output."""
try:
node.account.ssh("test -z \"$(cat %s)\"" % self.jmx_tool_log, allow_fail=False)
return False
except RemoteCommandError:
return True
def read_jmx_output(self, idx, node):
if not self.started[idx-1]:
return
object_attribute_names = []
cmd = "cat %s" % self.jmx_tool_log
self.logger.debug("Read jmx output %d command: %s", idx, cmd)
lines = [line for line in node.account.ssh_capture(cmd, allow_fail=False)]
assert len(lines) > 1, "There don't appear to be any samples in the jmx tool log: %s" % lines
for line in lines:
if "time" in line:
object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
continue
stats = [float(field) for field in line.split(',')]
time_sec = int(stats[0]/1000)
self.jmx_stats[idx-1][time_sec] = {name: stats[i+1] for i, name in enumerate(object_attribute_names)}
# do not calculate average and maximum of jmx stats until we have read output from all nodes
# If the service is multithreaded, this means that the results will be aggregated only when the last
# service finishes
if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
return
start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
for name in object_attribute_names:
aggregates_per_time = []
for time_sec in xrange(start_time_sec, end_time_sec + 1):
# assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
# assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
aggregates_per_time.append(sum(values_per_node))
self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
self.maximum_jmx_value[name] = max(aggregates_per_time)
def read_jmx_output_all_nodes(self):
for node in self.nodes:
self.read_jmx_output(self.idx(node), node)
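# Illustrative usage sketch (class and metric names below are hypothetical, not taken from this file):
# a ducktape service would combine this mixin with the Kafka path resolver mentioned in the class
# docstring, e.g.
#   class MyService(KafkaPathResolverMixin, JmxMixin, Service):
#       def __init__(self, context, num_nodes):
#           Service.__init__(self, context, num_nodes)
#           JmxMixin.__init__(self, num_nodes,
#                             jmx_object_names=["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec"],
#                             jmx_attributes=["OneMinuteRate"])
# and call start_jmx_tool() / read_jmx_output_all_nodes() around its workload.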
|
shobhitmittal/textract
|
docs/conf.py
|
Python
|
mit
| 8,694
| 0.005866
|
# -*- coding: utf-8 -*-
#
# textract documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 4 11:09:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.abspath('.'), '..'))
import textract
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# Th
|
e encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'textract'
copyright = u'2014, Dean Malmgren'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = textract.VERSION
# The full version, including alpha/beta/rc
|
tags.
release = textract.VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'textractdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'textract.tex', u'textract Documentation',
u'Dean Malmgren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'textract', u'textract Documentation',
[u'Dean Malmgren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'textract', u'textract Documentation',
u'Dean Malmgren', 'textract', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mamaddeveloper/teleadmin
|
tools/imageQueryParser.py
|
Python
|
mit
| 1,079
| 0
|
import random
class ImageQueryParser:
def __init__(self):
pass
def parse(self, query_string):
tab = query_string.split(" ")
last = tab[-1].lower()
is_random = False
index = 0
if last.startswith("-"):
if last == "-r":
|
is_random = True
tab.pop()
else:
try:
index = int(last[1:])
tab.pop()
except ValueError:
pass
query_string = " ".join(tab)
return ImageQuery(query_string, is_random, index)
cla
|
ss ImageQuery:
def __init__(self, query, is_random, index):
self.__query = query
self.__is_random = is_random
self.__index = index
def query(self):
return self.__query
def is_random(self):
return self.__is_random
def next_index(self):
if self.is_random():
return random.randrange(0, 100)
else:
i = self.__index
self.__index += 1
return i
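# Illustrative usage (not part of the original module); exercises the trailing
# "-r" / "-<n>" flags handled by ImageQueryParser.parse above.
if __name__ == "__main__":
    parser = ImageQueryParser()
    q = parser.parse("sunset beach -3")
    print(q.query())       # "sunset beach"
    print(q.is_random())   # False
    print(q.next_index())  # 3
    print(q.next_index())  # 4
    r = parser.parse("sunset beach -r")
    print(r.is_random())   # True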
|
gcrisis/ardupilot
|
Tools/autotest/param_metadata/param.py
|
Python
|
gpl-3.0
| 5,433
| 0.024664
|
class Parameter(object):
def __init__(self, name):
self.name = name
class Vehicle(object):
def __init__(self, name, path):
self.name = name
self.path = path
self.params = []
class Library(object):
def __init__(self, name):
self.name = name
self.params = []
known_param_fields = [
'Description',
'DisplayName',
'Values',
'Range',
'Units',
'Increment',
'User',
'RebootRequired',
'Bitmask',
'Volatile',
'ReadOnly',
]
# Follow SI units conventions from:
# http://physics.nist.gov/cuu/Units/units.html
# http://physics.nist.gov/cuu/Units/outside.html
# and
# http://physics.nist.gov/cuu/Units/checklist.html
# http://www.bipm.org/en/publications/si-brochure/
# http://www1.bipm.org/en/CGPM/db/3/2/ g_n unit for G-force
# one further constraint is that only printable (7bit) ASCII characters are allowed
known_units = {
    # abbreviation : full-text (used in .html .rst and .wiki files)
# time
's' : 'seconds' ,
'ds' : 'deciseconds' ,
'cs' : 'centiseconds' ,
'ms' : 'milliseconds' ,
'PWM' : 'PWM in microseconds' , # should be microseconds, this is NOT a SI unit, but follows https://github.com/ArduPilot/ardupilot/pull/5538#issuecomment-271943061
'Hz' : 'hertz' ,
# distance
'km' : 'kilometers' , # metre is the SI unit name, meter is the american spelling of it
'm' : 'meters' , # metre is the SI unit name, meter is the american spelling of it
'm/s' : 'meters per second' , # metre is the SI unit name, meter is the american spelling of it
'm/s/s' : 'meters per square second' , # metre is the SI unit name, meter is the american spelling of it
'm/s/s/s' : 'meters per cubic second' , # metre is the SI unit name, meter is the american spelling of it
'cm' : 'centimeters' , # metre is the SI unit name, meter is the american spelling of it
'cm/s' : 'centimeters per second' , # metre is the SI unit name, meter is the american spelling of it
'cm/s/s' : 'centimeters per square second', # metre is the SI unit name, meter is the american spelling of it
'cm/s/s/s': 'centimeters per cubic second' , # metre is the SI unit name, meter is the american spelling of it
'mm' : 'millimeters' , # metre is the SI unit name, meter is the american spelling of it
# temperature
'degC' : 'degrees Celsius' , # Not SI, but Kelvin
|
is too cumbersome for most users
# angle
    'deg'     : 'degrees'                 , # Not SI, but in some situations more user-friendly than radians
    'deg/s'   : 'degrees per second'      , # Not SI, but in some situations more user-friendly than radians
'cdeg'
|
          : 'centidegrees'            , # Not SI, but in some situations more user-friendly than radians
    'cdeg/s'  : 'centidegrees per second', # Not SI, but in some situations more user-friendly than radians
    'cdeg/s/s': 'centidegrees per square second' , # Not SI, but in some situations more user-friendly than radians
'rad' : 'radians' ,
'rad/s' : 'radians per second' ,
'rad/s/s' : 'radians per square second' ,
# electricity
'A' : 'ampere' ,
'V' : 'volt' ,
'W' : 'watt' ,
# magnetism
'Gauss' : 'gauss' , # Gauss is not an SI unit, but 1 tesla = 10000 gauss so a simple replacement is not possible here
'Gauss/s' : 'gauss per second' , # Gauss is not an SI unit, but 1 tesla = 10000 gauss so a simple replacement is not possible here
'mGauss' : 'milligauss' , # Gauss is not an SI unit, but 1 tesla = 10000 gauss so a simple replacement is not possible here
# pressure
'Pa' : 'pascal' ,
'mbar' : 'millibar' ,
# ratio
'%' : 'percent' ,
'%/s' : 'percent per second' ,
'd%' : 'decipercent' , # decipercent is strange, but "per-mille" is even more exotic
# compound
'm.m/s/s' : 'square meter per square second',
'deg/m/s' : 'degrees per meter per second' ,
'm/s/m' : 'meters per second per meter' , # Why not use Hz here ????
'mGauss/A': 'milligauss per ampere' ,
'mA.h' : 'milliampere hour' ,
'A/V' : 'ampere per volt' ,
'm/V' : 'meters per volt' ,
'gravities': 'standard acceleration due to gravity' , # g_n would be a more correct unit, but IMHO no one understands what g_n means
}
required_param_fields = [
'Description',
'DisplayName',
'User',
]
known_group_fields = [
'Path',
]
|
dongweiming/web_develop
|
chapter3/section4/consts.py
|
Python
|
gpl-3.0
| 170
| 0
|
# coding=utf-8
HOSTNAME =
|
'localhost'
DATABASE = 'r'
USERNAME = 'web'
PASSWORD = 'web'
DB_URI = 'mysql://{}:{}@{}/{}'.format(
USERNAME, PASSWORD, HOSTNAME, DATABAS
|
E)
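# With the values above this evaluates to: 'mysql://web:web@localhost/r'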
|
django-wodnas/django-tinymce
|
tinymce/views.py
|
Python
|
mit
| 4,440
| 0.001802
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
import logging
from django.core import urlresolvers
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.utils import simplejson
from django.utils.translation import ugettext as _
from tinymce.compressor import gzip_compressor
from tinymce.widgets import get_language_config
from django.views.decora
|
tors.csrf import csrf_exempt
def textareas_js(request, name, lang=None):
"""
    Returns an HttpResponse whose content is a Javascript file. The template
is loaded from 'tinymce/<name>_textareas.js' or
'<name>/tinymce_textareas.js'. Optionally, the lang argument sets the
content lan
|
guage.
"""
template_files = (
'tinymce/%s_textareas.js' % name,
'%s/tinymce_textareas.js' % name,
)
template = loader.select_template(template_files)
vars = get_language_config(lang)
vars['content_language'] = lang
context = RequestContext(request, vars)
return HttpResponse(template.render(context),
content_type="application/x-javascript")
@csrf_exempt
def spell_check(request):
"""
    Returns an HttpResponse that implements the TinyMCE spellchecker protocol.
"""
try:
import enchant
raw = request.raw_post_data
input = simplejson.loads(raw)
id = input['id']
method = input['method']
params = input['params']
lang = params[0]
arg = params[1]
if not enchant.dict_exists(str(lang)):
raise RuntimeError("dictionary not found for language '%s'" % lang)
checker = enchant.Dict(str(lang))
if method == 'checkWords':
result = [word for word in arg if not checker.check(word)]
elif method == 'getSuggestions':
result = checker.suggest(arg)
else:
raise RuntimeError("Unkown spellcheck method: '%s'" % method)
output = {
'id': id,
'result': result,
'error': None,
}
except Exception:
logging.exception("Error running spellchecker")
return HttpResponse(_("Error running spellchecker"))
return HttpResponse(simplejson.dumps(output),
content_type='application/json')
def preview(request, name):
"""
    Returns an HttpResponse whose content is an HTML file that is used
by the TinyMCE preview plugin. The template is loaded from
'tinymce/<name>_preview.html' or '<name>/tinymce_preview.html'.
"""
template_files = (
'tinymce/%s_preview.html' % name,
'%s/tinymce_preview.html' % name,
)
template = loader.select_template(template_files)
return HttpResponse(template.render(RequestContext(request)),
content_type="text/html")
def flatpages_link_list(request):
"""
    Returns an HttpResponse whose content is a Javascript file representing a
list of links to flatpages.
"""
from django.contrib.flatpages.models import FlatPage
link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
return render_to_link_list(link_list)
def compressor(request):
"""
Returns a GZip-compressed response.
"""
return gzip_compressor(request)
def render_to_link_list(link_list):
"""
    Returns an HttpResponse whose content is a Javascript file representing a
    list of links suitable for use with the TinyMCE external_link_list_url
configuration option. The link_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCELinkList', link_list)
def render_to_image_list(image_list):
"""
    Returns an HttpResponse whose content is a Javascript file representing a
    list of images suitable for use with the TinyMCE external_image_list_url
configuration option. The image_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCEImageList', image_list)
def render_to_js_vardef(var_name, var_value):
output = "var %s = %s" % (var_name, simplejson.dumps(var_value))
return HttpResponse(output, content_type='application/x-javascript')
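# Illustrative output (hypothetical data): render_to_link_list([("Home", "/"), ("About", "/about/")])
# responds with the body: var tinyMCELinkList = [["Home", "/"], ["About", "/about/"]]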
def filebrowser(request):
fb_url = urlresolvers.reverse('filebrowser.views.browse')
return render_to_response('tinymce/filebrowser.js', {'fb_url': fb_url},
context_instance=RequestContext(request))
|
JaeGyu/PythonEx_1
|
p200_048.py
|
Python
|
mit
| 497
| 0
|
'''
mode | de
|
sc
r or rt | read in text mode
w or wt | write in text mode
a or at | append to the end of the file in text mode
rb | read in binary mode
wb | write in binary mode
ab | append to the end of the file in
|
binary mode
'''
f = open("./py200_sample.txt", "w")
f.write("abcd")
f.close()
r = open("./py200_sample.txt", "r")
print("-" * 60)
print(r.readline())
r.close()
|
karllessard/tensorflow
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_output.py
|
Python
|
apache-2.0
| 5,614
| 0.015497
|
# Copyright 2019 The TensorFlow Auth
|
ors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required b
|
y applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_output | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
# functions in the resulting MLIR output match the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
@tf.function(input_signature=[])
def f0000_single_return(self):
return tf.constant(1.0, shape=[1])
# Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])
def f0001_multiple_results_no_punctuation(self):
return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])
# Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])
def f0002_multiple_results_parentheses(self):
return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))
# Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])
def f0003_multiple_results_brackets(self):
return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])
def f0004_list_2_elements(self):
return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])
def f0005_dict_2_keys(self):
return {
'x': tf.constant(1.0, shape=[1]),
'y': tf.constant(1.0, shape=[2]),
}
# Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f0006_multiple_return_statements(self, x):
if x > 3.:
return {'x': tf.constant(1.0, shape=[1])}
else:
return {'x': tf.constant(1.0, shape=[1])}
if __name__ == '__main__':
common.do_test(TestModule)
|
TGITS/programming-workouts
|
exercism/python/series/series_test.py
|
Python
|
mit
| 1,747
| 0.000572
|
import unittest
from series import slices
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class SeriesTest(unittest.TestCase):
def test_slices_of_one_from_one(self):
self.assertEqual(slices("1", 1), ["1"])
def test_slices_of_one_from_two(self):
self.asse
|
rtEqual(slices("12", 1), ["1", "2"])
def test_slices_of_two(self):
self.assertEqual(slices("35", 2), ["35"])
def test_slices_of_two_overlap(self):
self.assertEqual(slices("9142", 2), ["91", "14", "42"])
def test_slices_can_include_duplicates(self):
self.assertEqual(slices("777777", 3), ["777", "777", "777
|
", "777"])
def test_slices_of_a_long_series(self):
self.assertEqual(
slices("918493904243", 5),
["91849", "18493", "84939", "49390", "93904", "39042", "90424", "04243"],
)
def test_slice_length_is_too_large(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 6)
def test_slice_length_cannot_be_zero(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 0)
def test_slice_length_cannot_be_negative(self):
with self.assertRaisesWithMessage(ValueError):
slices("123", -1)
def test_empty_series_is_invalid(self):
with self.assertRaisesWithMessage(ValueError):
slices("", 1)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
unittest.main()
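# A minimal implementation satisfying the tests above could look like this
# (illustrative sketch only, not the exercise's reference solution):
#
#     def slices(series, length):
#         if not 0 < length <= len(series):
#             raise ValueError("slice length must be between 1 and len(series)")
#         return [series[i:i + length] for i in range(len(series) - length + 1)]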
|
photoninger/ansible
|
lib/ansible/plugins/action/win_reboot.py
|
Python
|
gpl-3.0
| 8,292
| 0.0041
|
# (c) 2016, Matt Davis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'
def get_system_uptime(self):
uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
(rc, stdout, stderr) = self._connection.exec_command(uptime_command)
if rc != 0:
raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
% (rc, stdout, stderr))
return stdout
def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
exc = ""
while datetime.utcnow() < max_end_time:
try:
what()
if what_desc:
display.debug("win_reboot: %s success" % what_desc)
return
except Exception as e:
exc = e
if what_desc:
display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep))
time.sleep(fail_sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return dict(changed=True, elapsed=0, rebooted=True)
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
# Handle timeout parameters and its alias
deprecated_args = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
for arg, version in deprecated_args.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))
if self._task.args.get('connect_timeout') is not None:
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
else:
connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
if self._task.args.get('reboot_timeout') is not None:
reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
else:
reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))
if self._task.args.get('pre_reboot_delay') is not None:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
else:
pre_reboot_delay = int(self._task.args.get('pre_reboot_del
|
ay_sec', self.DEFAULT_PRE_REBOOT_DELAY))
|
if self._task.args.get('post_reboot_delay') is not None:
post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
else:
post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))
test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))
# Get current uptime
try:
before_uptime = self.get_system_uptime()
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_native(e)
return result
# Initiate reboot
display.vvv("rebooting server")
(rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if rc == 1190:
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
(rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')
# Initiate reboot again
(rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
stdout += stdout1 + stdout2
stderr += stderr1 + stderr2
if rc != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Shutdown command failed, error text was %s" % stderr
return result
start = datetime.now()
# Get the original connection_timeout option var so it can be reset after
connection_timeout_orig = None
try:
connection_timeout_orig = self._connection.get_option('connection_timeout')
except AnsibleError:
display.debug("win_reboot: connection_timeout connection option has not been set")
try:
# keep on checking system uptime with short connection responses
def check_uptime():
display.vvv("attempting to get system uptime")
# override connection timeout from defaults to custom value
try:
self._connection.set_options(direct={"connection_timeout": connect_timeout})
self._connection._reset()
except AttributeError:
display.warning("Connection plugin does not allow the connection timeout to be overridden")
# try and get uptime
try:
current_uptime = self.get_system_uptime()
except Exception as e:
raise e
if current_uptime == before_uptime:
raise Exception("uptime has not changed")
self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")
# reset the connection to clear the custom connection timeout
try:
self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
self._connection._reset()
except (AnsibleError, AttributeError):
display.debug("Failed to reset connection_timeout back to default")
# finally run test command to ensure everything is working
def run_test_command():
display.vvv("attempting post-reboot test command '%s'" % test_command)
(rc, stdout, stderr) = self._connection.exec_command(test_command)
if rc != 0:
raise Exception('test command failed')
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_native(toex)
if post_reboot_delay != 0:
display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
time.sleep(post_reboot_dela
|
mahmutf/dupeguru
|
core/engine.py
|
Python
|
gpl-3.0
| 18,745
| 0.002401
|
# Created By: Virgil Dupras
# Created On: 2006/01/29
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import difflib
import itertools
import logging
import string
from collections import defaultdict, namedtuple
from unicodedata import normalize
from hscommon.util import flatten, multi_replace
from hscommon.trans import tr
from hscommon.jobprogress import job
(WEIGHT_WORDS, MATCH_SIMILAR_WORDS, NO_FIELD_ORDER,) = range(3)
JOB_REFRESH_RATE = 100
def getwords(s):
# We decompose the string so that ascii letters with accents can be part of the word.
s = normalize("NFD", s)
s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", " ").lower()
s = "".join(
c for c in s if c in string.ascii_letters + string.digits + string.whitespace
)
return [_f for _f in s.split(" ") if _f] # remove empty elements
def getfields(s):
fields = [getwords(field) for field in s.split(" - ")]
return [_f for _f in fields if _f]
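# Worked example (illustrative input): getwords("Brel, Jacques - Ne Me Quitte Pas")
# -> ['brel', 'jacques', 'ne', 'me', 'quitte', 'pas'], while
# getfields("Brel, Jacques - Ne Me Quitte Pas") -> [['brel', 'jacques'], ['ne', 'me', 'quitte', 'pas']]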
def unpack_fields(fields):
result = []
for field in fields:
if isinstance(field, list):
result += field
else:
result.append(field)
return result
def compare(first, second, flags=()):
"""Returns the % of words that match between ``first`` and ``second``
The result is a ``int`` in the range 0..100.
``first`` and ``second`` can be either a string or a list (of words).
"""
if not (first and second):
return 0
if any(isinstance(element, list) for element in first):
return compare_fields(first, second, flags)
second = second[:] # We must use a copy of second because we remove items from it
match_similar = MATCH_SIMILAR_WORDS in flags
weight_words = WEIGHT_WORDS in flags
joined = first + second
total_count = sum(len(word) for word in joined) if weight_words else len(joined)
match_count = 0
in_order = True
for word in first:
if match_similar and (word not in second):
similar = difflib.get_close_matches(word, second, 1, 0.8)
if similar:
word = similar[0]
if word in second:
if second[0] != word:
in_order = False
second.remove(word)
match_count += len(word) if weight_words else 1
result = round(((match_count * 2) / total_count) * 100)
if (result == 100) and (not in_order):
result = 99 # We cannot consider a match exact unless the ordering is the same
return result
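# Worked example (illustrative): with no flags, compare(['foo', 'bar'], ['bar', 'foo'])
# matches 2 of 4 words but out of order, so the 100% score is demoted to 99;
# compare(['foo', 'bar'], ['foo', 'baz']) matches 1 of 4 words -> round(2 * 1 / 4 * 100) == 50.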
def compare_fields(first, second, flags=()):
"""Returns the score for the lowest matching :ref:`fields`.
``first`` and ``second`` must be lists of lists of string. Each sub-list is then compared with
:func:`compare`.
"""
if len(first) != len(second):
return 0
if NO_FIELD_ORDER in flags:
results = []
# We don't want to remove field directly in the list. We must work on a copy.
second = second[:]
for field1 in first:
max = 0
matched_field = None
for field2 in second:
r = compare(field1, field2, flags)
if r > max:
max = r
matched_field = field2
results.append(max)
if matched_field:
second.remove(matched_field)
else:
results = [
compare(field1, field2, flags) for field1, field2 in zip(first, second)
]
return min(results) if results else 0
def build_word_dict(objects, j=job.nulljob):
"""Returns a dict of objects mapped by their words.
objects must have a ``words`` attribute being a list of strings or a list of lists of strings
(:ref:`fields`).
The result will be a dict with words as keys, lists of objects as values.
"""
result = defaultdict(set)
for object in j.iter_with_progress(
objects, "Prepared %d/%d files", JOB_REFRESH_RATE
):
for word in unpack_fields(object.words):
result[word].add(object)
return result
def merge_similar_words(word_dict):
"""Take all keys in ``word_dict`` that are similar, and merge them together.
``word_dict`` has been built with :func:`build_word_dict`. Similarity is computed with Python's
``difflib.get_close_matches()``, which computes the number of edits that are necessary to make
a word equal to the other.
"""
keys = list(word_dict.keys())
keys.sort(key=len) # we want the shortest word to stay
while keys:
key = keys.pop(0)
similars = difflib.get_close_matches(key, keys, 100, 0.8)
if not similars:
continue
objects = word_dict[key]
for similar in similars:
objects |= word_dict[similar]
del word_dict[similar]
keys.remove(similar)
def reduce_common_words(word_dict, threshold):
"""Remove all objects from ``word_dict`` values where the object count >= ``threshold``
``word_dict`` has been built with :func:`build_word_dict`.
    The exception to this removal is objects whose words are all common,
    because if we removed them, we would miss some duplicates!
"""
uncommon_words = set(
word for word, objects in word_dict.items() if len(objects) < threshold
)
for word, objects in list(word_dict.items()):
if len(objects) < threshold:
continue
reduced = set()
for o in objects:
if not any(w in uncommon_words for w in unpack_fields(o.words)):
reduced.add(o)
if reduced:
word_dict[word] = reduced
else:
del word_dict[word]
# Writing docstrings in a namedtuple is tricky. From Python 3.3, it's possible to set __doc__, but
# some research allowed me to find a more elegant solution, which is what is done here. See
# http://stackoverflow.com/questions/1606436/adding-docstrings-to-namedtuples-in-python
class Match(namedtuple("Match", "first second percentage")):
"""Represents a match between two :class:`~core.fs.File`.
    Regardless of the matching method, when two files are determined to match, a Match pair is created,
which holds, of course, the two matched files, but also their match "level".
.. attribute:: first
first file of the pair.
.. attribute:: second
second file of the pair.
.. attribute:: percentage
their match level according to the scan method which found the match. int from 1 to 100. For
exact scan methods, such as Contents scans, this will always be 100.
"""
__slots__ = ()
def get_match(first, second, flags=()):
# it is assumed here that first and second both have a "words" attribute
percentage = compare(first.words, second.words, flags)
return Match(first, second, percentage)
def getmatches(
objects,
min_match_percentage=0,
match_similar_words=False,
weight_words=False,
no_field_order=False,
j=job.nulljob,
):
"""Returns a list of :class:`Match` within ``objects`` after fuzzily matching their words.
:param objects: List of :class:`~core.fs.File` to match.
:param int min_match_percentage: minimum % of words that have to match.
:param bool match_similar_words: make similar words (see :func:`merge_similar_words`) match.
:param bool weight_words: longer words are worth more in match % computations.
:param bool no_field_order: match :ref:`fields` regardless of their order.
:param j: A :ref:`job progress instance <jobs>
|
`.
"""
COMMON_WORD_THRESHOLD = 50
LIMIT = 5000000
j = j.start_subjob(2)
sj = j.start_subjob(2)
for o in objects:
if not hasattr(o, "words"):
o.words = getwords(o.name)
word_dict = build_word_dict(objects, sj)
reduce_common_words(word_dict, COMMON_WORD_THRESHOLD)
if match_similar_words:
merge_similar_words(word_dict)
match_fl
|
ags = []
if we
|
alex/pyechonest
|
examples/try_new_things.py
|
Python
|
bsd-3-clause
| 6,593
| 0.005764
|
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-09-01
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
# ========================
# = try_new_things.py =
# ========================
#
# enter a few of your favorite artists and create a playlist of new music that
# you might like.
#
import sys, os, logging
import xml.sax.saxutils as saxutils
from optparse import OptionParser
from pyechonest import artist, playlist
# set your api key here if it's not set in the environment
# config.ECHO_NEST_API_KEY = "XXXXXXXXXXXXXXXXX"
logger = logging.getLogger(__name__)
class XmlWriter(object):
""" code from: http://users.musicbrainz.org/~matt/xspf/m3u2xspf
Copyright (c) 2006, Matthias Friedrich <[email protected]>
"""
def __init__(self, outStream, indentAmount=' '):
self._out = outStream
self._indentAmount = indentAmount
self._stack = [ ]
def prolog(self, encoding='UTF-8', version='1.0'):
pi = '<?xml version="%s" encoding="%s"?>' % (version, encoding)
self._out.write(pi + '\n')
def start(self, name, attrs={ }):
indent = self._getIndention()
self._stack.append(name)
self._out.write(indent + self._makeTag(name, attrs) + '\n')
def end(self):
name = self._stack.pop()
indent = self._getIndention()
self._out.write('%s</%s>\n' % (indent, name))
def elem(self, name, value, attrs={ }):
# delete attributes with an unset value
for (k, v) in attrs.items():
if v is None or v == '':
del attrs[k]
if value is None or value == '':
if len(attrs) == 0:
return
self._out.write(self._getIndention())
self._out.write(self._makeTag(name, attrs, True) + '\n')
else:
escValue = saxutils.escape(value or '')
self._out.write(self._getIndention())
self._out.write(self._makeTag(name, attrs))
self._out.write(escValue)
self._out.write('</%s>\n' % name)
def _getIndention(self):
return self._indentAmount * len(self._stack)
def _makeTag(self, name, attrs={ }, close=False):
ret = '<' + name
for (k, v) in attrs.iteritems():
if v is not None:
v = saxutils.quoteattr(str(v))
ret += ' %s=%s' % (k, v)
if close:
return ret + '/>'
else:
return ret + '>'
def write_xspf(f, tuples):
"""send me a list of (artist,title,mp3_url)"""
xml = XmlWriter(f, indentAmount=' ')
xml.prolog()
xml.start('playlist', { 'xmlns': 'http://xspf.org/ns/0/', 'version': '1' })
xml.start('trackList')
for tupe in tuples:
xml.start('track')
xml.elem('creator',tupe[0])
xml.elem('title',tupe[1])
xml.elem('location', tupe[2])
xml.end()
xml.end()
xml.end()
f.close()
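# Illustrative call (hypothetical data): write_xspf(open("mix.xspf", "w"),
#   [("Arcade Fire", "Wake Up", "http://example.com/preview.mp3")])
# produces an XSPF <playlist> with one <track> holding creator/title/location.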
def lookup_seeds(seed_artist_names):
seed_ids = []
for artist_name in seed_artist_names:
try:
seed_ids.append("-%s" % (artist.Artist(artist_name).id,))
except Exception:
logger.info('artist "%s" not found.' % (artist_name,))
# we could try to do full artist search here
# and let them choose the right artist
logger.info('seed_ids: %s' % (seed_ids,))
return seed_ids
def find_playlist(seed_artist_ids, playable=False):
if playable:
logger.info("finding playlist with audio...")
p = playlist.static(type='artist-radio', artist_id=seed_artist_ids, variety=1, buckets=['id:7digital', 'tracks'], limit=True)
else:
logger.info("finding playlist without audio...")
p = playlist.static(type='artist-radio', artist_id=seed_artist_ids, variety=1)
return p
if __name__ == "__main__":
usage = 'usage: %prog [options] "artist 1" "artist 2" ... "artist N"\n\n' \
'example:\n' \
'\t ./%prog "arcade fire" "feist" "broken social scene" -x -f arcade_feist_scene.xspf\n' \
'\t ./%prog "justice" "four tet" "bitshifter" -v\n'
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="say what you're doing")
parser.add_option("-a", "--audio",
action="store_true", dest="audio", default=False,
help="fetch sample audio for songs")
parser.add_option("-x", "--xspf",
action="store_true", dest="xspf", default=False,
help="output an xspf format playlist")
parser.add_option("-f", "--filename",
metavar="FILE", help="write output to FILE")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("you must provide at least 1 seed artist!")
# handle verbose logging
log_level = logging.ERROR
if options.verbose:
log_level = logging.INFO
logging.basicConfig(level=log_level)
logger.setLevel(log_level)
# make sure output file doesn't already exist
if options.filename and os.path.exists(options.filename):
logger.error("The file path: %s already exists." % (options.filename,))
sys.exit(1)
# resolve seed artists
seed_ids = lookup_seeds(args)
# find playlist
raw_plist = find_playlist(seed_ids, playable=(options.audio or options.xspf))
tuple_plist = []
for s in raw_plist:
name = s.artist_name
title = s.title
url = ""
if options.audio:
url = s.get_tracks('7digital', [{}])[0].get('preview_url')
tuple_plist.append((name,title,url))
# write to stdout or file specified
fout = open(options.filename, 'w') if options.filename else sys.stdout
if options.xspf:
write_xspf(fout, tuple_plist)
else:
for tupe in tup
|
le_plist:
fout.wr
|
ite("%s - %s \t %s\n" % tupe)
logger.info("all done!")
sys.exit(0)
|
SasView/sasview
|
src/sas/qtgui/UnitTesting/SquishTestSuites/suite_sasview_qt/tst_Resolution/test.py
|
Python
|
bsd-3-clause
| 3,434
| 0.009027
|
# -*
|
- coding: utf-8 -*-
def main():
startApplication("sasview")
c
|
lickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Resolution")
test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentIndex, 0)
test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "None")
test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").count, 1)
clickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Model")
clickButton(waitForObject(":groupBox.cmdLoad_QPushButton"))
waitForObjectItem(":stackedWidget.listView_QListView", "test")
doubleClickItem(":stackedWidget.listView_QListView", "test", 36, 4, 0, Qt.LeftButton)
waitForObjectItem(":stackedWidget.listView_QListView", "1d\\_data")
doubleClickItem(":stackedWidget.listView_QListView", "1d\\_data", 30, 10, 0, Qt.LeftButton)
waitForObjectItem(":stackedWidget.listView_QListView", "cyl\\_400\\_20\\.txt")
doubleClickItem(":stackedWidget.listView_QListView", "cyl\\_400\\_20\\.txt", 72, 3, 0, Qt.LeftButton)
clickButton(waitForObject(":groupBox.cmdSendTo_QPushButton"))
mouseClick(waitForObject(":groupBox_6.cbCategory_QComboBox_2"), 136, 8, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":groupBox_6.cbCategory_QComboBox_2", "Cylinder"), 129, 9, 0, Qt.LeftButton)
clickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Resolution")
test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentIndex, 0)
test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "None")
test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").count, 3)
mouseClick(waitForObject(":groupBox_4.cbSmearing_QComboBox"), 117, 7, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":groupBox_4.cbSmearing_QComboBox", "Custom Pinhole Smear"), 113, 6, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "Custom Pinhole Smear")
test.compare(str(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").text), "<html><head/><body><p>dQ<span style=\" vertical-align:sub;\">low</span></p></body></html>")
test.compare(str(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").text), "<html><head/><body><p>dQ<span style=\" vertical-align:sub;\">high</span></p></body></html>")
test.compare(str(waitForObjectExists(":groupBox_4.txtSmearUp_QLineEdit").text), "")
test.compare(waitForObjectExists(":groupBox_4.txtSmearUp_QLineEdit").enabled, True)
test.compare(str(waitForObjectExists(":groupBox_4.txtSmearDown_QLineEdit").text), "")
test.compare(waitForObjectExists(":groupBox_4.txtSmearDown_QLineEdit").enabled, True)
mouseClick(waitForObject(":groupBox_4.cbSmearing_QComboBox"), 117, 15, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":groupBox_4.cbSmearing_QComboBox", "Custom Slit Smear"), 89, 5, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "Custom Slit Smear")
test.compare(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").visible, True)
test.compare(str(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").text), "Slit height")
test.compare(str(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").text), "Slit width")
test.compare(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").visible, True)
|
ARMmbed/yotta_osx_installer
|
workspace/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py
|
Python
|
apache-2.0
| 5,540
| 0.000722
|
from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
                c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
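# Hedged usage sketch (not part of the vendored module): the typical
# acquire/release cycle around a critical section. LockTimeout propagates if the
# lock cannot be obtained within the timeout passed to acquire().
def _example_usage(path='somefile'):
    lock = SQLiteLockFile(path, threaded=False)
    lock.acquire(timeout=5)
    try:
        pass  # critical section protected by the lock
    finally:
        lock.release()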
|
bbusemeyer/busempyer
|
drivers/gen_json.py
|
Python
|
gpl-2.0
| 653
| 0.018377
|
#!/usr/bin/python
import json
import sys
import data_processing as dp
from mython import NumpyToListEncoder
from subprocess import check_output
from imp import reload
reload(dp)
# Neat way of calling:
# find . -name '*_metadata.json' > rootlist
# python gen_json.py $(< rootlist) &> gen_json.out
files = sys.argv[1:]
roots = [f.replace('_metadata.json','') for f in files]
for root in roots:
data = dp.read_dir_autogen(root,gosling='/home/busemey2/bin/gosling')
  loc = '/'.join(root.split('/')[:-1])
outfn = loc+"/record.json"
print("Outputting to %s..."%outfn)
with open(outfn,'w') as outf:
json.dump(data,outf,cls=NumpyToListEncoder)
|
AndyLamperski/lemkelcp
|
lemkelcp/lemkelcp.py
|
Python
|
mit
| 5,431
| 0.02228
|
import numpy as np
class lemketableau:
def __init__(self,M,q,maxIter = 100):
n = len(q)
self.T = np.hstack((np.eye(n),-M,-np.ones((n,1)),q.reshape((n,1))))
self.n = n
self.wPos = np.arange(n)
self.zPos = np.arange(n,2*n)
self.W = 0
self.Z = 1
self.Y = 2
self.Q = 3
TbInd = np.vstack((self.W*np.ones(n,dtype=int),
np.arange(n,dtype=int)))
TnbInd = np.vstack((self.Z*np.ones(n,dtype=int),
np.arange(n,dtype=int)))
DriveInd = np.array([[self.Y],[0]])
QInd = np.array([[self.Q],[0]])
self.Tind = np.hstack((TbInd,TnbInd,DriveInd,QInd))
self.maxIter = maxIter
def lemkeAlgorithm(self):
initVal = self.initialize()
if not initVal:
return np.zeros(self.n),0,'Solution Found'
for k in range(self.maxIter):
stepVal = self.step()
if self.Tind[0,-2] == self.Y:
# Solution Found
z = self.extractSolution()
return z,0,'Solution Found'
elif not stepVal:
return None,1,'Secondary ray found'
return None,2,'Max Iterations Exceeded'
def initialize(self):
q = self.T[:,-1]
minQ = np.min(q)
if minQ < 0:
ind = np.argmin(q)
self.clearDriverColumn(ind)
self.pivot(ind)
return True
else:
return False
def step(self):
q = self.T[:,-1]
a = self.T[:,-2]
ind = np.nan
minRatio = np.inf
for i in range(self.n):
if a[i] > 0:
newRatio = q[i] / a[i]
if newRatio < minRatio:
ind = i
minRatio = newRatio
if minRatio < np.inf:
self.clearDriverColumn(ind)
self.pivot(ind)
return True
else:
return False
def extractSolution(self):
z = np.zeros(self.n)
q = self.T[:,-1]
for i in range(self.n):
if self.Tind[0,i] == self.Z:
z[self.Tind[1,i]] = q[i]
return z
def partnerPos(self,pos):
v,ind = self.Tind[:,pos]
if v == self.W:
            ppos = self.zPos[ind]
elif v == self.Z:
ppos = self.wPos[ind]
else:
ppos = None
return ppos
def pivot(self,pos):
ppos = self.partnerPos(pos)
if ppos is not None:
self.swapColumns(pos,ppos)
self.swapColumns(pos,-2)
return True
else:
self.swapColumns(pos,-2)
return False
def swapMatColumns(self,M,i,j):
Mi = np.array(M[:,i],copy=True)
Mj = np.array(M[:,j],copy=True)
M[:,i] = Mj
M[:,j] = Mi
return M
def swapPos(self,v,ind,newPos):
if v == self.W:
self.wPos[ind] = newPos % (2*self.n+2)
elif v == self.Z:
self.zPos[ind] = newPos % (2*self.n+2)
def swapColumns(self,i,j):
iInd = self.Tind[:,i]
jInd = self.Tind[:,j]
v,ind = iInd
self.swapPos(v,ind,j)
v,ind = jInd
self.swapPos(v,ind,i)
self.Tind = self.swapMatColumns(self.Tind,i,j)
self.T = self.swapMatColumns(self.T,i,j)
def clearDriverColumn(self,ind):
a = self.T[ind,-2]
self.T[ind] /= a
for i in range(self.n):
if i != ind:
b = self.T[i,-2]
self.T[i] -= b * self.T[ind]
def ind2str(self,indvec):
v,pos = indvec
if v == self.W:
s = 'w%d' % pos
elif v == self.Z:
s = 'z%d' % pos
elif v == self.Y:
s = 'y'
else:
s = 'q'
return s
def indexStringArray(self):
indstr = np.array([self.ind2str(indvec) for indvec in self.Tind.T],dtype=object)
return indstr
def indexedTableau(self):
indstr = self.indexStringArray()
return np.vstack((indstr,self.T))
def __repr__(self):
IT = self.indexedTableau()
return IT.__repr__()
def __str__(self):
IT = self.indexedTableau()
return IT.__str__()
def lemkelcp(M,q,maxIter=100):
"""
sol = lemkelcp(M,q,maxIter)
    Uses Lemke's algorithm to compute a solution to the
linear complementarity problem:
Mz + q >= 0
z >= 0
z'(Mz+q) = 0
The inputs are given by:
M - an nxn numpy array
q - a length n numpy array
maxIter - an optional number of pivot iterations. Set to 100 by default
The solution is a tuple of the form:
z,exit_code,exit_string = sol
    The entries are summarized in the table below:
|z | exit_code | exit_string |
-----------------------------------------------------------
| solution to LCP | 0 | 'Solution Found' |
| None | 1 | 'Secondary ray found' |
| None | 2 | 'Max Iterations Exceeded' |
"""
tableau = lemketableau(M,q,maxIter)
return tableau.lemkeAlgorithm()
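# Hedged usage sketch (not part of the original module): solve a small LCP with a
# positive definite M, for which Lemke's algorithm is expected to terminate with a
# solution. The matrix, vector and expected behaviour here are illustrative only.
if __name__ == '__main__':
    M = np.array([[2., 1.],
                  [1., 2.]])
    q = np.array([-1., -2.])
    z, exit_code, exit_string = lemkelcp(M, q)
    # On success, z >= 0, M z + q >= 0 and z'(M z + q) = 0 up to round-off.
    print(z, exit_code, exit_string)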
|
niubileme/shadowsocks-manyuser
|
shadowsocks/config.py
|
Python
|
apache-2.0
| 805
| 0.016149
|
import logging
#Config
MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_USER = 'root'
MYSQL_PASS = 'oppzk'
MYSQL_DB = 'SSMM'
MANAGE_PASS = 'passwd'
#if you want to manage from another server, set this value to a global ip
MANAGE_BIND_IP = '127.0.0.1'
#make sure this port is idle
MANAGE_PORT = 10001
PANEL_VERSION = 'V2' # V2 or V3. V2 does not support the API
API_URL = 'http://domain/mu'
API_PASS = 'mupass'
NODE_ID = '1'
CHECKTIME = 30 # check service time
SYNCTIME = 300 # sync traffic time
RESETTIME = 300 # reset traffic time
#BIND IP
#if you want to bind both ipv4 and ipv6, use '[::]'
#if you want to bind all ipv4 addresses, use '0.0.0.0'
#if you want to bind only one specific address, use e.g. '4.4.4.4'
SS_BIND_IP = '0.0.0.0'
SS_METHOD = 'rc4-md5'
#LOG CONFIG
LOG_ENABLE = False
LOG_LEVEL = logging.DEBUG
LOG_FILE = '/var/log/shadowsocks.log'
|
shenqicang/openmc
|
tests/test_particle_restart_eigval/results.py
|
Python
|
mit
| 1,055
| 0.008531
|
#!/usr/bin/env python
import sys
# import particle restart
sys.path.append('../../src/utils')
import particle_restart as pr
# read in particle restart file
if len(sys.argv) > 1:
p = pr.Particle(sys.argv[1])
else:
p = pr.Particle('particle_12_842.binary')
# set up output string
outstr = ''
# write out properties
outstr += 'current batch:\n'
outstr += "{0:12.6E}\n".format(p.current_batch)
outstr += 'current gen:\n'
outstr += "{0:12.6E}\n".format(p.current_gen)
outstr += 'particle id:\n'
outstr += "{0:12.6E}\n".format(p.id)
outstr += 'run mode:\n'
outstr += "{0:12.6E}\n".format(p.run_mode)
outstr += 'particle weight:\n'
outstr += "{0:12.6E}\n".format(p.weight)
outstr += 'particle energy:\n'
outstr += "{0:12.6E}\n".format(p.energy)
outstr += 'particle xyz:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.xyz[0],p.xyz[1],p.xyz[2])
outstr += 'particle uvw:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.uvw[0],p.uvw[1],p.uvw[2])
# write results to file
with open('results_test.dat','w') as fh:
fh.write(outstr)
|
codesyntax/Products.zorionagurra
|
Products/zorionagurra/browser/portlet.py
|
Python
|
gpl-2.0
| 1,398
| 0.004292
|
__version__ = '$Id$'
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from DateTime import DateTime
class LastZorionagurrak(BrowserView):
def getLastZorionagurrak(self, num=5):
context = aq_inner(self.context)
today = DateTime().earliestTime()
todayend = DateTime().latestTime()
tomorrow = today + 1
pcal = getToolByName(context, 'portal_catalog')
todaybrains = pcal(portal_type='Zorionagurra',
review_state='published',
getDate={'query':(today, todayend),
'range':'min:max'},
sort_on='getDate',
sort_limit=num)
todaybrainnumber = len(todaybrains)
if todaybrainnumber >= num:
return todaybrains
else:
            tomorrowbrainnumber = num - todaybrainnumber
            tomorrowbrains = pcal(portal_type='Zorionagurra',
review_state='published',
getDate={'query':(todayend,),
'range':'min'},
sort_on='getDate',
sort_limit=tomorrowbrainnumber)
return todaybrains + tomorrowbrains
|
luzi82/HiSocial
|
HiPubIface/test/000_basic/src/hipubiface_test_basic.py
|
Python
|
gpl-3.0
| 4,370
| 0.019908
|
import unittest
import os
import hiframe
import hipubiface_test_basic_plugin._hiframe
MY_ABSOLUTE_PATH = os.path.abspath(__file__)
MY_ABSOLUTE_PARENT = os.path.dirname(MY_ABSOLUTE_PATH)
HIPUBIFACE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(MY_ABSOLUTE_PARENT)))
HIPUBIFACE_SRC_PATH = HIPUBIFACE_PATH+"/src"
class HipubifaceTestBasic(unittest.TestCase):
# def test_guest_ping_pass(self):
# cv = [["00000000", "ffffffff"],
# ["00000001", "fffffffe"],
# ["a7845763", "587ba89c"],
# ["8da581bf", "725a7e40"],
# ["0da581bf", "f25a7e40"]
# ]
# for c in cv :
# r = hipubiface.call("base", "guest_ping", {"txt_value":c[0].upper()})
# self.check_ok(r)
# self.assertEqual(r["type"],"value")
# self.assertEqual(r["value"],c[1].lower())
#
# r = hipubiface.call("base", "guest_ping", {"txt_value":c[0].lower()})
# self.check_ok(r)
# self.assertEqual(r["type"],"value")
# self.assertEqual(r["value"],c[1].lower())
#
# r = hipubiface.call("base", "guest_ping", {"txt_value":c[1].upper()})
# self.check_ok(r)
# self.assertEqual(r["type"],"value")
#            self.assertEqual(r["value"],c[0].lower())
#
# r = hipubiface.call("base", "guest_ping", {"txt_value":c[1].lower()})
# self.check_ok(r)
# self.assertEqual(r["type"],"value")
# self.assertEqual(r["value"],c[0].lower())
#
# def test_guest_ping_fail(self):
# cv = ["asdf",
# "0000",
# "1234",
# "dddd",
# "1234567890",
# "-9999999",
#              "-99999999",
# "9999999",
# "999999999"
# ]
# for c in cv :
# r = hipubiface.call("base", "guest_ping", {"txt_value":c})
# self.assertTrue(r != None)
# self.assertTrue(isinstance(r,dict))
# self.assertEqual(r[hipubiface.RESULT_KEY], hipubiface.RESULT_VALUE_FAIL_TXT)
# self.assertEqual(r["fail_reason"],"bad value")
# def test_list_cmd(self):
# ret = hipubiface._hiframe.command_guest_list_cmd()
# self.check_ok(ret)
# self.assertEqual(ret["type"],"value")
# self.assertTrue("value" in ret)
# self.assertTrue("hs_plugin" in ret["value"])
# self.assertTrue("guest_list_cmd" in ret["value"]["hs_plugin"])
# self.assertEqual(ret["value"]["hs_plugin"]["guest_list_cmd"],[])
def test_call_noarg(self):
hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
hf.start()
me=hf.plugin_D["hipubiface"]
ret = me.call("hipubiface_test_basic_plugin","helloworld")
self.assertEqual(ret, "helloworld")
def test_call_arg(self):
hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
hf.start()
me=hf.plugin_D["hipubiface"]
ret = me.call("hipubiface_test_basic_plugin","uppercase",{"txt_a":"asdf"})
self.assertEqual(ret, "ASDF")
def test_call_exception(self):
hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
hf.start()
me=hf.plugin_D["hipubiface"]
try:
me.call("hipubiface_test_basic_plugin","hello_exception")
self.fail()
except hipubiface_test_basic_plugin._hiframe.TestException:
pass
except:
self.fail()
# def test_hellofile(self):
# ret = hipubiface.call("hipubiface_test_basic_plugin","hellofile")
# self.check_ok(ret)
# self.assertEqual(ret["type"], "file")
# self.assertEqual(ret["file_type"], "local")
# self.assertEqual(ret["mime"], "text/plain; charset=us-ascii")
# self.assertTrue(ret["file_name"].endswith("/test/res/test0.torrent.txt"))
#
# def test_hellofile2(self):
# ret = hipubiface.call("hipubiface_test_basic_plugin","hellofile2")
# self.check_ok(ret)
# self.assertEqual(ret["type"], "file")
# self.assertEqual(ret["file_type"], "local")
# self.assertTrue(not ("mime" in ret))
# self.assertTrue(ret["file_name"].endswith("/test/res/test0.torrent.txt"))
|
scoky/pytools
|
curve/ks_test.py
|
Python
|
mit
| 2,158
| 0.008341
|
#!/usr/bin/env python
import os
import sys
import argparse
import traceback
sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir))
from toollib.group import Group,UnsortedInputGrouper
import scipy.stats as ss
class KSGroup(Group):
def __init__(self, tup):
super(KSGroup, self).__init__(tup)
self.samples = []
def add(self, chunks):
self.samples.append(float(chunks[args.column]))
def done(self):
        jdelim = args.delimiter if args.delimiter != None else ' '
if len(self.tup) > 0:
args.outfile.write(jdelim.join(self.tup) + jdelim)
args.outfile.write(jdelim.join(map(str, ss.kstest(self.samples, args.distf, args=args.params))) + '\n')
if __name__ == "__main__":
# set up command line args
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='Compare the request distributions of all clients')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-s', '--source', default='scipy.stats', choices=['scipy.stats', 'lambda'], help='source of the distribution to fit')
parser.add_argument('-i', '--dist', default='paretoLomax')
parser.add_argument('-p', '--params', default='', help='initial parameters')
parser.add_argument('-c', '--column', type=int, default=0)
parser.add_argument('-g', '--group', nargs='+', type=int, default=[])
parser.add_argument('-d', '--delimiter', default=None)
args = parser.parse_args()
args.params = map(float, args.params.split(args.delimiter))
if args.source == 'scipy.stats':
args.source = ss
else:
args.source = None
if args.source:
mod = args.source
for c in args.dist.split('.'):
mod = getattr(mod, c)
args.distf = mod
else:
args.distf = eval(args.dist)
grouper = UnsortedInputGrouper(args.infile, KSGroup, args.group, args.delimiter)
grouper.group()
|
morelab/labman_ud
|
labman_ud/entities/news/models.py
|
Python
|
gpl-3.0
| 3,999
| 0.003251
|
from django.db import models
from django.template.defaultfilters import slugify
from datetime import datetime
from redactor.fields import RedactorField
from management.post_tweet import post_tweet
### News
####################################################################################################
class News(models.Model):
post_tweet = models.BooleanField(
default=False,
)
tweet_cc = models.CharField(
max_length=70,
blank=True,
null=True,
)
title = models.CharField(
max_length=250,
)
slug = models.SlugField(
max_length=250,
blank=True,
unique=True,
)
content = RedactorField()
created = models.DateTimeField(
default=datetime.now,
blank=True,
null=True,
)
city = models.ForeignKey(
'utils.City',
blank=True,
null=True,
)
country = models.ForeignKey(
'utils.Country',
blank=True,
null=True,
)
tags = models.ManyToManyField(
'utils.Tag',
through='NewsTag',
related_name='news',
)
projects = models.ManyToManyField(
'projects.Project',
through='ProjectRelatedToNews',
related_name='news',
)
publications = models.ManyToManyField(
'publications.Publication',
through='PublicationRelatedToNews',
related_name='news',
)
persons = models.ManyToManyField(
'persons.Person',
through='PersonRelatedToNews',
related_name='news',
)
class Meta:
ordering = ('-created',)
verbose_name = u'News piece'
verbose_name_plural = u'News pieces'
def __unicode__(self):
return u'%s' % self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
if self.post_tweet:
post_tweet(self)
self.content = self.content.replace("<img src=", "<img class='img-responsive' src=")
super(News, self).save(*args, **kwargs)
### NewsTag
####################################################################################################
class NewsTag(models.Model):
tag = models.ForeignKey('utils.Tag')
news = models.ForeignKey('News')
class Meta:
verbose_name = u'News Tag'
verbose_name_plural = u'News Tags'
### ProjectRelatedToNews
####################################################################################################
class ProjectRelatedToNews(models.Model):
project = models.ForeignKey('projects.Project')
news = models.ForeignKey('News')
class Meta:
verbose_name = u'Project related to News piece'
verbose_name_plural = u'Projects related to News pieces'
### PublicationRelatedToNews
####################################################################################################
class PublicationRelatedToNews(models.Model):
publication = models.ForeignKey('publications.Publication')
news = models.ForeignKey('News')
class Meta:
verbose_name = u'Publication related to News piece'
verbose_name_plural = u'Publications related to News pieces'
### PersonRelatedToNews
####################################################################################################
class PersonRelatedToNews(models.Model):
    person = models.ForeignKey('persons.Person')
    news = models.ForeignKey('News')
    class Meta:
        verbose_name = u'Person related to News piece'
verbose_name_plural = u'People related to News pieces'
### EventRelatedToNews
####################################################################################################
class EventRelatedToNews(models.Model):
event = models.ForeignKey('events.Event')
news = models.ForeignKey('News')
class Meta:
verbose_name = u'Event related to News piece'
verbose_name_plural = u'Events related to News pieces'
|
ZzCalvinzZ/picturepay
|
picture/views.py
|
Python
|
mit
| 5,543
| 0.027242
|
from PIL import Image
import stripe
import datetime
from django.shortcuts import render, redirect
from django.views.generic import TemplateView, View, FormView
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.conf import settings
from paypal.standard.forms import PayPalPaymentsForm
from picture.models import Picture, Settings, Pixel, PaymentNote
from picture.forms import PaymentNoteForm
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received, invalid_ipn_received, payment_was_flagged
# Create your views here.
class PictureIndexView(FormView):
template_name = 'picture/index.html'
form_class = PaymentNoteForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['picture'] = Settings.objects.first().picture
context['random'] = datetime.datetime.now()
context['payment_notes'] = [{
'name': note.name,
'url': note.url,
'number': note.number,
'pixels': [{
'r': pixel.r,
'g': pixel.g,
'b': pixel.b,
} for pixel in note.pixels.all()[:50]]
} for note in PaymentNote.objects.filter(picture=self.picture).order_by('-number')]
return context
def form_valid(self, form):
note = form.save(commit=False)
self.request.session['payment_note'] = {
'name': note.name,
'url': note.url,
'number': note.number,
}
return super().form_valid(form)
def dispatch(self, request, *args, **kwargs):
self.picture = Settings.objects.first().picture
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['picture'] = self.picture
return kwargs
def get_success_url(self):
if getattr(settings,'NO_PAYMENTS', False) == True:
create_payment_note(self.request.session['payment_note'])
return reverse('picture-payment-success')
else:
return reverse('picture-payment')
class PaymentView(TemplateView):
template_name = 'picture/payment.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['picture'] = Settings.objects.first().picture
context['paypal_form'] = self.paypal_form
context['stripe'] = self.stripe_options
context['amount'] = self.request.session.get('payment_note').get('number')
return context
    @method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
self.picture = Settings.objects.first().picture
business = settings.PAYPAL_EMAIL
paypal_options = {
"business
|
": business,
"amount": request.session.get('payment_note').get('number'),
"invoice": request.session.get('payment_note').get('url'),
"custom": request.session.get('payment_note').get('name'),
"item_name": "Pixel Reveal",
# "invoice": "unique-invoice-id",
"notify_url": request.build_absolute_uri(reverse('paypal-ipn')),
"return_url": request.build_absolute_uri(reverse('picture-paypal-payment-success')),
"cancel_return": request.build_absolute_uri(reverse('picture-index')),
}
self.paypal_form = PayPalPaymentsForm(initial=paypal_options)
#STRIPE stuff
self.stripe_options = {
'p_key': settings.STRIPE_PUBLISH,
'amount': request.session.get('payment_note').get('number') * 100,
'name': 'Calvin Collins',
'description': 'Pixel Reveal',
}
return super().dispatch(request, *args, **kwargs)
class PaymentSuccessView(TemplateView):
template_name = 'picture/payment_success.html'
class PaypalPaymentSuccessView(TemplateView):
template_name = 'picture/paypal_payment_success.html'
class PaymentErrorView(TemplateView):
template_name = 'picture/payment_error.html'
class StripeView(View):
def post(self, request, *args, **kwargs):
self.picture = Settings.objects.first().picture
stripe.api_key = settings.STRIPE_SECRET
token = request.POST['stripeToken']
try:
charge = stripe.Charge.create(
amount = request.session.get('payment_note').get('number') * 100,
currency="usd",
source=token,
description="Pixel Reveal"
)
except stripe.error.CardError as e:
# The card has been declined
return redirect(reverse('picture-payment-error'))
else:
create_payment_note(self.request.session['payment_note'])
return redirect(reverse('picture-payment-success'))
def create_payment_note(note_info):
form = PaymentNoteForm(note_info, picture=Settings.objects.first().picture)
if form.is_valid():
note = form.save(commit=False)
note.picture = Settings.objects.first().picture
note.save()
coords = note.picture.uncover_line(note.number)
img = note.picture.pillow_image.convert('RGB')
for i, coord in enumerate(coords):
if i > 50:
break
r, g, b = img.getpixel((coord['x'], coord['y']))
note.pixels.add(Pixel.objects.create(
x = coord['x'],
y = coord['y'],
r = r,
g = g,
b = b
))
note.save()
def handle_payment(sender, **kwargs):
ipn_obj = sender
if ipn_obj.payment_status == ST_PP_COMPLETED:
# WARNING !
# Check that the receiver email is the same we previously
        # set on the business field. (The user could tamper with those
        # fields on the payment form before sending it to PayPal.)
if ipn_obj.receiver_email != settings.PAYPAL_EMAIL:
# Not a valid payment
return
note_info = {
'name': ipn_obj.custom,
'url': ipn_obj.invoice,
'number': ipn_obj.mc_gross,
}
create_payment_note(note_info)
valid_ipn_received.connect(handle_payment)
|
crc5464/groupme-bot
|
src/markov.py
|
Python
|
gpl-3.0
| 6,551
| 0.003817
|
import startbot, stats, os, re, random, sys
import utils
MARKOV_LENGTH = 2
#majority of the code taken from https://github.com/hrs/markov-sentence-generator
#changes made: allowed it to train on text taken directly from messages
#changed it to be encompassed in a class structure. Made minor changes to make it Py3.X compatible
class markov():
# These mappings can get fairly large -- they're stored globally to
# save copying time.
# (tuple of words) -> {dict: word -> number of times the word appears following the tuple}
# Example entry:
# ('eyes', 'turned') => {'to': 2.0, 'from': 1.0}
# Used briefly while first constructing the normalized mapping
tempMapping = {}
# (tuple of words) -> {dict: word -> *normalized* number of times the word appears following the tuple}
# Example entry:
# ('eyes', 'turned') => {'to': 0.66666666, 'from': 0.33333333}
mapping = {}
# Contains the set of words that can start sentences
starts = []
m_botName = None
def __init__(self, groupObj, groupName, bot):
self.m_botName = bot.name
self.train(groupObj, groupName)
def train(self, groupObj, groupName):
stats.getAllText(groupObj, groupName, self.m_botName)
self.buildMapping(self.wordlist('..{1}cache{1}messages-{0}.txt'.format(groupName, os.path.sep)), MARKOV_LENGTH)
utils.showOutput("bot successfully trained.")
def talk(self, message, bot, groupName):
try:
bot.post(self.genSentence2(message, MARKOV_LENGTH))
except:
bot.post(self.genSentence(MARKOV_LENGTH))
# We want to be able to compare words independent of their capitalization.
def fixCaps(self, word):
# Ex: "FOO" -> "foo"
if word.isupper() and word != "I":
word = word.lower()
# Ex: "LaTeX" => "Latex"
elif word [0].isupper():
word = word.lower().capitalize()
# Ex: "wOOt" -> "woot"
else:
word = word.lower()
return word
# Tuples can be hashed; lists can't. We need hashable values for dict keys.
# This looks like a hack (and it is, a little) but in practice it doesn't
# affect processing time too negatively.
def toHashKey(self, lst):
return tuple(lst)
# Returns the contents of the file, split into a list of words and
# (some) punctuation.
def wordlist(self, filename):
f = open(filename, 'r', encoding='utf-8')
wordlist = [self.fixCaps(w) for w in re.findall(r"[\w']+|[.,!?;]", f.read())]
f.close()
return wordlist
# Self-explanatory -- adds "word" to the "tempMapping" dict under "history".
# tempMapping (and mapping) both match each word to a list of possible next
# words.
# Given history = ["the", "rain", "in"] and word = "Spain", we add "Spain" to
# the entries for ["the", "rain", "in"], ["rain", "in"], and ["in"].
def addItemToTempMapping(self, history, word):
while len(history) > 0:
first = self.toHashKey(history)
if first in self.tempMapping:
if word in self.tempMapping[first]:
self.tempMapping[first][word] += 1.0
else:
self.tempMapping[first][word] = 1.0
else:
self.tempMapping[first] = {}
self.tempMapping[first][word] = 1.0
history = history[1:]
# Building and normalizing the mapping.
def buildMapping(self, wordlist, markovLength):
self.starts.append(wordlist [0])
for i in range(1, len(wordlist) - 1):
if i <= markovLength:
history = wordlist[: i + 1]
else:
history = wordlist[i - markovLength + 1 : i + 1]
follow = wordlist[i + 1]
# if the last elt was a period, add the next word to the start list
if history[-1] == "." and follow not in ".,!?;":
self.starts.append(follow)
self.addItemToTempMapping(history, follow)
        # Normalize the values in tempMapping, put them into mapping
        for first, followset in self.tempMapping.items():
            total = sum(followset.values())
# Normalizing here:
self.mapping[first] = dict([(k, v / total) for k, v in followset.items()])
# Returns the next word in the sentence (chosen randomly),
# given the previous ones.
def next(self, prevList):
sum = 0.0
retval = ""
index = random.random()
# Shorten prevList until it's in mapping
while self.toHashKey(prevList) not in self.mapping:
prevList.pop(0)
# Get a random word from the mapping, given prevList
for k, v in self.mapping[self.toHashKey(prevList)].items():
sum += v
if sum >= index and retval == "":
retval = k
return retval
def genSentence2(self, message, markovLength): #attempts to use input sentence material to construct a sentence
# Start with a random "starting word" from the input message
splitmessage = message.lower().split()
splitmessage.remove('{0},'.format(self.m_botName.lower()))
if len(splitmessage) == 0:
curr = random.choice(self.starts)
else:
curr = random.choice(splitmessage)
sent = curr.capitalize()
prevList = [curr]
# Keep adding words until we hit a period
while (curr not in "."):
curr = self.next(prevList)
prevList.append(curr)
# if the prevList has gotten too long, trim it
if len(prevList) > markovLength:
prevList.pop(0)
if (curr not in ".,!?;"):
sent += " " # Add spaces between words (but not punctuation)
sent += curr
return sent
def genSentence(self, markovLength):
# Start with a random "starting word"
curr = random.choice(self.starts)
sent = curr.capitalize()
prevList = [curr]
# Keep adding words until we hit a period
while (curr not in "."):
curr = self.next(prevList)
prevList.append(curr)
# if the prevList has gotten too long, trim it
if len(prevList) > markovLength:
prevList.pop(0)
if (curr not in ".,!?;"):
sent += " " # Add spaces between words (but not punctuation)
sent += curr
return sent
|
CroceRossaItaliana/jorvik
|
survey/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 2,602
| 0.003459
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2018-12-04 15:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('anagrafica', '0049_auto_20181028_1639'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('required', models.BooleanField(default=True, verbose_name='Obbligatorio')),
],
options={
'verbose_name': 'Domanda',
'verbose_name_plural': 'Domande',
},
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('text', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Questionario di gradimento',
'verbose_name_plural': 'Questionari di gradimento',
},
),
migrations.CreateModel(
name='SurveyResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('response', models.TextField(blank=True, max_length=1000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Question')),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='anagrafica.Persona')),
],
options={
'verbose_name': "Risposta dell'utente",
'verbose_name_plural': 'Risposte degli utenti',
},
),
migrations.AddField(
model_name='question',
name='survey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey'),
),
]
|
michaupl/wordtester
|
src/tests/WordTableModelTests.py
|
Python
|
apache-2.0
| 2,079
| 0.005291
|
#-*- coding: utf-8 -*-
import unittest
from top.WordTableModel import WordTableModel
class WordTableModelTestsTestCase(unittest.TestCase):
def setUp(self):
self.model = WordTableModel()
self.model.load("dotestow.pkl")
def testLoading(self):
assert len(self.model.words) == 5, "incorrect number of loaded words " + \
"got: " + len(self.model.words) + ", but: 5 was expected"
list = []
for word in self.model.words:
list.append(word.word)
msg = "failed while loading the words with number: "
assert list[0] == "sibilant sound", msg + '0'
assert list[1] == "aberration", msg + '1'
assert list[2] == "acrid", msg + '2'
assert list[3] == "adjourn", msg + '3'
assert list[4] == "ambience", msg + '4'
def testSorting(self):
self.model.sortByWord()
assert self.model.words[0].word == "aberration", "incorrect sorting by word " + \
"got: " + self.model.words[0].word + ", but: 'aberration' was expected"
self.model.sortByDifficulty()
assert self.model.words[0].word == "adjourn", "incorrect sorting by word " + \
"got: " + self.model.words[0].word + ", but: 'adjourn' was expected"
self.model.reversedDiffSort = True
self.model.sortByDifficulty()
assert self.model.words[0].word == "ambience", "incorrect sorting by word " + \
"got: " + self.model.words[0].word + ", but: 'ambience' was expected"
    def testExport(self):
        self.model.exportWords("exportTest.txt")
        modelFh = open("dotestow.txt")
        testFh = open("exportTest.txt")
        modelText = modelFh.read()
testText = testFh.read()
assert modelText == testText, "incorrect export"
modelFh.close()
testFh.close()
import os
os.remove("exportTest.txt")
def testImport(self):
self.model.words.clearWords()
self.model.importWords("dotestow.txt")
self.testLoading()
if __name__ == '__main__':
unittest.main()
|
mikar/projects
|
various/word_possibilities.py
|
Python
|
mit
| 1,021
| 0.007835
|
#!/usr/bin/env python2
# Print out the 2^n possibilities of a word with the length n
import unittest
from itertools import product, permutations
def word_variations(s):
try:
if not len(s): return
lower, upper = s.lower(), s.upper()
except:
return
# Since number strings won't produce cartesian values with lower/upper,
# we use itertools.permutations.
if lower == upper:
pairs = permutations(lower)
else:
pairs = product(*zip(lower, upper))
result = {''.join(pair) for pair in pairs} # Using set literal notation.
print result, "\n", len(result)
return result
word_variations("abc")
class WordTest(unittest.TestCase):
def _test(self, s, expected):
result = word_variations(s)
self.assertEqual(len(result), expected)
def test_basecase(self):
self._test("hello", 32)
def test_int(self):
self._test("123", 6)
def test_empty(self):
self.assertEqual(word_variations(""), None)
|
defcube/django-south
|
south/tests/logic.py
|
Python
|
apache-2.0
| 32,253
| 0.002263
|
import unittest
from collections import deque
import datetime
import sys
import os
import StringIO
from south import exceptions
from south.migration import migrate_app
from south.migration.base import all_migrations, Migration, Migrations
from south.migration.utils import depends, dfs, flatten, get_app_label
from south.models import MigrationHistory
from south.tests import Monkeypatcher
from south.db import db
class TestBrokenMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"]
def test_broken_dependencies(self):
self.assertRaises(
exceptions.DependsOnUnmigratedApplication,
Migrations.calculate_dependencies,
force=True,
)
#depends_on_unknown = self.brokenapp['0002_depends_on_unknown']
#self.assertRaises(exceptions.DependsOnUnknownMigration,
# depends_on_unknown.dependencies)
#depends_on_higher = self.brokenapp['0003_depends_on_higher']
#self.assertRaises(exceptions.DependsOnHigherMigration,
# depends_on_higher.dependencies)
class TestMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def setUp(self):
super(TestMigration, self).setUp()
self.fakeapp = Migrations('fakeapp')
self.otherfakeapp = Migrations('otherfakeapp')
Migrations.calculate_dependencies(force=True)
def test_str(self):
migrations = [str(m) for m in self.fakeapp]
self.assertEqual(['fakeapp:0001_spam',
'fakeapp:0002_eggs',
'fakeapp:0003_alter_spam'],
migrations)
def test_repr(self):
migrations = [repr(m) for m in self.fakeapp]
self.assertEqual(['<Migration: fakeapp:0001_spam>',
'<Migration: fakeapp:0002_eggs>',
'<Migration: fakeapp:0003_alter_spam>'],
migrations)
def test_app_label(self):
self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'],
[m.app_label() for m in self.fakeapp])
def test_name(self):
self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'],
[m.name() for m in self.fakeapp])
def test_full_name(self):
self.assertEqual(['fakeapp.migrations.0001_spam',
'fakeapp.migrations.0002_eggs',
'fakeapp.migrations.0003_alter_spam'],
[m.full_name() for m in self.fakeapp])
def test_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
self.assertEqual([M1, M2, M3],
[m.migration().Migration for m in self.fakeapp])
self.assertRaises(exceptions.UnknownMigration,
self.fakeapp['9999_unknown'].migration)
def test_previous(self):
self.assertEqual([None,
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']],
[m.previous() for m in self.fakeapp])
def test_dependencies(self):
"Test that the dependency detection works."
self.assertEqual([
set([]),
set([self.fakeapp['0001_spam']]),
set([self.fakeapp['0002_eggs']])
],
[m.dependencies for m in self.fakeapp],
)
self.assertEqual([
set([self.fakeapp['0001_spam']]),
set([self.otherfakeapp['0001_first']]),
set([
self.otherfakeapp['0002_second'],
self.fakeapp['0003_alter_spam'],
])
],
[m.dependencies for m in self.otherfakeapp],
)
def test_forwards_plan(self):
self.assertEqual([
[self.fakeapp['0001_spam']],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']
],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
]
],
[m.forwards_plan() for m in self.fakeapp],
)
self.assertEqual([
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
self.otherfakeapp['0003_third'],
]
],
[m.forwards_plan() for m in self.otherfakeapp],
)
def test_is_before(self):
F1 = self.fakeapp['0001_spam']
F2 = self.fakeapp['0002_eggs']
F3 = self.fakeapp['0003_alter_spam']
O1 = self.otherfakeapp['0001_first']
O2 = self.otherfakeapp['0002_second']
        O3 = self.otherfakeapp['0003_third']
        self.assertTrue(F1.is_before(F2))
self.assertTrue(F1.is_before(F3))
self.assertTrue(F2.is_before(F3))
self.assertEqual(O3.is_before(O1), False)
self.assertEqual(O3.is_before(O2), False)
self.assertEqual(O2.is_before(O2), False)
self.assertEqual(O2.is_before(O1), False)
self.assertEqual(F2.is_before(O1), None)
self.assertEqual(F2.is_before(O2), None)
self.assertEqual(F2.is_before(O3), None)
class TestMigrationDependencies(Monkeypatcher):
installed_apps = ['deps_a', 'deps_b', 'deps_c']
def setUp(self):
super(TestMigrationDependencies, self).setUp()
self.deps_a = Migrations('deps_a')
self.deps_b = Migrations('deps_b')
self.deps_c = Migrations('deps_c')
Migrations.calculate_dependencies(force=True)
def test_dependencies(self):
self.assertEqual(
[
set([]),
set([self.deps_a['0001_a']]),
set([self.deps_a['0002_a']]),
set([
self.deps_a['0003_a'],
self.deps_b['0003_b'],
]),
set([self.deps_a['0004_a']]),
],
[m.dependencies for m in self.deps_a],
)
self.assertEqual(
[
set([]),
set([
self.deps_b['0001_b'],
self.deps_a['0002_a']
]),
set([
self.deps_b['0002_b'],
self.deps_a['0003_a']
]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b']]),
],
[m.dependencies for m in self.deps_b],
)
self.assertEqual(
[
set([]),
set([self.deps_c['0001_c']]),
set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([
self.deps_c['0004_c'],
self.deps_a['0002_a']
]),
],
[m.dependencies for m in self.deps_c],
)
def test_dependents(self):
self.assertEqual([set([self.deps_a['0002_a']]),
set([self.deps_c['0005_c'],
self.deps_b['0002_b'],
self.deps_a['0003_a']]),
set([self.deps_b[
|
yephper/django
|
tests/syndication_tests/feeds.py
|
Python
|
bsd-3-clause
| 6,128
| 0.000653
|
from __future__ import unicode_literals
from django.contrib.syndication import views
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone
from .models import Article, Entry
class TestRss2Feed(views.Feed):
title = 'My blog'
description = 'A more thorough description of my blog.'
link = '/blog/'
feed_guid = '/foo/bar/1234'
author_name = 'Sally Smith'
author_email = '[email protected]'
author_link = 'http://www.example.com/'
categories = ('python', 'django')
feed_copyright = 'Copyright (c) 2007, Sally Smith'
ttl = 600
def items(self):
return Entry.objects.all()
def item_description(self, item):
return "Overridden description: %s" % item
def item_pubdate(self, item):
return item.published
def item_updateddate(self, item):
return item.updated
item_author_name = 'Sally Smith'
item_author_email = '[email protected]'
item_author_link = 'http://www.example.com/'
item_categories = ('python', 'testing')
item_copyright = 'Copyright (c) 2007, Sally Smith'
class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed):
def item_guid_is_permalink(self, item):
return True
class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed):
def item_guid(self, item):
return str(item.pk)
def item_guid_is_permalink(self, item):
return False
class TestRss091Feed(TestRss2Feed):
feed_type = feedgenerator.RssUserland091Feed
class TestNoPubdateFeed(views.Feed):
title = 'Test feed'
link = '/feed/'
def items(self):
return Entry.objects.all()
class TestAtomFeed(TestRss2Feed):
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
class TestLatestFeed(TestRss2Feed):
"""
A feed where the latest entry date is an `updated` element.
"""
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
def items(self):
return Entry.objects.exclude(pk=5)
class ArticlesFeed(TestRss2Feed):
"""
A feed to test no link being defined. Articles have no get_absolute_url()
method, and item_link() is not defined.
"""
def items(self):
return Article.objects.all()
class TestSingleEnclosureRSSFeed(TestRss2Feed):
"""
A feed to test that RSS feeds work with a single enclosure.
"""
def item_enclosure_url(self, item):
return 'http://example.com'
def item_enclosure_size(self, item):
return 0
def item_mime_type(self, item):
return 'image/png'
class TestMultipleEnclosureRSSFeed(TestRss2Feed):
"""
A feed to test that RSS feeds raise an exception with multiple enclosures.
"""
def item_enclosures(self, item):
return [
            feedgenerator.Enclosure('http://example.com/hello.png', 0, 'image/png'),
            feedgenerator.Enclosure('http://example.com/goodbye.png', 0, 'image/png'),
        ]
class TemplateFeed(TestRss2Feed):
"""
A feed to test defining item titles and descriptions with templates.
"""
title_template = 'syndication/title.html'
description_template = 'syndication/description.html'
# Defining a template overrides any item_title definition
def item_title(self):
return "Not in a template"
class TemplateContextFeed(TestRss2Feed):
"""
A feed to test custom context data in templates for title or description.
"""
title_template = 'syndication/title_context.html'
description_template = 'syndication/description_context.html'
def get_context_data(self, **kwargs):
context = super(TemplateContextFeed, self).get_context_data(**kwargs)
context['foo'] = 'bar'
return context
class NaiveDatesFeed(TestAtomFeed):
"""
A feed with naive (non-timezone-aware) dates.
"""
def item_pubdate(self, item):
return item.published
class TZAwareDatesFeed(TestAtomFeed):
"""
A feed with timezone-aware dates.
"""
def item_pubdate(self, item):
# Provide a weird offset so that the test can know it's getting this
        # specific offset and not accidentally getting one from
# settings.TIME_ZONE.
return item.published.replace(tzinfo=get_fixed_timezone(42))
class TestFeedUrlFeed(TestAtomFeed):
feed_url = 'http://example.com/customfeedurl/'
class MyCustomAtom1Feed(feedgenerator.Atom1Feed):
"""
Test of a custom feed generator class.
"""
def root_attributes(self):
attrs = super(MyCustomAtom1Feed, self).root_attributes()
attrs['django'] = 'rocks'
return attrs
def add_root_elements(self, handler):
super(MyCustomAtom1Feed, self).add_root_elements(handler)
handler.addQuickElement('spam', 'eggs')
def item_attributes(self, item):
attrs = super(MyCustomAtom1Feed, self).item_attributes(item)
attrs['bacon'] = 'yum'
return attrs
def add_item_elements(self, handler, item):
super(MyCustomAtom1Feed, self).add_item_elements(handler, item)
handler.addQuickElement('ministry', 'silly walks')
class TestCustomFeed(TestAtomFeed):
feed_type = MyCustomAtom1Feed
class TestSingleEnclosureAtomFeed(TestAtomFeed):
"""
A feed to test that Atom feeds work with a single enclosure.
"""
def item_enclosure_url(self, item):
return 'http://example.com'
def item_enclosure_size(self, item):
return 0
def item_mime_type(self, item):
return 'image/png'
class TestMultipleEnclosureAtomFeed(TestAtomFeed):
"""
A feed to test that Atom feeds work with multiple enclosures.
"""
def item_enclosures(self, item):
return [
feedgenerator.Enclosure('http://example.com/hello.png', '0', 'image/png'),
feedgenerator.Enclosure('http://example.com/goodbye.png', '0', 'image/png'),
]
|
inductivekickback/ev3
|
ev3/direct_command.py
|
Python
|
mit
| 72,705
| 0.011361
|
"""A simple interface for executing bytecodes over a Bluetooth serial port.
From the lms2012 source code documentation:
Beside running user programs the VM is able to execute direct commands from
the Communication Module. In fact direct commands are small programs that
consist of regular byte codes and they are executed in parallel with a running
user program. Special care MUST be taken when writing direct commands because
the decision until now is NOT to restrict the use of "dangerous" codes and
constructions (loops in a direct command are allowed).
If a new direct command from the same source is going to be executed an actual
running direct command is terminated.
Because of a small header objects are limited to one VMTHREAD only - SUBCALLs
and BLOCKs are, of course, not possible. This header contains information about
the number of global variables (for response), number of local variables, and
command size.
Direct commands that have data responses can place the data in the global
variable space. The global variable space is equal to the communication
response buffer. The composition of the direct command defines at which
offset the result is placed (global variable 0 is placed at offset 0 in
the buffer).
Offsets in the response buffer (global variables) must be aligned (i.e. 32bit
variable offsets are divisible by 4, 16bit variable offsets are divisible by 2).
All multi-byte words are little endian.
Direct Command bytes:
------------------------------
Byte 0 - 1: Command size
Byte 2 - 3: Message counter
Byte 4: CommandType
Byte 5 - 6: Number of global and local variables (compressed).
Byte 6 Byte 5
76543210 76543210
-------- --------
llllllgg gggggggg
gg gggggggg Global variables [0..MAX_COMMAND_GLOBALS]
llllll Local variables [0..MAX_COMMAND_LOCALS]
Byte 7 - n: Byte codes
Direct Command response Bytes:
------------------------------
Byte 0 - 1: Reply size
Byte 2 - 3: Message counter
Byte 4: ReplyType
Byte 5 - n: Response buffer (global variable values)
"""
import ev3
import message
MAX_CMD_LEN = 1019 # The size of the brick's txBuf is 1024 bytes but
# the header requires 5 bytes.
MAX_STR_LEN = 255
MAX_VERSION_STR_LEN = 64
MAX_LOCAL_VARIABLE_BYTES = 0xFFFFFFFF
MAX_NAME_STR_LEN = 64
MOTOR_MIN_POWER = -100
MOTOR_MAX_POWER = 100
MOTOR_MIN_SPEED = -100
MOTOR_MAX_SPEED = 100
USB_CHAIN_LAYER_MASTER = 0
USB_CHAIN_LAYER_SLAVE = 1
MOTOR_MIN_RATIO = -200
MOTOR_MAX_RATIO = 200
MIN_VOLUME = 0
MAX_VOLUME = 100
LCD_HEIGHT_PIXELS = 128
LCD_WIDTH_PIXELS = 178
class DirectCommandError(Exception):
"""Subclass for reporting errors."""
pass
class CommandType(object):
"""Every System Command must be one of these two types."""
DIRECT_COMMAND_REPLY = 0x00
DIRECT_COMMAND_NO_REPLY = 0x80
class ReplyType(object):
"""Every reply to a System Command must be one of these two types."""
DIRECT_REPLY = 0x02
DIRECT_REPLY_ERROR = 0x04
class OutputPort(object):
"""These can be OR'd together to operate on multiple ports at once."""
PORT_A = 0x01
PORT_B = 0x02
PORT_C = 0x04
PORT_D = 0x08
ALL = (PORT_A | PORT_B | PORT_C | PORT_D)
class InputPort(object):
"""These can be OR'd together to operate on multiple ports at once."""
PORT_1 = 0x00
PORT_2 = 0x01
PORT_3 = 0x02
PORT_4 = 0x03
PORT_A = 0x10
PORT_B = 0x11
PORT_C = 0x12
PORT_D = 0x13
class StopType(object):
"""When an OutputPort is stopped it can be told to brake or coast."""
COAST = 0
BRAKE = 1
class PolarityType(object):
""""""
BACKWARD = -1
TOGGLE = 0
FORWARD = 1
class TouchMode(object):
""""""
TOUCH = 0
BUMPS = 1
class NXTLightMode(object):
""""""
REFLECT = 0
AMBIENT = 1
class NXTSoundMode(object):
""""""
DECIBELS = 0
ADJUSTED_DECIBLES = 1
class NXTColorMode(object):
""""""
REFLECTIVE = 0
AMBIENT = 1
COLOR = 2
GREEN = 3
BLUE = 4
RAW = 5
class NXTUltrasonicMode(object):
""""""
CM = 0
INCHES = 1
class NXTTemperatureMode(object):
""""""
CELSIUS = 0
FAHRENHEIT = 1
class MotorMode(object):
""""""
DEGREES = 0
ROTATIONS = 1
PERCENT = 2
class UltrasonicMode(object):
""""""
CM = 0
INCH = 1
LISTEN = 2
class GyroMode(object):
""""""
ANGLE = 0
RATE = 1
FAS = 2
G_AND_A = 3
class IRMode(object):
""""""
PROXIMITY = 0
SEEK = 1
REMOTE = 2
REMOTE_A = 3
SALT = 4
CALIBRATION = 5
class ColorMode(object):
""""""
RELECTIVE = 0
AMBIENT = 1
COLOR = 2
class ColorSensorColor(object):
"""These are the results that the EV3 color sensor can return when operating
in ColorMode.COLOR.
"""
NONE = 0
BLACK = 1
BLUE = 2
GREEN = 3
YELLOW = 4
RED = 5
WHITE = 6
BROWN = 7
class LEDPattern(object):
"""The brick user interface has several status LEDs."""
OFF = 0
GREEN = 1
RED = 2
ORANGE = 3
FLASHING_GREEN = 4
FLASHING_RED = 5
FLASHING_ORANGE = 6
GREEN_HEARTBEAT = 7
RED_HEARTBEAT = 8
ORANGE_HEARTBEAT = 9
class DeviceType(object):
"""These are the known device types.
NOTE: These have only been partially confirmed.
"""
NXT_TOUCH = 0x01
NXT_LIGHT = 0x02
NXT_SOUND = 0x03
NXT_COLOR = 0x04
NXT_ULTRASONIC = 0x05
NXT_TEMPERATURE = 0x06
TACHO = 0x07 # TYPE_TACHO in lms2012.h
MINI_TACHO = 0x08 # TYPE_MINITACHO in lms2012.h
NEW_TACHO = 0x09 # TYPE_NEWTACHO in lms2012.h
EV3_TOUCH = 0x10
EV3_COLOR = 0x1D
EV3_ULTRASONIC = 0x1E
EV3_GYROSCOPE = 0x20
EV3_INFRARED = 0x21
SENSOR_INITIALIZING = 0x7D
PORT_EMPTY = 0x7E
ERROR_PORT = 0x7F
UNKNOWN = 0xFF
class LCDColor(object):
"""The brick's LCD only displays two colors."""
BACKGROUND = 0
FOREGROUND = 1
class ButtonType(object):
"""The brick's user interface contains 6 buttons."""
NO_BUTTON = 0
UP_BUTTON = 1
ENTER_BUTTON = 2
DOWN_BUTTON = 3
RIGHT_BUTTON = 4
LEFT_BUTTON = 5
BACK_BUTTON = 6
ANY_BUTTON = 7
class MathType(object):
""""""
EXP = 1 # e^x r = expf(x)
MOD = 2 # Modulo r = fmod(x,y)
FLOOR = 3 # Floor r = floor(x)
CEIL = 4 # Ceiling r = ceil(x)
ROUND = 5 # Round r = round(x)
ABS = 6 # Absolute r = fabs(x)
NEGATE = 7 # Negate r = 0.0 - x
SQRT = 8 # Squareroot r = sqrt(x)
LOG = 9 # Log r = log10(x)
LN = 10 # Ln r = log(x)
SIN = 11
COS = 12
TAN = 13
ASIN = 14
ACOS = 15
ATAN = 16
MOD8 = 17 # Modulo DATA8 r = x % y
MOD16 = 18 # Modulo DATA16 r = x % y
MOD32 = 19 # Modulo DATA32 r = x % y
POW = 20 # Exponent r = powf(x,y)
TRUNC = 21 # Truncate r = (float)((int)(x * pow(y))) / pow(y)
class BrowserType(object):
""""""
BROWSE_FOLDERS = 0 # Browser for folders
BROWSE_FOLDS_FILES = 1 # Browser for folders and files
BROWSE_CACHE = 2 # Browser for cached / recent files
BROWSE_FILES = 3 # Browser for files
class Icon(object):
"""The icons on the brick are enumerated by value."""
ICON_NONE = -1
ICON_RUN = 0
ICON_FOLDER = 1
ICON_FOLDER2 = 2
ICON_USB = 3
ICON_SD
|
rgarcia-herrera/vectores
|
vectores.py
|
Python
|
gpl-3.0
| 446
| 0.006726
|
from vectores_oo import Vector
x = input('vector U componente X= ')
y = input('vector U componente Y= ')
U = Vector(x,y)
m = input('vector V magnitud= ')
a = input('vector V angulo= ')
V = Vector(m=m, a=a)
E = input('Escalar= ')
print "U=%s" % U
print "V=%s" % V
print 'UxE=%s' % U.x_escalar(E)
print 'VxE=%s' % V.x_escalar(E)
print 'U+V=%s' % U.Suma(V)
print 'U.V=%s' % U.ProductoPunto(V)
print '|UxV|=%s' % U.Modulo_ProductoCruz(V)
|
vtungn/HackaPanzer
|
Sprite.py
|
Python
|
mit
| 2,212
| 0.007233
|
# -*- coding: utf-8 -*-
import os
import pygame
from pygame.locals import *
class Sprite(pygame.sprite.Sprite):
def __init__(self,SpriteName):
pygame.sprite.Sprite.__init__(self)
self.Name = SpriteName
self.rect = 0
self.image = 0
def getRect(self):
return self.rect
def getImg(self):
return self.image
def load_image(self, name, colorkey=None):
#fullname = os.path.join('data', 'images')
fullname = name + '.png'
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', fullname
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
class spritesheet(object):
def __init__(self, filename):
try:
self.sheet = pygame.image.load(filename).convert()
except pygame.error, message:
print 'Unable to load spritesheet image:', filename
raise SystemExit, message
# Load a specific image from a specific rectangle
def image_at(self, rectangle, colorkey = None):
"Loads image from x,y,x+offset,y+offset"
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image, rect
# Load a whole bunch of images an
|
d return them as a list
    def images_at(self, rects, colorkey = None):
        "Loads multiple images, supply a list of coordinates"
        return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
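# A minimal usage sketch (hypothetical file name and frame size, not part of the
# original module); pygame needs an initialised display before convert() works:
#
#     pygame.init()
#     pygame.display.set_mode((320, 240))
#     sheet = spritesheet('tanks.png')
#     frames = sheet.load_strip((0, 0, 32, 32), 8, colorkey=-1)  # 8 frames of 32x32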
|
omf2097/pyomftools
|
omftools/pyshadowdive/palette_mapping.py
|
Python
|
mit
| 1,335
| 0
|
from validx import Dict, List
from .protos import DataObject
from .palette import Palette
from .utils.parser import BinaryParser
from .utils.validator import UInt8
from .utils.types import Remappings, Remapping
class PaletteMapping(DataObject):
__slots__ = (
"colors",
"remaps",
)
schema = Dict({"colors": Palette.schema, "remaps": List(List(UInt8))})
def __init__(self):
        self.colors: Palette = Palette()
self.remaps: Remappings = []
def remap(self, remap_id: int) -> Palette:
return self.colors.remap(self.remaps[remap_id])
def read(self, parser: BinaryParser):
self.colors = Palette().read(parser)
for k in range(0, 19):
remap: Remapping = []
for m in range(0, 256):
remap.append(parser.get_uint8())
self.remaps.append(remap)
return self
def write(self, parser):
self.colors.write(parser)
for k in range(0, 19):
for m in range(0, 256):
parser.put_uint8(self.remaps[k][m])
def serialize(self) -> dict:
return {"colors": self.colors.serialize(), "remaps": self.remaps}
def unserialize(self, data: dict):
self.colors = Palette().unserialize(data["colors"])
self.remaps = data["remaps"]
return self
|
craneworks/python-pyroute2
|
pyroute2/netlink/rtnl/errmsg.py
|
Python
|
apache-2.0
| 155
| 0
|
from pyroute2.netlink import nlmsg
class errmsg(nlmsg):
'''
Custom message type
Error ersatz-message
'''
    fields = (('code', 'i'), )
|
MarxMustermann/OfMiceAndMechs
|
src/itemFolder/obsolete/chemical.py
|
Python
|
gpl-3.0
| 1,571
| 0
|
import src
class Chemical(src.items.Item):
type = "Chemical"
def __init__(self):
super().__init__(display=src.canvas.displayChars.fireCrystals)
self.name = "chemical"
self.composition = b"cccccggggg"
def apply(self, character):
        import hashlib
        import random
results = []
counter = 0
while 1:
tmp = random.choice(["mix", "shift"])
if tmp == "mix":
self.mix(character)
|
elif tmp == "switch":
self.mix(character)
elif tmp == "shift":
self.shift()
test = hashlib.sha256(self.composition[0:9])
character.addMessage(counter)
|
result = int(test.digest()[-1])
result2 = int(test.digest()[-2])
if result < 15:
character.addMessage(test.digest())
character.addMessage(result)
character.addMessage(result2)
break
counter += 1
# character.addMessage(results)
def shift(self):
self.composition = self.composition[1:] + self.composition[0:1]
def mix(self, character):
part1 = self.composition[0:5]
part2 = self.composition[5:10]
self.composition = (
part1[0:1]
+ part2[0:1]
+ part1[1:2]
+ part2[1:2]
+ part1[2:3]
+ part2[2:3]
+ part1[3:4]
+ part2[3:4]
+ part1[4:5]
+ part2[4:5]
)
src.items.addType(Chemical)
|
nlgcoin/guldencoin-official
|
contrib/zmq/zmq_sub3.4.py
|
Python
|
mit
| 3,273
| 0.001833
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Bitcoin should be started with the command line arguments:
GuldenD -testnet -daemon \
-zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
        -zmqpubrawblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
were introduced in python 3.4 and have been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
print("This example only works with Python 3.4 and greater")
exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
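# A minimal sketch (not part of the original script) of the alternative mentioned in
# the module docstring: wrap the body of handle() in `while True` and schedule it
# once, instead of re-scheduling it with ensure_future() at the end of every call:
#
#     @asyncio.coroutine
#     def handle(self):
#         while True:
#             msg = yield from self.zmqSubSocket.recv_multipart()
#             ...  # dispatch on msg[0] exactly as above
#
# start() would then still call self.loop.create_task(self.handle()) a single time.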
|
mdurrant-b3/acos-client
|
acos_client/v21/slb/template/persistence.py
|
Python
|
apache-2.0
| 2,180
| 0
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client import errors as acos_errors
from acos_client.v21 import base
class BasePersistence(base.BaseV21):
def __init__(self, client):
super(BasePersistence, self).__init__(client)
self.prefix = "slb.template.%s_persistence" % self.pers_type
def get(self, name, **kwargs):
return self._post(("%s.search" % self.prefi
|
x), {'name': name},
**kwargs)
def exists(self, name, **kwargs):
try:
self.get(name, **kwargs)
return True
except acos_errors.NotFound:
return False
def create(self, name, **kwargs):
self._post(("%s.create" % self.prefix), self.get_params(name),
**kwargs)
def delete(self, name, **kwargs):
self._post(("%s.delete" % self.prefix), {'name': name}, **kwargs)
class CookiePersistence(BasePersistence):
def __init__(self, client):
self.pers_type = 'cookie'
super(CookiePersistence, self).__init__(client)
def get_params(self, name):
return {
"cookie_persistence_template": {
"name": name
}
}
class SourceIpPersistence(BasePersistence):
def __init__(self, client):
self.pers_type = 'src_ip'
super(SourceIpPersistence, self).__init__(client)
def get_params(self, name):
return {
"src_ip_persistence_template": {
"name": name
}
}
|
AttorneyOnlineVidya/tsuserver3
|
server/ban_manager.py
|
Python
|
agpl-3.0
| 2,392
| 0.001254
|
# tsuserver3, an Attorney Online server
#
# Copyright (C) 2016 argoneus <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ipaddress
import json
import yaml
from server.exceptions import ServerError
class BanManager:
def __init__(self):
self.bans = {}
self.load_banlist()
self.hdid_exempt = {}
self.load_hdidexceptions()
def load_banlist(self):
try:
with open('storage/banlist.json', 'r') as banlist_file:
self.bans = json.load(banlist_file)
except FileNotFoundError:
with open('storage/banlist.json', 'w') as poll_list_file:
json.dump({}, poll_list_file)
def write_banlist(self):
with open('storage/banlist.json', 'w') as banlist_file:
json.dump(self.bans, banlist_file)
def add_ban(self, ip):
try:
x = len(ip)
except AttributeError:
            raise ServerError('Argument must be a 12-digit number.')
if x == 12:
self.bans[ip] = True
self.write_banlist()
def remove_ban(self, client, ip):
try:
try:
int(ip)
except ValueError:
ipaddress.ip_address(ip)
ip = client.server.get_ipid(ip)
except ValueError:
if not len(ip) == 12:
raise ServerError('Argument must be an IP address or 10-digit number.')
del self.bans[ip]
self.write_banlist()
def is_banned(self, ipid):
try:
return self.bans[ipid]
except KeyError:
return False
def load_hdidexceptions(self):
with open('config/hdid_exceptions.yaml', 'r', encoding='utf-8') as hdid:
self.hdid_exempt = yaml.load(hdid)
|
Willem23/mbed
|
workspace_tools/export/__init__.py
|
Python
|
apache-2.0
| 4,602
| 0.004129
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, tempfile
from os.path import join, exists, basename
from shutil import copytree, rmtree
from workspace_tools.utils import mkdir
from workspace_tools.export import uvision4, codesourcery, codered, gccarm, ds5_5, iar, emblocks, coide, kds
from workspace_tools.export.exporters import zip_working_directory_and_clean_up, OldLibrariesException
from workspace_tools.targets import EXPORT_MAP
EXPORTERS = {
'uvision': uvision4.Uvision4,
'lpcxpresso': codered.CodeRed,
'codesourcery': codesourcery.CodeSourcery,
'gcc_arm': gccarm.GccArm,
'ds5_5': ds5_5.DS5_5,
'iar': iar.IAREmbeddedWorkbench,
'emblocks' : emblocks.IntermediateFile,
'coide' : coide.CoIDE,
'kds' : kds.KDS,
}
ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN = """
Sorry, the target %s is not currently supported on the %s toolchain.
Please refer to <a href='/handbook/Exporting-to-offline-toolchains' target='_blank'>Exporting to offline toolchains</a> for more information.
"""
ERROR_MESSAGE_NOT_EXPORT_LIBS = """
To export this project please <a href='http://mbed.org/compiler/?import=http://mbed.org/users/mbed_official/code/mbed-export/k&mode=lib' target='_blank'>import the export version of the mbed library</a>.
"""
def online_build_url_resolver(url):
# TODO: Retrieve the path and name of an online library build URL
return {'path':'', 'name':''}
def export(project_path, project_name, ide, target, destination='/tmp/',
           tempdir=None, clean=True, extra_symbols=None, build_url_resolver=online_build_url_resolver):
# Convention: we are using capitals for toolchain and target names
if target is not None:
target = target.upper()
if tempdir is None:
tempdir = tempfile.mkdtemp()
if ide is None:
# Simply copy everything, no project files to be generated
for d in ['src', 'lib']:
os.system("cp -r %s/* %s" % (join(project_path, d), tempdir))
report = {'success': True}
else:
report = {'success': False}
if ide not in EXPORTERS:
report['errormsg'] = "Unsupported toolchain"
else:
Exporter = EXPORTERS[ide]
target = EXPORT_MAP.get(target, target)
if target not in Exporter.TARGETS:
report['errormsg'] = ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN % (target, ide)
else:
try:
exporter = Exporter(target, tempdir, project_name, build_url_resolver, extra_symbols=extra_symbols)
exporter.scan_and_copy_resources(project_path, tempdir)
exporter.generate()
report['success'] = True
except OldLibrariesException, e:
report['errormsg'] = ERROR_MESSAGE_NOT_EXPORT_LIBS
zip_path = None
if report['success']:
# add readme file to every offline export.
        open(os.path.join(tempdir, 'README.html'),'w').write('<meta http-equiv="refresh" content="0; url=http://developer.mbed.org/handbook/ExportToOfflineToolchain#%s#%s"/>'% (target,ide))
zip_path = zip_working_directory_and_clean_up(tempdir, destination, project_name, clean)
return zip_path, report
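# A minimal usage sketch (hypothetical project path and target name, not from this
# module); 'gcc_arm' is one of the keys in EXPORTERS above:
#
#     zip_path, report = export('/path/to/my_project', 'my_project',
#                               ide='gcc_arm', target='LPC1768')
#     if report['success']:
#         print zip_path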
###############################################################################
# Generate project folders following the online conventions
###############################################################################
def copy_tree(src, dst, clean=True):
if exists(dst):
if clean:
rmtree(dst)
else:
return
copytree(src, dst)
def setup_user_prj(user_dir, prj_path, lib_paths=None):
"""
Setup a project with the same directory structure of the mbed online IDE
"""
mkdir(user_dir)
# Project Path
copy_tree(prj_path, join(user_dir, "src"))
# Project Libraries
user_lib = join(user_dir, "lib")
mkdir(user_lib)
if lib_paths is not None:
for lib_path in lib_paths:
copy_tree(lib_path, join(user_lib, basename(lib_path)))
|
lino-framework/noi
|
lino_noi/lib/noi/user_types.py
|
Python
|
bsd-2-clause
| 3,072
| 0.002604
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Defines a set of user roles and fills
:class:`lino.modlib.users.choicelists.UserTypes`.
This is used as the :attr:`user_types_module
<lino.core.site.Site.user_types_module>` for :ref:`noi`.
"""
from django.utils.translation import gettext_lazy as _
from lino.modlib.office.roles import OfficeStaff, OfficeUser
from lino.modlib.users.roles import Helper
# from lino.modlib.comments.roles import CommentsReader
from lino.modlib.comments.roles import CommentsUser, CommentsStaff, PrivateCommentsReader, CommentsReader
from lino.core.roles import SiteUser, SiteAdmin
from lino_xl.lib.excerpts.roles import ExcerptsUser, ExcerptsStaff
from lino_xl.lib.contacts.roles import ContactsUser, ContactsStaff
from lino_xl.lib.courses.roles import CoursesUser
from lino_xl.lib.tickets.roles import Reporter, Searcher, Triager, TicketsStaff
from lino_xl.lib.working.roles import Worker
from lino_xl.lib.cal.roles import CalendarReader
from lino_xl.lib.votes.roles import VotesStaff, VotesUser
from lino_xl.lib.products.roles import ProductsStaff
from lino_xl.lib.ledger.roles import LedgerStaff
from lino.modlib.users.choicelists import UserTypes
class Customer(SiteUser, OfficeUser, VotesUser, Searcher, Reporter, CommentsUser):
"""
A **Customer** is somebody who uses our software and may report
tickets, but won't work on them. Able to comment and view tickets on sites
where they are contact people. Unable to see any client data other than orgs
where they are a contact person and themselves.
"""
pass
class Contributor(Customer, Searcher, Helper, Worker, ExcerptsUser, CoursesUser):
"""
A **Contributor** is somebody who works on and see tickets of sites they are team members of.
"""
pass
class Developer(Contributor, ContactsUser, Triager, ExcerptsStaff, CommentsStaff, TicketsStaff, PrivateCommentsReader):
"""
    A **Developer** is a trusted user who has signed an NDA, has access to client contacts.
Is able to make service reports as well as manage tickets.
"""
pass
class SiteAdmin(SiteAdmin, Developer, OfficeStaff, VotesStaff, ContactsStaff, CommentsStaff, ProductsStaff, LedgerStaff):
"""
Can do everything.
"""
# class Anonymous(CommentsReader, CalendarReader):
class Anonymous(CalendarReader, CommentsReader, Searcher):
pass
UserTypes.clear()
add = UserTypes.add_item
add('000', _("Anonymous"), Anonymous, 'anonymous',
readonly=True, authenticated=False)
add('100', _("Customer"), Customer, 'customer user')
add('200', _("Contributor"), Contributor, 'contributor')
add('400', _("Developer"), Developer, 'developer')
add('900', _("Administrator"), SiteAdmin, 'admin')
# UserTypes.user = UserTypes.customer
# from lino.core.merge import MergeAction
# from lino.api import rt
# lib = rt.models
# for m in (lib.contacts.Company, ):
# m.define_action(merge_row=MergeAction(
# m, required_roles=set([ContactsStaff])))
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/s/suitesparse.py
|
Python
|
mit
| 7,433
| 0.002422
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for SuiteSparse, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
class EB_SuiteSparse(ConfigureMake):
"""Support for building SuiteSparse."""
def __init__(self, *args, **kwargs):
"""Custom constructor for SuiteSparse easyblock, initialize custom class parameters."""
super(EB_SuiteSparse, self).__init__(*args, **kwargs)
self.config_name = 'UNKNOWN'
def configure_step(self):
"""Configure build by patching UFconfig.mk or SuiteSparse_config.mk."""
if LooseVersion(self.version) < LooseVersion('4.0'):
self.config_name = 'UFconfig'
else:
self.config_name = 'SuiteSparse_config'
cfgvars = {
'CC': os.getenv('MPICC'),
'CFLAGS': os.getenv('CFLAGS'),
'CXX': os.getenv('MPICXX'),
'F77': os.getenv('MPIF77'),
'F77FLAGS': os.getenv('F77FLAGS'),
'BLAS': os.getenv('LIBBLAS_MT'),
'LAPACK': os.getenv('LIBLAPACK_MT'),
}
metis = get_software_root('METIS')
parmetis = get_software_root('ParMETIS')
if parmetis:
metis_path = parmetis
metis_libs = ' '.join([
os.path.join(parmetis, 'lib', 'libparmetis.a'),
os.path.join(parmetis, 'lib', 'metis.a'),
])
elif metis:
metis_path = metis
metis_libs = os.path.join(metis, 'lib', 'metis.a')
else:
raise EasyBuildError("Neither METIS or ParMETIS module loaded.")
cfgvars.update({
'METIS_PATH': metis_path,
'METIS': metis_libs,
})
# patch file
fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name)
try:
for line in fileinput.input(fp, inplace=1, backup='.orig'):
for (var, val) in cfgvars.items():
orig_line = line
                    # for variables in cfgvars, substitute assignments to them
                    # in the file, whatever they are, with assignments to the
                    # values in cfgvars
line = re.sub(r"^\s*(%s\s*=\s*).*$" % var,
r"\1 %s # patched by EasyBuild" % val,
line)
if line != orig_line:
cfgvars.pop(var)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s in: %s", fp, err)
        # add remaining entries at the end
if cfgvars:
try:
f = open(fp, "a")
f.write("# lines below added automatically by EasyBuild")
for (var, val) in cfgvars.items():
f.write("%s = %s\n" % (var, val))
f.close()
except IOError, err:
raise EasyBuildError("Failed to complete %s: %s", fp, err)
def install_step(self):
"""Install by copying the contents of the builddir to the installdir (preserving permissions)"""
for x in os.listdir(self.cfg['start_dir']):
src = os.path.join(self.cfg['start_dir'], x)
dst = os.path.join(self.installdir, x)
try:
if os.path.isdir(src):
shutil.copytree(src, dst)
# symlink
# - dst/Lib to dst/lib
# - dst/Include to dst/include
for c in ['Lib', 'Include']:
nsrc = os.path.join(dst, c)
ndst = os.path.join(dst, c.lower())
if os.path.exists(nsrc):
os.symlink(nsrc, ndst)
else:
shutil.copy2(src, dst)
except OSError, err:
raise EasyBuildError("Copying src %s to dst %s failed: %s", src, dst, err)
# some extra symlinks are necessary for UMFPACK to work.
paths = [
os.path.join('AMD', 'include', 'amd.h'),
os.path.join('AMD' ,'include' ,'amd_internal.h'),
os.path.join(self.config_name, '%s.h' % self.config_name),
os.path.join('AMD', 'lib', 'libamd.a')
]
for path in paths:
src = os.path.join(self.installdir, path)
dn = path.split(os.path.sep)[-2]
fn = path.split(os.path.sep)[-1]
dstdir = os.path.join(self.installdir, 'UMFPACK', dn)
mkdir(dstdir)
if os.path.exists(src):
try:
os.symlink(src, os.path.join(dstdir, fn))
except OSError, err:
raise EasyBuildError("Failed to make symbolic link from %s to %s: %s", src, dst, err)
def make_module_req_guess(self):
"""Add config dir to CPATH so include file is found."""
guesses = super(EB_SuiteSparse, self).make_module_req_guess()
guesses.update({'CPATH': [self.config_name]})
return guesses
def sanity_check_step(self):
"""Custom sanity check for SuiteSparse."""
if LooseVersion(self.version) < LooseVersion('4.0'):
csparse_dir = 'CSparse3'
else:
csparse_dir = 'CSparse'
custom_paths = {
'files': [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in ["AMD", "BTF", "CAMD", "CCOLAMD", "CHOLMOD",
"COLAMD", "CXSparse", "KLU", "LDL", "RBio",
"SPQR", "UMFPACK"]] +
[os.path.join(csparse_dir, 'lib', 'libcsparse.a')],
'dirs': ["MATLAB_Tools"],
}
super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths)
|
seakers/daphne_brain
|
example_problem/explorer/urls.py
|
Python
|
mit
| 208
| 0
|
from django.urls import path
from . import views
urlpatterns = [
path('start-ga', views.StartGA.as_view()),
    path('stop-ga', views.StopGA.as_view()),
    path('check-ga', views.CheckGA.as_view()),
]
|
madecoste/swarming
|
appengine/isolate/tests/stats_test.py
|
Python
|
apache-2.0
| 3,366
| 0.006239
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import datetime
import sys
import unittest
import test_env
test_env.setup_test_env()
# From components/third_party/
import webtest
import webapp2
import stats
from components import stats_framework
from support import stats_framework_mock
from support import test_case
# pylint: disable=R0201
class Store(webapp2.RequestHandler):
def get(self):
"""Generates fake stats."""
stats.add_entry(stats.STORE, 2048, 'GS; inline')
self.response.write('Yay')
class Return(webapp2.RequestHandler):
def get(self):
"""Generates fake stats."""
stats.add_entry(stats.RETURN, 4096, 'memcache')
self.response.write('Yay')
class Lookup(webapp2.RequestHandler):
def get(self):
"""Generates fake stats."""
stats.add_entry(stats.LOOKUP, 200, 103)
self.response.write('Yay')
class Dupe(webapp2.RequestHandler):
def get(self):
"""Generates fake stats."""
stats.add_entry(stats.DUPE, 1024, 'inline')
self.response.write('Yay')
def to_str(now, delta):
"""Converts a datetime to unicode."""
now = now + datetime.timedelta(seconds=delta)
return unicode(now.strftime(stats.utils.DATETIME_FORMAT))
class StatsTest(test_case.TestCase, stats_framework_mock.MockMixIn):
def setUp(self):
super(StatsTest, self).setUp()
fake_routes = [
('/store', Store),
('/return', Return),
('/lookup', Lookup),
('/dupe', Dupe),
]
self.app = webtest.TestApp(
webapp2.WSGIApplication(fake_routes, debug=True),
extra_environ={'REMOTE_ADDR': 'fake-ip'})
stats_framework_mock.configure(self)
self.now = datetime.datetime(2010, 1, 2, 3, 4, 5, 6)
self.mock_now(self.now, 0)
def _test_handler(self, url, added_data):
stats_framework_mock.reset_timestamp(stats.STATS_HANDLER, self.now)
self.assertEqual('Yay', self.app.get(url).body)
self.assertEqual(1, len(list(stats_framework.yield_entries(None, None))))
self.mock_now(self.now, 60)
self.assertEqual(10, stats.generate_stats())
actual = stats_framework.get_stats(
stats.STATS_HANDLER, 'minutes', self.now, 1, True)
    expected = [
{
'contains_lookups': 0,
'contains_requests': 0,
'downloads': 0,
'downloads_bytes': 0,
'failures': 0,
'key': datetime.datetime(2010, 1, 2, 3, 4),
'other_requests': 0,
'requests': 1,
'uploads': 0,
'uploads_bytes': 0,
},
]
expected[0].update(added_data)
    self.assertEqual(expected, actual)
def test_store(self):
expected = {
'uploads': 1,
'uploads_bytes': 2048,
}
self._test_handler('/store', expected)
def test_return(self):
expected = {
'downloads': 1,
'downloads_bytes': 4096,
}
self._test_handler('/return', expected)
def test_lookup(self):
expected = {
'contains_lookups': 200,
'contains_requests': 1,
}
self._test_handler('/lookup', expected)
def test_dupe(self):
expected = {
'other_requests': 1,
}
self._test_handler('/dupe', expected)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
nvbn/django-socialregistration
|
socialregistration/urls.py
|
Python
|
mit
| 4,237
| 0.006372
|
"""
Updated on 19.12.2009
@author: alen, pinda
"""
from django.conf import settings
from django.conf.urls.defaults import *
from socialregistration.utils import OpenID, OAuthClient, OAuthTwitter, OAuthLinkedin
urlpatterns = patterns('',
url('^setup/$', 'socialregistration.views.setup',
name='socialregistration_setup'),
url('^logout/$', 'socialregistration.views.logout',
name='social_logout'),
)
# Setup Facebook URLs if there's an API key specified
if getattr(settings, 'FACEBOOK_API_KEY', None) is not None:
urlpatterns = urlpatterns + patterns('',
url('^facebook/login/$', 'socialregistration.views.facebook_login',
name='facebook_login'),
url('^facebook/connect/$', 'socialregistration.views.facebook_connect',
name='facebook_connect'),
url('^xd_receiver.htm', 'django.views.generic.simple.direct_to_template',
{'template':'socialregistration/xd_receiver.html'},
name='facebook_xd_receiver'),
)
#Setup Twitter URLs if there's an API key specified
if getattr(settings, 'TWITTER_CONSUMER_KEY', None) is not None:
urlpatterns = urlpatterns + patterns('',
url('^twitter/redirect/$', 'socialregistration.views.oauth_redirect',
dict(
consumer_key=settings.TWITTER_CONSUMER_KEY,
secret_key=settings.TWITTER_CONSUMER_SECRET_KEY,
request_token_url=settings.TWITTER_REQUEST_TOKEN_URL,
access_token_url=settings.TWITTER_ACCESS_TOKEN_URL,
authorization_url=settings.TWITTER_AUTHORIZATION_URL,
callback_url='twitter_callback',
client_class = OAuthClient
),
name='twitter_redirect'),
url('^twitter/callback/$', 'socialregistration.views.oauth_callback',
dict(
consumer_key=settings.TWITTER_CONSUMER_KEY,
secret_key=settings.TWITTER_CONSUMER_SECRET_KEY,
request_token_url=settings.TWITTER_REQUEST_TOKEN_URL,
access_token_url=settings.TWITTER_ACCESS_TOKEN_URL,
authorization_url=settings.TWITTER_AUTHORIZATION_URL,
callback_url='twitter',
client_class = OAuthClient
),
name='twitter_callback'
),
url('^twitter/$', 'socialregistration.views.twitter', {'client_class': OAuthTwitter}, name='twitter'),
)
#Setup Linkedin URLs if there's an API key specified
if getattr(settings, 'LINKEDIN_CONSUMER_KEY', None) is not None:
urlpatterns = urlpatterns + patterns('',
url('^linkedin/redirect/$', 'socialregistration.views.oauth_redirect',
dict(
consumer_key=settings.LINKEDIN_CONSUMER_KEY,
secret_key=settings.LINKEDIN_CONSUMER_SECRET_KEY,
request_token_url=settings.LINKEDIN_REQUEST_TOKEN_URL,
access_token_url=settings.LINKEDIN_ACCESS_TOKEN_URL,
authorization_url=settings.LINKEDIN_AUTHORIZATION_URL,
callback_url='linkedin_callback',
client_class = OAuthClient
),
name='linkedin_redirect'),
url('^linkedin/callback/$', 'socialregistration.views.oauth_callback',
|
dict(
consumer_key=settings.LINKEDIN_CONSUMER_KEY,
|
secret_key=settings.LINKEDIN_CONSUMER_SECRET_KEY,
request_token_url=settings.LINKEDIN_REQUEST_TOKEN_URL,
access_token_url=settings.LINKEDIN_ACCESS_TOKEN_URL,
authorization_url=settings.LINKEDIN_AUTHORIZATION_URL,
callback_url='linkedin',
client_class = OAuthClient,
parameters={'oauth_verifier':''}
),
name='linkedin_callback'
),
url('^linkedin/$', 'socialregistration.views.linkedin', {'client_class': OAuthLinkedin}, name='linkedin'),
)
urlpatterns = urlpatterns + patterns('',
url('^openid/redirect/$', 'socialregistration.views.openid_redirect', { 'client_class': OpenID}, name='openid_redirect'),
url('^openid/callback/$', 'socialregistration.views.openid_callback', { 'client_class': OpenID}, name='openid_callback')
)
|
aliyun/aliyun-oss-python-sdk
|
oss2/compat.py
|
Python
|
mit
| 2,283
| 0.001447
|
# -*- coding: utf-8 -*-
"""
Python version compatibility
"""
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_py33 = (sys.version_info[0] == 3 and sys.version_info[1] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
import json
if is_py2:
from urllib import quote as urlquote, unquote as urlunquote
from urlparse import urlparse, parse_qs, urlsplit
def to_bytes(data):
"""若输入为unicode, 则转为utf-8编码的bytes;其他则原样返回。"""
if isinstance(data, unicode):
return data.encode('utf-8')
else:
return data
def to_string(data):
"""把输入转换为str对象"""
return to_bytes(data)
def to_unicode(data):
"""把输入转换为unicode,要求输入是unicode或者utf-8编码的bytes。"""
if isinstance(data, bytes):
return data.decode('utf-8')
else:
return data
def stringify(input):
if isinstance(input, dict):
return dict([(stringify(key), stringify(value)) for key,value in input.iteritems()])
elif isinstance(input, list):
return [stringify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
builtin_str = str
bytes = str
str = unicode
elif is_py3:
from urllib.parse import quote as urlquote, unquote as urlunquote
from urllib.parse import urlparse, parse_qs, urlsplit
def to_bytes(data):
"""若输入为str(即unicode),则转为utf-8编码的b
|
ytes;其他则原样返回"""
if isinstance(data, str):
return data.encode(encoding='utf-8')
else:
return data
def to_string(data):
"""若输入为bytes,则认为是utf-8编码,并返回str"""
if isinstance(data, bytes):
return data.decode('utf-8')
else:
return data
def to_unicode(data):
"""把输入转换为unicode,要求输入是unicode或者utf-8编码的by
|
tes。"""
return to_string(data)
def stringify(input):
return input
builtin_str = str
bytes = bytes
str = str
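# A minimal usage sketch (assuming Python 3; values chosen for illustration):
#     to_bytes('abc')     # -> b'abc'
#     to_bytes(b'abc')    # -> b'abc' (returned unchanged)
#     to_string(b'abc')   # -> 'abc'
#     urlquote('a b')     # -> 'a%20b'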
|
Callek/build-relengapi-slaveloan
|
relengapi/blueprints/slaveloan/slave_mappings.py
|
Python
|
mpl-2.0
| 3,163
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
# Todo this mapping REALLY needs a non-hardcoded home
_slave_type = {
"bld-linux64-ec2": [
re.compile("^bld-centos6-hp-"),
        re.compile("^bld-linux64-ec2-"),
re.compile("^bld-linux64-ix-"),
re.compile("^b-linux64-ix-"),
re.compile("^bld-linux64-spot-"),
re.compile("^b-linux64-hp-"),
re.compile("^try-linux64-spot-"),
],
"bld-lion-r5": [
re.compile("^bld-lion-r5-"),
],
"b-2008-ix": [
re.compile("^b-2008-ix-"),
re.compile("^b-2008-sm-"),
re.compile("^w64-ix-"),
],
"tst-linux64-ec2": [
re.compile("^talos-linux64-ix-"),
re.compile("^tst-linux64-spot-"),
re.compile("^tst-linux64-ec2-"),
],
"tst-linux32-ec2": [
re.compile("^talos-linux32-ix-"),
re.compile("^tst-linux32-spot-"),
re.compile("^tst-linux32-ec2-"),
],
"t-yosemite-r5": [
re.compile("^t-yosemite-r5-"),
],
"talos-mtnlion-r5": [
re.compile("^talos-mtnlion-r5-"),
],
"t-snow-r4": [
re.compile("^t-snow-r4-"),
re.compile("^talos-r4-snow-"),
],
"t-w732-ix": [
re.compile("^t-w732-ix-"),
],
"t-w864-ix": [
re.compile("^t-w864-ix-"),
],
"t-xp32-ix": [
re.compile("^t-xp32-ix-"),
],
}
_gpo_needed = [
"b-2008-ix", "t-w732-ix", "t-w864-ix", "t-xp32-ix"
]
def slave_patterns():
vals = []
ret = {}
for key, values in _slave_type.items():
for regex in values:
vals += [regex.pattern[1:] + "*"]
vals.sort()
ret[key] = vals
vals = []
return ret
def slave_to_slavetype(slave):
if slave in _slave_type.keys():
return slave
for key, values in _slave_type.items():
for regex in values:
if regex.match(slave):
return key
return None
def is_aws_serviceable(slave):
slaveclass = slave_to_slavetype(slave)
if 'ec2' in slaveclass:
return True
return False
def needs_gpo(slave):
slaveclass = slave_to_slavetype(slave)
if slaveclass in _gpo_needed:
return True
return False
def slave_filter(slave_class):
def _inner_slave_filter(item):
for i in _slave_type[slave_class]:
if i.match(item["name"]):
return True
return False # If we got here, no match
return _inner_slave_filter
def slavetype_to_awsprefix(slave_class):
if not is_aws_serviceable(slave_class):
raise ValueError("Unsupported Slave")
basic_slave_prefix = slave_to_slavetype(slave_class)
if basic_slave_prefix.startswith("bld"):
loan_prefix = basic_slave_prefix.replace("bld-", "dev-")
elif basic_slave_prefix.startswith("tst"):
loan_prefix = basic_slave_prefix
else:
raise ValueError("Unsure how to name this aws slave")
return loan_prefix
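# A minimal usage sketch (hypothetical host names, not from the original module):
#     slave_to_slavetype("bld-linux64-spot-123")  # -> "bld-linux64-ec2"
#     is_aws_serviceable("tst-linux64-ec2-001")   # -> True
#     needs_gpo("t-w732-ix-002")                  # -> True
#     slavetype_to_awsprefix("bld-linux64-ec2")   # -> "dev-linux64-ec2"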
|
yiwen-luo/LeetCode
|
Python/can-place-flowers.py
|
Python
|
mit
| 1,263
| 0.003167
|
# Time: O(n)
# Space: O(1)
# Suppose you have a long flowerbed in which some of the plots are planted and some are not.
# However, flowers cannot be planted in adjacent plots - they would compete for water
# and both would die.
#
# Given a flowerbed (represented as an array containing 0 and 1,
# where 0 means empty and 1 means not empty), and a number n,
# return if n new flowers can be planted in it without violating the no-adjacent-flowers rule.
#
# Example 1:
# Input: flowerbed = [1,0,0,0,1], n = 1
# Output: True
# Example 2:
# Input: flowerbed = [1,0,0,0,1], n = 2
# Output: False
# Note:
# The input array won't violate no-adjacent-flowers rule.
# The input array size is in the range of [1, 20000].
# n is a non-negative integer which won't exceed the input array size.
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
for i in xrange(len(flowerbed)):
if flowerbed[i] == 0 and (i == 0 or flowerbed[i-1] == 0) and \
(i == len(flowerbed)-1 or flowerbed[i+1] == 0):
flowerbed[i] = 1
n -= 1
if n <= 0:
return True
return False
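# A minimal usage sketch (Python 2, mirroring the examples in the comment above):
#     print Solution().canPlaceFlowers([1, 0, 0, 0, 1], 1)  # True
#     print Solution().canPlaceFlowers([1, 0, 0, 0, 1], 2)  # False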
|
gangadhar-kadam/sapphite_lib
|
core/doctype/customize_form/customize_form.py
|
Python
|
mit
| 8,959
| 0.041299
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter
Thus providing a better UI from user perspective
"""
import webnotes
from webnotes.utils import cstr
class DocType:
def __init__(self, doc, doclist=[]):
self.doc, self.doclist = doc, doclist
self.doctype_properties = [
'search_fields',
'default_print_format',
'read_only_onload',
'allow_print',
'allow_email',
'allow_copy',
'allow_attach',
'max_attachments'
]
self.docfield_properties = [
'idx',
'label',
'fieldtype',
'fieldname',
'options',
'permlevel',
'width',
'print_width',
'reqd',
'in_filter',
'in_list_view',
'hidden',
'print_hide',
'report_hide',
'allow_on_submit',
'depends_on',
'description',
'default',
'name'
]
self.property_restrictions = {
'fieldtype': [['Currency', 'Float'], ['Small Text', 'Data'], ['Text', 'Text Editor', 'Code']],
}
self.forbidden_properties = ['idx']
def get(self):
"""
Gets DocFields applied with Property Setter customizations via Customize Form Field
"""
self.clear()
if self.doc.doc_type:
from webnotes.model.doc import addchild
for d in self.get_ref_doclist():
if d.doctype=='DocField':
new = addchild(self.doc, 'fields', 'Customize Form Field',
self.doclist)
self.set(
{
'list': self.docfield_properties,
'doc' : d,
'doc_to_set': new
}
)
elif d.doctype=='DocType':
self.set({ 'list': self.doctype_properties, 'doc': d })
def get_ref_doclist(self):
"""
* Gets doclist of type self.doc.doc_type
* Applies property setter properties on the doclist
* returns the modified doclist
"""
from webnotes.model.doctype import get
ref_doclist = get(self.doc.doc_type)
ref_doclist = webnotes.doclist([ref_doclist[0]]
+ ref_doclist.get({"parent": self.doc.doc_type}))
return ref_doclist
def clear(self):
"""
Clear fields in the doc
"""
# Clear table before adding new doctype's fields
self.doclist = self.doc.clear_table(self.doclist, 'fields')
self.set({ 'list': self.doctype_properties, 'value': None })
def set(self, args):
"""
Set a list of attributes of a doc to a value
or to attribute values of a doc passed
args can contain:
* list --> list of attributes to set
* doc_to_set --> defaults to self.doc
* value --> to set all attributes to one value eg. None
* doc --> copy attributes from doc to doc_to_set
"""
if not 'doc_to_set' in args:
args['doc_to_set'] = self.doc
if 'list' in args:
if 'value' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = None
elif 'doc' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = args['doc'].fields.get(f)
else:
webnotes.msgprint("Please specify args['list'] to set", raise_exception=1)
def post(self):
"""
Save diff between Customize Form Bean and DocType Bean as property setter entries
"""
if self.doc.doc_type:
from webnotes.model import doc
from core.doctype.doctype.doctype import validate_fields_for_doctype
this_doclist = webnotes.doclist([self.doc] + self.doclist)
ref_doclist = self.get_ref_doclist()
dt_doclist = doc.get('DocType', self.doc.doc_type)
# get a list of property setter docs
diff_list = self.diff(this_doclist, ref_doclist, dt_doclist)
self.set_properties(diff_list)
validate_fields_for_doctype(self.doc.doc_type)
webnotes.clear_cache(doctype=self.doc.doc_type)
webnotes.msgprint("Updated")
def diff(self, new_dl, ref_dl, dt_dl):
"""
Get difference between new_dl doclist and ref_dl doclist
then check how it differs from dt_dl i.e. default doclist
"""
import re
self.defaults = self.get_defaults()
diff_list = []
for new_d in new_dl:
for ref_d in ref_dl:
if ref_d.doctype == 'DocField' and new_d.name == ref_d.name:
for prop in self.docfield_properties:
# do not set forbidden properties like idx
if prop in self.forbidden_properties: continue
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
elif ref_d.doctype == 'DocType' and new_d.doctype == 'Customize Form':
for prop in self.doctype_properties:
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
return diff_list
def get_defaults(self):
"""
Get fieldtype and default value for properties of a field
"""
df_defaults = webnotes.conn.sql("""
SELECT fieldname, fieldtype, `default`, label
FROM `tabDocField`
WHERE parent='DocField' or parent='DocType'""", as_dict=1)
defaults = {}
for d in df_defaults:
defaults[d['fieldname']] = d
defaults['idx'] = {'fieldname' : 'idx', 'fieldtype' : 'Int', 'default' : 1, 'label' : 'idx'}
defaults['previous_field'] = {'fieldname' : 'previous_field', 'fieldtype' : 'Data', 'default' : None, 'label' : 'Previous Field'}
return defaults
def prepare_to_set(self, prop, new_d, ref_d, dt_doclist, delete=0):
"""
Prepares docs of property setter
            sets delete property if it is required to be deleted
"""
# Check if property has changed compared to when it was loaded
if new_d.fields.get(prop) != ref_d.fields.get(prop) \
and not \
( \
new_d.fields.get(prop) in [None, 0] \
and ref_d.fields.get(prop) in [None, 0] \
) and not \
( \
new_d.fields.get(prop) in [None, ''] \
and ref_d.fields.get(prop) in [None, ''] \
):
#webnotes.msgprint("new: " + str(new_d.fields[prop]) + " | old: " + str(ref_d
|
.fields[prop]))
# Check if the new property is same as that in original doctype
# If yes, we need to delete the property setter entry
for dt_d in dt_doclist:
if dt_d.name == ref_d.name \
and (new_d.fields.get(prop) == dt_d.fields.get(prop) \
or \
( \
new_d.fields.get(prop) in [None, 0] \
and dt_d.fields.get(prop) in [None, 0] \
) or \
( \
new_d.fields.get(prop) in [None, ''] \
and dt_d.fields.get(prop) in [None, ''] \
)):
delete = 1
break
value = new_d.fields.get(prop)
if prop in self.property_restrictions:
allow_change = False
for restrict_list in self.property_restrictions.get(prop):
if value in restrict_list and \
ref_d.fields.get(prop) in restrict_list:
allow_change = True
break
if not allow_change:
webnotes.msgprint("""\
You cannot change '%s' of '%s' from '%s' to '%s'.
%s can only be changed among %s.
<i>Ignoring this change and saving.</i>""" % \
(self.defaults.get(prop, {}).get("label") or prop,
new_d.fields.get("label") or new_d.fields.get("idx"),
ref_d.fields.get(prop), value,
self.defaults.get(prop, {}).get("label") or prop,
" -or- ".join([", ".join(r) for r in \
self.property_restrictions.get(prop)])))
return None
# If the above conditions are fulfilled,
# create a property setter doc, but dont save it yet.
from webnotes.model.doc import Document
d = Document('Property Setter')
d.doctype_or_field = ref_d.doctype=='DocField' and 'DocField' or 'DocType'
d.doc_type = self.doc.doc_type
d.field_name = ref_d.fieldname
d.property = prop
d.value = value
d.property_type = self.defaults[prop]['fieldtype']
#d.default_value = self.defaults[prop]['default']
if delete: d.delete = 1
if d.select_item:
d.select_item = self.remove_forbidden(d.select_item)
# return the property setter doc
return d
else: return None
def set_properties(self, ps_doclist):
"""
* Delete a property setter entry
+ if it already exists
+ if marked for deletion
* Save the property setter doc in the list
"""
for d in ps_doclist:
# Delete existing property setter entry
if not d.fields.get("field_name"):
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %(doc_type)s
AND property = %(property)s""", d.fields)
else:
webnotes.conn.sql("""
DELETE
|
googleinterns/adversarial-0th-order-optimization
|
discretezoo/loss/semantic_similarity.py
|
Python
|
apache-2.0
| 9,072
| 0.003307
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable, List
import tensorflow as tf
import tensorflow_hub as tfhub
from discretezoo import attack_setup
class EmbeddedCosineDistance:
"""EmbeddedCosineDistance calculates cosine distance in embedding space.
Attributes:
embeddings: A tensor containing an embedding vector for each index in vocab.
<float32>[vocab_size, embedding_dimension]
"""
def __init__(self, embeddings: tf.Tensor):
"""Initializes EmbeddedCosineDistance with embeddings.
Arguments:
embeddings: A tensor containing an embedding for each index in vocab.
<float32>[vocab_size, embedding_dimension]
"""
assert embeddings.ndim == 2, (
'Embeddings are expected to have 2 dimensions'
f' but you passed a tensor with {embeddings.ndim}.')
self._embeddings = embeddings
@tf.function
def __call__(self, original_sentences: tf.Tensor,
adversarial_sentences: tf.Tensor) -> tf.Tensor:
r"""Calculates cosine distance between reduced embedded sentences.
Sentences are embedded and then reduced by summing them up.
Cosine similarity is then given by \frac{v_{original} \cdot v_{adversarial}}
{|v_{original}| \times |v_{adversarial|}}.
Cosine distance is defined as 1 - similarity.
Arguments:
original_sentences: A tensor of token indices in the original sentences.
<int32>[batch_size, sentence_length]
adversarial_sentences: A tensor of token indices in the adversarial
sentences. <int32>[batch_size, sentence_length]
Returns:
A tensor <float32>[batch_size, 1] of cosine distances between original and
adversarial sentences. Return values are in the range [0, 2]
https://www.tensorflow.org/api_docs/python/tf/keras/losses/cosine_similarity
is used internally, which computes negative similarity, and 1 is added.
"""
original_sentences_embedded = tf.nn.embedding_lookup(
self._embeddings, original_sentences)
adversarial_sentences_embedded = tf.nn.embedding_lookup(
self._embeddings, adversarial_sentences)
original_sentences_reduced = tf.math.reduce_sum(original_sentences_embedded,
axis=1)
adversarial_sentences_reduced = tf.math.reduce_sum(
adversarial_sentences_embedded, axis=1)
# Unintuitively, tf.keras.losses.cosine_similarity returns negative cosine
# similarity. Adding 1 means that two vectors will have 0 as a minimum
# distance instead of -1, which is helpful in later loss computation.
distance = 1 + tf.keras.losses.cosine_similarity(
original_sentences_reduced, adversarial_sentences_reduced)
return tf.expand_dims(distance, 1)
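# A minimal usage sketch (toy shapes and values, not part of the original module):
#
#     embeddings = tf.random.uniform((100, 8))          # vocab_size=100, dim=8
#     distance_fn = EmbeddedCosineDistance(embeddings)
#     original = tf.constant([[1, 2, 3], [4, 5, 0]], dtype=tf.int32)
#     adversarial = tf.constant([[1, 2, 9], [4, 5, 0]], dtype=tf.int32)
#     distance_fn(original, adversarial)                 # -> <float32>[2, 1]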
class EmbeddedEuclideanDistance:
"""EmbeddedEuclideanDistance calculates euclidean distance in embedding space.
Attributes:
embeddings: A tensor containing an embedding vector for each index in vocab.
<float32>[vocab_size, embedding_dimension]
reduce_mean: This is a boolean flag that signals how embedded sentences will
be reduced to a single vector. True for mean, False for sum.
"""
def __init__(self, embeddings: tf.Tensor, reduce_mean: bool = True):
"""Initializes EmbeddedEuclideanDistance with embeddings and reduction type.
Arguments:
embeddings: A tensor containing an embedding for each index in vocab.
<float32>[vocab_size, embedding_dimension]
reduce_mean: This boolean flag signals how embedded sentences will be
reduced to a single vector. True for mean, False for sum.
"""
assert embeddings.ndim == 2, (
'Embeddings are expected to have 2 dimensions'
f' but you passed a tensor with {embeddings.ndim}.')
self._embeddings = embeddings
self._reduce_mean = reduce_mean
@tf.function
def __call__(self, original_sentences: tf.Tensor,
adversarial_sentences: tf.Tensor) -> tf.Tensor:
"""Calculates euclidean distances between reduced embedded sentences.
Arguments:
original_sentences: A tensor of token indices in the original sentences.
<int32>[batch_size, sentence_length]
adversarial_sentences: A tensor of token indices in the adversarial
sentences. <int32>[batch_size, sentence_length]
Returns:
A tensor <float32>[batch_size, 1] of euclidean distances between original
and adversarial sentences.
"""
original_sentences_embedded = tf.nn.embedding_lookup(
self._embeddings, original_sentences)
adversarial_sentences_embedded = tf.nn.embedding_lookup(
self._embeddings, adversarial_sentences)
if self._reduce_mean:
original_sentences_reduced = tf.math.reduce_mean(
original_sentences_embedded, axis=1)
adversarial_sentences_reduced = tf.math.reduce_mean(
adversarial_sentences_embedded, axis=1)
else:
      original_sentences_reduced = tf.math.reduce_sum(
original_sentences_embedded, axis=1)
adversarial_sentences_reduced = tf.math.reduce_sum(
adversarial_sentences_embedded, axis=1)
difference_vector = tf.math.subtract(original_sentences_reduced,
adversarial_sentences_reduced)
distance = tf.norm(difference_vector, axis=-1, keepdims=True)
return distance
class UniversalSentenceEncoderDistance:
"""Wraps the Universal Sentence Encoder and converts tensors to strings.
The Universal Sentence Encoder expects python strings as input and includes
its own tokenizer. The attack functions on tensors, so we need to convert
vocab indices to tokens and then detokenize the text back into strings.
Attributes:
detokenizer: Detokenizer accepts a list of tokens, joins them by whitespace,
and then undoes the regexes used to tokenize text.
vocab: A list of tokens in the vocabulary.
padding_index: An integer indicating which vocab entry is the padding token.
encoder: This is a tensorflow hub module corresponding to the Universal
Sentence Encoder.
"""
def __init__(
self,
detokenizer: Callable[[List[str]], str],
vocab: List[str],
padding_index: int = 0,
use_tfhub_url:
str = 'https://tfhub.dev/google/universal-sentence-encoder-large/5'):
"""Initializes the UniversalSentenceEncoderDistance class.
Arguments:
detokenizer: Detokenizer accepts a list of tokens, joins them by whitespace,
and then undoes the regexes used to tokenize text.
vocab: A list of tokens in the vocabulary.
padding_index: An integer indicating which vocab entry is the padding token.
use_tfhub_url: The URL to the Universal Sentence Encoder on the Tensorflow
Hub. The default value corresponds to the Transformer based model, but
Deep Averaging Networks and multilingual versions are also available.
"""
self._vocab = vocab
self._padding_index = padding_index
self._detokenizer = detokenizer
self._encoder = tfhub.load(use_tfhub_url)
def __call__(self, original_sentences: tf.Tensor,
adversarial_sentences: tf.Tensor) -> tf.Tensor:
"""Converts tensors of vocabulary indices to strings and calls the encoder.
Arguments:
original_sentences: A tensor of token indices in the original sentences.
<int32>[batch_size, sentence_length]
adversarial_sentences: A tensor of token indices in the adversarial
sentences. <int32>[batch_size, sentence_length]
Returns:
A tensor <float32>[batch_size, 1] of cosine distances between original
and adversarial sentences encoded by
|
torgartor21/solar
|
solar/solar/system_log/tasks.py
|
Python
|
apache-2.0
| 1,007
| 0
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solar.orchestration.runner import app
from solar.system_log.operations import set_error, move_to_commited
__all__ = ['error_logitem', 'commit_logitem']
@app.task(name='error_logitem')
def error_logitem(task_uuid):
return set_error(task_uuid.rsplit(':', 1)[-1])
@app.task(name='commit_logitem')
def commit_logitem(task_uuid):
return move_to_commited(task_uuid.rsplit(':', 1)[-1])
|
far-far-away-science/hab-v2
|
software/ax25-utils/code_generation_v2.py
|
Python
|
gpl-3.0
| 2,693
| 0.004827
|
import numpy
import data_generator
class Generator:
def __init__(self):
self.combinations = data_generator.generateAllByteToAx25DataCombinations()
self.frameSeparatorOne = data_generator.calculateNewAx25DataFromOldImpl(1, 0, 0x7E, False)
self.frameSeparatorZero = data_generator.calculateNewAx25DataFromOldImpl(0, 0, 0x7E, False)
def generateDefinitionsHeader(self, filePath):
text = '''#pragma once
#include <stdint.h>
typedef struct AX25EncodedData_t
{
uint16_t dataGivenThatPreviousBitWasZero;
uint8_t dataBitsCount;
uint8_t newNumberOfOnes;
} AX25EncodedData;
//
// To figure out what those values mean see ax25-utils Python project,
// code_generation_v2.py file
//
extern const AX25EncodedData byte2ax25EncodedData[];
#define FRAME_SEPARATOR_GIVEN_THAT_PREVIOUS_BIT_WAS_ZERO ''' + str(self.frameSeparatorZero[0]) + '''
#define FRAME_SEPARATOR_GIVEN_THAT_PREVIOUS_BIT_WAS_ONE ''' + str(self.frameSeparatorOne[0]) + '''
#define GET_VALUE_IF_LAST_BIT_IS_ONE(pAx25EncodedData) \\
((~(pAx25EncodedData)->dataGivenThatPreviousBitWasZero) & ((1 << ((pAx25EncodedData)->dataBitsCount)) - 1))
#define GET_VALUE_IF_LAST_BIT_IS_ZERO(pAx25EncodedData) \\
((pAx25EncodedData)->dataGivenThatPreviousBitWasZero)
#define GET_LAST_BIT(value, pAx25EncodedData) \\
(((value) >> ((pData)->dataBitsCount - 1)) & 1)
#define GENERATE_AX25_TABLE_INDEX(currentNumberOfOnes, byte) \\
(((currentNumberOfOnes) << 8) + (byte))
#define GET_AX25_ENCODED_DATA_FOR_BYTE(currentNumberOfOnes, byte) \\
&byte2ax25EncodedData[GENERATE_AX25_TABLE_INDEX((currentNumberOfOnes), (byte))];
'''
with open(filePath, 'w+') as f:
f.write(text)
def generateSource(self, filePath):
text = '''#include "ax25.h"
const AX25EncodedData byte2ax25EncodedData[] =
{
'''
i = 0
        for (oldNumberOfOnes, byte2Encode, newDataGiventLastBitWasZero, newLastBitGiventLastBitWasZero, newDataGiventLastBitWasOne, newLastBitGiventLastBitWasOne, newDataNumberOfBits, newNumberOfOnes) in self.combinations:
text += ' {' + '{:>3}'.format(newDataGiventLastBitWasZero) + ', ' + '{:>2}'.format(newDataNumberOfBits) + ', ' + '{:>2}'.format(newNumberOfOnes) + '}, ' + \
'// idx = ' + '{:0>4}'.format(i) + ', oldNumberOfOnes = ' + str(oldNumberOfOnes) + ', byte2Encode = ' + '{:0>3}'.format(byte2Encode) + '\n'
i += 1
text += '''};
'''
with open(filePath, 'w+') as f:
f.write(text)
generator = Generator()
generator.generateDefinitionsHeader("../com-telemetry/src/aprs/generated/ax25.h")
generator.generateSource("../com-telemetry/src/aprs/generated/ax25.c")
|
Foxugly/MyTaxAccountant
|
scripts/move_document.py
|
Python
|
agpl-3.0
| 504
| 0.003968
|
from documents.models import Document
|
from categories.models import Category
import os
def move_doc(doc_id, cat_id):
doc = Document.objects.get(pk=int(doc_id))
old_cat = doc.refer_category
    new_cat = Category.objects.get(pk=int(cat_id))
for p in doc.pages.all():
cmd = "mv " + p.get_absolute_path() + " " + new_cat.get_absolute_path() + "/"
os.system(cmd)
doc.refer_category = new_cat
doc.save()
old_cat.documents.remove(doc)
new_cat.documents.add(doc)
|
mitmproxy/mitmproxy
|
test/mitmproxy/contentviews/test_msgpack.py
|
Python
|
mit
| 1,392
| 0.000718
|
from hypothesis import given
from hypothesis.strategies import binary
from msgpack import packb
from mitmproxy.contentviews import msgpack
from . import full_eval
def msgpack_encode(content):
    return packb(content, use_bin_type=True)
def test_parse_msgpack():
assert msgpack.parse_msgpack(msgpack_encode({"foo": 1}))
    assert msgpack.parse_msgpack(b"aoesuteoahu") is msgpack.PARSE_ERROR
assert msgpack.parse_msgpack(msgpack_encode({"foo": "\xe4\xb8\x96\xe7\x95\x8c"}))
def test_format_msgpack():
assert list(msgpack.format_msgpack({
"data": [
"str",
42,
True,
False,
None,
{},
[]
]
}))
def test_view_msgpack():
v = full_eval(msgpack.ViewMsgPack())
assert v(msgpack_encode({}))
assert not v(b"aoesuteoahu")
assert v(msgpack_encode([1, 2, 3, 4, 5]))
assert v(msgpack_encode({"foo": 3}))
assert v(msgpack_encode({"foo": True, "nullvalue": None}))
@given(binary())
def test_view_msgpack_doesnt_crash(data):
v = full_eval(msgpack.ViewMsgPack())
v(data)
def test_render_priority():
v = msgpack.ViewMsgPack()
assert v.render_priority(b"data", content_type="application/msgpack")
assert v.render_priority(b"data", content_type="application/x-msgpack")
assert not v.render_priority(b"data", content_type="text/plain")
|
johnoneil/arib
|
arib/read.py
|
Python
|
apache-2.0
| 2,368
| 0.022382
|
# vim: set ts=2 expandtab:
'''
Module: read.py
Desc: unpack data from binary files
Author: John O'Neil
Email: [email protected]
DATE: Thursday, March 13th 2014
'''
import struct
DEBUG = False
class EOFError(Exception):
""" Custom exception raised when we read to EOF
"""
pass
def split_buffer(length, buf):
'''split provided array at index x
'''
#print "split-buffer******"
a = []
if len(buf)<length:
return (a, buf)
#print "length of buf is" + str(len(buf))
for i in range(length):
a.append(buf.pop(0))
return (a,buf)
def dump_list(list):
print(u' '.join(u'{:#x}'.format(x) for x in list))
def ucb(f):
'''Read unsigned char byte from binary file
'''
if isinstance(f, list):
if len(f) < 1:
raise EOFError()
b, f = split_buffer(1, f)
return struct.unpack('B', ''.join(b))[0]
else:
_f = f.read(1)
if len(_f) < 1:
raise EOFError()
return struct.unpack('B', _f)[0]
def usb(f):
'''Read unsigned short from binary file
'''
if isinstance(f, list):
n, f = split_buffer(2, f)
return struct.unpack('>H', ''.join(n))[0]
else:
_f = f.read(2)
if DEBUG:
print("usb: " + hex(ord(_f[0])) + ":" + hex(ord(_f[1])))
if len(_f) < 2:
raise EOFError()
return struct.unpack('>H', _f)[0]
def ui3b(f):
'''Read 3 byte unsigned short from binary file
'''
if isinstance(f, list):
n, f = split_buffer(3, f)
return struct.unpack('>I', '\x00'+ ''.join(n))[0]
else:
_f = f.read(3)
if len(_f) < 3:
raise EOFError()
return struct.unpack('>I', '\x00'+ (_f))[0]
def uib(f):
'''
'''
if isinstance(f, list):
n, f = split_buffer(4, f)
return struct.unpack('>L', ''.join(n))[0]
else:
_f = f.read(4)
if len(_f) < 4:
raise EOFError()
return struct.unpack('>L', _f)[0]
def ulb(f):
'''Read unsigned long long (64bit integer) from binary file
'''
if isinstance(f, list):
n, f = split_buffer(8, f)
return struct.unpack('>Q', ''.join(n))[0]
else:
_f = f.read(8)
if len(_f) < 8:
raise EOFError()
    return struct.unpack('>Q', _f)[0]
def buffer(f, size):
'''Read N bytes from either a file or list
'''
if isinstance(f, list):
n, f = split_buffer(size, f)
return ''.join(n)
else:
_f = f.read(size)
if len(_f) < size:
raise EOFError()
return _f
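# A minimal usage sketch (Python 2, like the module itself): ucb/usb consume bytes
# from a list buffer in place, so successive calls walk forward through the data.
if __name__ == '__main__':
  example = list('\x01\x02\x03')
  print(ucb(example))   # 1; example now holds ['\x02', '\x03']
  print(usb(example))   # 0x0203 == 515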
|
queria/my-tempest
|
tempest/api/compute/admin/test_simple_tenant_usage_negative.py
|
Python
|
apache-2.0
| 2,656
| 0
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class TenantUsagesNegativeTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(TenantUsagesNegativeTestJSON, cls).resource_setup()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
cls.identity_client = cls._get_identity_admin_client()
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
@test.attr(type=['negative', 'gate'])
def test_get_usage_tenant_with_empty_tenant_id(self):
# Get usage for a specific tenant empty
        params = {'start': self.start,
'end': self.end}
self.assertRaises(exceptions.NotFound,
self.adm_client.get_tenant_usage,
'', params)
@test.attr(type=['negative', 'gate'])
def test_get_usage_tenant_with_invalid_date(self):
# Get usage for tenant with invalid date
params = {'start': self.end,
'end': self.start}
self.assertRaises(exceptions.BadRequest,
self.adm_client.get_tenant_usage,
self.client.tenant_id, params)
@test.attr(type=['negative', 'gate'])
def test_list_usage_all_tenants_with_non_admin_user(self):
# Get usage for all tenants with non admin user
params = {'start': self.start,
'end': self.end,
'detailed': int(bool(True))}
self.assertRaises(exceptions.Unauthorized,
self.client.list_tenant_usages, params)
class TenantUsagesNegativeTestXML(TenantUsagesNegativeTestJSON):
_interface = 'xml'
|
barentsen/surveytools
|
surveytools/tests/test_footprint.py
|
Python
|
mit
| 2,130
| 0.002817
|
"""Tests the surveytools.footprint module."""
import numpy as np
from surveytools.footprint import VphasFootprint, VphasOffset
def test_vphas_offset_coordinates():
"""Test the offset pattern, which is expected to equal
ra -0, dec +0 arcsec for the "a" pointing;
ra -588, dec +660 arcsec for the "b" pointing;
ra -300, dec +350 arcsec for the "c" pointing.
"""
vf = VphasFootprint()
np.testing.assert_almost_equal(vf.offsets['0001a']['ra'], 97.2192513369)
np.testing.assert_almost_equal(vf.offsets['0001a']['dec'], 0)
np.testing.assert_almost_equal(vf.offsets['0001b']['ra'], 97.2192513369 - 588/3600.)
np.testing.assert_almost_equal(vf.offsets['0001b']['dec'], 0 + 660/3600.)
np.testing.assert_almost_equal(vf.offsets['0001c']['ra'], 97.2192513369 - 300/3600.)
np.testing.assert_almost_equal(vf.offsets['0001c']['dec'], 0 + 350/3600.)
def test_vphas_offset_pattern():
vf = VphasFootprint()
for field in ['0500', '1000', '2000']:
ra, dec = vf.offsets[field+'a']['ra'], vf.offsets[field+'a']['dec']
np.testing.assert_almost_equal(vf.offsets[field+'b']['ra'],
ra - (588/3600.) / np.cos(np.radians(dec)))
np.testing.assert_almost_equal(vf.offsets[field+'b']['dec'],
dec + 660/3600.)
def test_vphas_filenames():
"""Ensure the right filename is returned for a given band/offs
|
et."""
    assert VphasOffset('1122a').image_filenames['ha'] == 'o20120330_00032.fit'
assert VphasOffset('1122b').image_filenames['ha'] == 'o20120330_00034.fit'
assert VphasOffset('1122c').image_filenames['ha'] == 'o20120330_00033.fit'
assert VphasOffset('1842a').image_filenames['r'] == 'o20130314_00061.fit'
assert VphasOffset('1842b').image_filenames['r'] == 'o20130314_00062.fit'
assert VphasOffset('0765a').image_filenames['g'] == 'o20130413_00024.fit'
assert VphasOffset('0765b').image_filenames['g'] == 'o20130413_00026.fit'
assert VphasOffset('0765c').image_filenames['g'] == 'o20130413_00025.fit'
if __name__ == '__main__':
test_vphas_filenames()
|
agoravoting/agora-tools
|
import_election_csv.py
|
Python
|
agpl-3.0
| 18,685
| 0.005516
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of agora-tools.
# Copyright (C) 2014-2016 Agora Voting SL <[email protected]>
# agora-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora-tools. If not, see <http://www.gnu.org/licenses/>.
import json
import csv
import os
import copy
import operator
import argparse
from datetime import datetime, timedelta
from utils.csvblocks import csv_to_blocks
from utils.json_serialize import serialize
def iget(d, key, default):
'''
Ignore-case get
This function makes a dict.get but there's no need for the key to be
exactly the same as the key in the dict. Before the real **get** we
look into the dict keys and find for this key ignoring the case so the
key param can differ from the dict key in lower or upper cases.
:param d: this is the dict to search in
:param key: this is the key to search
:param default: this is the default value to return if key isn't in the
dict
'''
real_key = key
keyl = key.lower()
for k in d.keys():
if k.lower() == keyl:
real_key = k
return d.get(real_key, default)
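# Illustrative sketch of iget's case-insensitive lookup (the dict values are made up):
assert iget({'Title': 'My election'}, 'title', '') == 'My election'
assert iget({'Title': 'My election'}, 'missing', 'n/a') == 'n/a'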
BASE_ELECTION = {
"id": -1,
"title": "",
"description": "",
"layout": "",
"presentation": {
"share_text": [
{
"network": "Twitter",
"button_text": "",
"social_message": "I have just voted in election __URL__, you can too! #nvotes"
},
{
"network": "Facebook",
"button_text": "",
"social_message": "__URL__"
}
],
"theme": 'default',
"urls": [],
"theme_css": "",
"extra_options": {}
},
"end_date": "",
"start_date": "",
"real": True,
"questions": []
}
BASE_QUESTION = {
"description": "",
"layout": 'simple',
"max": 1,
"min": 0,
"num_winners": 1,
"title": "",
"randomize_answer_order": True,
"tally_type": "plurality-at-large",
"answer_total_votes_percentage": "over-total-votes",
"extra_options": {},
"answers": []
}
BASE_ANSWER = {
"id": -1,
"category": '',
"details": "",
"sort_order": -1,
"urls": [],
"text": ""
}
def parse_int(s):
return int(s)
def parse_list(s):
return s.split(",")
def parse_bool(s):
return s == "TRUE"
def parse_extra(q):
val = dict(
(key.replace("extra: ", ""), value)
for key, value in q.items() if key.startswith("extra: ")
)
if "success_screen__hide_download_ballot_ticket" in val:
val["success_screen__hide_download_ballot_ticket"]
|
= parse_bool(
val["success_screen__hide_download_ballot_ticket"]
)
if "shuffle_category_list" in val:
val["shuffle_category_list"] = parse_list(val["shuffle_category_list"])
if "shuffle_categories" in val:
val["shuffle_categories"] = parse_bool(val["shuffle_categories"])
if "shuffle_all_options" in val:
val["shuffle_all_options"] = parse_bool(val["shuffle_all_options"])
if "select_all_category_clicks" in val:
val["select_all_category_clicks"] = parse_int(val["select_all_category_clicks"])
if "answer_group_columns_size" in val:
val["answer_group_columns_size"] = parse_int(val["answer_group_columns_size"])
if "answer_columns_size" in val:
val["answer_columns_size"] = parse_int(val["answer_columns_size"])
return val
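# Illustrative sketch: parse_extra keeps only the "extra: " columns, strips the
# prefix and coerces the known keys; the 'Title' column is ignored because it
# lacks the prefix (the column values here are made up).
assert parse_extra({'Title': 'Q1', 'extra: shuffle_all_options': 'TRUE'}) == {'shuffle_all_options': True}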
def blocks_to_election(blocks, config, add_to_id=0):
'''
Parses a list of blocks into an election
'''
# convert blocks into a more convenient structure
election = blocks[0]['values']
blocks.pop(0)
questions = []
def get_answer_id(answer):
return answer['Id']
def get_description(answer):
return answer.get('Description', '').replace('\n', '<br/>')
def get_url(key, value):
if key in ['Gender', 'Tag', 'Support']:
return "https://agoravoting.com/api/%s/%s" % (key.lower(), value)
elif value.startswith('http://') or value.startswith('https://'):
return value.strip()
return key + value.strip()
for question, options in zip(blocks[0::2], blocks[1::2]):
q = question['values']
q['options'] = options['values']
data = {
"description": q.get("Description", ''),
"layout": q.get("Layout", 'simple'),
"max": int(q["Maximum choices"]),
"min": int(q["Minimum choices"]),
"num_winners": int(q["Number of winners"]),
"title": q["Title"],
"randomize_answer_order": parse_bool(q.get("Randomize options order", False)),
"tally_type": q.get("Voting system", "plurality-at-large"),
"answer_total_votes_percentage": q["Totals"],
"extra_options": parse_extra(q),
"answers": [
{
"id": int(get_answer_id(answer)),
"category": answer.get("Category", ''),
"details": get_description(answer),
"sort_order": index,
"urls": [
{
'title': url_key,
'url': get_url(url_key, url_val)
}
for url_key, url_val in answer.items()
if url_key in ['Image URL', 'URL', 'Gender', 'Tag', 'Support'] and\
len(url_val.strip()) > 0
],
"text": answer['Text'],
}
for answer, index in zip(q['options'], range(len(q['options'])))
if len("".join(answer.values()).strip()) > 0
]
}
# check answers
try:
assert len(data['answers']) == len(set(list(map(operator.itemgetter('text'), data['answers']))))
except Exception as e:
print("duplicated options in question '%s':" % q["Title"])
l = list(map(operator.itemgetter('text'), data['answers']))
print(set([x for x in l if l.count(x) > 1]))
raise e
data['max'] = min(data['max'], len(data['answers']))
data['num_winners'] = min(data['num_winners'], len(data['answers']))
for answ in data['answers']:
try:
assert answ['id'] == answ['sort_order']
except:
print(answ)
questions.append(data)
def get_def(dictionary, key, default_value):
if key not in dictionary or len(dictionary[key]) == 0:
return default_value
return dictionary[key]
start_date = datetime.strptime("10/10/2015 10:10", "%d/%m/%Y %H:%M")
if len(election["Start date time"]) > 0:
try:
start_date = datetime.strptime(election["Start date time"], "%d/%m/%Y %H:%M:%S")
except:
start_date = datetime.strptime(election["Start date time"], "%d/%m/%Y %H:%M")
ret = {
"id": int(election['Id']) + add_to_id,
"authorities": config['authorities'],
"director": config['director'],
"title": election['Title'],
"description": election['Description'],
"layout": election.get('Layout', ''),
"presentation": {
"share_text": [
{
"network": "Twitter",
"button_text": "",
"social_message": election.get('Share Text', '')
},
{
"network": "Facebook",
"button_text": "",
"social_message": "__URL__"
}
],
"theme": election.get('Theme', 'default'),
"urls": [],
"theme_css": "",
"extra_optio
|
jmesteve/saas3
|
openerp/addons/base_report_designer/base_report_designer.py
|
Python
|
agpl-3.0
| 3,471
| 0.007491
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from StringIO import StringIO
from openerp.modules.module import get_module_resource
import openerp.modules.registry
from openerp.osv import osv
from openerp_sxw2rml import sxw2rml
class report_xml(osv.osv):
_inherit = 'ir.actions.report.xml'
def sxwtorml(self, cr, uid, file_sxw, file_type):
'''
The use of this function is to get rml file from sxw file.
'''
sxwval = StringIO(base64.decodestring(file_sxw))
if file_type=='sxw':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_oo2rml.xsl'),'rb')
if file_type=='odt':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_odt2rml.xsl'),'rb')
return {'report_rml_content': str(sxw2rml(sxwval, xsl=fp.read()))}
def upload_report(self, cr, uid, report_id, file_sxw, file_type, context=None):
'''
Untested function
'''
sxwval = StringIO(base64.decodestring(file_sxw))
if file_type=='sxw':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_oo2rml
|
.xsl'),'rb')
if file_type=='odt':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_odt2rml.xsl'),'rb')
report = self.pool['ir.action
|
s.report.xml'].write(cr, uid, [report_id], {
'report_sxw_content': base64.decodestring(file_sxw),
'report_rml_content': str(sxw2rml(sxwval, xsl=fp.read())),
})
return True
def report_get(self, cr, uid, report_id, context=None):
if context is None:
context = {}
# skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
context.update(bin_raw=True)
report = self.browse(cr, uid, report_id, context=context)
sxw_data = report.report_sxw_content
rml_data = report.report_rml_content
if isinstance(sxw_data, unicode):
sxw_data = sxw_data.encode("iso-8859-1", "replace")
if isinstance(rml_data, unicode):
rml_data = rml_data.encode("iso-8859-1", "replace")
return {
'file_type' : report.report_type,
'report_sxw_content': sxw_data and base64.encodestring(sxw_data) or False,
'report_rml_content': rml_data and base64.encodestring(rml_data) or False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sixu05202004/newsmeme
|
newsmeme/newsmeme/models/posts.py
|
Python
|
bsd-3-clause
| 10,746
| 0.000279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from datetime import datetime
from werkzeug import cached_property
from flask import url_for, Markup
from flask.ext.sqlalchemy import BaseQuery
from flask.ext.principal import Permission, UserNeed, Denial
from newsmeme.extensions import db
from newsmeme.helpers import slugify, domain, markdown
from newsmeme.permissions import auth, moderator
from newsmeme.models.types import DenormalizedText
from newsmeme.models.users import User
class PostQuery(BaseQuery):
def jsonify(self):
for post in self.all():
yield post.json
def as_list(self):
"""
Return restricted list of columns for list queries
"""
deferred_cols = ("description",
"tags",
"author.email",
"author.password",
"author.activation_key",
"author.openid",
"author.date_joined",
"author.receive_email",
"author.email_alerts",
"author.followers",
"author.following")
options = [db.defer(col) for col in deferred_cols]
return self.options(*options)
def deadpooled(self):
return self.filter(Post.score <= 0)
def popular(self):
return self.filter(Post.score > 0)
def hottest(self):
        return self.order_by(Post.num_comments.desc(),
                             Post.score.desc(),
Post.id.desc())
def public(self):
return self.filter(Post.access == Post.PUBLIC)
def restricted(self, user=None):
"""
Returns posts filtered for a) public posts b) posts authored by
the user or c) posts authored by friends
"""
if user and user.is_moderator:
return self
criteria = [Post.access == Post.PUBLIC]
if user:
criteria.append(Post.author_id == user.id)
if user.friends:
criteria.append(db.and_(Post.access == Post.FRIENDS,
Post.author_id.in_(user.friends)))
return self.filter(reduce(db.or_, criteria))
def search(self, keywords):
criteria = []
for keyword in keywords.split():
keyword = '%' + keyword + '%'
criteria.append(db.or_(Post.title.ilike(keyword),
Post.description.ilike(keyword),
Post.link.ilike(keyword),
Post.tags.ilike(keyword),
User.username.ilike(keyword)))
q = reduce(db.and_, criteria)
return self.filter(q).join(User).distinct()
class Post(db.Model):
__tablename__ = "posts"
PUBLIC = 100
FRIENDS = 200
PRIVATE = 300
PER_PAGE = 40
query_class = PostQuery
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer,
db.ForeignKey(User.id, ondelete='CASCADE'),
nullable=False)
title = db.Column(db.Unicode(200))
description = db.Column(db.UnicodeText)
link = db.Column(db.String(250))
date_created = db.Column(db.DateTime, default=datetime.utcnow)
score = db.Column(db.Integer, default=1)
num_comments = db.Column(db.Integer, default=0)
votes = db.Column(DenormalizedText)
access = db.Column(db.Integer, default=PUBLIC)
_tags = db.Column("tags", db.UnicodeText)
author = db.relation(User, innerjoin=True, lazy="joined")
__mapper_args__ = {'order_by': id.desc()}
class Permissions(object):
def __init__(self, obj):
self.obj = obj
@cached_property
def default(self):
return Permission(UserNeed(self.obj.author_id)) & moderator
@cached_property
def view(self):
if self.obj.access == Post.PUBLIC:
return Permission()
if self.obj.access == Post.FRIENDS:
needs = [UserNeed(user_id) for user_id in
self.obj.author.friends]
return self.default & Permission(*needs)
return self.default
@cached_property
def edit(self):
return self.default
@cached_property
def delete(self):
return self.default
@cached_property
def vote(self):
needs = [UserNeed(user_id) for user_id in self.obj.votes]
needs.append(UserNeed(self.obj.author_id))
return auth & Denial(*needs)
@cached_property
def comment(self):
return auth
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.votes = self.votes or set()
self.access = self.access or self.PUBLIC
def __str__(self):
return self.title
def __repr__(self):
return "<%s>" % self
@cached_property
def permissions(self):
return self.Permissions(self)
def vote(self, user):
self.votes.add(user.id)
def _get_tags(self):
return self._tags
def _set_tags(self, tags):
self._tags = tags
if self.id:
# ensure existing tag references are removed
d = db.delete(post_tags, post_tags.c.post_id == self.id)
db.engine.execute(d)
for tag in set(self.taglist):
slug = slugify(tag)
tag_obj = Tag.query.filter(Tag.slug == slug).first()
if tag_obj is None:
tag_obj = Tag(name=tag, slug=slug)
db.session.add(tag_obj)
if self not in tag_obj.posts:
tag_obj.posts.append(self)
tags = db.synonym("_tags", descriptor=property(_get_tags, _set_tags))
@property
def taglist(self):
if self.tags is None:
return []
tags = [t.strip() for t in self.tags.split(",")]
return [t for t in tags if t]
@cached_property
def linked_taglist(self):
"""
Returns the tags in the original order and format,
with link to tag page
"""
return [(tag, url_for('frontend.tag',
slug=slugify(tag)))
for tag in self.taglist]
@cached_property
def domain(self):
if not self.link:
return ''
return domain(self.link)
@cached_property
def json(self):
"""
Returns dict of safe attributes for passing into
a JSON request.
"""
return dict(post_id=self.id,
score=self.score,
title=self.title,
link=self.link,
description=self.description,
num_comments=self.num_comments,
author=self.author.username)
@cached_property
def access_name(self):
return {
Post.PUBLIC: "public",
Post.FRIENDS: "friends",
Post.PRIVATE: "private"
}.get(self.access, "public")
def can_access(self, user=None):
if self.access == self.PUBLIC:
return True
if user is None:
return False
if user.is_moderator or user.id == self.author_id:
return True
return self.access == self.FRIENDS and self.author_id in user.friends
@cached_property
def comments(self):
"""
Returns comments in tree. Each parent comment has a "comments"
attribute appended and a "depth" attribute.
"""
from newsmeme.models.comments import Comment
comments = Comment.query.filter(Comment.post_id == self.id).all()
def _get_comments(parent, depth):
parent.comments = []
parent.depth = depth
for comment in comments:
if comment.parent_id == parent.id:
parent.comments.append(comment)
_get_comments(comment, depth + 1)
parents = [c for c i
|
tensorflow/agents
|
tf_agents/policies/random_py_policy_test.py
|
Python
|
apache-2.0
| 5,609
| 0.001783
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.utils.random_py_policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.policies import random_py_policy
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import test_utils
class RandomPyPolicyTest(test_utils.TestCase):
def setUp(self):
super(RandomPyPolicyTest, self).setUp()
self._time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32))
    self._time_step = time_step.restart(observation=np.array([1]))
def testGeneratesActions(self):
action_spec = [
        array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
]
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec, action_spec=action_spec)
action_step = policy.action(self._time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def testGeneratesBatchedActions(self):
action_spec = [
array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
]
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec,
action_spec=action_spec,
outer_dims=(3,))
action_step = policy.action(self._time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertEqual((3, 2, 3), action_step.action[0].shape)
self.assertEqual((3, 1, 2), action_step.action[1].shape)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def testGeneratesBatchedActionsWithoutSpecifyingOuterDims(self):
action_spec = [
array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
]
time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32))
policy = random_py_policy.RandomPyPolicy(
time_step_spec=time_step_spec, action_spec=action_spec)
action_step = policy.action(
time_step.restart(np.array([[1], [2], [3]], dtype=np.int32)))
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertEqual((3, 2, 3), action_step.action[0].shape)
self.assertEqual((3, 1, 2), action_step.action[1].shape)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def testPolicyStateSpecIsEmpty(self):
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec, action_spec=[])
self.assertEqual(policy.policy_state_spec, ())
def testMasking(self):
batch_size = 1000
time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32))
action_spec = array_spec.BoundedArraySpec((), np.int64, -5, 5)
# We create a fixed mask here for testing purposes. Normally the mask would
# be part of the observation.
mask = [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0]
np_mask = np.array(mask)
batched_mask = np.array([mask for _ in range(batch_size)])
policy = random_py_policy.RandomPyPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
observation_and_action_constraint_splitter=(
lambda obs: (obs, batched_mask)))
my_time_step = time_step.restart(time_step_spec, batch_size)
action_step = policy.action(my_time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
# Sample from the policy 1000 times, and ensure that actions considered
# invalid according to the mask are never chosen.
action_ = self.evaluate(action_step.action)
self.assertTrue(np.all(action_ >= -5))
self.assertTrue(np.all(action_ <= 5))
self.assertAllEqual(np_mask[action_ - action_spec.minimum],
np.ones([batch_size]))
# Ensure that all valid actions occur somewhere within the batch. Because we
# sample 1000 times, the chance of this failing for any particular action is
# (2/3)^1000, roughly 1e-176.
for index in range(action_spec.minimum, action_spec.maximum + 1):
if np_mask[index - action_spec.minimum]:
self.assertIn(index, action_)
if __name__ == '__main__':
test_utils.main()
|
DasIch/editor
|
prototypes/regex/tests.py
|
Python
|
bsd-3-clause
| 17,417
| 0.000172
|
# coding: utf-8
"""
regex.tests
~~~~~~~~~~~
:copyright: 2012 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst
"""
from unittest import TestCase
from itertools import izip
from contextlib import contextmanager
from regex.parser import (
parse, ParserError, Parser, DEFAULT_ALPHABET, DEFAULT_LANGUAGE
)
from regex.ast import (
Epsilon, Character, Concatenation, Union, Repetition, Group, Either,
Neither, Range, Any
)
from regex.matcher import Find, Span
from regex.tokenizer import Tokenizer, Token, TokenizerError
class TestParser(TestCase):
def test_epsilon(self):
self.assertEqual(parse(u""), Epsilon())
def test_character(self):
self.assertEqual(parse(u"a"), Character(u"a"))
def test_concatenation(self):
self.assertEqual(
parse(u"ab"),
Concatenation(Character(u"a"), Character(u"b"))
)
def test_union(self):
self.assertEqual(
parse(u"a|b"),
Union(Character(u"a"), Character(u"b"))
)
def test_zero_or_more(self):
self.assertEqual(
parse(u"a*"),
Repetition(Character(u"a"))
)
def test_zero_or_more_missing_repeatable(self):
with self.assertRaises(ParserError) as context:
parse(u"*")
exception = context.exception
self.assertEqual(
exception.reason,
u"* is not preceded by a repeatable expression"
)
self.assertEqual(exception.annotation, (
u"*\n"
u"^"
))
def test_one_or_more(self):
self.assertEqual(
parse(u"a+"),
Concatenation(Character(u"a"), Repetition(Character(u"a")))
)
def test_one_or_more_missing_repeatable(self):
with self.assertRaises(ParserError) as context:
parse(u"+")
exception = context.exception
self.assertEqual(
exception.reason,
u"+ is not preceded by a repeatable expression",
)
self.assertEqual(
exception.annotation,
(
u"+\n"
u"^"
)
)
def test_group(self):
self.assertEqual(
parse(u"(a)"),
Group(Character(u"a"))
)
def test_group_missing_begin(self):
with self.assertRaises(ParserError) as context:
parse(u"a)")
exception = context.exception
self.assertEqual(
exception.reason,
u"found unmatched )"
)
self.assertEqual(
exception.annotation,
(
u"a)\n"
u" ^"
)
)
def test_group_missing_end(self):
with self.assertRaises(ParserError) as context:
parse(u"(a")
exception = context.exception
self.assertEqual(
exception.reason,
u"unexpected end of string, expected ) corresponding to ("
)
self.assertEqual(
exception.annotation,
(
u"(a\n"
u"^-^"
)
)
def test_either(self):
self.assertEqual(
parse(u"[ab]"),
Either(frozenset(map(Character, u"ab")))
)
def test_either_missing_begin(self):
with self.assertRaises(ParserError) as context:
parse(u"ab]")
exception = context.exception
self.assertEqual(
exception.reason,
u"found unmatched ]"
)
self.assertEqual(
exception.annotation,
(
u"ab]\n"
u" ^"
)
)
def test_either_missing_end(self):
with self.assertRaises(ParserError) as context:
parse(u"[ab")
exception = context.exception
self.assertEqual(
exception.reason,
u"unexpected end of string, expected ] corresponding to ["
)
self.assertEqual(
exception.annotation,
(
u"[ab\n"
u"^--^"
)
)
def test_neither(self):
self.assertEqual(
parse(u"[^ab]"),
Neither(frozenset(map(Character, u"ab")), DEFAULT_ALPHABET)
)
def test_range(self):
self.assertEqual(
parse(u"[a-c]"),
Either(frozenset([Range(
Character(u"a"),
Character(u"c"),
DEFAULT_ALPHABET
)]))
)
def test_range_missing_start(self):
with self.assertRaises(ParserError) as context:
parse(u"[-c]")
exception = context.exception
self.assertEqual(exception.reason, u"range is missing start")
self.assertEqual(
exception.annotation,
(
u"[-c]\n"
u"^"
)
)
def test_range_missing_end(self):
with self.assertRaises(ParserError) as context:
parse(u"[a-]")
exception = context.exception
self.assertEqual(
exception.reason,
u"expected character, found instructio
|
n: ]"
)
self.assertEqual(
exception.annotation,
(
u"[a-]\n"
u" ^"
)
)
def test_any(self):
parser = Parser(DEFAULT_LANGUAGE, alphabet=frozenset(u"ab"))
self.assertEqual(
parser.parse(u"."),
Any(frozenset(u"ab"))
)
class RegexTestWrapper(object):
def __init__(self, regex):
        self.regex = regex
self.ast = parse(regex)
@property
def nfa(self):
if not hasattr(self, "_nfa"):
self._nfa = self.ast.to_nfa()
return self._nfa
@property
def dfa(self):
if not hasattr(self, "_dfa"):
self._dfa = self.ast.to_dfa()
return self._dfa
@property
def dfa_table(self):
if not hasattr(self, "_dfa_table"):
self._dfa_table = self.dfa.to_dfa_table()
return self._dfa_table
@property
def matchers(self):
if hasattr(self, "_matchers"):
return self._matchers
return self._iter_matchers()
def _iter_matchers(self):
self._matchers = []
matcher = lambda x: self._matchers.append(x) or x
yield matcher(self.nfa)
yield matcher(self.dfa)
yield matcher(self.dfa_table)
def assertMatches(self, string, expected_end):
for matcher in self.matchers:
end = matcher.match(string)
assert end == expected_end, end
def assertAllMatches(self, matches):
for string, end in matches:
self.assertMatches(string, end)
def assertNotMatches(self, string):
for matcher in self.matchers:
end = matcher.match(string)
assert end is None, end
def assertNotMatchesAny(self, strings):
for string in strings:
self.assertNotMatches(string)
def assertFindEqual(self, string, span):
for matcher in self.matchers:
find = matcher.find(string)
assert find == Find(string, span), find
def assertAllFinds(self, finds):
for string, span in finds:
self.assertFindEqual(string, span)
def assertFindAllEqual(self, string, spans):
for matcher in self.matchers:
finds = matcher.find_all(string)
for find, span in izip(finds, spans):
assert find == Find(string, span), find
try:
find = finds.next()
raise AssertionError("unexpected find: %r" % find)
except StopIteration:
pass
def assertSub(self, string, sub, expected_result):
for matcher in self.matchers:
result = matcher.subn(string, sub)
assert result == expected_result, result
assert matcher.sub(string, sub) == expected_result[0]
class TestMatcher(TestCase):
compilers = ["to_nfa", "to_dfa", "to_dfa_table"]
@contextmanager
def regex(self, regex):
yield Regex
|
ilv/gettor
|
gettor/smtp.py
|
Python
|
bsd-3-clause
| 17,953
| 0.000501
|
# -*- coding: utf-8 -*-
#
# This file is part of GetTor, a Tor Browser distribution system.
#
# :authors: Israel Leiva <[email protected]>
# see also AUTHORS file
#
# :copyright: (c) 2008-2014, The Tor Project, Inc.
# (c) 2014, Israel Leiva
#
# :license: This is Free Software. See LICENSE for license information.
import os
import re
import sys
import time
import email
import gettext
import logging
import smtplib
import datetime
import ConfigParser
from email import Encoders
from email.MIMEBase import MIMEBase
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
import core
import utils
import blacklist
"""SMTP module for processing email requests."""
OS = {
'osx': 'Mac OS X',
'linux': 'Linux',
'windows': 'Windows'
}
class ConfigError(Exception):
pass
class AddressError(Exception):
pass
class SendEmailError(Exception):
pass
class InternalError(Exception):
pass
class SMTP(object):
"""Receive and reply requests by email.
Public methods:
process_email(): Process the email received.
Exceptions:
ConfigError: Bad configuration.
AddressError: Address of the sender malformed.
SendEmailError: SMTP server not responding.
InternalError: Something went wrong internally.
"""
def __init__(self, cfg=None):
"""Create new object by reading a configuration file.
:param: cfg (string) path of the configuration file.
"""
default_cfg = 'smtp.cfg'
config = ConfigParser.ConfigParser()
if cfg is None or not os.path.isfile(cfg):
cfg = default_cfg
try:
with open(cfg) as f:
config.readfp(f)
except IOError:
raise ConfigError("File %s not found!" % cfg)
try:
self.our_domain = config.get('general', 'our_domain')
self.mirrors = config.get('general', 'mirrors')
self.i18ndir = config.get('i18n', 'dir')
logdir = config.get('log', 'dir')
logfile = os.path.join(logdir, 'smtp.log')
loglevel = config.get('log', 'level')
blacklist_cfg = config.get('blacklist', 'cfg')
self.bl = blacklist.Blacklist(blacklist_cfg)
self.bl_max_req = config.get('blacklist', 'max_requests')
self.bl_max_req = int(self.bl_max_req)
self.bl_wait_time = config.get('blacklist', 'wait_time')
self.bl_wait_time = int(self.bl_wait_time)
core_cfg = config.get('general', 'core_cfg')
self.core = core.Core(core_cfg)
except ConfigParser.Error as e:
raise ConfigError("Configuration error: %s" % str(e))
except blacklist.ConfigError as e:
raise InternalError("Blacklist error: %s" % str(e))
except core.ConfigError as e:
raise InternalError("Core error: %s" % str(e))
# logging
log = logging.getLogger(__name__)
logging_format = utils.get_logging_format()
date_format = utils.get_date_format()
formatter = logging.Formatter(logging_format, date_format)
log.info('Redirecting SMTP logging to %s' % logfile)
logfileh = logging.FileHandler(logfile, mode='a+')
logfileh.setFormatter(formatter)
logfileh.setLevel(logging.getLevelName(loglevel))
log.addHandler(logfileh)
# stop logging on stdout from now on
log.propagate = False
self.log = log
def _is_blacklisted(self, addr):
"""Check if a user is blacklisted.
:param: addr (string) the hashed address of the user.
:return: true is the address is blacklisted, false otherwise.
"""
try:
self.bl.is_blacklisted(
addr, 'SMTP', self.bl_max_req, self.bl_wait_time
)
return False
except blacklist.BlacklistError as e:
return True
def _get_lc(self, addr):
"""Get the locale from an email address.
Process the email received and look for the locale in the recipient
address (e.g. [email protected]). If no locale found, english
by default.
:param: (string) the email address we want to get the locale from.
:return: (string) the locale (english if none).
"""
# if no match found, english by default
lc = 'en'
# look for [email protected]
m = re.match('gettor\+(\w\w)@\w+\.\w+', addr)
if m:
# we found a request for locale lc
lc = "%s" % m.groups()
return lc.lower()
def _get_normalized_address(self, addr):
"""Get normalized address.
We look for anything inside the last '<' and '>'. Code taken from
the old GetTor (utils.py).
:param: addr (string) the address we want to normalize.
:raise: AddressError if the address can't be normalized.
:return: (string) the normalized address.
"""
if '<' in addr:
idx = addr.rindex('<')
addr = addr[idx:]
m = re.search(r'<([^>]*)>', addr)
if m is None:
# malformed address
raise AddressError("Couldn't extract normalized address "
"from %s" % self_get_sha256(addr))
addr = m.group(1)
return addr
def _get_content(self, email):
"""Get the body content of an email.
:param: email (object) the email object to extract the content from.
:return: (string) body of the message.
"""
# get the body content of the email
maintype = email.get_content_maintype()
if maintype == 'multipart':
for part in email.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload()
elif maintype == 'text':
return email.get_payload()
def _get_msg(self, msgid, lc):
"""Get message identified by msgid in a specific locale.
:param: msgid (string) the identifier of a string.
:param: lc (string) the locale.
:return: (string) the message from the .po file.
"""
# obtain the content in the proper language
try:
t = gettext.translation(lc, self.i18ndir, languages=[lc])
_ = t.ugettext
msgstr = _(msgid)
return msgstr
except IOError as e:
raise ConfigError("%s" % str(e))
    def _parse_email(self, msg, addr):
"""Parse the email received.
Get the locale and parse the text for the rest of the info.
:param: msg (string) the content of the email to be parsed.
:param: addr (string) the address of the recipient (i.e. us).
        :return: (list) 4-tuple with locale, os and type of request.
"""
req = self._parse_text(msg)
lc = self._get_lc(addr)
supported_lc = self.core.get_supported_lc()
if lc in supported_lc:
req['lc'] = lc
else:
req['lc'] = 'en'
return req
def _parse_text(self, msg):
"""Parse the text part of the email received.
Try to figure out what the user is asking, namely, the type
of request, the package and os required (if applies).
:param: msg (string) the content of the email to be parsed.
:return: (list) 3-tuple with the type of request, os and pt info.
"""
# by default we asume the request is asking for help
req = {}
req['type'] = 'help'
req['os'] = None
# core knows what OS are supported
supported_os = self.core.get_supported_os()
# search for OS or mirrors request
# if nothing is found, help by default
found_request = False
words = re.split('\s+', msg.strip())
for word in words:
if not found_request:
# OS first
for os in supported_os:
if re.match(os, word, re.IGNORECASE):
req['os']
|
yuxng/Deep_ISM
|
ISM/lib/ism/config.py
|
Python
|
mit
| 10,824
| 0.001663
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
import math
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
# region proposal network (RPN) or not
__C.IS_RPN = False
__C.FLIP_X = False
__C.INPUT = 'COLOR'
# multiscale training and testing
__C.IS_MULTISCALE = True
__C.IS_EXTRAPOLATING = True
#
__C.REGION_PROPOSAL = 'RPN'
__C.NET_NAME = 'CaffeNet'
__C.SUBCLS_NAME = 'voxel_exemplars'
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.VISUALIZE = False
__C.TRAIN.VERTEX_REG = False
__C.TRAIN.GRID_SIZE = 256
__C.TRAIN.CHROMATIC = False
# Scales to compute real features
__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# The number of scales per octave in the image pyramid
# An octave is the set of scales up to half of the initial scale
__C.TRAIN.NUM_PER_OCTAVE = 4
# parameters for ROI generating
__C.TRAIN.SPATIAL_SCALE = 0.0625
__C.TRAIN.KERNEL_SIZE = 5
# Aspect ratio to use during training
__C.TRAIN.ASPECTS = (1, 0.75, 0.5, 0.25)
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = (0.5,)
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = (0.5,)
__C.TRAIN.BG_THRESH_LO = (0.1,)
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = (0.5,)
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Train using subclasses
__C.TRAIN.SUBCLS = True
# Train using viewpoint
__C.TRAIN.VIEWPOINT = False
# Threshold of ROIs in training RCNN
__C.TRAIN.ROI_THRESHOLD = 0.1
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
__C.TRAIN.RPN_BASE_SIZE = 16
__C.TRAIN.RPN_ASPECTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3] # 7 aspects
__C.TRAIN.RPN_SCALES = [2, 2.82842712, 4, 5.65685425, 8, 11.3137085, 16, 22.627417, 32, 45.254834] # 2**np.arange(1, 6, 0.5), 10 scales
#
# Testing options
#
__C.TEST = edict()
__C.TEST.IS_PATCH = False;
__C.TEST.VERTEX_REG = False
__C.TEST.VISUALIZE = False
# Scales to compute real features
__C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# The number of scales per octave in the image pyramid
# An octave is the set of scales up to half of the initial scale
__C.TEST.NUM_PER_OCTAVE = 4
# Aspect ratio to use during testing
__C.TEST.ASPECTS = (1, 0.75, 0.5, 0.25)
# parameters for ROI generating
__C.TEST.SPATIAL_SCALE = 0.0625
__C.TEST.KERNEL_SIZE = 5
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.5
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Test using subclass
__C.TEST.SUBCLS = True
# Train using viewpoint
__C.TEST.VIEWPOINT = False
# Threshold of ROIs in testing
__C.TEST.ROI_THRESHOLD = 0.1
__C.TEST.ROI_THRESHOLD_NUM = 80000
__C.TEST.ROI_NUM = 2000
__C.TEST.DET_THRESHOLD = 0.0001
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
"""Return the directory where experimental artifacts are placed.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
path = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is None:
return path
else:
return osp.join(path, net.name)
def _add_more_info(is_train):
# compute all the scales
if is_train:
scales_base = __C.TRAIN.SCALES_BASE
num_per_octave = __C.TRAIN.NUM_PER_OCTAVE
else:
scales_base = __C.TEST.SCALES_BASE
num_per_octave = __C.TEST.NUM_PER_OCTAVE
num_scale_base = len(scales_base)
num = (num_scale_base - 1) * num_per_octave + 1
scales = []
for i in xrange(num):
index_scale_base = i / num_per_octave
sbase = scales_base[index_scale_base]
j = i % num_per_octave
if j == 0:
scales.append(sbase)
else:
sbase_next = scales_base[index_scale_base+1]
step = (sbase_next - sbase) / num_per_octave
scales.append(sbase + j * step)
if is_train:
__C.TRAIN.SCALES = scales
else:
__C.TEST.SCALES = scales
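# Sanity sketch of the interpolation above: with the default SCALES_BASE (5 entries)
# and NUM_PER_OCTAVE (4), _add_more_info produces (5 - 1) * 4 + 1 = 17 scales per
# phase (Python 2, like the rest of this module).
if __name__ == '__main__':
    _add_more_info(True)
    assert len(__C.TRAIN.SCALES) == 17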
|
EArmour/pyfibot
|
pyfibot/modules/module_spotify.py
|
Python
|
bsd-3-clause
| 1,777
| 0.005627
|
"""
Parse spotify URLs
"""
from __future__ import unicode_literals
import re
import logging
log = logging.getLogger('spotify')
def handle_privmsg(bot, user, channel, args):
"""Grab Spotify URLs from the messages and handle them"""
m = re.match(".*(http:\/\/open.spotify.com\/|spotify:)(?P<item>album|artist|track|user[:\/]\S+[:\/]playlist)[:\/](?P<id>[a-zA-Z0-9]+)\/?.*", args)
if not m:
return None
spotify_id = m.group('id')
    item = m.group('item').replace(':', '/').split('/')
item[0] += 's'
if item[0] == 'users':
# All playlists seem to return 401 at the time, even the public ones
return None
apiurl = "https://api.spotify.com/v1/%s/%s" % ('/'.join(item), spotify_id)
r = bot.get_url(apiurl)
if r.status_code != 200:
if r.status_code not in [401, 403]:
            log.warning('Spotify API returned %s while trying to fetch %s' % (r.status_code, apiurl))
return
data = r.json()
title = '[Spotify] '
if item[0] in ['albums', 'tracks']:
artists = []
for artist in data['artists']:
artists.append(artist['name'])
title += ', '.join(artists)
if item[0] == 'albums':
title += ' - %s (%s)' % (data['name'], data['release_date'])
if item[0] == 'artists':
title += data['name']
genres_n = len(data['genres'])
if genres_n > 0:
genitive = 's' if genres_n > 1 else ''
genres = data['genres'][0:4]
        more = ' +%s more' % (genres_n - 5) if genres_n > 4 else ''
title += ' (Genre%s: %s%s)' % (genitive, ', '.join(genres), more)
if item[0] == 'tracks':
title += ' - %s - %s' % (data['album']['name'], data['name'])
return bot.say(channel, title)
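# Illustrative sketch of the URL matching above (the track URI is made up but
# well-formed): "spotify:track:<id>" yields item 'track' and the bare id.
_m = re.match(".*(http:\/\/open.spotify.com\/|spotify:)(?P<item>album|artist|track|user[:\/]\S+[:\/]playlist)[:\/](?P<id>[a-zA-Z0-9]+)\/?.*",
              "spotify:track:6rqhFgbbKwnb9MLmUQDhG6")
assert _m and _m.group('item') == 'track' and _m.group('id') == '6rqhFgbbKwnb9MLmUQDhG6'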
|
hunch/hunch-gift-app
|
django/contrib/comments/templatetags/comments.py
|
Python
|
mit
| 12,178
| 0.006077
|
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib import comments
from django.utils.encoding import smart_unicode
register = template.Library()
class BaseCommentNode(template.Node):
"""
Base helper class (abstract) for handling the get_comment_* template tags.
Looks a bit strange, but the subclasses below should make this a bit more
obvious.
"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to parse get_comment_list/count/form and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% get_whatever for obj as varname %}
if len(tokens) == 5:
if tokens[3] != 'as':
raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
return cls(
object_expr = parser.compile_filter(tokens[2]),
as_varname = tokens[4],
)
# {% get_whatever for app.model pk as varname %}
elif len(tokens) == 6:
if tokens[4] != 'as':
raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3]),
as_varname = tokens[5]
)
else:
raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])
handle_token = classmethod(handle_token)
#@staticmethod
def lookup_content_type(token, tagname):
try:
app, model = token.split('.')
return ContentType.objects.get(app_label=app, model=model)
except ValueError:
raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
except ContentType.DoesNotExist:
raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model))
lookup_content_type = staticmethod(lookup_content_type)
def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None):
if ctype is None and object_expr is None:
raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.")
self.comment_model = comments.get_model()
self.as_varname = as_varname
self.ctype = ctype
self.object_pk_expr = object_pk_expr
self.object_expr = object_expr
self.comment = comment
def render(self, context):
qs = self.get_query_set(context)
context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
return ''
def get_query_set(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if not object_pk:
return self.comment_model.objects.none()
qs = self.comment_model.objects.filter(
content_type = ctype,
object_pk = smart_unicode(object_pk),
site__pk = settings.SITE_ID,
)
# The is_public and is_removed fields are implementation details of the
# built-in comment model's spam filtering system, so they might not
# be present on a custom comment model subclass. If they exist, we
# should filter on them.
field_names = [f.name for f in self.comment_model._meta.fields]
if 'is_public' in field_names:
qs = qs.filter(is_public=True)
if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) and 'is_removed' in field_names:
qs = qs.filter(is_removed=False)
return qs
def get_target_ctype_pk(self, context):
if self.object_expr:
try:
obj = self.object_expr.resolve(context)
except template.VariableDoesNotExist:
return None, None
return ContentType.objects.get_for_model(obj), obj.pk
else:
return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)
def get_context_value_from_queryset(self, context, qs):
"""Subclasses should override this."""
raise NotImplementedError
class CommentListNode(BaseCommentNode):
"""Insert a list of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return list(qs)
class CommentCountNode(BaseCommentNode):
"""Insert a count of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return qs.count()
class CommentFormNode(BaseCommentNode):
"""Insert a form for the comment model into the context."""
def get_form(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
return comments.get_form()(ctype.get_object_for_this_type(pk=object_pk))
else:
return None
def render(self, context):
context[self.as_varname] = self.get_form(context)
return ''
class RenderCommentFormNode(CommentFormNode):
"""Render the comment form directly"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to parse render_comment_form and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_form for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_form for app.models pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
handle_token = classmethod(handle_token)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/form.html" % (ctype.app_label, ctype.model),
"comments/%s/form.html" % ctype.app_label,
"comm
|
ents/form.html"
]
context.push()
formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context)
context.pop()
return formstr
else:
return ''
class RenderCommentListNode(CommentListNode):
"""Render the comment list directly"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to pa
|
rse render_comment_list and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_list for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_list for app.models pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
handle_token = classmethod(handle_token)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/list.html" % (ctype.app_label, ctype.model),
"comments/%s/list.html" % ctype.app_label,
"comments/list.html"
]
qs = self.
|
wangjiezhe/FetchNovels
|
novel/sources/quanben5.py
|
Python
|
gpl-3.0
| 612
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from novel import serial, utils
BASE_URL = 'http://www.quanben5.com/n/{}/xiaoshuo.html'
class Quanben5(serial.SerialNovel):
def __init__(self, tid):
super().__init__(utils.base_to_url(BASE_URL, tid), '#content',
intro_sel='.description',
chap_type=serial.ChapterType.path,
chap_sel='.list li',
tid=tid)
def get_title_and_author(self):
name = self.doc('h1').text()
author = self.doc('.author').text()
return name, author
|