| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| rdo-management/tuskar | tuskar/tests/templates/test_namespace.py | Python | apache-2.0 | 1,765 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tuskar.templates import namespace
class NamespaceTests(unittest.TestCase):
def test_apply_template_namespace(self):
namespaced = namespace.apply_template_namespace('test-ns', 'test-name')
self.assertEqual(namespaced, 'test-ns::test-name')
self.assertTrue(namespace.matches_template_namespace('test-ns',
namespaced))
def test_remove_template_namespace(self):
stripped = namespace.remove_template_namespace('test-ns::test-name')
self.assertEqual(stripped, 'test-name')
def test_matches_template_namespace(self):
value = 'test-ns::test-name'
self.assertTrue(namespace.matches_template_namespace('test-ns', value))
self.assertFalse(namespace.matches_template_namespace('fake', value))
def test_apply_resource_alias_namespace(self):
namespaced = namespace.apply_resource_alias_namespace('compute')
self.assertEqual(namespaced, 'Tuskar::compute')
def test_remove_resource_alias_namespace(self):
stripped = namespace.remove_resource_alias_namespace(
'Tuskar::controller')
        self.assertEqual(stripped, 'controller')
| QualiSystems/OpenStack-Shell | package/tests/test_cp/test_openstack/test_domain/test_services/test_nova/test_nova_instance_service.py | Python | isc | 27,295 | 0.005166 |
from unittest import TestCase
from mock import Mock
from cloudshell.cp.openstack.domain.services.nova.nova_instance_service import NovaInstanceService
import cloudshell.cp.openstack.domain.services.nova.nova_instance_service as test_nova_instance_service
from cloudshell.cp.openstack.common.driver_helper import CloudshellDriverHelper
from cloudshell.cp.openstack.models.exceptions import CommandCancellationException, InstanceErrorStateException
class TestNovaInstanceService(TestCase):
def setUp(self):
instance_waiter = Mock()
instance_waiter.wait = Mock()
instance_waiter.ACTIVE = 'ACTIVE'
self.instance_service = NovaInstanceService(instance_waiter=instance_waiter)
self.mock_logger = Mock()
self.openstack_session = Mock()
def test_instance_create_empty_openstack_session(self):
test_name = 'test'
result = self.instance_service.create_instance(openstack_session=None,
name=test_name,
reservation=Mock(),
cp_resource_model=Mock(),
deploy_req_model=Mock(),
cancellation_context=Mock(),
logger=self.mock_logger)
self.assertEqual(result, None)
def test_instance_create_success(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
        # mock_client.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_deploy_req_model = Mock()
mock_deploy_req_model.affinity_group_id = ''
mock_deploy_req_model.availability_zone = 'test-avail-zone'
test_nova_instance_service.udev_rules_sh_str = 'test_userdata'
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=mock_deploy_req_model,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.create.assert_called_with(name=test_uniq_name,
image=mock_image,
flavor=mock_flavor,
availability_zone='test-avail-zone',
userdata='test_userdata',
nics=[mock_qnet_dict])
self.assertEquals(result, mocked_inst)
self.instance_service.instance_waiter.wait.assert_called_with(mocked_inst,
state=self.instance_service.instance_waiter.ACTIVE,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
def test_instance_create_cancellation_called(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
        # mock_client.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
self.instance_service.instance_waiter = Mock()
self.instance_service.instance_waiter.wait = Mock(side_effect=CommandCancellationException)
with self.assertRaises(CommandCancellationException):
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=Mock(),
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.delete.assert_called_once_with(mocked_inst)
def test_instance_create_success_affinity_group(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
        # mock_client.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_deploy_req_model = Mock()
mock_deploy_req_model.affinity_group_id = 'test_affinity_group_id'
mock_deploy_req_model.availability_zone = ''
mock_deploy_req_model.auto_udev = False
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=mock_deploy_req_model,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.create.assert_called_with(name=test_uniq_name,
image=mock_image,
flavor=mock_flavor,
nics=[mock_qnet_dict],
scheduler_hints={'group': 'test_affinity_group_id'})
| Alignak-monitoring/alignak | tests/test_macros_resolver.py | Python | agpl-3.0 | 51,044 | 0.005924 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2018: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, [email protected]
#     Grégory Starck, [email protected]
#     Sebastien Coavoux, [email protected]
#     Jean Gabes, [email protected]
# Zoran Zaric, [email protected]
# Gerhard Lausser, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import pytest
from .alignak_test import *
from alignak.macroresolver import MacroResolver
from alignak.commandcall import CommandCall
class MacroResolverTester(object):
def get_hst_svc(self):
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
hst = self._scheduler.hosts.find_by_name("test_host_0")
return (svc, hst)
def test_resolv_simple(self):
"""Test a simple macro resolution
:return:
"""
# These are macros built from a variable declare in alignak.ini file
# ; Some macros for the tests
# $alignak_test_macro=test macro
# _alignak_test_macro2=test macro 2
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO$", [], None, None, None)
assert result == "test macro"
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO2$", [], None, None, None)
assert result == "test macro 2"
# These are macros read from a pack. section of the alignak.ini configuration
result = self.mr.resolve_simple_macros_in_string("$SMTP_SERVER$", [], None, None, None)
assert result == "your_smtp_server_address"
result = self.mr.resolve_simple_macros_in_string("$MAIL_FROM$", [], None, None, None)
assert result == "alignak@monitoring"
# This is a macro built from a variable that is a string
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK$", [], None, None, None)
assert result == "My Alignak"
# This is a macro built from a variable that is a list of strings
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_CONFIG$", [], None, None, None)
assert isinstance(result, string_types)
expected = "[%s]" % ','.join(self.alignak_env.cfg_files)
assert result == expected
# This is a macro built from a dynamic variable
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGFILE$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, self.setup_file))
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGDIR$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, './cfg'))
# This is an empty macro -> ''
result = self.mr.resolve_simple_macros_in_string("$COMMENTDATAFILE$", [], None, None, None)
assert result == ""
# This is a macro built from an Alignak variable - because the variable is prefixed with _
# The macro name is built from the uppercased variable name without the leading
# and trailing underscores: _dist -> $DIST$
result = self.mr.resolve_simple_macros_in_string("$DIST$", [], None, None, None)
assert result == "/tmp"
# Alignak variable interpolated from %(var) is available as a macro
result = self.mr.resolve_simple_macros_in_string("$DIST_ETC$", [], None, None, None)
assert result == "/tmp/etc/alignak"
# # Alignak "standard" variable is not available as a macro
        # # Empty value! todo: Perhaps this should be changed?
        # Sometimes the user is defined as alignak for test purposes and it remains set to this value!
# result = self.mr.resolve_simple_macros_in_string("$USER$", [], None, None, None)
# assert result == ""
def test_resolv_simple_command(self):
"""Test a simple command resolution
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
macros_command = self.mr.resolve_command(svc.check_command, data,
self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == "plugins/test_servicecheck.pl --type=ok --failchance=5% " \
"--previous-state=OK --state-duration=0 " \
"--total-critical-on-host=0 --total-warning-on-host=0 " \
"--hostname test_host_0 --servicedesc test_ok_0"
# @pytest.mark.skip(reason="A macro remains valued where all should be reset to default!")
    def test_args_macro(self):
"""
Test ARGn macros
:return:
"""
print("Initial test macros: %d - %s" % (len(self._scheduler.pushed_conf.__class__.macros),
self._scheduler.pushed_conf.__class__.macros))
print(" - : %s" % (self._scheduler.pushed_conf.__class__.properties['$USER1$']))
        print(" - : %s" % (self._scheduler.pushed_conf.properties['$USER1$']))
print(" - : %s" % (getattr(self._scheduler.pushed_conf, '$USER1$', None)))
for key in self._scheduler.pushed_conf.__class__.macros:
key = self._scheduler.pushed_conf.__class__.macros[key]
if key:
value = getattr(self._scheduler.pushed_conf.properties, key, '')
print(" - %s : %s" % (key, self._scheduler.pushed_conf.properties[key]))
if value:
print("- %s = %s" % (key, value))
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# command_with_args is defined with 5 arguments as:
# $PLUGINSDIR$/command -H $HOSTADDRESS$ -t 9 -u -c $ARG1$
# -a $ARG2$ $ARG3$ $ARG4$ and the last is $ARG5$.
# No arguments are provided - will be valued as empty strings
dummy_call = "command_with_args"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# todo: Test problem is here!
# Whereas we should get:
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# We get:
# assert macros_command == '/var/lib/shinken/libexec/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# Outside the test env, everything is ok ! Because some tests executed before the macro
# do not have the cor
| nachiketmistry/splunk-app-pstack | bin/paginatefields.py | Python | mit | 391 | 0.015345 |
# Version 4.0
import csv
import sys
count = 10
offset = 0
if len(sys.argv) >= 3:
count = int(sys.argv[1])
offset = int(sys.argv[2]) - 1
start = offset*count
start = 1 if start==0 else start
end = start + count
r = csv.reader(sys.stdin)
rows = []
i = 0
for l in r:
rows.append(l[:1] + l[start:end])
i = i + 1
if(i > 1):
csv.writer(sys.stdout).writerows(rows)
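# Worked example of the slicing above (hypothetical arguments, for
# illustration): invoked as "paginatefields.py 10 2", count = 10 and
# offset = 1, so start = 10 and end = 20, and every output row keeps the
# first column plus fields 10..19, i.e. l[:1] + l[10:20]. For page 1,
# start would compute to 0 and is bumped to 1 so that the key column in
# l[:1] is not emitted twice.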
| OpenClovis/ncclient | ncclient/transport/session.py | Python | apache-2.0 | 9,854 | 0.003146 |
# Copyright 2009 Shikhar Bhushan
# Copyright 2014 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import logging
from threading import Thread, Lock, Event
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from ncclient.xml_ import *
from ncclient.capabilities import Capabilities
from ncclient.transport.errors import TransportError, SessionError, SessionCloseError
from ncclient.transport.notify import Notification
logger = logging.getLogger('ncclient.transport.session')
class Session(Thread):
"Base class for use by transport protocol implementations."
def __init__(self, capabilities):
Thread.__init__(self)
self.setDaemon(True)
self._listeners = set()
self._lock = Lock()
self.setName('session')
self._q = Queue()
self._notification_q = Queue()
self._client_capabilities = capabilities
self._server_capabilities = None # yet
self._id = None # session-id
self._timeout = None
self._connected = False # to be set/cleared by subclass implementation
logger.debug('%r created: client_capabilities=%r' %
(self, self._client_capabilities))
self._device_handler = None # Should be set by child class
def _dispatch_message(self, raw):
try:
root = parse_root(raw)
except Exception as e:
device_handled_raw=self._device_handler.handle_raw_dispatch(raw)
if isinstance(device_handled_raw, str):
root = parse_root(device_handled_raw)
elif isinstance(device_handled_raw, Exception):
self._dispatch_error(device_handled_raw)
return
else:
logger.error('error parsing dispatch message: %s' % e)
return
with self._lock:
listeners = list(self._listeners)
for l in listeners:
logger.debug('dispatching message to %r: %s' % (l, raw))
l.callback(root, raw) # no try-except; fail loudly if you must!
def _dispatch_error(self, err):
with self._lock:
listeners = list(self._listeners)
for l in listeners:
logger.debug('dispatching error to %r' % l)
try: # here we can be more considerate with catching exceptions
l.errback(err)
except Exception as e:
logger.warning('error dispatching to %r: %r' % (l, e))
def _post_connect(self):
"Greeting stuff"
init_event = Event()
error = [None] # so that err_cb can bind error[0]. just how it is.
# callbacks
def ok_cb(id, capabilities):
self._id = id
self._server_capabilities = capabilities
init_event.set()
def err_cb(err):
error[0] = err
init_event.set()
self.add_listener(NotificationHandler(self._notification_q))
listener = HelloHandler(ok_cb, err_cb)
self.add_listener(listener)
self.send(HelloHandler.build(self._client_capabilities, self._device_handler))
logger.debug('starting main loop')
self.start()
# we expect server's hello message
if not init_event.wait(self._timeout):
raise SessionCloseError("Session hello timeout")
# received hello message or an error happened
self.remove_listener(listener)
if error[0]:
raise error[0]
#if ':base:1.0' not in self.server_capabilities:
# raise MissingCapabilityError(':base:1.0')
logger.info('initialized: session-id=%s | server_capabilities=%s' %
(self._id, self._server_capabilities))
def add_listener(self, listener):
"""Register a listener that will be notified of incoming messages and
errors.
        :type listener: :class:`SessionListener`
        """
logger.debug('installing listener %r' % listener)
if not isinstance(listener, SessionListener):
raise SessionError("Listener must be a SessionListener type")
with self._lock:
self._listeners.add(listener)
def remove_listener(self, listener):
"""Unregister some listener; ignore if the listener was never
registered.
:type listener: :class:`SessionListener`
"""
logger.debug('discarding listener %r' % listener)
with self._lock:
self._listeners.discard(listener)
def get_listener_instance(self, cls):
"""If a listener of the specified type is registered, returns the
instance.
:type cls: :class:`SessionListener`
"""
with self._lock:
for listener in self._listeners:
if isinstance(listener, cls):
return listener
def connect(self, *args, **kwds): # subclass implements
raise NotImplementedError
def run(self): # subclass implements
raise NotImplementedError
def send(self, message):
"""Send the supplied *message* (xml string) to NETCONF server."""
if not self.connected:
raise TransportError('Not connected to NETCONF server')
logger.debug('queueing %s' % message)
self._q.put(message)
def scp(self):
raise NotImplementedError
### Properties
def take_notification(self, block, timeout):
try:
return self._notification_q.get(block, timeout)
except Empty:
return None
@property
def connected(self):
"Connection status of the session."
return self._connected
@property
def client_capabilities(self):
"Client's :class:`Capabilities`"
return self._client_capabilities
@property
def server_capabilities(self):
"Server's :class:`Capabilities`"
return self._server_capabilities
@property
def id(self):
"""A string representing the `session-id`. If the session has not been initialized it will be `None`"""
return self._id
class SessionListener(object):
"""Base class for :class:`Session` listeners, which are notified when a new
NETCONF message is received or an error occurs.
.. note::
Avoid time-intensive tasks in a callback's context.
"""
def callback(self, root, raw):
"""Called when a new XML document is received. The *root* argument allows the callback to determine whether it wants to further process the document.
Here, *root* is a tuple of *(tag, attributes)* where *tag* is the qualified name of the root element and *attributes* is a dictionary of its attributes (also qualified names).
*raw* will contain the XML document as a string.
"""
raise NotImplementedError
def errback(self, ex):
"""Called when an error occurs.
:type ex: :exc:`Exception`
"""
raise NotImplementedError
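# A minimal concrete listener, sketched here to illustrate the contract
# documented above (hypothetical, not part of ncclient): it logs each
# qualified root tag as documents arrive and logs any transport error.
class _DebugListener(SessionListener):
    def callback(self, root, raw):
        tag, attrs = root
        logger.debug('got a %s document of %d bytes' % (tag, len(raw)))
    def errback(self, ex):
        logger.debug('transport error: %r' % ex)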
class HelloHandler(SessionListener):
def __init__(self, init_cb, error_cb):
self._init_cb = init_cb
self._error_cb = error_cb
def callback(self, root, raw):
tag, attrs = root
if (tag == qualify("hello")) or (tag == "hello"):
try:
id, capabilities = HelloHandler.parse(raw)
except Exception as e:
self._error_cb(e)
else:
self._init_cb(id, capabilities)
def errback(self, err):
self._error_cb(err)
@staticmethod
def build(capabilities, device_handler):
"Given a list of capability URI's returns <hello> messag
| DataDog/integrations-extras | portworx/datadog_checks/portworx/__init__.py | Python | bsd-3-clause | 115 | 0 |
from .__about__ import __version__
from .portworx import PortworxCheck
__all__ = ['__version__', 'PortworxCheck']
| alexrudy/Zeeko | zeeko/handlers/setup_package.py | Python | bsd-3-clause | 1,501 | 0.016656 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from zeeko._build_helpers import get_utils_extension_args, get_zmq_extension_args, _generate_cython_extensions, pxd, get_package_data
from astropy_helpers import setup_helpers
utilities = [pxd("..utils.rc"),
pxd("..utils.msg"),
pxd("..utils.pthread"),
pxd("..utils.lock"),
pxd("..utils.condition"),
pxd("..utils.clock")]
base = [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine"), pxd(".snail"), pxd(".base")]
dependencies = {
'base' : utilities + [ pxd("..cyloop.throttle") ],
'snail' : utilities + [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine") ],
'client' : utilities + base + [ pxd("..messages.receiver") ],
'server' : utilities + base + [ pxd("..messages.publisher") ],
}
def get_extensions(**kwargs):
"""Get the Cython extensions"""
extension_args = setup_helpers.DistutilsExtensionArgs()
    extension_args.update(get_utils_extension_args())
extension_args.update(get_zmq_extension_args())
extension_args['include_dirs'].append('numpy')
package_name = __name__.split(".")[:-1]
    extensions = [e for e in _generate_cython_extensions(extension_args, os.path.dirname(__file__), package_name)]
for extension in extensions:
name = extension.name.split(".")[-1]
if name in dependencies:
extension.depends.extend(dependencies[name])
return extensions
| jplusplus/broken-promises | Scripts/collect_articles.py | Python | gpl-3.0 | 3,847 | 0.011178 |
#!/usr/bin/env python
# Encoding: utf-8
# -----------------------------------------------------------------------------
# Project : Broken Promises
# -----------------------------------------------------------------------------
# Author : Edouard Richard <[email protected]>
# -----------------------------------------------------------------------------
# License : GNU General Public License
# -----------------------------------------------------------------------------
# Creation : 28-Oct-2013
# Last mod : 27-Nov-2013
# -----------------------------------------------------------------------------
# This file is part of Broken Promises.
#
# Broken Promises is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Broken Promises is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Broken Promises. If not, see <http://www.gnu.org/licenses/>.
from brokenpromises.operations import CollectArticles
from bson.json_util import dumps
import optparse
import brokenpromises.channels
import sys
import reporter
reporter.REPORTER.register(reporter.StderrReporter())
debug, trace, info, warning, error, fatal = reporter.bind("script_collect_articles")
oparser = optparse.OptionParser(usage ="\n./%prog [options] year \n./%prog [options] year month\n./%prog [options] year month day")
# oparser.add_option("-C", "--nocache", action="store_true", dest="nocache",
# help = "Prevents from using the cache", default=False)
oparser.add_option("-f", "--channelslistfile", action="store", dest="channels_file",
help = "Use this that as channels list to use", default=None)
oparser.add_option("-c", "--channels", action="store", dest="channels_list",
help = "channels list comma separated", default=None)
oparser.add_option("-s", "--storage", action="store_true", dest="storage",
help = "Save the result with the default storage", default=False)
oparser.add_option("-d", "--drop", action="store_true", dest="mongodb_drop",
help = "drop the previous articles from database before", default=False)
oparser.add_option("--force", action="store_true", dest="force_collect",
help = "Force the scrap. If --storage is enable, the scrap could be escape b/c of a previous similar scrap", default=False)
oparser.add_option("-o", "--output", action="store", dest="output_file",
help = "Specify a file to write the export to. If you do not specify a file name, the program writes data to standard output (e.g. stdout)", default=None)
# Think to update the README.md file after modifying the options
options, args = oparser.parse_args()
assert len(args) > 0 and len(args) <= 3
if options.output_file:
sys.stdout = open(options.output_file, 'a')
channels = brokenpromises.channels.get_available_channels()
if options.channels_file:
with open(options.channels_file) as f:
channels = [line.replace("\n", "") for line in f.readlines()]
if options.channels_list:
channels = options.channels_list.split(",")
collector = CollectArticles(channels, *args, use_storage=options.storage, force_collect=options.force_collect)
if options.mongodb_drop:
collector.storage.get_database().drop_collection("articles")
collector.storage.get_database().drop_collection("reports")
results = collector.run()
# OUTPUT
print dumps([_.__dict__ for _ in results]).encode('utf-8')
info("%d articles collected." % (len(results)))
exit()
# EOF
| ajaniv/softwarebook | cpython/contract/core/api.py | Python | gpl-2.0 | 1,699 | 0.006474 |
#!/usr/bin/env python
# -#- coding: utf-8 -#-
#
# contract/core/api.py - functions which simplify contract package feature access
#
# This file is part of OndALear collection of open source components
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Copyright (C) 2008 Amnon Janiv <[email protected]>
#
# Initial version: 2008-02-01
# Author: Amnon Janiv <[email protected]>
"""
.. module:: contract.core.api
:synopsis: Contract core simplified feature access module
Set of functions which simplify access to contract.core features.
.. moduleauthor:: Amnon Janiv <[email protected]>
"""
__revision__ = '$Id: $'
__version__ = '0.0.1'
from contract.core.package import BusContractCorePackageDescriptor
import busxml.core.api
def parse_file(file_name):
"""Parse an xml file containing contract object graph
:param file_name: XML file name.
:type file_name: str.
:returns: BusinessContractWorkspace -- contract object graph container.
"""
package_desc = BusContractCorePackageDescriptor.get_instance()
root_obj = busxml.core.api.parse_file(file_name, package_desc)
return root_obj
def export_to_string(obj):
"""Export contract object graph to string
:param obj: Contract object graph container.
:type obj: BusinessContractWorkspace.
:returns: unicode -- xml string with underlying contract information
"""
package_desc = BusContractCorePackageDescriptor.get_instance()
buf = busxml.core.api.export_to_string(obj, package_desc)
return buf
| lukashermann/pytorch-rl | core/models/a3c_mlp_con.py | Python | mit | 4,501 | 0.005776 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.init_weights import init_weights, normalized_columns_initializer
from core.model import Model
class A3CMlpConModel(Model):
def __init__(self, args):
super(A3CMlpConModel, self).__init__(args)
# build model
# 0. feature layers
self.fc1 = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim) # NOTE: for pkg="gym"
self.rl1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl4 = nn.ReLU()
        self.fc1_v = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim) # NOTE: for pkg="gym"
self.rl1_v = nn.ReLU()
self.fc2_v = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl2_v = nn.ReLU()
self.fc3_v = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl3_v = nn.ReLU()
self.fc4_v = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.rl4_v = nn.ReLU()
# lstm
if self.enable_lstm:
self.lstm = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
self.lstm_v = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
# 1. policy output
self.policy_5 = nn.Linear(self.hidden_dim, self.output_dims)
self.policy_sig = nn.Linear(self.hidden_dim, self.output_dims)
self.softplus = nn.Softplus()
# 2. value output
self.value_5 = nn.Linear(self.hidden_dim, 1)
self._reset()
def _init_weights(self):
self.apply(init_weights)
self.fc1.weight.data = normalized_columns_initializer(self.fc1.weight.data, 0.01)
self.fc1.bias.data.fill_(0)
self.fc2.weight.data = normalized_columns_initializer(self.fc2.weight.data, 0.01)
self.fc2.bias.data.fill_(0)
self.fc3.weight.data = normalized_columns_initializer(self.fc3.weight.data, 0.01)
self.fc3.bias.data.fill_(0)
self.fc4.weight.data = normalized_columns_initializer(self.fc4.weight.data, 0.01)
self.fc4.bias.data.fill_(0)
self.fc1_v.weight.data = normalized_columns_initializer(self.fc1_v.weight.data, 0.01)
self.fc1_v.bias.data.fill_(0)
self.fc2_v.weight.data = normalized_columns_initializer(self.fc2_v.weight.data, 0.01)
self.fc2_v.bias.data.fill_(0)
self.fc3_v.weight.data = normalized_columns_initializer(self.fc3_v.weight.data, 0.01)
self.fc3_v.bias.data.fill_(0)
self.fc4_v.weight.data = normalized_columns_initializer(self.fc4_v.weight.data, 0.01)
self.fc4_v.bias.data.fill_(0)
self.policy_5.weight.data = normalized_columns_initializer(self.policy_5.weight.data, 0.01)
self.policy_5.bias.data.fill_(0)
self.value_5.weight.data = normalized_columns_initializer(self.value_5.weight.data, 1.0)
self.value_5.bias.data.fill_(0)
self.lstm.bias_ih.data.fill_(0)
self.lstm.bias_hh.data.fill_(0)
self.lstm_v.bias_ih.data.fill_(0)
self.lstm_v.bias_hh.data.fill_(0)
def forward(self, x, lstm_hidden_vb=None):
p = x.view(x.size(0), self.input_dims[0] * self.input_dims[1])
p = self.rl1(self.fc1(p))
p = self.rl2(self.fc2(p))
p = self.rl3(self.fc3(p))
p = self.rl4(self.fc4(p))
p = p.view(-1, self.hidden_dim)
if self.enable_lstm:
p_, v_ = torch.split(lstm_hidden_vb[0],1)
c_p, c_v = torch.split(lstm_hidden_vb[1],1)
p, c_p = self.lstm(p, (p_, c_p))
p_out = self.policy_5(p)
sig = self.policy_sig(p)
sig = self.softplus(sig)
v = x.view(x.size(0), self.input_dims[0] * self.input_dims[1])
v = self.rl1_v(self.fc1_v(v))
v = self.rl2_v(self.fc2_v(v))
v = self.rl3_v(self.fc3_v(v))
v = self.rl4_v(self.fc4_v(v))
v = v.view(-1, self.hidden_dim)
if self.enable_lstm:
v, c_v = self.lstm_v(v, (v_, c_v))
v_out = self.value_5(v)
if self.enable_lstm:
return p_out, sig, v_out, (torch.cat((p,v),0), torch.cat((c_p, c_v),0))
else:
return p_out, sig, v_out
| dluschan/olymp | lomonosov/num16.py | Python | mit | 550 | 0.005455 |
n = int(input())
s = input()
letterlist = ['x', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
letter = {}
for l in letterlist:
letter[l] = 0
for i in range(n):
if s[i:i+1] in letterlist:
letter[s[i:i+1]] += 1
if letter['x'] > 0 and letter['0'] > 0:
letter['0'] -= 1
del letter['x']
del letterlist[0]
res = '0x'
if any(c > 0 for c in letter.values()):
letterlist.reverse()
    for l in letterlist:
res += l*letter[l]
print(res)
else:
print('No')
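# Worked example (assumed input, for illustration): n = 5 and s = "x01F2".
# One 'x' and one '0' are consumed to form the "0x" prefix, and the remaining
# digits {1, 2, F} are emitted in descending order, so the script prints
# 0xF21, the largest hex literal buildable from the input; if no characters
# were counted at all, it prints "No".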
| ReanGD/web-home-manage | backend/torrents/apps.py | Python | apache-2.0 | 91 | 0 |
from django.apps import AppConfig
class TorrentsConfig(AppConfig):
    name = 'torrents'
| pivot-libre/tideman | tests/parse_ballot.py | Python | apache-2.0 | 2,798 | 0.006076 |
import sys
import json
def make_column_to_candidate_dict(header_row):
my_dict = {}
for colIndex, candidate in enumerate(header_row):
my_dict[colIndex] = candidate.strip()
return my_dict
def return_candidates_in_order(row, col_to_candidate_dict):
ballot = []
for i in range(0,len(row)):
ballot.append([])
for colIndex, rank in enumerate(row):
candidate = col_to_candidate_dict[colIndex]
int_rank = int(rank)
ballot[int_rank-1].append(candidate)
ballot = filter(lambda x: len(x) > 0, ballot)
return ballot
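# Illustrative example (hypothetical data): with candidate header
# ['Alice', 'Bob', 'Carol'] and ballot row ['2', '1', '2'], Bob (rank 1)
# fills the first group while Alice and Carol tie at rank 2, so the function
# returns [['Bob'], ['Alice', 'Carol']] once empty rank groups are filtered.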
def split_line(line):
return line.split('\t')
def convert_csv(filename):
return convert_csv_to_php(filename)
def convert_csv_to_json(filename):
ballot_arrays = get_ballot_arrays(filename)
objects = []
for ballot_array in ballot_arrays:
        ballot_object = {'count': 1, 'values': ballot_array}
        objects.append(ballot_object)
    print(json.dumps(objects))
def convert_csv_to_php(filename):
class_text = ''
with open('TestScenarioHeader.php.fragment', 'r') as class_header:
class_text += class_header.read()
ballot_arrays = get_ballot_arrays(filename)
class_text += generate_php(ballot_arrays)
with open('TestScenarioFooter.php.fragment', 'r') as class_footer:
class_text += class_footer.read().rstrip()
print class_text
def generate_php(ballot_arrays):
ballots = []
for ballot in ballot_arrays:
ballots.append(generate_one_ballot_php(ballot))
return ' return [\n' + ',\n'.join(ballots) + '\n ];\n'
def generate_one_ballot_php(ballot):
php = ' new NBallot(\n 1,\n'
candidate_lists = []
for group in ballot:
candidate_list = ' new CandidateList(\n'
candidates = []
for candidate in group:
            candidates.append(' new Candidate("' + candidate + '")')
candidate_list += ',\n'.join(candidates)
candidate_list += '\n )'
candidate_lists.append(candidate_list)
    php += ',\n'.join(candidate_lists)
php += '\n )'
return php
def get_ballot_arrays(filename):
ballots = []
header = True
ids = False
with open(filename, 'r') as csv:
for line in csv.readlines():
row = split_line(line)
if header:
header = False
ids = True
elif ids:
col_to_candidate_dict = make_column_to_candidate_dict(row)
ids = False
else:
ballot = return_candidates_in_order(row, col_to_candidate_dict)
##print ballot
ballots.append(ballot)
return ballots
if __name__ == '__main__':
convert_csv(sys.argv[1])
| anhstudios/swganh | data/scripts/templates/object/static/structure/tatooine/shared_pillar_pristine_small_style_01.py | Python | mit | 469 | 0.046908 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/tatooine/shared_pillar_pristine_small_style_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
    return result
| qytz/qytz-notes | source/tech/PyGObject-Tutorial/examples/button_example.py | Python | mit | 1,105 | 0.00362 |
#!/usr/bin/env python
#coding:utf-8
from gi.repository import Gtk
class ButtonWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title='Button Demo')
self.set_border_width(10)
hbox = Gtk.Box(spacing=6)
self.add(hbox)
button = Gtk.Button('Click Me')
button.connect('clicked', self.on_click_me_clicked)
hbox.pack_start(button, True, True, 0)
button = Gtk.Button(stock=Gtk.STOCK_OPEN)
button.connect('clicked', self.on_open_clicked)
hbox.pack_start(button, True, True, 0)
button = Gtk.Button('_Close', use_underline=True)
button.connect('clicked', self.on_close_clicked)
        hbox.pack_start(button, True, True, 0)
def on_click_me_clicked(self, button):
print '"click me" button was clicked'
def on_open_clicked(self, button):
print '"open" button was clicked'
def on_close_clicked(self, button):
print 'Closing application'
Gtk.main_quit()
wind = ButtonWindow()
wind.connect('delete-event', Gtk.main_quit)
wind.show_all()
Gtk.main()
| andrebellafronte/stoq | stoqlib/gui/test/test_stocktransferwizard.py | Python | gpl-2.0 | 2,810 | 0.004982 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import gtk
import mock
from stoqlib.database.runtime import get_current_branch
from stoqlib.domain.transfer import TransferOrder
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.wizards.stocktransferwizard import StockTransferWizard
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class TestStockTransferWizard(GUITest):
@mock.patch('stoqlib.gui.wizards.stocktransferwizard.print_report')
@mock.patch('stoqlib.gui.wizards.stocktransferwizard.yesno')
def test_create(self, yesno, print_report):
sellable = self.create_sellable(description=u"Product to transfer")
self.create_storable(sellable.product, get_current_branch(self.store),
stock=10)
wizard = StockTransferWizard(self.store)
self.assertNotSensitive(wizard, ['next_button'])
self.check_wizard(wizard, 'wizard-stock-transfer-create')
step = wizard.get_current_step()
step.destination_branch.set_active(0)
self.assertSensitive(wizard, ['next_button'])
self.click(wizard.next_button)
step = wizard.get_current_step()
# adds sellable to step
step.sellable_selected(sellable)
step._add_sellable()
self.check_wizard(wizard, 'wizard-stock-transfer-products')
        module = 'stoqlib.gui.events.StockTransferWizardFinishEvent.emit'
with mock.patch(module) as emit:
with mock.patch.object(self.store, 'commit'):
self.click(wizard.next_button)
self.assertEquals(emit.call_count, 1)
args, kwargs = emit.call_args
self.assertTrue(isinstance(args[0], TransferOrder))
yesno.assert_called_once_with(
_('Would you like to print a receipt for this transfer?'),
gtk.RESPONSE_YES, 'Print receipt', "Don't print")
self.assertEquals(print_report.call_count, 1)
| 2ndy/RaspIM | usr/lib/python2.6/lib2to3/fixes/fix_itertools_imports.py | Python | gpl-2.0 | 1,840 | 0.000543 |
""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import BlankLine, syms, token
class FixItertoolsImports(fixer_base.BaseFix):
PATTERN = """
import_from< 'from' 'itertools' 'import' imports=any >
""" %(locals())
def transform(self, node, results):
imports = results['imports']
if imports.type == syms.import_as_name or not imports.children:
children = [imports]
else:
children = imports.children
for child in children[::2]:
if child.type == token.NAME:
member = child.value
name_node = child
else:
assert child.type == syms.import_as_name
name_node = child.children[0]
member_name = name_node.value
            if member_name in (u'imap', u'izip', u'ifilter'):
child.value = None
child.remove()
elif member_name == u'ifilterfalse':
node.changed()
name_node.value = u'filterfalse'
# Make sure the import statement is still sane
children = imports.children[:] or [imports]
remove_comma = True
for child in children:
if remove_comma and child.type == token.COMMA:
child.remove()
else:
remove_comma ^= True
if children[-1].type == token.COMMA:
children[-1].remove()
# If there are no imports left, just get rid of the entire statement
if not (imports.children or getattr(imports, 'value', None)) or \
imports.parent is None:
p = node.prefix
node = BlankLine()
node.prefix = p
return node
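# Illustrative effect of this fixer (hypothetical input):
#   before: from itertools import imap, ifilter, ifilterfalse
#   after:  from itertools import filterfalse
# imap and ifilter are dropped outright (the map/filter builtins replace them
# in Python 3) and ifilterfalse is renamed; if nothing is left to import, the
# whole statement is replaced by a BlankLine node.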
| brotherjack/RoodeStem | tests/test_voting_systems.py | Python | mit | 709 | 0.008463 |
'''
Created on Jun 29, 2016
@author: Thomas Adriaan Hellinger
'''
import pytest
from roodestem.voting_systems.voting_system import Result
class TestResult:
    def test_null_result_not_tolerated(self):
with pytest.raises(TypeError):
Result()
def test_passed_multiple_winners(self):
res = Result(winner=['a', 'b', 'c'], tied=['b','c'])
assert res == Result(tied=['a', 'b', 'c'])
def test_passed_all_losers(self):
res = Result(loser=['a', 'b', 'c'])
assert res == Result(tied=['a', 'b', 'c'])
def test_passed_all_winners(self):
res = Result(winner=['a', 'b', 'c'])
assert res == Result(tied=['a', 'b', 'c'])
| KDD-OpenSource/fexum | users/views.py | Python | mit | 1,286 | 0.001555 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT
from users.serializers import UserSerializer
from rest_framework.permissions import AllowAny
from django.contrib.auth import login, logout
from rest_framework.authentication import BaseAuthentication, SessionAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.authtoken.serializers import AuthTokenSerializer
from django.contrib.auth import login, logout
from users.authentication import CustomBaseAuthentication
class AuthLoginView(APIView):
authentication_classes = (CustomBaseAuthentication, SessionAuthentication)
def post(self, request):
        login(request, request.user)
return Response(status=HTTP_204_NO_CONTENT)
class AuthLogoutView(APIView):
    def delete(self, request):
logout(request)
return Response(status=HTTP_204_NO_CONTENT)
class UserRegisterView(APIView):
permission_classes = (AllowAny,)
authentication_classes = () # TODO: Remove
def post(self, request):
serializer = UserSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
| butla/experiments | aiohttp_redis_producer_consumer/txodds_code_test/url_extractor.py | Python | mit | 2,896 | 0.003108 |
"""
The consumer's code.
It takes HTML from the queue and outputs the URIs found in it.
"""
import asyncio
import json
import logging
from typing import List
from urllib.parse import urljoin
import aioredis
from bs4 import BeautifulSoup
from . import app_cli, redis_queue
_log = logging.getLogger('url_extractor')
def _scrape_urls(html: str, base_url: str) -> List[str]:
"""Gets all valid links from a site and returns them as URIs (some links may be relative.
If the URIs scraped here would go back into the system t
|
o have more URIs scraped from their
HTML, we would need to filter out all those who are not HTTP or HTTPS.
Also, assuming that many consumers and many producers would be running at the same time,
connected to one Redis instance, we would need to cache normalized versions or visited URIs
without fragments (https://tools.ietf.org/html/rfc3986#section-3.5) so we don't fall into loops.
For example two sites referencing each other.
The cached entries could have time-to-live (Redis EXPIRE command), so we could refresh our
knowledge about a site eventually.
"""
soup = BeautifulSoup(html, 'html.parser')
href = 'href'
return [urljoin(base_url, link.get(href))
for link in soup.find_all('a') if link.has_attr(href)]
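# Illustrative call (hypothetical HTML, not taken from the real queue):
#   _scrape_urls('<a href="/about">About</a><a>no href</a>',
#                'https://example.com/index.html')
# returns ['https://example.com/about']: the anchor without an href attribute
# is filtered out and the relative link is resolved against the base URL.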
async def _scrape_urls_from_queued_html(redis_pool: aioredis.RedisPool):
_log.info('Processing HTML from queue...')
while True:
try:
html_payload = await redis_queue.pop(redis_pool)
_log.info('Processing HTML from URL %s', html_payload.url)
scraped_urls = _scrape_urls(html_payload.html, html_payload.url)
_log.info('Scraped URIs from URL %s', html_payload.url)
output_json = {html_payload.url: scraped_urls}
# flush for anyone who is watching the stream
print(json.dumps(output_json), flush=True)
except redis_queue.QueueEmptyError:
# wait for work to become available
await asyncio.sleep(1) # pragma: no cover
def main():
"""Run the URL extractor (the consumer).
"""
app_cli.setup_logging()
args_parser = app_cli.get_redis_args_parser(
'Start a worker that will get URL/HTML pairs from a Redis queue and for each of those '
'pairs output (on separate lines) a JSON in format {ORIGINATING_URL: [FOUND_URLS_LIST]}')
args = args_parser.parse_args()
loop = app_cli.get_event_loop()
_log.info('Creating a pool of connections to Redis at %s:%d.',
args.redis_host, args.redis_port)
# the pool won't be closed explicitly, since the process needs to be terminated to stop anyway
redis_pool = loop.run_until_complete(
aioredis.create_pool((args.redis_host, args.redis_port)))
loop.run_until_complete(_scrape_urls_from_queued_html(redis_pool))
if __name__ == '__main__':
main()
| isubuz/zahlen | algorithms/sorting/radix_sort.py | Python | mit | 805 | 0 |
from math import log
def sort(a_list, base):
"""Sort the input list with the specified base, using Radix sort.
This implementation assumes that the input list does not contain negative
    numbers. This algorithm is inspired by the Wikipedia implementation of
Radix sort.
"""
passes = int(log(max(a_list), base) + 1)
items = a_list[:]
for digit_index in xrange(passes):
buckets = [[] for _ in xrange(base)] # Buckets for sorted sublists.
for item in items:
digit = _get_digit(item, base, digit_index)
buckets[digit].append(item)
items = []
        for sublists in buckets:
items.extend(sublists)
return items
def _get_digit(number, base, digit_index):
return (number // base ** digit_index) % base
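# Worked example (for illustration): sort([170, 45, 75, 2], 10) makes
# int(log(170, 10) + 1) == 3 passes; _get_digit(170, 10, 0) == 0,
# _get_digit(170, 10, 1) == 7 and _get_digit(170, 10, 2) == 1, and after the
# final pass the buckets concatenate to [2, 45, 75, 170].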
| bnsgeyer/Copter3_4 | Tools/autotest/sim_vehicle.py | Python | gpl-3.0 | 32,299 | 0.002848 |
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
from __future__ import print_function
import atexit
import getpass
import optparse
import os
import os.path
import re
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
"""A custom exception class to hold state if we encounter the parse error we are looking for"""
def __init__(self, error, opts, rargs):
Exception.__init__(self, error)
self.opts = opts
self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
"""An option parser which emulates the behaviour of the old sim_vehicle.sh; if passed -C, the first argument not understood starts a list of arguments that are passed straight to mavproxy"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
def error(self, error):
"""Override default error handler called by optparse.OptionParser.parse_args when a parse error occurs; raise a detailed exception which can be caught"""
if error.find("no such option") != -1:
raise CompatError(error, self.values, self.rargs)
optparse.OptionParser.error(self, error)
def parse_args(self, args=None, values=None):
"""Wrap parse_args so we can catch the exception raised upon discovering the known parameter parsing error"""
try:
opts, args = optparse.OptionParser.parse_args(self)
except CompatError as e:
if not e.opts.sim_vehicle_sh_compatible:
print(e)
print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
sys.exit(1)
if e.opts.mavproxy_args:
print("--mavproxy-args not permitted in compat mode")
sys.exit(1)
args = []
opts = e.opts
mavproxy_args = [str(e)[16:]] # this trims "no such option" off
mavproxy_args.extend(e.rargs)
opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))
return opts, args
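# Illustrative run of the compatibility mode described above (hypothetical
# command line): given "sim_vehicle.py -C --map --console", optparse raises
# "no such option: --map"; the CompatError carries the already-parsed options
# plus the remaining arguments, and parse_args stitches them back together as
# mavproxy_args ("--map --console") instead of exiting with a parse error.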
def cygwin_pidof(proc_name):
""" Thanks to kata198 for this:
    https://github.com/kata198/cygwin-ps-misc/blob/master/pidof
"""
pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
output_lines = pipe.stdout.read().replace("\r", "").split("\n")
ret = pipe.wait()
pids = []
if ret != 0:
# No results
return []
for line in output_lines:
if not line:
continue
        line_split = [item for item in line.split(' ') if item]
cmd = line_split[-1].split('/')[-1]
if cmd == proc_name:
try:
pid = int(line_split[0].strip())
except:
pid = int(line_split[1].strip())
if pid not in pids:
pids.append(pid)
return pids
def under_cygwin():
"""Return if Cygwin binary exist"""
return os.path.exists("/usr/bin/cygstart")
def under_macos():
return sys.platform == 'darwin'
def kill_tasks_cygwin(victims):
"""Shell out to ps -ea to find processes to kill"""
for victim in list(victims):
pids = cygwin_pidof(victim)
# progress("pids for (%s): %s" % (victim,",".join([ str(p) for p in pids])))
for apid in pids:
os.kill(apid, signal.SIGKILL)
def kill_tasks_macos():
for window in windowID:
cmd = "osascript -e \'tell application \"Terminal\" to close (window(get index of window id %s))\'" % window
os.system(cmd)
def kill_tasks_psutil(victims):
"""Use the psutil module to kill tasks by name. Sadly, this module is not available on Windows, but when it is we should be able to *just* use this routine"""
import psutil
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
continue
if proc.name in victims:
proc.kill()
def kill_tasks_pkill(victims):
"""Shell out to pkill(1) to kill processed by name"""
for victim in victims: # pkill takes a single pattern, so iterate
cmd = ["pkill", victim]
run_cmd_blocking("pkill", cmd, quiet=True)
class BobException(Exception):
"""Handle Bob's Exceptions"""
pass
def kill_tasks():
"""Clean up stray processes by name. This is a somewhat shotgun approach"""
progress("Killing tasks")
try:
victim_names = {
'JSBSim',
'lt-JSBSim',
'ArduPlane.elf',
'ArduCopter.elf',
'APMrover2.elf',
'AntennaTracker.elf',
'JSBSIm.exe',
'MAVProxy.exe',
'runsim.py',
'AntennaTracker.elf',
}
for frame in _options_for_frame.keys():
if "waf_target" not in _options_for_frame[frame]:
continue
exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
victim_names.add(exe_name)
if under_cygwin():
return kill_tasks_cygwin(victim_names)
if under_macos():
return kill_tasks_macos()
try:
kill_tasks_psutil(victim_names)
except ImportError:
kill_tasks_pkill(victim_names)
except Exception as e:
progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
"""Assert that the JSBSim we will run is the one we expect to run"""
jsbsim_cmd = ["JSBSim", "--version"]
progress_cmd("Get JSBSim version", jsbsim_cmd)
try:
jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
jsbsim_version = '' # this value will trigger the ".index"
# check below and produce a reasonable
# error message
try:
jsbsim_version.index(b"ArduPilot")
except ValueError:
print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your \$PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
sys.exit(1)
def progress(text):
"""Display sim_vehicle progress text"""
print("SIM_VEHICLE: " + text)
def find_autotest_dir():
"""Return path to autotest directory"""
return os.path.dirname(os.path.realpath(__file__))
def find_root_dir():
"""Return path to root directory"""
return os.path.realpath(os.path.join(find_autotest_dir(), '../..'))
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter-quad",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl-hexa",
"waf_target": "bin/arducopter-hexa",
"default_params_filename": "default_params/copter.parm",
},
"octa-quad": {
"make_target": "sitl-octa-quad",
"waf_tar
| hansika/pyquora | scrape_quora/pyquora.py | Python | apache-2.0 | 3,445 | 0.004354 |
import urllib2
from lxml import etree
####################################################################
# API
####################################################################
class Scrape_Quora:
regexpNS = "http://exslt.org/regular-expressions"
@staticmethod
def get_name(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
name = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/h1/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return name
@staticmethod
def get_url(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
URL = response.geturl()
return URL
@staticmethod
def get_profile_picture_link(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
profile_picture_link = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/img/@data-src', namespaces={'re':Scrape_Quora.regexpNS})[0]
return profile_picture_link
@staticmethod
    def get_no_of_questions(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
        no_of_questions = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Questions"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_questions
@staticmethod
def get_no_of_answers(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_answers = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Answers"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_answers
@staticmethod
def get_no_of_followers(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_followers = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Followers "]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_followers
@staticmethod
def get_no_of_following(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_following = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Following "]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_following
@staticmethod
def get_no_of_edits(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_edits = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Edits"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_edits
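# A minimal usage sketch; 'some-user' is a hypothetical profile name, and the
# calls below perform live HTTP requests, so changes to Quora's markup can
# break the XPath expressions above:
if __name__ == '__main__':
    user = 'some-user'
    print Scrape_Quora.get_name(user)
    print Scrape_Quora.get_no_of_answers(user)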
|
FCP-INDI/C-PAC
|
CPAC/group_analysis/__init__.py
|
Python
|
bsd-3-clause
| 158
| 0.006329
|
from .group_analysis import create_fsl_flame_wf, \
                            get_operation
__all__ = ['create_fsl_flame_wf', \
           'get_operation']
|
mastizada/kuma
|
kuma/wiki/migrations/0041_auto__del_firefoxversion__del_unique_firefoxversion_item_id_document__.py
|
Python
|
mpl-2.0
| 21,931
| 0.007387
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'OperatingSystem', fields ['item_id', 'document']
db.delete_unique('wiki_operatingsystem', ['item_id', 'document_id'])
# Removing unique constraint on 'FirefoxVersion', fields ['item_id', 'document']
db.delete_unique('wiki_firefoxversion', ['item_id', 'document_id'])
# Deleting model 'FirefoxVersion'
db.delete_table('wiki_firefoxversion')
# Deleting model 'OperatingSystem'
db.delete_table('wiki_operatingsystem')
# Deleting field 'Revision.significance'
db.delete_column('wiki_revision', 'significance')
def backwards(self, orm):
# Adding model 'FirefoxVersion'
db.create_table('wiki_firefoxversion', (
('item_id', self.gf('django.db.models.fields.IntegerField')()),
('document', self.gf('django.db.models.fields.related.ForeignKey')(related_name='firefox_version_set', to=orm['wiki.Document'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('wiki', ['FirefoxVersion'])
# Adding unique constraint on 'FirefoxVersion', fields ['item_id', 'document']
db.create_unique('wiki_firefoxversion', ['item_id', 'document_id'])
# Adding model 'OperatingSystem'
db.create_table('wiki_operatingsystem', (
('item_id', self.gf('django.db.models.fields.IntegerField')()),
('document', self.gf('django.db.models.fields.related.ForeignKey')(related_name='operating_system_set', to=orm['wiki.Document'])),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('wiki', ['OperatingSystem'])
# Adding unique constraint on 'OperatingSystem', fields ['item_id', 'document']
db.create_unique('wiki_operatingsystem', ['item_id', 'document_id'])
# Adding field 'Revision.significance'
db.add_column('wiki_revision', 'significance',
self.gf('django.db.models.fields.IntegerField')(null=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teamwork.team': {
'Meta': {'object_name': 'Team'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'attachments.attachment': {
'Meta': {'object_name': 'Attachment'},
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_rev'", 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mindtouch_attachment_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'attachments.a
|
cloudbase/cinder
|
cinder/tests/unit/volume/drivers/emc/vnx/test_client.py
|
Python
|
apache-2.0
| 17,512
| 0
|
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.emc.vnx import utils
from cinder.volume.drivers.emc.vnx import client as vnx_client
from cinder.volume.drivers.emc.vnx import common as vnx_common
class TestCondition(test.TestCase):
@res_mock.patch_client
def test_is_lun_io_ready_false(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertFalse(r)
@res_mock.patch_client
def test_is_lun_io_ready_true(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertTrue(r)
@res_mock.patch_client
def test_is_lun_io_ready_exception(self, client, mocked):
self.assertRaises(exception.VolumeBackendAPIException,
vnx_client.Condition.is_lun_io_ready,
mocked['lun'])
class TestClient(test.TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.origin_timeout = vnx_common.DEFAULT_TIMEOUT
vnx_common.DEFAULT_TIMEOUT = 0
def tearDown(self):
super(TestClient, self).tearDown()
vnx_common.DEFAULT_TIMEOUT = self.origin_timeout
@res_mock.patch_client
def test_create_lun(self, client, mocked):
client.create_lun(pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
pool = client.vnx.get_pool(name='pool1')
pool.create_lun.assert_called_with(lun_name='test',
size_gb=1,
provision=None,
tier=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_error(self, client, mocked):
self.assertRaises(storops_ex.VNXCreateLunError,
client.create_lun,
pool='pool1',
name='test',
size=1,
provision=None,
tier=None,
cg_id=None,
ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
@res_mock.patch_client
    def test_create_lun_already_existed(self, client, mocked):
client.create_lun(pool='pool1', name='lun3', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_lun.assert_called_once_with(name='lun3')
@res_mock.patch_client
def test_create_lun_in_cg(self, client, mocked):
client.create_lun(
pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id='cg1', ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_compression(self, client, mocked):
client.create_lun(pool='pool1', name='lun2', size=1,
provision=storops.VNXProvisionEnum.COMPRESSED,
tier=None, cg_id=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_migrate_lun(self, client, mocked):
client.migrate_lun(src_id=1,
dst_id=2)
lun = client.vnx.get_lun()
lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH)
@unittest.skip("Skip until bug #1578986 is fixed")
@utils.patch_sleep
@res_mock.patch_client
def test_migrate_lun_with_retry(self, client, mocked, mock_sleep):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXTargetNotReadyError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
mock_sleep.assert_called_with(15)
@res_mock.patch_client
def test_session_finished_faulted(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_session_finished_migrating(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertFalse(r)
@res_mock.patch_client
def test_session_finished_not_existed(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_migrate_lun_error(self, client, mocked):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXMigrationError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
@res_mock.patch_client
def test_verify_migration(self, client, mocked):
r = client.verify_migration(1, 2, 'test_wwn')
self.assertTrue(r)
@res_mock.patch_client
def test_verify_migration_false(self, client, mocked):
r = client.verify_migration(1, 2, 'fake_wwn')
self.assertFalse(r)
@res_mock.patch_client
def test_cleanup_migration(self, client, mocked):
client.cleanup_migration(1, 2)
@res_mock.patch_client
def test_get_lun_by_name(self, client, mocked):
lun = client.get_lun(name='lun_name_test_get_lun_by_name')
self.assertEqual(888, lun.lun_id)
@res_mock.patch_client
def test_delete_lun(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_smp(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_not_exist(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_exception(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXDeleteLunError,
'General lun delete error.',
client.delete_lun, mocked['lun'].name)
@res_mock.patch_client
def test_enable_compression(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
lun_obj.enable_compression.assert_called_with(ignore_thresholds=True)
@res_mock.patch_client
def test_enable_compression_on_compressed_lun(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
@res_mock.patch_client
def test_get_vnx_enabler_status(self, client, mocked):
re = client.get_vnx_enabler_status()
self.assertTrue(re.dedup_enabled)
self.assertFalse(re.compression_enabled)
self.assertTrue(re.thin_enabled)
self.assertFalse(re.fast_enabled)
self.assertTrue(re.snap_enabled)
@res_mock.patch_client
def test_lun_has_snapshot_true(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
self.assertTrue(re)
@res_mock.patch_client
def test_lun_has_snapshot_false(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
        self.assertFalse(re)
|
AntoineToubhans/trellearn
|
main.py
|
Python
|
mit
| 389
| 0.002571
|
#!/usr/bin/env python
import sys
import src.json_importing as I
import src.data_training as T
import src.data_cross_validation as V
import src.extract_feature_multilabel as EML
if __name__ == '__main__':
print('Hello, I am Trellearn')
jsonFileName = sys.argv[1]
    cards = I.parseJSON(jsonFileName)
X, Y, cv, mlb = EML.extract(cards)
V.validateML(X, Y)
exit(0)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/pymodules/python2.7/gwibber/lib/gtk/widgets.py
|
Python
|
gpl-3.0
| 46
| 0.021739
|
/usr/share/pyshared/gwibber/lib/gtk/widgets.py
|
ajose1024/Code_Igniter_Extended
|
user_guide_src/cilexer/cilexer/cilexer.py
|
Python
|
mit
| 2,222
| 0.00495
|
# CodeIgniter
# http://codeigniter.com
#
# An open source application development framework for PHP
#
# This content is released under the MIT License (MIT)
#
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Copyright (c) 2008 - 2014, EllisLab, Inc. (http://ellislab.com/)
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology (http://bcit.ca/)
#
# http://opensource.org/licenses/MIT MIT License
import re
import copy
from pygments.lexer import DelegatingLexer
from pygments.lexers.web import PhpLexer, HtmlLexer
__all__ = [ 'CodeIgniterLexer' ]
class CodeIgniterLexer(DelegatingLexer):
"""
Handles HTML, PHP, JavaScript, and CSS is highlighted
PHP is highlighted with the "startline" option
"""
name = 'CodeIgniter'
aliases = [ 'ci', 'codeigniter' ]
filenames = [ '*.html', '*.css', '*.php', '*.xml', '*.static' ]
mimetypes = [ 'text/html', 'application/xhtml+xml' ]
def __init__(self, **options):
super(CodeIgniterLexer, self).__init__(HtmlLexer,
PhpLexer,
startinline=True)
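# A minimal usage sketch with Pygments' standard highlight() API; the PHP
# snippet passed in is illustrative only:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    print(highlight('<?php echo $title; ?>', CodeIgniterLexer(), HtmlFormatter()))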
|
limdauto/django-social-auth
|
social_auth/backends/contrib/rdio.py
|
Python
|
bsd-3-clause
| 4,358
| 0.000918
|
import urllib.request, urllib.parse, urllib.error
from oauth2 import Request as OAuthRequest, SignatureMethod_HMAC_SHA1
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend, BaseOAuth2
from social_auth.utils import dsa_urlopen
class RdioBaseBackend(OAuthBackend):
def get_user_id(self, details, response):
return response['key']
def get_user_details(self, response):
return {
'username': response['username'],
'first_name': response['firstName'],
'last_name': response['lastName'],
'fullname': response['displayName'],
}
class RdioOAuth1Backend(RdioBaseBackend):
"""Rdio OAuth authentication backend"""
name = 'rdio-oauth1'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
]
@classmethod
def tokens(cls, instance):
token = super(RdioOAuth1Backend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(tok.split('=')
for tok in token['access_token'].split('&'))
return token
class RdioOAuth2Backend(RdioBaseBackend):
name = 'rdio-oauth2'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
('refresh_token', 'refresh_token', True),
('token_type', 'token_type', True),
]
class RdioOAuth1(ConsumerBasedOAuth):
AUTH_BACKEND = RdioOAuth1Backend
REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token'
AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token'
RDIO_API_BASE = 'http://api.rdio.com/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH1_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH1_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
}
request = self.oauth_post_request(access_token, self.RDIO_API_BASE,
params=params)
response = dsa_urlopen(request.url, request.to_postdata())
json = '\n'.join(response.readlines())
try:
return simplejson.loads(json)['result']
except ValueError:
return None
def oauth_post_request(self, token, url, params):
"""Generate OAuth request, setups callback url"""
if 'oauth_verifier' in self.data:
params['oauth_verifier'] = self.data['oauth_verifier']
request = OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=url,
parameters=params,
http_method='POST')
request.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
return request
class RdioOAuth2(BaseOAuth2):
AUTH_BACKEND = RdioOAuth2Backend
AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token'
RDIO_API_BASE = 'https://www.rdio.com/api/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH2_SECRET'
    SCOPE_VAR_NAME = 'RDIO2_PERMISSIONS'
EXTRA_PARAMS_VAR_NAME = 'RDIO2_EXTRA_PARAMS'
def user_data(self, access_token, *args, **kwargs):
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
'access_token': access_token,
}
        response = dsa_urlopen(self.RDIO_API_BASE, urllib.parse.urlencode(params))
try:
return simplejson.load(response)['result']
except ValueError:
return None
# Backend definition
BACKENDS = {
'rdio-oauth1': RdioOAuth1,
'rdio-oauth2': RdioOAuth2
}
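# The Django settings names consumed by these classes, taken from the
# SETTINGS_*_NAME / SCOPE_VAR_NAME attributes above (values are placeholders):
#   RDIO_OAUTH1_KEY = '...'
#   RDIO_OAUTH1_SECRET = '...'
#   RDIO_OAUTH2_KEY = '...'
#   RDIO_OAUTH2_SECRET = '...'
#   RDIO2_PERMISSIONS = [...]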
|
dmilith/SublimeText3-dmilith
|
Packages/lsp_utils/st3/lsp_utils/_util/weak_method.py
|
Python
|
mit
| 1,266
| 0.004739
|
from LSP.plugin.core.typing import Any, Callable
from types import MethodType
import weakref
__all__ = ['weak_method']
# An implementation of weak method borrowed from sublime_lib [1]
#
# We need it to be able to weak reference bound methods as `weakref.WeakMethod` is not available in
# 3.3 runtime.
#
# The reason this is necessary is explained in the documentation of `weakref.WeakMethod`:
# > A custom ref subclass which simulates a weak reference to a bound method (i.e., a method defined
# > on a class and looked up on an instance). Since a bound method is ephemeral, a standard weak
# > reference cannot keep hold of it.
#
# [1] https://github.com/SublimeText/sublime_lib/blob/master/st3/sublime_lib/_util/weak_method.py
def weak_method(method: Callable) -> Callable:
assert isinstance(method, MethodType)
self_ref = weakref.ref(method.__self__)
function_ref = weakref.ref(method.__func__)
def wrapped(*args: Any, **kwargs: Any) -> Any:
self = self_ref()
function = function_ref()
if self is None or function is None:
print('[lsp_utils] Error: weak_method not called due to a deleted reference', [self, function])
return
return function(self, *args, **kwargs)
return wrapped
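# A minimal sketch of the intended usage: the wrapper can be stored as a
# callback without keeping the owning object alive (illustrative only, since
# this module normally runs inside the Sublime Text/LSP runtime):
#
#   listener = Listener()                     # any object with a bound method
#   callback = weak_method(listener.on_event)
#   callback('payload')                       # dispatches while listener lives
#   del listener                              # once the object is collected,
#   callback('payload')                       # the wrapper logs and returns None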
|
enthought/etsproxy
|
enthought/util/math.py
|
Python
|
bsd-3-clause
| 1,984
| 0.003528
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
""" A placeholder for math functional
|
ity that is not implemented in SciPy.
"""
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
import numpy
def is_monotonic(array):
""" Does the array increase monotonically?
>>> is_monotonic(array((1, 2, 3, 4)))
True
>>> is_monotonic(array((1, 2, 3, 0, 5)))
False
This may not be the desired response but:
>>> is_monotonic(array((1)))
False
"""
try:
min_increment = numpy.amin(array[1:] - array[:-1])
if min_increment >= 0:
return True
except Exception:
return False
    return False
def brange(min_value, max_value, increment):
""" Returns an inclusive version of arange().
The usual arange() gives:
>>> arange(1, 4, 1)
array([1, 2, 3])
However brange() returns:
>>> brange(1, 4, 1)
array([ 1., 2., 3., 4.])
"""
return numpy.arange(min_value, max_value + increment / 2.0, increment)
def norm(mean, std):
""" Returns a single random value from a normal distribution. """
return numpy.random.normal(mean, std)
def discrete_std(counts, bin_centers):
    """ Returns the weighted standard deviation from binned data. """
    mean = numpy.sum(counts * bin_centers)/numpy.sum(counts)
    return numpy.sqrt(numpy.sum(counts * (bin_centers - mean)**2)/numpy.sum(counts))
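# A small worked example for discrete_std() with the weighted formula above
# (values are illustrative): counts = (2, 5, 3) in bins centered at (1, 2, 3)
# give mean = 21/10 = 2.1 and
# std = sqrt((2*1.21 + 5*0.01 + 3*0.81)/10) = sqrt(0.49) = 0.7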
|
idaholab/civet
|
ci/recipe/recipe_to_bash.py
|
Python
|
apache-2.0
| 6,708
| 0.00641
|
#!/usr/bin/env python
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts a recipe given in a .cfg file into a full bash shell script
which would be similar to what CIVET would end up running.
"""
from __future__ import unicode_literals, absolute_import
import argparse, sys, os
import re
from RecipeReader import RecipeReader
def read_script(filename):
top_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
script_file = os.path.join(top_dir, filename)
with open(script_file, "r") as f:
out = f.read()
return out
def step_functions(recipe):
step_cmds = ''
for step in recipe["steps"]:
step_cmds += "function step_%s\n{\n" % step["position"]
for key, value in step["environment"].items():
step_cmds += write_env(key, value, " local")
step_cmds += ' local step_name="%s"\n' % step["name"]
step_cmds += ' local step_position="%s"\n' % step["position"]
script = read_script(step["script"])
for l in script.split('\n'):
if l.strip():
step_cmds += ' %s\n' % l
else:
step_cmds += "\n"
step_cmds += "}\nexport -f step_%s\n\n" % step["position"]
step_cmds += "function step_exit()\n"
step_cmds += '{\n'
step_cmds += ' if bash -c $1; then\n'
step_cmds += ' printf "\\n$1 passed\\n\\n"\n'
step_cmds += ' elif [ "$2" == "True" ]; then\n'
step_cmds += ' printf "\\n$1 failed. Aborting\\n\\n"\n'
step_cmds += ' exit 1\n'
step_cmds += ' else\n'
step_cmds += ' printf "\\n$1 failed but continuing\\n\\n"\n'
step_cmds += ' fi\n'
step_cmds += '}\n\n'
# now write out all the functions
for step in recipe["steps"]:
step_cmds += "step_exit step_%s %s\n" % (step["position"], step["abort_on_failure"])
return step_cmds
def write_env(key, value, prefix="export"):
return '%s %s="%s"\n' % (prefix, key, re.sub("^BUILD_ROOT", "$BUILD_ROOT", value))
def recipe_to_bash(recipe,
base_repo,
base_branch,
base_sha,
head_repo,
head_branch,
head_sha,
pr,
push,
manual,
build_root,
moose_jobs,
args):
script = "#!/bin/bash\n"
script += '# Generated by: %s %s\n' % (__file__, ' '.join(args))
script += '# Script for job %s\n' % recipe["filename"]
script += '# It is a good idea to redirect stdin, ie "./script.sh < /dev/null"\n'
script += '# Be sure to have the proper modules loaded as well.\n'
script += '\n\n'
script += 'module list\n'
script += 'export BUILD_ROOT="%s"\n' % build_root
script += 'export MOOSE_JOBS="%s"\n' % moose_jobs
script += '\n\n'
script += 'export CIVET_RECIPE_NAME="%s"\n' % recipe["name"]
script += 'export CIVET_BASE_REPO="%s"\n' % base_repo
script += 'export CIVET_BASE_SSH_URL="%s"\n' % base_repo
script += 'export CIVET_BASE_REF="%s"\n' % base_branch
script += 'export CIVET_BASE_SHA="%s"\n' % base_sha
script += 'export CIVET_HEAD_REPO="%s"\n' % head_repo
script += 'export CIVET_HEAD_REF="%s"\n' % head_branch
script += 'export CIVET_HEAD_SHA="%s"\n' % head_sha
script += 'export CIVET_HEAD_SSH_URL="%s"\n' % head_repo
script += 'export CIVET_JOB_ID="1"\n'
cause_str = ""
if pr:
cause_str = "Pull Request"
elif push:
cause_str = "Push"
elif manual:
cause_str = "Manual"
script += 'export CIVET_EVENT_CAUSE="%s"\n' % cause_str
script += '\n\n'
for source in recipe["global_sources"]:
s = read_script(source)
script += "# %s\n%s\n" % (source, s)
script += "\n\n"
for key, value in recipe["global_env"].items():
script += write_env(key, value)
script += "\n\n"
script += step_functions(recipe)
return script
def convert_recipe(args):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--recipe", dest="recipe", help="The recipe file to convert.", required=True)
parser.add_argument("--output", dest="output", help="Where to write the script to")
parser.add_argument("--build-root", dest="build_root", default="/tmp/", help="Where to set BUILD_ROOT")
parser.add_argument("--num-jobs", dest="num_jobs", default="4", help="What to set MOOSE_JOBS to")
parser.add_argument("--head", nargs=3, dest="head", help="Head repo to work on. Format is: repo branch sha", required=True)
parser.add_argument("--base", nargs=3, dest="base", help="Base repo to work on. Format is: repo branch sha", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pr", action="store_true")
group.add_argument("--push", action="store_true")
group.add_argument("--manual
|
", action=
|
"store_true")
parsed = parser.parse_args(args)
dirname = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(dirname)
# RecipeReader takes a relative path from the base repo directory
real_path = os.path.realpath(parsed.recipe)
rel_path = os.path.relpath(real_path, parent_dir)
try:
reader = RecipeReader(parent_dir, rel_path)
recipe = reader.read()
except Exception as e:
print("Recipe '%s' is not valid: %s" % (real_path, e))
return 1
try:
script = recipe_to_bash(recipe,
base_repo=parsed.base[0],
base_branch=parsed.base[1],
base_sha=parsed.base[2],
head_repo=parsed.head[0],
head_branch=parsed.head[1],
head_sha=parsed.head[2],
pr=parsed.pr,
push=parsed.push,
manual=parsed.manual,
build_root=parsed.build_root,
moose_jobs=parsed.num_jobs,
args=args,
)
if parsed.output:
with open(parsed.output, "w") as f:
f.write(script)
else:
print(script)
except Exception as e:
print("Failed to convert recipe: %s" % e)
return 1
if __name__ == "__main__":
convert_recipe(sys.argv[1:])
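# A hypothetical invocation (recipe path, repos, refs and SHAs are placeholders):
#   ./recipe_to_bash.py --recipe recipes/tests/recipe_all.cfg --output job.sh \
#       --base github.com/org/repo devel 1111111 \
#       --head github.com/org/repo feature 2222222 \
#       --pr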
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/functionalities/multiword-commands/TestMultiWordCommands.py
|
Python
|
apache-2.0
| 1,161
| 0.002584
|
"""
Test multiword commands ('platform' in this case).
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class MultiwordCommandsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_ambiguous_subcommand(self):
self.expect("platform s", error=True,
substrs=["ambiguous command 'platform s'. Possible completions:",
"\tselect\n",
"\tshell\n",
"\tsettings\n"])
@no_debug_info_test
    def test_empty_subcommand(self):
self.expect("platform \"\"", error=True, substrs=["Need to specify a non-empty subcommand."])
@no_debug_info_test
def test_help(self):
# <multiword> help brings up help.
self.expect("platform help",
substrs=["Commands to manage and create platforms.",
"Syntax: platform [",
|
"The following subcommands are supported:",
"connect",
"Select the current platform"])
|
openweave/openweave-core
|
src/test-apps/happy/bin/weave-ping.py
|
Python
|
apache-2.0
| 2,876
| 0.001043
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# A Happy command line utility that tests Weave Ping among Weave nodes.
#
# The command is executed by instantiating and running WeavePing class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import set_test_path
from happy.Utils import *
import WeavePing
if __name__ == "__main__":
options = WeavePing.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:s:c:tuwqp:i:a:e:n:CE:T:",
["help", "origin=", "server=", "count=", "tcp", "udp", "wrmp", "interval=", "quiet",
"tap=", "case", "case_cert_path=", "case_key_path="])
except getopt.GetoptError as err:
print(WeavePing.WeavePing.__doc__)
print(hred(str(err)))
        sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print(WeavePing.WeavePing.__doc__)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-t", "--tcp"):
options["tcp"] = True
elif o in ("-u", "--udp"):
options["udp"] = True
elif o in ("-w", "--wrmp"):
options["wrmp"] = True
elif o in ("-o", "--origin"):
options["client"] = a
elif o in ("-s", "--server"):
options["server"] = a
        elif o in ("-c", "--count"):
options["count"] = a
        elif o in ("-i", "--interval"):
options["interval"] = a
elif o in ("-p", "--tap"):
options["tap"] = a
elif o in ("-C", "--case"):
options["case"] = True
elif o in ("-E", "--case_cert_path"):
options["case_cert_path"] = a
elif o in ("-T", "--case_key_path"):
options["case_key_path"] = a
else:
assert False, "unhandled option"
if len(args) == 1:
options["origin"] = args[0]
if len(args) == 2:
options["client"] = args[0]
options["server"] = args[1]
cmd = WeavePing.WeavePing(options)
cmd.start()
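# A hypothetical invocation inside a Happy topology (node names are placeholders):
#   weave-ping.py -o node01 -s node02 -c 5 --udp --quiet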
|
Awesomeomics/webserver
|
guava/settings.py
|
Python
|
mit
| 210
| 0.009524
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
debug = True
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = '820AEC1BFC5D2C71E06CBF947A3A6191'
GUAVA_API_URL = 'http://localhost:5000'
|
huangshenno1/algo
|
ml/iris/ada_MO/weakclassifier.py
|
Python
|
mit
| 425
| 0.030588
|
from sklearn.tree import DecisionTreeClassifier
# weak classifier
# decision tree (max depth = 2) using scikit-learn
class WeakClassifier:
# initialize
def __init__(self):
self.clf = DecisionTreeClassifier(max_depth = 2)
    # train on dataset (X, y) with distribution weight w
def fit(self, X, y, w):
self.clf.fit(X, y, sample_weight = w)
# predict
def predict(self, X):
return self.clf.predict(X)
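# A minimal usage sketch with uniform boosting weights; the toy data below is
# illustrative only:
if __name__ == '__main__':
    import numpy as np
    X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    y = np.array([0, 0, 1, 1])
    w = np.ones(len(y)) / len(y)   # uniform distribution, as in round 1 of AdaBoost
    clf = WeakClassifier()
    clf.fit(X, y, w)
    print(clf.predict(X))          # expect [0 0 1 1]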
|
sbarratt/flask-prometheus
|
flask_prometheus/__init__.py
|
Python
|
bsd-3-clause
| 1,126
| 0.008881
|
import time
from prometheus_client import Counter, Histogram
from prometheus_client import start_http_server
from flask import request
FLASK_REQUEST_LATENCY = Histogram('flask_request_latency_seconds', 'Flask Request Latency',
['method', 'endpoint'])
FLASK_REQUEST_COUNT = Counter('flask_request_count', 'Flask Request Count',
['method', 'endpoint', 'http_status'])
def before_request():
request.start_time = time.time()
def after_request(response):
request_latency = time.time() - request.start_time
FLASK_REQUEST_LATENCY.labels(request.method, request.path).observe(request_latency)
    FLASK_REQUEST_COUNT.labels(request.method, request.path, response.status_code).inc()
    return response
def monitor(app, port=8000, addr=''):
app.before_request(before_request)
app.after_request(after_request)
start_http_server(port, addr)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
monitor(app, port=8000)
@app.route('/')
def index():
return "Hello"
# Run the application!
app.run()
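    # With the demo above running, Flask answers on its default port and the
    # prometheus_client server exposes the metrics (hypothetical shell session):
    #   curl http://localhost:5000/          -> "Hello"
    #   curl http://localhost:8000/metrics   -> flask_request_count, flask_request_latency_seconds, ...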
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-rrcov/package.py
|
Python
|
lgpl-2.1
| 932
| 0.002146
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRrcov(RPackage):
"""rrcov: Scalable Robust Estimators with High Breakdown Point"""
homepage = "https://cloud.r-project.org/package=rrcov"
url = "https://cloud.r-project.org/src/contrib/rrcov_1.4-7.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rrcov"
version('1.4-7', sha256='cbd08ccce8b583a2f88946a3267c8fc494ee2b44ba749b9296a6e3d818f6f293')
depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-mvtnorm', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-pcapp', type=('build', 'run'))
|
ericholscher/django
|
tests/expressions_regress/models.py
|
Python
|
bsd-3-clause
| 766
| 0.001305
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
"""
Model for testing arithmetic expressions.
"""
from django.db import models
@python_2_unicode_compatible
class Number(models.Model):
    integer = models.BigIntegerField(db_column='the_integer')
float = models.FloatField(null=True, db_column='the_float')
def __str__(self):
return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
name = models.CharField(max_length=24)
assigned = models.DateField()
completed = models.DateField()
start = models.DateTimeField()
end = models.DateTimeField()
class Meta:
ordering = ('name',)
def duration(self):
return self.end - self.start
|
janiheikkinen/irods
|
tests/pydevtest/configuration.py
|
Python
|
bsd-3-clause
| 802
| 0
|
import socket
import os
RUN_IN_TOPOLOGY = False
TOPOLOGY_FROM_RESOURCE_SERVER = False
HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname()
USE_SSL = False
ICAT_HOSTNAME = socket.gethostname()
PREEXISTING_ADMIN_PASSWORD = 'rods'
# TODO: allow for arbitrary number of remote zones
class FEDERATION(object):
LOCAL_IRODS_VERSION = (4, 2, 0)
REMOTE_IRODS_VERSION = (4, 2, 0)
RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '53CR37')]
RODSADMIN_NAME_PASSWORD_LIST = []
IRODS_DIR = '/var/lib/irods/iRODS'
    LOCAL_ZONE = 'dev'
REMOTE_ZONE = 'buntest'
    REMOTE_HOST = 'buntest'
REMOTE_RESOURCE = 'demoResc'
REMOTE_VAULT = '/var/lib/irods/iRODS/Vault'
TEST_FILE_SIZE = 4*1024*1024
LARGE_FILE_SIZE = 64*1024*1024
TEST_FILE_COUNT = 300
MAX_THREADS = 16
|
frederick623/pb
|
fa_collateral_upload/HTI_Loan_Collateral_Automation.py
|
Python
|
apache-2.0
| 25,027
| 0.010349
|
# 20170226 Add more additional info
import acm
import ael
import HTI_Util
import HTI_FeedTrade_EDD_Util
import fnmatch
import datetime
import os
import sys
import csv
import re
import sqlite3
import math
import glob
import win32com.client
import traceback
ael_variables = [['asofdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
['counterparties', 'Counterparty(s)', 'string', HTI_Util.getAllParties(), None, 0, 1, 'Counterparty(s)', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD Deltaone', 1, 1, 'Portfolio', None, 1], \
['currclspricemkt', 'Current Closing Price Market', 'string', None, 'Bloomberg_5PM', 1, 0, 'Current Closing Price Market', None, 1],
                ['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1],
                ['pb_trd_file', 'PB Trade File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import\\pb_to_fa_YYYYMMDD.csv', 1, 0, 'PB Trade File', None, 1],
['loan_xls_template', 'Loan Template', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\template\\ExcelUpload - Cash Entry.xlsm', 1, 0, 'Loan Template', None, 1],
['loan_xls_output', 'Loan Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ExcelUpload - Cash Entry YYYYMMDD.xlsm', 1, 0, 'Loan Output', None, 1],
['ss_bb_output', 'SS/BB Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ss_bb_trd_YYYYMMDD.xlsx', 1, 0, 'SS/BB Output', None, 1],
['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]]
def question_marks(st):
question_marks = '?'
for i in range(0, len(st.split(','))-1):
question_marks = question_marks + ",?"
return question_marks
def db_cur(source = ":memory:"):
# sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
tbl_exists = cur.fetchone()
if tbl_exists[0] == 0:
cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
if index_arr is not None:
for index in index_arr:
cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
if arr is not None:
cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
return
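# A minimal sketch of the sqlite helpers above with throwaway data (table and
# column names are illustrative only):
#   conn, cur = db_cur()
#   create_tbl(cur, "pos", "id, qty", arr=[(1, 100.0), (2, -50.0)], index_arr=["qty"])
#   cur.execute("SELECT SUM(qty) FROM pos")
#   print cur.fetchone()[0]   # -> 50.0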
def getTRSUnderlying(acm_ins):
acm_und_ins = None
bbticker = ""
for acm_leg in acm_ins.Legs():
if acm_leg.PayLeg() == False:
acm_und_ins = acm_leg.FloatRateReference()
break
return acm_und_ins
def getUndInstrumentBBTicker(acm_ins):
bbticker = ''
acm_und_ins = getTRSUnderlying(acm_ins)
if acm_und_ins != None:
for aliase in acm_und_ins.Aliases():
if aliase.Type().Name() == 'BB_TICKER':
bbticker = aliase.Alias().strip()
break
return bbticker
def getGroupTradeRef(external_ref):
groupTradeRef = None
strSql = """
select trdnbr, t.time
from trade t, instrument i, party a, party c, portfolio pf, leg l, instrument u
where t.insaddr = i.insaddr
and i.instype = 'TotalReturnSwap'
and t.status not in ('Void', 'Simulated')
and t.acquirer_ptynbr = a.ptynbr
and t.counterparty_ptynbr = c.ptynbr
and t.prfnbr = pf.prfnbr
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr
and l.float_rate = u.insaddr
order by t.time, trdnbr
""" % (external_ref)
print strSql
res = ael.asql(strSql)
columns, buf = res
for table in buf:
for row in table:
groupTradeRef = row[0]
break
return groupTradeRef
def getFirstTRS(external_ref, und_insaddr):
strSql = """select i.insid
from trade t, instrument i, leg l
where i.insaddr = t.insaddr
and i.instype = 'TotalReturnSwap'
and t.status not in ('Void', 'Simulated')
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr and l.payleg = 'No' and l.type = 'Total Return'
and add_info(t, 'Trd Pos Closed') ~= 'Yes'
and l.float_rate = %s
and t.trdnbr = t.trx_trdnbr""" % (external_ref, str(und_insaddr))
#print strSql
rs = ael.asql(strSql)
columns, buf = rs
insid = ''
for table in buf:
for row in table:
insid = str(row[0]).strip()
break
if insid == '':
return None
acm_ins = acm.FInstrument[insid]
return acm_ins
def getTotalTradeQuantity(external_ref, und_insaddr, asofdate):
acm_ins = getFirstTRS(external_ref, und_insaddr)
if acm_ins == None:
return None
#print "instrument='%s' and status <> 'Void' and status <> 'Simulated'" % acm_ins.Name()
#acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <= '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime < '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trd = None
if acm_trds != None:
for acm_trd in acm_trds:
if acm_trd.TrxTrade() != None:
if acm_trd.Oid() == acm_trd.TrxTrade().Oid():
break
else:
return None
total_quantity = 0.0
if acm_trd.TrxTrade() == None:
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
return abs(total_quantity)
else:
return None
elif acm_trd.Oid() == acm_trd.TrxTrade().Oid():
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
# find all other trade
#acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime <= '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime < '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
for acm_trs_trd in acm_trs_trds:
# add this to handle tradeTime lag 8 hours from gmt
ael_trd_date = ael.date(str(acm_trs_trd.TradeTime())[0:10])
if ael_trd_date >= asofdate.add_days(1):
continue
if acm_trs_trd.Oid() != acm_trs_trd.TrxTrade().Oid() and \
acm_trs_trd.Status() not in ('Void', 'Simulated') and \
acm_trs_trd.Instrument().InsType() == 'TotalReturnSwap':
total_quantity = total_quantity + acm_trs_trd.Quantity()
#print total_quantity
'''
if total_quantity == 0.0:
return None
else:
return abs(total_quantity)
'''
return -total_quantity
else:
return -total_quantity
def getUnderlyingPrice(dt, ael_und_ins, currclspricemkt, histclspricemkt):
try:
if dt == ael.date_today():
cls_price = ael_und_ins.used_price(dt, ael_und_ins.
|
DigitalGlobe/gbdxtools
|
examples/basic_workflow_client.py
|
Python
|
mit
| 211
| 0.009479
|
from gbdxtools import Interface
gbdx = None
def go():
print(gbdx.task_registry.list())
print(gbdx.task_registry.get_definition('HelloGBDX'))
if __name__ == "__main__":
gbdx = Interface()
go()
|
nordicdyno/cfbackup
|
cfbackup/core.py
|
Python
|
bsd-2-clause
| 6,025
| 0.001494
|
"""This module provides the main functionality of cfbackup
"""
from __future__ import print_function
import sys
import argparse
import json
import CloudFlare
# https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records
class CF_DNS_Records(object):
"""
    commands for DNS records manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for DNS records manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show DSN records")
try:
records = self._all_records()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(records, indent=4))
return
        records_by_type = {}
types = {}
for rec in records:
if not records_by_type.get(rec["type"]):
types[rec["type"]] = 0
records_by_type[rec["type"]] = []
types[rec["type"]] += 1
records_by_type[rec["type"]].append(rec)
for t in sorted(list(types)):
for rec in records_by_type[t]:
# print(json.dumps(rec, indent=4))
print("Type: {}".format(rec["typ
|
e"]))
print("Name: {}".format(rec["name"]))
print("Content: {}".format(rec["content"]))
print("TTL: {}{}".format(
rec["ttl"],
" (auto)" if str(rec["ttl"]) == "1" else "",
))
print("Proxied: {}".format(rec["proxied"]))
print("Auto: {}".format(rec["meta"]["auto_added"]))
print("")
print("")
print("-------------------")
print("Records stat:")
print("-------------------")
print("{0: <11} {1: >4}".format("<type>", "<count>"))
for t in sorted(list(types)):
print("{0: <11} {1: >4}".format(t, types[t]))
print("-------------------")
print("{0: <11} {1: >4}".format("Total:", len(records)))
def _all_records(self):
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1})
if len(zones) == 0:
exit('No zones found')
zone_id = zones[0]['id']
cf_raw = CloudFlare.CloudFlare(raw=True)
page = 1
records = []
while True:
raw_results = cf_raw.zones.dns_records.get(
zone_id,
params={'per_page':100, 'page':page},
)
total_pages = raw_results['result_info']['total_pages']
result = raw_results['result']
for rec in result:
records.append(rec)
if page == total_pages:
break
page += 1
return records
# https://api.cloudflare.com/#zone-list-zones
class CF_Zones(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for zones manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show cf zones")
try:
zones = self._all_zones()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(zones, indent=4))
return
for z in zones:
print("Zone: {0: <16} NS: {1}".format(
z["name"],
z["name_servers"][0],
))
for ns in z["name_servers"][1:]:
print(" {0: <16} {1}".format("", ns))
def _all_zones(self):
cf = CloudFlare.CloudFlare(raw=True)
if self._ctx.zone_name:
raw_results = cf.zones.get(params={
'name': self._ctx.zone_name,
'per_page': 1,
'page': 1,
})
return raw_results['result']
page = 1
domains = []
while True:
raw_results = cf.zones.get(params={'per_page':5, 'page':page})
total_pages = raw_results['result_info']['total_pages']
zones = raw_results['result']
for z in zones:
domains.append(z)
if page == total_pages:
break
page += 1
return domains
COMMANDS = [
"show",
# "restore"
]
OBJECT_ENTRYPOINT = {
"zones": CF_Zones,
"dns": CF_DNS_Records,
}
def main():
"""Main entry"""
parser = argparse.ArgumentParser(
prog="cfbackup",
description='Simple Cloudflare backup tool.',
)
parser.add_argument(
"command",
choices=[x for x in COMMANDS],
help="command",
)
subparsers = parser.add_subparsers(
help='Object of command',
dest="object"
)
parser_zones = subparsers.add_parser("zones")
parser_zones.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
parser_zones.add_argument(
"-z", "--zone-name",
help="optional zone name",
)
parser_dns = subparsers.add_parser("dns")
parser_dns.add_argument(
"-z", "--zone-name",
required=True,
help="required zone name",
)
parser_dns.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
args = parser.parse_args()
OBJECT_ENTRYPOINT[args.object](args).run()
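# Hypothetical invocations (the zone name is a placeholder; credentials come
# from the usual CloudFlare configuration/environment):
#   cfbackup show zones --pretty
#   cfbackup show dns -z example.com --pretty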
|
kidmillions/Stino
|
stino/st_base.py
|
Python
|
mit
| 1,584
| 0.000631
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Documents
#
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import inspect
from . import pyarduino
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
def get_plugin_path():
this_folder_path = os.path.dirname(this_file_path)
plugin_path = os.path.dirname(this_folder_path)
return plugin_path
def get_packages_path():
plugin_path = get_plugin_path()
packages_path = os.path.dirname(plugin_path)
return packages_path
def get_stino_user_path():
packages_path = get_packages_path()
user_path = os.path.join(packages_path, 'User')
stino_user_path = os.path.join(user_path, 'Stino')
return stino_user_path
def get_preset_path():
plugin_path = get_plugin_path()
preset_path = os.path.join(plugin_path, 'preset')
return preset_path
def get_user_preset_path():
stino_user_path = get_stino_user_path()
preset_path = os.path.join(stino_user_path, 'preset')
return preset_path
def get_user_menu_path():
stino_user_path = get_stino_user_path()
preset_path = os.path.join(stino_user_path, 'menu')
return preset_path
def get_settings():
settings = pyarduino.base.settings.get_arduino_settings()
return settings
def get_arduino_info():
arduino_info = pyarduino.arduino_info.get_arduino_info()
return arduino_info
def get_i18n():
i18n = pyarduino.base.i18n.I18N()
return i18n
|
kuangliu/pytorch-cifar
|
models/mobilenetv2.py
|
Python
|
mit
| 3,092
| 0.003558
|
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride==1 else out
return out
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = MobileNetV2()
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
|
RIFTIO/rift.ware-descriptor-packages
|
4.3/src/nsd/haproxy_waf_http_ns/scripts/waf_config.py
|
Python
|
apache-2.0
| 9,663
| 0.003415
|
#!/usr/bin/env python3
############################################################################
# Copyright 2017 RIFT.IO Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
|
dangoldin/poor-mans-data-pipeline
|
setup.py
|
Python
|
mit
| 491
| 0.04277
|
# Mostly from http://peterdowns.com/posts/first-time-with-pypi.html
from distutils.core import setup
setup(
name = 'pmdp',
packages = ['pmdp'],
version = '0.3',
description = 'A poor man\'s data pipeline',
author = 'Dan Goldin',
author_email = '[email protected]',
url = 'https://github.com/dangoldin/poor-mans-data-pipeline',
download_url = 'https://github.com/dangoldin/poor-mans-data-pipeline/tarball/0.3',
keywords = ['data', 'data-pipeline'],
classifiers = [],
)
|
lawphill/PhillipsPearl_Corpora
|
Spanish/scripts/dict_convert.py
|
Python
|
mit
| 8,349
| 0.010189
|
# -*- coding: utf-8 -*-
import os
import glob
import sys
import re
# Display debug information, print to statistics file
verbose = 0
obstruents = {'b':'B', 'd':'D', 'g':'G'}
nasals = ['m', 'n', 'N']
# Vocales
vowels = ['a', 'e', 'i', 'o', 'u']
# Semivocales
semivowels = ['%', '#', '@', '$', '&', '!', '*', '+', '-', '3']
# Voiced consonants
voiced = ['b', 'B', 'd', 'D', 'g', 'G', 'm', 'n', 'N', '|', 'J', 'r', 'R']
# Track the number of utterances
numUtterances = 0
# Track the number of words
numWords = 0
#wordsPerUtterance = []
phonemesPerWord = []
def interVocalicRules(sent):
newSent = sent
# Create all the dipthongs that occur between words
newSent = newSent.replace('a i', '- ')
newSent = newSent.replace('a u', '+ ')
# Do I indicate vowel lengthening?
# newSent = newSent.replace('a a', 'aa ')
newSent = newSent.replace('e i', '* ')
# newSent = newSent.replace('e e', 'ee ')
newSent = newSent.replace('i a', '% ')
newSent = newSent.replace('i e', '# ')
newSent = newSent.replace('i o', '@ ')
# newSent = newSent.replace('i i', 'ii ')
newSent = newSent.replace('o i', '3 ')
# newSent = newSent.replace('o o', 'oo ')
# This is not a dipthong replacement but it still needs to happen:
# lo ultimo = [lultimo]
newSent = newSent.replace('o u', 'u ')
newSent = newSent.replace('u a', '& ')
newSent = newSent.replace('u e', '$ ')
newSent = newSent.replace('u i', '! ')
# newSent = newSent.replace('u u', 'uu ')
# Avoid creating onsets that are illegal
newSent = newSent.replace(' nt','n t')
newSent = newSent.replace(' nR','n R')
newSent = newSent.replace(' zl','z l')
newSent = newSent.replace(' zR','z R')
newSent = newSent.replace(' ts','t s')
newSent = newSent.replace(' tl','t l')
newSent = newSent.replace(' tR','t R')
newSent = newSent.replace(' nd','n d')
newSent = newSent.replace(' ks','k s')
newSent = newSent.replace(' kl','k l')
# Turn b/d/g's into B/D/G's where appropriate
strList = list(newSent)
i = 0
prev = None
for symbol in strList:
if symbol in obstruents:
if not prev or prev in nasals:
i += 1
continue
else:
strList[i] = obstruents[symbol]
if symbol in voiced:
if prev == 's':
strList[i-1] = 'z'
prev = symbol
i += 1
newSent = "".join(strList)
return newSent
def sententialRules(sentence):
# Apply rules between words, like when a [b] occurs between vowels, turn it into a [B]
# Vowels together.. a aser = aser
# Apply rule for two vowels being together.. si aqui = s(ia dipthong)ki...
# Split the sentence into chunks based on pauses.
# This distinction exists because:
chunks = sentence.split('[/]')
# This has to be done here because I allow [/] to be remain up until this point
# for the purpose of knowing where boundaries occur, but we don't want to count [/]
newChunkList = []
for chunk in chunks:
#wordsPerUtterance.append(len(chunk.split()))
globals()["numWords"] += len(chunk.split())
newChunk = interVocalicRules(chunk)
if verbose == 1:
print newChunk
newChunkList.append(newChunk)
return newChunkList
def main():
dictFile = "Spanish/dicts/dict_converted.txt"
chaDir = "Spanish/cha_files/"
file = open(dictFile, 'r')
lines = file.readlines()
file.close()
# Word bank is a dictionary - lookup by its key retrieves its IPA translation
word = {}
# Split by whitespace since that's how it's set up
for line in lines:
x = line.split()
word[x[0].lower()] = x[1]
keyErrors = open("Spanish/dicts/keyErrors.txt", "w")
outFile = open("Spanish/Spanish-phon.txt", 'w')
outFileOrig = open("Spanish/Spanish-ortho.txt", 'w')
for fileName in sorted(glob.glob(os.path.join(chaDir, '*.cha'))):
# Skip the file if it comes from the 28- or 36-month sessions
if fileName.startswith(tuple([chaDir + str(28),chaDir + str(36)])):
continue
if verbose == 1:
print fileName
file = open(fileName, 'r')
lines = file.readlines()
file.close()
#file = open(fileName.replace('.cha', '_ipa.txt'), 'w')
for line in lines:
# Only look at child-directed speech(from INV or PAR)
if line.startswith('*INV') or line.startswith('*PAR') or line.startswith('*TEA') or line.startswith('*FAT'):
if verbose == 1:
print 'Original line: ' + line
# Split on pauses to separate utterances and count them
#numUtterances += len(line.split('[/]'))
# Split the sentence into individual words
words = line.split()
# Build the IPA-translated sentence
ipaSentence = ""
# Look up individual words
for x in words[1:]:
# Ignore punctuation
if x == '.' or x == '?' or x == '!':
continue
outFileOrig.write(x + ' ')
# Need to make some character substitions to make dictionary search work
x = re.sub('é','}',x)
x = re.sub('á','{',x)
x = re.sub('í','<',x)
x = re.sub('ó','>',x)
x = re.sub('ú','}',x)
x = re.sub('ñ','|',x)
x = re.sub('ü','=',x)
x = re.sub(':','',x)
x = re.sub('<.+>','',x)
try:
ipaSentence += word[x.lower()]
ipaSentence += " "
except KeyError:
keyErrors.write("KeyError with: " + x.lower() + "\n")
continue
outFileOrig.write('\n')
newChunks = sententialRules(ipaSentence)
ipaSentence = ""
for chunk in newChunks:
ipaSentence += chunk
ipaSentence += " "
newChunks = ipaSentence.split()
ipaSentence = ""
for chunk in newChunks:
ipaSentence += chunk
ipaSentence += " "
# Remove trailing whitespace
ipaSentence = ipaSentence.rstrip()
# Calculate phonemes per word
ipaWords = ipaSentence.split()
phonemesInWord = 0
for ipaWord in ipaWords:
phonemesInWord += len(ipaWord)
# Number of original words is the length of the "words" variable beyond the first
# part that indicates the speaker(i.e. *INV:)
globals()["phonemesPerWord"].append(float(float(phonemesInWord) / float(len(words[1:]))))
if verbose == 1:
print ipaSentence
if len(ipaSentence) > 0:
outFile.write(ipaSentence + '\n')
globals()["numUtterances"] += 1
#file.write(ipaSentence + '\n')
#file.close()
outFile.close()
keyErrors.close()
if verbose == 1:
statisticsFile = open("statistics.txt", 'w')
statisticsFile.write("Number of utterances: " + str(globals()["numUtterances"]) + "\n")
statisticsFile.write("Number of words by tokens: " + str(globals()["numWords"]) + "\n")
statisticsFile.write("Number of words by type: " + str(len(word)) + "\n")
averageWordsPerUtterance = float(float(globals()["numWords"]) / float(numUtterances))
statisticsFile.write("Words per utterance on average: " + str(averageWordsPerUtterance) + "\n")
averagePhonemesPerWord = float(float(sum(globals()["phonemesPerWord"])) / float(len(globals()["phonemesPerWord"])))
statisticsFile.write("Phonemes per word on average: " + str(averagePhonemesPerWord) + "\n")
|
kfoss/try41
|
dockerfiles/redwood/visual.py
|
Python
|
apache-2.0
| 2,481
| 0.008061
|
#!/usr/bin/env python
#
# Copyright (c) 2013 In-Q-Tel, Inc/Lab41, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on 19 October 2013
@author: Lab41
Helper functions for creating visualizations
"""
import array
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
def visualize_scatter(counts, codes, data, codebook, num_clusters, xlabel="", ylabel="", title=""):
"""
Generates a 2-D scatter plot visualization of two-feature data.
:param counts: dictionary mapping each cluster to its number of
observation pairs
:param codes: list of codes for each observation row in the order returned by the original query
:param data: list of observations returned from query in their original order
:param codebook: the coordinates of the centroids
:param num_clusters: number of specified clusters up to 8
:param xlabel: a label for the x axis (Default: None)
:param ylabel: a label for the y axis (Default: None)
"""
if num_clusters > 8:
print "Visualize scatter only supports up to 8 clusters"
return
num_features = 2
list_arrays = list()
list_arr_idx = array.array("I", [0] * num_clusters)  # one write index per cluster
for idx in range(num_clusters):
list_arrays.append(np.zeros((counts[idx], num_features)))
for i, j in zip(codes, data):
list_arrays[i][list_arr_idx[i]][0] = j[0]
list_arrays[i][list_arr_idx[i]][1] = j[1]
list_arr_idx[i] += 1
#plot the clusters first as relatively larger circles
plt.scatter(codebook[:,0], codebook[:,1], color='orange', s=260)
colors = ['red', 'blue', 'green', 'purple', 'cyan', 'black', 'brown', 'grey']
for idx in range(num_clusters):
plt.scatter(list_arrays[idx][:,0], list_arrays[idx][:,1], c=colors[idx])
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
#plt.show()
plt.savefig('/home/docker/foo.png')
plt.close()
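# Hedged usage sketch (not part of the original module; assumes scipy is
# installed, and every variable name below is hypothetical):
#
#   import numpy as np
#   from scipy.cluster.vq import kmeans, vq
#   observations = np.random.rand(100, 2)   # 100 rows x 2 features
#   codebook, _ = kmeans(observations, 3)   # 3 centroids
#   codes, _ = vq(observations, codebook)   # cluster code per row
#   counts = dict((i, int((codes == i).sum())) for i in range(3))
#   visualize_scatter(counts, list(codes), observations.tolist(), codebook,
#                     3, xlabel="f0", ylabel="f1", title="demo")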
|
iwm911/plaso
|
plaso/parsers/winreg_plugins/outlook.py
|
Python
|
apache-2.0
| 2,761
| 0.010503
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains an Outlook Registry parser."""
from plaso.lib import event
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides ([email protected])'
class OutlookSearchMRUPlugin(interface.KeyPlugin):
"""Windows Registry plugin parsing Outlook Search MRU keys."""
NAME = 'winreg_outlook_mru'
DESCRIPTION = 'PST Paths'
REG_KEYS = [
u'\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search',
u'\\Software\\Microsoft\\Office\\14.0\\Outlook\\Search']
# TODO: The catalog for Office 2013 (15.0) contains binary values not
# dword values. Check if Office 2007 and 2010 have the same. Re-enable the
# plug-ins once confirmed and OutlookSearchMRUPlugin has been extended to
# handle the binary data or create a OutlookSearchCatalogMRUPlugin.
# Registry keys for:
# MS Outlook 2007 Search Catalog:
# '\\Software\\Microsoft\\Office\\12.0\\Outlook\\Catalog'
# MS Outlook 2010 Search Catalog:
# '\\Software\\Microsoft\\Office\\14.0\\Outlook\\Search\\Catalog'
# MS Outlook 2013 Search Catalog:
# '\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search\\Catalog'
REG_TYPE = 'NTUSER'
def GetEntries(self, key, **unused_kwargs):
"""Collect the values under Outlook and return event for each one."""
value_index = 0
for value in key.GetValues():
# Ignore the default value.
if not value.name:
continue
# Ignore any value that is empty or that does not contain an integer.
if not value.data or not value.DataIsInteger():
continue
# TODO: change this 32-bit integer into something meaningful, for now
# the value name is the most interesting part.
text_dict = {}
text_dict[value.name] = '0x{0:08x}'.format(value.data)
if value_index == 0:
timestamp = key.last_written_timestamp
else:
timestamp = 0
yield event.WinRegistryEvent(
key.path, text_dict, timestamp=timestamp,
source_append=': {0:s}'.format(self.DESCRIPTION))
value_index += 1
|
DylannCordel/django-filer
|
filer/fields/folder.py
|
Python
|
bsd-3-clause
| 4,982
| 0.001004
|
# -*- coding: utf-8 -*-
import warnings
from django import forms
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from filer.models import Folder
from filer.utils.compatibility import truncate_words
from filer.utils.model_label import get_model_label
class AdminFolderWidget(ForeignKeyRawIdWidget):
choices = None
input_type = 'hidden'
is_hidden = False
def render(self, name, value, attrs=None):
obj = self.obj_for_value(value)
css_id = attrs.get('id')
css_id_folder = "%s_folder" % css_id
css_id_description_txt = "%s_description_txt" % css_id
if attrs is None:
attrs = {}
related_url = None
if value:
try:
folder = Folder.objects.get(pk=value)
related_url = folder.get_admin_directory_listing_url_path()
except Exception:
pass
if not related_url:
related_url = reverse('admin:filer-directory_listing-last')
params = self.url_parameters()
params['select_folder'] = 1
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in list(params.items())])
else:
url = ''
if 'class' not in attrs:
# The JavaScript looks for this hook.
attrs['class'] = 'vForeignKeyRawIdAdminField'
super_attrs = attrs.copy()
hidden_input = super(ForeignKeyRawIdWidget, self).render(name, value, super_attrs)
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
context = {
'hidden_input': hidden_input,
'lookup_url': '%s%s' % (related_url, url),
'lookup_name': name,
'span_id': css_id_description_txt,
'object': obj,
'clear_id': '%s_clear' % css_id,
'descid': css_id_description_txt,
'noimg': 'filer/icons/nofile_32x32.png',
'foldid': css_id_folder,
'id': css_id,
}
html = render_to_string('admin/filer/widgets/admin_folder.html', context)
return mark_safe(html)
def label_for_value(self, value):
obj = self.obj_for_value(value)
return ' <strong>%s</strong>' % truncate_words(obj, 14)
def obj_for_value(self, value):
try:
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
except Exception:
obj = None
return obj
class Media(object):
js = (static('filer/js/addons/popup_handling.js'), )
class AdminFolderFormField(forms.ModelChoiceField):
widget = AdminFolderWidget
def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
self.rel = rel
self.queryset = queryset
self.limit_choices_to = kwargs.pop('limit_choices_to', None)
self.to_field_name = to_field_name
self.max_value = None
self.min_value = None
kwargs.pop('widget', None)
forms.Field.__init__(self, widget=self.widget(rel, site), *args, **kwargs)
def widget_attrs(self, widget):
widget.required = self.required
return {}
class FilerFolderField(models.ForeignKey):
default_form_class = AdminFolderFormField
default_model_class = Folder
def __init__(self, **kwargs):
# We hard-code the `to` argument for ForeignKey.__init__
dfl = get_model_label(self.default_model_class)
if "to" in kwargs.keys(): # pragma: no cover
old_to = get_model_label(kwargs.pop("to"))
if old_to != dfl:
msg = "%s can only be a ForeignKey to %s; %s passed" % (
self.__class__.__name__, dfl, old_to
)
warnings.warn(msg, SyntaxWarning)
kwargs['to'] = dfl
super(FilerFolderField, self).__init__(**kwargs)
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {
'form_class': self.default_form_class,
'rel': self.rel,
}
defaults.update(kwargs)
return super(FilerFolderField, self).formfield(**defaults)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.related.ForeignKey"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
|
akmcinto/TodoApp
|
ToDoApp/polls/views.py
|
Python
|
apache-2.0
| 1,788
| 0.003356
|
"""
Copyright 2016 Andrea McIntosh
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = "polls/index.html"
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
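# A matching URLconf sketch (an assumption, not from this file: the app is
# namespaced 'polls', as the reverse('polls:results', ...) call implies;
# Django 1.x url() syntax):
#
#   from django.conf.urls import url
#   from . import views
#
#   app_name = 'polls'
#   urlpatterns = [
#       url(r'^$', views.IndexView.as_view(), name='index'),
#       url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
#       url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
#       url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
#   ]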
|
carefree0910/MachineLearning
|
_Dist/NeuralNetworks/i_CNN/CNN.py
|
Python
|
mit
| 3,860
| 0.00544
|
import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.Base import Generator4d
from _Dist.NeuralNetworks.h_RNN.RNN import Basic3d
from _Dist.NeuralNetworks.NNUtil import Activations
class Basic4d(Basic3d):
def _calculate(self, x, y=None, weights=None, tensor=None, n_elem=1e7, is_training=False):
return super(Basic4d, self)._calculate(x, y, weights, tensor, n_elem / 10, is_training)
class CNN(Basic4d):
def __init__(self, *args, **kwargs):
self.height, self.width = kwargs.pop("height", None), kwargs.pop("width", None)
super(CNN, self).__init__(*args, **kwargs)
self._name_appendix = "CNN"
self._generator_base = Generator4d
self.conv_activations = None
self.n_filters = self.filter_sizes = self.poolings = None
def init_model_param_settings(self):
super(CNN, self).init_model_param_settings()
self.conv_activations = self.model_param_settings.get("conv_activa
|
tions", "relu")
def init_model_structure_settings(self):
super(CNN, self).init_model_structure_settings()
self.n_filters = self.model_structure_settings.get("n_filters", [32, 32])
self.filter_sizes = self.model_structure_settings.get("filter_sizes", [(3, 3), (3, 3)])
self.poolings = self.model_structure_settings.get("poolings", [None, "max_pool"])
if not len(self.filter_sizes) == len(self.poolings) == len(self.n_filters):
raise ValueError("Length of filter_sizes, n_filters & pooling should be the same")
if isinstance(self.conv_activations, str):
self.conv_activations = [self.conv_activations] * len(self.filter_sizes)
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
if self.height is None or self.width is None:
assert len(x.shape) == 4, "height and width are not provided, hence len(x.shape) should be 4"
self.height, self.width = x.shape[1:3]
if len(x.shape) == 2:
x = x.reshape(len(x), self.height, self.width, -1)
else:
assert self.height == x.shape[1], "height is set to be {}, but {} found".format(self.height, x.shape[1])
assert self.width == x.shape[2], "width is set to be {}, but {} found".format(self.height, x.shape[2])
if x_test is not None and len(x_test.shape) == 2:
x_test = x_test.reshape(len(x_test), self.height, self.width, -1)
super(CNN, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
def _define_input_and_placeholder(self):
self._is_training = tf.placeholder(tf.bool, name="is_training")
self._tfx = tf.placeholder(tf.float32, [None, self.height, self.width, self.n_dim], name="X")
self._tfy = tf.placeholder(tf.float32, [None, self.n_class], name="Y")
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
for i, (filter_size, n_filter, pooling) in enumerate(zip(
self.filter_sizes, self.n_filters, self.poolings
)):
net = tf.layers.conv2d(net, n_filter, filter_size, padding="same")
net = tf.layers.batch_normalization(net, training=self._is_training)
activation = self.conv_activations[i]
if activation is not None:
net = getattr(Activations, activation)(net, activation)
net = tf.layers.dropout(net, training=self._is_training)
if pooling is not None:
net = tf.layers.max_pooling2d(net, 2, 2, name="pool")
fc_shape = np.prod([net.shape[i].value for i in range(1, 4)])
net = tf.reshape(net, [-1, fc_shape])
super(CNN, self)._build_model(net)
|
ukch/gae_simple_blog
|
settings.py
|
Python
|
bsd-3-clause
| 2,108
| 0.000949
|
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurrences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
from private_settings import SECRET_KEY
import os
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.markup',
'djangotoolbox',
'autoload',
'dbindexer',
"simpleblog.content",
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = [
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
if not DEBUG:
# Put the stats middleware after autoload
MIDDLEWARE_CLASSES.insert(
1, 'google.appengine.ext.appstats.recording.AppStatsDjangoMiddleware')
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'simpleblog.urls'
if DEBUG:
STATIC_URL = "/devstatic/"
else:
STATIC_URL = "/static/"
STATICFILES_DIRS = ("staticfiles", )
STATIC_ROOT = "static_collected"
PRE_DEPLOY_COMMANDS = ("collectstatic", )
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/"
|
choffmeister/transcode
|
lib/ffmpeg.py
|
Python
|
mit
| 1,356
| 0.030236
|
import utils
import re
import subprocess
#regexes
duration_regex = re.compile('Duration:\s*(?P<time>\d{2}:\d{2}:\d{2}.\d{2})')
stream_regex = re.compile('Stream #(?P<stream_id>\d+:\d+)(\((?P<language>\w+)\))?: (?P<type>\w+): (?P<format>[\w\d]+)')
crop_regex = re.compile('crop=(?P<width>\d+):(?P<height>\d+):(?P<x>\d+):(?P<y>\d+)')
# detect crop settings
def detect_crop(src):
proc = subprocess.Popen(['ffmpeg', '-i', src, '-t', str(100), '-filter:v', 'cropdetect', '-f', 'null', '-'], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
crops = crop_regex.findall(stderr)
return max(set(crops), key=crops.count)
# detect duration
def detect_duration(src):
proc = subprocess.Popen(['ffmpeg', '-i', src], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
match = duration_regex.search(stderr)
duration_str = match.group('time')
duration_secs = utils.timestring_to_seconds(duration_str)
return (duration_str, duration_secs)
# detects stream IDs
def detect_streams(src):
proc = subprocess.Popen(['ffmpeg', '-i', src], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
streams = []
for m in stream_regex.finditer(stderr):
streams.append({
'id': m.group('stream_id'),
'lang': m.group('language'),
'type': m.group('type'),
'fmt': m.group('format')
})
return streams
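# Hedged usage sketch (assumes an ffmpeg binary on PATH and a local file
# 'input.mkv'; neither is provided by this module):
#
#   crop = detect_crop('input.mkv')                   # e.g. ('1920', '800', '0', '140')
#   dur_str, dur_secs = detect_duration('input.mkv')  # e.g. ('01:52:13.44', 6733.44)
#   for s in detect_streams('input.mkv'):
#       print '%(id)s %(type)s %(fmt)s %(lang)s' % s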
|
imait/HtmlDocument
|
for_python2.x/htmldocument.py
|
Python
|
mit
| 46,242
| 0.00093
|
# -*- coding: utf-8-unix; mode: python -*-
"""Module to assist to make HTML.
This module provides class that assist to make HTML.
Author: 2011 IMAI Toshiyuki
Copyright (c) 2011 IMAI Toshiyuki
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
Class:
HTML -- Assist to make HTML.
"""
__author__ = 'IMAI Toshiyuki'
__version__ = '1.0'
import os
import Cookie
import cgi
class HTML:
"""Assist to make HTML.
Attributes:
encode -- encoding
lang -- lang attribute of html element
sitetitle -- site title
pagetitle -- default page title
titledelimiter -- delimiter of site title and page title
cssfiles -- list object that contains path strings to css files
jsfiles -- list object that contains path string to JavaScript files
jstext -- text of JavaScript code
cookie -- http cookie
nocache -- if it is True then do not make user agents create cache
Methods:
set_encode(encode) -- Set attribute encode.
set_lang(lang) -- Set attribute lang.
set_site_title(sitetitle) -- Set attribute sitetitle.
set_page_title(pagetitle) -- Set attribute pagetitle.
set_titledelimiter(titledelimiter) -- Set attribute titledelimiter.
set_cookie(cookie) -- Set attribute cookie.
set_nocache(nocache) -- Set attribute nocache.
print_resp_header() -- Print HTTP Response Header.
print_html_header() -- Print xhtml DTD, html start tag, head element
and body start tag.
print_html_close() -- Print end tags of body element and html element.
printe(value) -- Encode value and print it.
h1(content, [attrs]) -- Create h1 element.
h2(content, [attrs]) -- Create h2 element.
h3(content, [attrs]) -- Create h3 element.
h4(content, [attrs]) -- Create h4 element.
h5(content, [attrs]) -- Create h5 element.
h6(content, [attrs]) -- Create h6 element.
p(content, [attrs]) -- Create p element.
start_p([attrs]) -- Create start tag of p element.
end_p() -- Create end tag of p element.
div(content, [attrs]) -- Create div element.
start_div([attrs]) -- Create start tag of div element.
end_div() -- Create end tag of div element.
blockquote(content, [cite], [attrs]) -- Create blockquote element.
start_blockquote([cite], [attrs]) -- Create start tag of blockquote
element.
end_blockquote() -- Create end tag of blockquote element.
pre(content, [attrs]) -- Create pre element.
start_pre([attrs]) -- Create start tag of pre element.
end_pre() -- Create end tag of pre element.
address(content, [attrs]) -- Create address element.
Del(content, [attrs]) -- Create del element.
ins(content, [attrs]) -- Create ins element.
a(content, [attrs]) -- Create a element.
em(content, [attrs]) -- Create em element.
strong(content, [attrs]) -- Create strong element.
abbr(content, [attrs]) -- Create abbr element.
acronym(content, [attrs]) -- Create acronym element.
bdo(content, [attrs]) -- Create bdo element.
cite(content, [attrs]) -- Create cite element.
code(content, [attrs]) -- Create code element.
dfn(content, [attrs]) -- Create dfn element.
kbd(content, [attrs]) -- Create kbd element.
q(content, [attrs]) -- Create q element.
samp(content, [attrs]) -- Create samp element.
span(content, [attrs]) -- Create span element.
sub(content, [attrs]) -- Create sub element.
sup(content, [attrs]) -- Create sup element.
var(content, [attrs]) -- Create var element.
ruby(content, title, [attrs]) -- Create ruby element.
ol(content, [attrs]) -- Create ol element.
start_ol([attrs]) -- Create start tag of ol element.
end_ol() -- Create end tag of ol element.
ul(content, [attrs]) -- Create ul element.
start_ul([attrs]) -- Create start tag of ul element.
end_ul() -- Create end tag of ul element.
li(content, [attrs]) -- Create li element.
dl(content, [attrs]) -- Create dl element.
start_dl([attrs]) -- Create start tag of dl element.
end_dl() -- Create end tag of p element.
dt(content, [attrs]) -- Create dt element.
dd(content, [attrs]) -- Create dd element.
br([attrs]) -- Create br element.
hr([attrs]) -- Create hr element.
start_form([method], [action], [enctype], [attrs]) -- Create start tag
of form element.
start_multipart_form([method], [action], [enctype], [attrs]) -- Create
start tag of form element for multipart.
end_form() -- Create end tag of form element.
textfield([name], [value], [size], [maxlength], [attrs]) -- Create
input element as form item text field.
textarea([name], [value], [rows], [columns], [attrs]) -- Create textarea
element.
password_field([name], [value], [size], [maxlength], [attrs]) -- Create
input element as form item password field.
filefield([name], [value], [size], [maxlength], [attrs]) -- Create input
element as form item file field.
popup_menu([name], [values], [default], [labels], [attributes], [attrs])
-- Create select element as form item popup menu.
scrolling_list([name], [values], [default], [size], [multiple],
[labels], [attributes], [attrs]) -- Create select element
as form item scrolling list.
select_list([name], [values], [default], [labels], [attributes], [size],
[multiple], [attrs]) -- Create select element.
checkbox_group([name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements as form
item check box group.
checkbox([name], [checked], [value], [label], [attrs]) -- Create input
element as form item check box group.
radio_group([name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements as form item
radio button group.
button_group([type], [name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements.
submit([name], [value], [attrs]) -- Create input element as form item
submit button.
reset([name], [value], [attrs]) -- Create input element as form item
reset button.
button([name], [value], [attrs]) -- Create input element as form item
button.
hidden([name], [value], [attrs]) -- Create input element as form item
hidden.
input(type, [attrs]) -- Create input element.
Usage:
import htmldocument
ht = htmldocument.HTML(
encode='utf-8',
lang='ja',
sitetitle='Site Name',
cssfiles=['./css/main.css'],
jsfiles=['./js/main.js'])
html.print_resp_header()
html.print_html_header()
ht.printe(ht.h1('Header Level 1'))
ht.printe(ht.p('Text body.'))
html.print_html_close()
"""
def __init__(self, encode='utf-8', lang='en', sitetitle=u'Untitled Site',
pagetitle=u'Untitled', titledelimiter=u' :: ',
cssfiles=None, jsfiles=None, jstext=None, cookie=None,
nocache=False):
"""Constructor of
|
maurobaraldi/quokka
|
quokka/utils/__init__.py
|
Python
|
mit
| 714
| 0
|
# -*- coding: utf-8 -*-
import logging
from speaklater import make_lazy_string
from quokka.modules.accounts.models import User
logger = logging.getLogger()
def lazy_str_setting(key, default=None):
from flask import current_app
return make_lazy_string(
lambda: current_app.config.get(key, default)
)
def get_current_user():
from flask.ext.security import current_user
try:
if not current_user.is_authenticated():
return None
except RuntimeError:
# Flask-Testing will fail
pass
try:
return User.objects.get(id=current_user.id)
except Exception as e:
logger.warning("No user found: %s" % e.message)
return None
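# Hedged usage sketch (the config key is hypothetical; lazy_str_setting needs
# an application context and get_current_user a request context):
#
#   SITE_TITLE = lazy_str_setting('SITE_TITLE', u'Quokka')
#   user = get_current_user()  # None when unauthenticated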
|
cloudnsru/PyCloudNS
|
PyCloudNS/records.py
|
Python
|
mit
| 967
| 0
|
from .req import Req
class Records(Req):
def __init__(self, url, email, secret):
super().__init__(url=url, email=email, secret=secret)
def get(self, zone_id, layer='default'):
return self.do_get("/zones/{}/{}/records".format(zone_id, layer))
def create(self, zone, layer, name, ttl, rtype, data, priority=0):
url = "/zones/{}/{}/records".format(zone, layer)
data = {
'layer': layer,
'name': name,
'ttl': ttl,
'record_type': rtype,
'value': data,
'priority': priority
}
return self.do_post(url, data=data)
def delete(self, zone, layer, record_id):
url = "/zones/{}/{}/records/{}".format(zone, layer, recor
|
d_id)
return self.do_delete(url)
def update(self, zone, layer, record_id, **params):
url = "/zones/{}/{}/records/{}".format(zone, layer, record_id)
return self.do_put(url, data=params)
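# Hedged usage sketch (endpoint and credentials are hypothetical; the Req
# base class is assumed to provide do_get/do_post/do_put/do_delete):
#
#   records = Records('https://api.example.com', '[email protected]', 's3cret')
#   records.create('example.com', 'default', 'www', 3600, 'A', '203.0.113.10')
#   for rec in records.get('example.com'):
#       print(rec)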
|
MoonRaker/cons2-python
|
cons2/crop.py
|
Python
|
gpl-3.0
| 6,619
| 0.009216
|
# crop.py
# Derek Groenendyk
# 2/15/2017
# reads input data from Excel workbook
from collections import OrderedDict
import logging
import numpy as np
import os
import sys
from cons2.cu import CONSUMPTIVE_USE
# from utils import excel
logger = logging.getLogger('crop')
logger.setLevel(logging.DEBUG)
class CROP(object):
"""docstring for CROP"""
def __init__(self, shrtname, longname, crop_type, mmnum, directory, sp):
self.sname = shrtname
self.lname = longname
self.crop_type = crop_type
self.directory = directory
if self.crop_type == 'ANNUAL':
self.mmnum = mmnum
if sp.et_method == 'fao':
self.stages = {}
self.kc = {}
# self.read_cropdev()
self.read_stages()
self.read_kc()
elif sp.et_method == 'scs':
self.get_nckc()
self.get_ckc()
# methods = {
# 'ANNUAL': ANNUAL,
# 'PERENNIAL': PERENNIAL
# }
# self.cu = methods[crop_type](sp, self)
self.cu = CONSUMPTIVE_USE(sp, self)
def read_cropdev(self):
try:
infile = open(os.path.join(self.directory,'data','crop_dev_coef.csv'),'r')
except TypeError:
logger.critical('crop_dev_coef.csv file not found.')
raise
lines = infile.readlines()
infile.close()
# sline = lines[1].split(',')
# cname = sline[0].replace(' ','')
# temp_cname = cname
stage_flag = False
kc_flag = False
switch = False
i = 1
# while i < len(lines):
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
# print(cname,self.sname)
if cname != '':
if cname == self.sname:
# print(i)
if not switch:
stage = sline[1].lower()
self.stages[stage] = np.array([float(item) for item in sline[2:6]])
# print(1.0-np.sum(self.stages[stage]))
stage_flag = True
else:
num = int(sline[1].replace(' ',''))
self.kc[num] = np.array([float(item) for item in sline[2:5]])
kc_flag = True
else:
if switch:
break
i += 1
switch = True
i += 1
if stage_flag == False or kc_flag == False:
logger.critical('Crop, ' + self.sname + ', not found in crop_dev_coef.csv.') # include site??
raise
def read_stages(self):
try:
infile = open(os.path.join(self.directory,'data','fao_crop_stages.csv'),'r')
except TypeError:
logger.critical('fao_crop_stages.csv file not found.')
raise
lines = infile.readlines()
infile.close()
flag = False
i = 1
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
if cname != '':
if cname == self.sname:
stage = sline[1].lower()
self.stages[stage] = np.array([float(item) for item in sline[2:6]])
flag = True
else:
if flag:
break
flag = False
i += 1
if not flag:
logger.critical('Crop, ' + self.sname + ', not found in fao_crop_stages.csv.') # include site??
raise
def read_kc(self):
try:
infile = open(os.path.join(self.directory,'data','fao_crop_coef.csv'),'r')
except TypeError:
logger.critical('fao_crop_coef.csv file not found.')
raise
lines = infile.readlines()
infile.close()
flag = False
i = 1
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
if cname != '':
if cname == self.sname:
num = int(sline[1].replace(' ',''))
self.kc[num] = np.array([float(item) for item in sline[2:5]])
flag = True
else:
if flag:
break
flag = False
i += 1
if not flag:
logger.critical('Crop, ' + self.sname + ', not found in fao_crop_coef.csv.') # include site??
raise
def get_nckc(self):
"""
Reads in SCS crop-stage coefficients and stores them on the instance.
Attributes set
--------------
nckc: list
List of crop coefficients
"""
try:
infile = open(os.path.join(self.directory,'data','scs_crop_stages.csv'),'r')
except TypeError:
logger.critical('scs_crop_stages.csv file not found.')
raise
lines = infile.readlines()
infile.close()
nckca = [float(item) for item in lines[0].split(',')[1:]]
nckcp = [float(item) for item in lines[1].split(',')[1:]]
if self.crop_type == 'PERENNIAL':
self.nckc= nckcp
else:
self.nckc = nckca
def get_ckc(self):
"""
Reads in SCS crop coefficients and stores them on the instance.
Attributes set
--------------
ckc: list
List of crop coefficients
"""
try:
infile = open(os.path.join(self.directory,'data','scs_crop_coef.csv'),'r')
except TypeError:
logger.critical('scs_crop_coef.csv file not found.')
raise
else:
lines = infile.readlines()
infile.close()
if self.crop_type == 'PERENNIAL':
end = 26
else:
end = 22
for line in lines:
sline = line.split(',')
sline[-1] = sline[-1][:-1]
# print(sline[0],self.sname)
if sline[0] == self.sname:
vals = [float(item) for item in sline[1:end]]
self.ckc = vals
break
|
daviddrysdale/python-phonenumbers
|
python/phonenumbers/shortdata/region_BS.py
|
Python
|
apache-2.0
| 677
| 0.008863
|
"""Auto-generated file, do not edit by hand. BS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BS = PhoneMetadata(id='BS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
toll_free=PhoneNumberDesc(national_number_pattern='9(?:1[19]|88)', example_number='911', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='91[19]', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='9(?:1[19]|88)', example_number='911', possible_length=(3,)),
short_data=True)
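# Hedged usage sketch (this metadata is normally consumed through the public
# phonenumbers API rather than imported directly):
#
#   from phonenumbers import shortnumberinfo
#   shortnumberinfo.is_emergency_number('911', 'BS')  # True: matches 91[19]
#   shortnumberinfo.is_emergency_number('988', 'BS')  # False: toll-free only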
|
chiffa/numpy
|
numpy/lib/nanfunctions.py
|
Python
|
bsd-3-clause
| 46,492
| 0.000022
|
"""
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
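# Behaviour sketch for _replace_nan (illustrative, not part of the module):
#
#   >>> a, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0.0)
#   >>> a
#   array([ 1.,  0.,  3.])
#   >>> mask
#   array([False,  True, False], dtype=bool)
#
# Integer input comes back unchanged with mask None, since integer arrays
# cannot hold NaN.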
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
output. If out is None, then a is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
of sub-classes of `ndarray`. If the sub-class's method does not
implement `keepdims`, any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
def nanmax(a,
|
chheplo/jaikuengine
|
common/models.py
|
Python
|
apache-2.0
| 22,503
| 0.016531
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from google.appengine.ext import db as models
import appengine_django.models as aed_models
from oauth import oauth
from django.conf import settings
from django.db import models as django_models
from common import profile
from common import properties
from common import util
import settings
PRIVACY_PRIVATE = 1
PRIVACY_CONTACTS = 2
PRIVACY_PUBLIC = 3
ACTOR_ALLOWED_EXTRA = ('contact_count',
'follower_count',
'icon',
'description',
'member_count',
'admin_count',
'given_name',
'family_name',
'homepage'
)
ACTOR_LIMITED_EXTRA = ('icon',
'description',
'given_name',
'family_name'
)
# Internal Utility Functions
def _get_actor_type_from_nick(nick):
if nick[0] == "#":
return "channel"
return "user"
def _get_actor_urlnick_from_nick(nick):
parts = nick.split('@')
nick = parts[0]
if nick[0] == "#":
nick = nick[1:]
return nick
def _to_api(v):
if hasattr(v, 'to_api'):
v = v.to_api()
elif isinstance(v, type([])):
v = [_to_api(x) for x in v]
elif isinstance(v, type({})):
v = dict([(key, _to_api(value)) for (key, value) in v.iteritems()])
elif isinstance(v, datetime.datetime):
v = str(v)
return v
# Base Models, Internal Only
class ApiMixinModel(aed_models.BaseModel):
def to_api(self):
o = {}
for prop in self.properties().keys():
value = getattr(self, prop)
o[prop] = _to_api(value)
return o
class CachingModel(ApiMixinModel):
"""A simple caching layer for model objects: caches any item read with
get_by_key_name and removes from the cache on put() and delete()
You must call reset_cache() at the beginning of any HTTP request or test.
The design idea is that this should give a consistent view of the data within
the processing of a single request.
"""
# TODO(mikie): appengine has non-Model put() and delete() that act on a bunch
# of items at once. To be correct this should hook those as well.
# TODO(mikie): should hook to the django sync_db signal so that the cache is
# reset when database is (to support fixtures in tests correctly).
# TODO(mikie): should cache items read through methods other than
# get_by_key_name()
_cache = { }
_cache_enabled = False
_get_count = 0
def __init__(self, parent=None, key_name=None, _app=None, **kw):
if not key_name and 'key' not in kw:
key_name = self.key_from(**kw)
super(CachingModel, self).__init__(
parent, key_name=key_name, _app=_app, **kw)
if not key_name:
key_name = self.key_from(**kw)
self._cache_keyname__ = (key_name, parent)
@classmethod
def key_from(cls, **kw):
if hasattr(cls, 'key_template'):
try:
return cls.key_template % kw
except KeyError:
logging.warn('Automatic key_name generation failed: %s <- %s',
cls.key_template,
kw)
return None
def _remove_from_cache(self):
clsname = self.__class__.__name__
if CachingModel._cache_enabled:
if CachingModel._cache.has_key(clsname):
if CachingModel._cache[clsname].has_key(self._cache_keyname__):
CachingModel._cache[clsname].pop(self._cache_keyname__)
@profile.log_write
def put(self):
self._remove_from_cache()
ret = super(CachingModel, self).put()
self._cache_keyname__ = (self.key().name(), self.parent_key())
self._remove_from_cache()
return ret
def save(self):
return self.put()
@profile.log_write
def delete(self):
self._remove_from_cache()
return super(CachingModel, self).delete()
@classmethod
@profile.log_call('threadlocal_cached_read')
def get_by_key_name(cls, key_names, parent=None):
if not key_names:
return
# Only caches when called with a single key
if CachingModel._cache_enabled and (
isinstance(key_names, str) or isinstance(key_names, unicode)):
clsname = cls.__name__
if not CachingModel._cache.has_key(clsname):
CachingModel._cache[clsname] = { }
elif CachingModel._cache[clsname].has_key((key_names, parent)):
profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_hit')
return CachingModel._cache[clsname][(key_names, parent)]
profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_miss')
ret = super(CachingModel, cls).get_by_key_name(key_names, parent)
CachingModel._get_count += 1
CachingModel._cache[clsname][(key_names, parent)] = ret
if ret:
ret._cache_keyname__ = (key_names, parent)
return ret
else:
CachingModel._get_count += len(key_names)
return super(CachingModel, cls).get_by_key_name(key_names, parent)
@classmethod
def db_get_count(cls):
return CachingModel._get_count
@classmethod
def reset_cache(cls):
CachingModel._cache = { }
@classmethod
def enable_cache(cls, enabled = True):
CachingModel._cache_enabled = enabled
if not enabled:
CachingModel._cache = { }
@classmethod
def reset_get_count(cls):
CachingModel._get_count = 0
@classmethod
@profile.log_read
def gql(cls, *args, **kw):
return super(CachingModel, cls).gql(*args, **kw)
@classmethod
@profile.log_read
def Query(cls):
# TODO(termie): I don't like that this module is called "models" here,
# I'd prefer to be accessing it by "db"
return models.Query(cls)
class DeletedMarkerModel(CachingModel):
deleted_at = properties.DateTimeProperty()
def mark_as_deleted(self):
self.deleted_at = datetime.datetime.utcnow()
self.put()
def is_deleted(self):
return self.deleted_at
# Public Models
class AbuseReport(CachingModel):
entry = models.StringProperty() # ref - entry
actor = models.StringProperty() # ref - actor for entry
reports = models.StringListProperty() # the actors who have reported this
count = models.IntegerProperty() # the count of the number of reports so far
key_template = '%(entry)s'
class Activation(CachingModel):
actor = models.StringProperty()
content = models.StringProperty()
code = models.StringProperty()
type = models.StringProperty()
key_template = 'activation/%(actor)s/%(type)s/%(content)s'
def actor_url(nick, actor_type, path='', request=None, mobile=False):
""" returns a url, with optional path appended
NOTE: if appending a path, it should start with '/'
"""
prefix = ""
mobile = mobile or (request and request.mobile)
if mobile:
prefix = "m."
if (settings.WILDCARD_USER_SUBDOMAINS_ENABLED
and actor_type == 'user'
and not mobile):
return 'http://%s.%s%s' % (nick, settings.HOSTED_DOMAIN, path)
elif mobile and settings.SUBDOMAINS_ENABLED:
return 'http://%s%s/%s/%s%s' % (prefix,
settings.HOSTED_DOMAIN,
actor_type,
nick,
path)
else:
return 'http://%s/%s/%s%s' % (settings.DOMAIN,
actor_type,
nick,
path)
class Actor(DeletedMarkerModel):
"""
extra:
channel_count - int; number of channels
contact_count - int; number of contacts
follower_count - int; number of followers
icon - stri
|
DoubleNegativeVisualEffects/gaffer
|
python/GafferUI/OpDialogue.py
|
Python
|
bsd-3-clause
| 4,512
| 0.038121
|
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
class OpDialogue( GafferUI.Dialogue ) :
def __init__( self, opInstance, title=None, sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
self.__node = Gaffer.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
frame = GafferUI.Frame()
frame.setChild( GafferUI.NodeUI.create( self.__node ) )
self._setWidget( frame )
self.__cancelButton = self._addButton( "Cancel" )
self.__cancelButtonConnection = self.__cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = opInstance.userData()["UI"]["buttonLabel"].value
self.__executeButton = self._addButton( executeLabel )
self.__executeButtonConnection = self.__executeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )
self.__opExecutedSignal = Gaffer.Signal1()
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
# validation or execution errors will be reported to the user and return
# to the dialogue for them to cancel or try again.
def waitForResult( self, **kw ) :
# block our button connection so we don't end up executing twice
with Gaffer.BlockedConnection( self.__executeButtonConnection ) :
while 1 :
button = self.waitForButton( **kw )
if button is self.__executeButton :
result = self.__execute()
if result is not None :
return result
else :
return None
def __execute( self ) :
try :
self.__node.setParameterisedValues()
result = self.__node.getParameterised()[0]()
self.opExecutedSignal()( result )
## \todo Support Op userData for specifying closing of Dialogue?
self.close()
return result
except :
GafferUI.ErrorDialogue.displayException( parentWindow=self )
return None
def __buttonClicked( self, button ) :
if button is self.__executeButton :
self.__execute()
else :
self.close()
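# Minimal usage sketch (hypothetical op name; assumes a running GafferUI
# event loop and that "myOp" is loadable via the default op loader):
#   op = IECore.ClassLoader.defaultOpLoader().load( "myOp" )()
#   dialogue = OpDialogue( op )
#   result = dialogue.waitForResult()  # None if the user cancelled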
|
hroncok/freeipa
|
ipapython/ipa_log_manager.py
|
Python
|
gpl-3.0
| 8,302
| 0.004095
|
# Authors: John Dennis <[email protected]>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Module exports
__all__ = ['log_mgr', 'root_logger', 'standard_logging_setup',
'IPA_ROOT_LOGGER_NAME', 'ISO8601_UTC_DATETIME_FMT',
'LOGGING_FORMAT_STDERR', 'LOGGING_FORMAT_STDOUT', 'LOGGING_FORMAT_FILE']
#-------------------------------------------------------------------------------
import sys
import re
import copy
from log_manager import LogManager, parse_log_level
#-------------------------------------------------------------------------------
# Our root logger, all loggers will be descendents of this.
IPA_ROOT_LOGGER_NAME = 'ipa'
# Format string for time.strftime() to produce an ISO 8601 date time
# formatted string in the UTC time zone.
ISO8601_UTC_DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ'
# Logging format string for use with logging stderr handlers
LOGGING_FORMAT_STDERR = 'ipa: %(levelname)s: %(message)s'
# Logging format string for use with logging stdout handlers
LOGGING_FORMAT_STDOUT = '[%(asctime)s %(name)s] <%(levelname)s>: %(message)s'
# Logging format string for use with logging file handlers
LOGGING_FORMAT_FILE = '\t'.join([
'%(asctime)s',
'%(process)d',
'%(threadName)s',
'%(name)s',
'%(levelname)s',
'%(message)s',
])
# Used by standard_logging_setup() for console message
LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'
# Used by standard_logging_setup() for file message
LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
#-------------------------------------------------------------------------------
class IPALogManager(LogManager):
'''
    Subclass the LogManager to enforce some IPA specific logging
conventions.
* Default to timestamps in UTC.
* Default to ISO 8601 timestamp format.
* Default the message format.
'''
log_logger_level_config_re = re.compile(r'^log_logger_level_(debug|info|warn|warning|error|critical|\d+)$')
def __init__(self, configure_state=None):
'''
:parameters:
configure_state
Used by clients of the log manager to track the
configuration state, may be any object.
'''
super(IPALogManager, self).__init__(IPA_ROOT_LOGGER_NAME, configure_state)
def configure_from_env(self, env, configure_state=None):
'''
        Read the logger configuration from the Env config. The
following items may be configured:
Logger Levels
*log_logger_XXX = comma separated list of regexps*
Logger levels can be explicitly specified for specific loggers as
        opposed to a global logging level. Specific loggers are indicated
by a list of regular expressions bound to a level. If a logger's
name matches the regexp then it is assigned that level. The keys
in the Env config must begin with "log_logger_level\_" and then be
followed by a symbolic or numeric log level, for example::
log_logger_level_debug = ipapython\.dn\..*
log_logger_level_35 = ipalib\.plugins\.dogtag
The first line says any logger belonging to the ipapython.dn module
        will have its level configured to debug.
        The second line says the ipalib.plugins.dogtag logger will be
configured to level 35.
Note: logger names are a dot ('.') separated list forming a path
in the logger tree. The dot character is also a regular
expression metacharacter (matches any character) therefore you
will usually need to escape the dot in the logger names by
        preceding it with a backslash.
The return value of this function is a dict with the following
format:
logger_regexps
List of (regexp, level) tuples
:parameters:
env
Env object configuration values are read from.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
logger_regexps = []
config = {'logger_regexps' : logger_regexps,
}
for attr in ('debug', 'verbose'):
value = getattr(env, attr, None)
if value is not None:
config[attr] = value
for attr in list(env):
# Get logger level configuration
match = IPALogManager.log_logger_level_config_re.search(attr)
if match:
value = match.group(1)
level = parse_log_level(value)
value = getattr(env, attr)
                regexps = re.split(r'\s*,\s*', value)
# Add the regexp, it maps to the configured level
for regexp in regexps:
logger_regexps.append((regexp, level))
continue
self.configure(config, configure_state)
return config
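    # Sketch of the mapping (hypothetical Env contents):
    #   log_logger_level_debug = ipapython\.dn\..*
    #   log_logger_level_35 = ipalib\.plugins\.dogtag
    # would yield config['logger_regexps'] ==
    #   [('ipapython\\.dn\\..*', 10), ('ipalib\\.plugins\\.dogtag', 35)]
    # assuming parse_log_level maps 'debug' to logging.DEBUG (10).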
def create_log_handlers(self, configs, logger=None, configure_state=None):
'Enforce some IPA specific configurations'
configs = copy.copy(configs)
for cfg in configs:
            if 'time_zone_converter' not in cfg:
                cfg['time_zone_converter'] = 'utc'
            if 'datefmt' not in cfg:
                cfg['datefmt'] = ISO8601_UTC_DATETIME_FMT
            if 'format' not in cfg:
                cfg['format'] = LOGGING_FORMAT_STDOUT
return super(IPALogManager, self).create_log_handlers(configs, logger, configure_state)
#-------------------------------------------------------------------------------
def standard_logging_setup(filename=None, verbose=False, debug=False,
filemode='w', console_format=LOGGING_FORMAT_STANDARD_CONSOLE):
handlers = []
# File output is always logged at debug level
if filename is not None:
file_handler = dict(name='file',
filename=filename,
filemode=filemode,
permission=0o600,
level='debug',
format=LOGGING_FORMAT_STANDARD_FILE)
handlers.append(file_handler)
    if 'console' in log_mgr.handlers:
log_mgr.remove_handler('console')
level = 'error'
if verbose:
level = 'info'
if debug:
level = 'debug'
console_handler = dict(name='console',
stream=sys.stderr,
level=level,
format=console_format)
handlers.append(console_handler)
    # default_level must be debug because we want the file handler to
# always log at the debug level.
log_mgr.configure(dict(default_level='debug',
handlers=handlers),
configure_state='standard')
return log_mgr.root_logger
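# Typical use (hypothetical log path):
#   log = standard_logging_setup('/tmp/ipa-example.log', verbose=True)
#   log.info('console shows info and above; the file captures debug too')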
#-------------------------------------------------------------------------------
# Single shared instance of log manager
#
# By default always starts with stderr console handler at error level
# so messages generated before logging is fully configured have some
# place to go and won't get lost.
log_mgr = IPALogManager()
log_mgr.configure(dict(default_level='error',
handlers=[dict(name='console',
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/test/test_cfgparser.py
|
Python
|
agpl-3.0
| 28,482
| 0.000632
|
import ConfigParser
import StringIO
import os
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
def items(self):
result = self.data.items()
result.sort()
return result
def keys(self):
result = self.data.keys()
result.sort()
return result
def values(self):
# XXX never used?
result = self.items()
return [i[1] for i in result]
def iteritems(self): return iter(self.items())
def iterkeys(self): return iter(self.keys())
__iter__ = iterkeys
def itervalues(self): return iter(self.values())
class TestCaseBase(unittest.TestCase):
allow_no_value = False
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class(allow_no_value=self.allow_no_value)
else:
self.cf = self.config_class(defaults,
allow_no_value=self.allow_no_value)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
config_string = (
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
if self.allow_no_value:
config_string += (
"[NoValue]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
L = cf.sections()
L.sort()
E = [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
]
if self.allow_no_value:
E.append(r'NoValue')
E.sort()
eq = self.assertEqual
eq(L, E)
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
self.assertNotIn('__name__', cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.assertTrue(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option")
self.assertFalse(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.assertFalse(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.assertTrue(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.assertTrue(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
        self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.assertFalse(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged "
"sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
|
burnpanck/chaco
|
examples/demo/shell/scatter.py
|
Python
|
bsd-3-clause
| 533
| 0
|
"""This ex
|
ample shows how to create a scatter plot using the `shell` package.
"""
# Major library imports
from numpy import linspace, random, pi
# Enthought library imports
from chaco.shell import plot, hold, title, show
# Create some data
x = linspace(-2*pi, 2*pi, 100)
y1 = random.random(100)
y2 = random.random(100)
# Create some scatter plots
plot(x, y1, "b.")
hold(True)
plot(x, y2, "g+", marker_size=2)
# Add some titles
title("simple scatter plots")
# This command is only necessary if running from command line
show()
|
luanjunyi/cortana
|
feat/bow/tokenize.py
|
Python
|
mit
| 834
| 0.020384
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 05 17:10:34 2014
@author: Ning
"""
from util import *
from util.log import _logger
from feat.terms.term_categorize import term_category
import codecs
def parse(sentence):
for term in sentence.split():
yield term_category(term)
def tokenize():
rows = tsv.reader(conv.redirect("data|train.dat"))
with codecs.open("train.tokenized.dat",'w',encoding='utf-8') as fl:
for row in rows:
fl.write("%s\t%s\n" % (' '.join(list(parse(row[0]))) , row[1]) )
    rows = tsv.reader(conv.redirect("data|test.dat"))
with codecs.open("test.tokenized.dat",'w',encoding='utf-8') as fl:
for row in rows:
fl.write("%s\t%s\n" % (' '.join(list(parse(row[0]))) , row[1]) )
if __name__ == "__main__":
    tokenize()
|
eaton-lab/toytree
|
toytree/utils.py
|
Python
|
bsd-3-clause
| 22,118
| 0.003979
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import re
import os
from copy import deepcopy
import numpy as np
import toytree
import toyplot
#######################################################
# Exception Classes
#######################################################
class ToytreeError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TreeError(Exception):
"A problem occurred during a TreeNode operation"
def __init__(self, value=''):
self.value = value
def __str__(self):
return repr(self.value)
# TREE FORMATS
NW_FORMAT = {
# flexible with support
# Format 0 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
0: [
('name', str, True),
('dist', float, True),
('support', float, True),
('dist', float, True),
],
# flexible with internal node names
# Format 1 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
1: [
('name', str, True),
('dist', float, True),
('name', str, True),
('dist', float, True),
],
# strict with support values
# Format 2 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
2: [
('name', str, False),
('dist', float, False),
('support', str, False),
('dist', float, False),
],
# strict with internal node names
# Format 3 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
3: [
('name', str, False),
('dist', float, False),
('name', str, False),
('dist', float, False),
],
# strict with internal node names
# Format 4 = (A:0.35,(B:0.72,(D:0.60,G:0.12)));
4: [
('name', str, False),
('dist', float, False),
(None, None, False),
(None, None, False),
],
# Format 5 = (A:0.35,(B:0.72,(D:0.60,G:0.12):0.64):0.56);
5: [
('name', str, False),
('dist', float, False),
(None, None, False),
('dist', float, False),
],
# Format 6 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E)C);
6: [
('name', str, False),
(None, None, False),
(None, None, False),
('dist', float, False),
],
# Format 7 = (A,(B,(D,G)E)C);
7: [
('name', str, False),
('dist', float, False),
('name', str, False),
(None, None, False),
],
# Format 8 = (A,(B,(D,G)));
8: [
('name', str, False),
(None, None, False),
('name', str, False),
(None, None, False),
],
# Format 9 = (,(,(,)));
9: [
('name', str, False),
(None, None, False),
(None, None, False),
(None, None, False),
],
# Format 10 = ((a[&Z=1,Y=2]:1.0[&X=3], b[&Z=1,Y=2]:3.0[&X=2]):1.0[&L=1,W=0], ...
# NHX Like mrbayes NEXUS common
10: [
('name', str, True),
('dist', str, True),
('name', str, True),
('dist', str, True),
]
}
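# Sketch: parsing a format-0 newick (supports on internal nodes), matching
# the NW_FORMAT[0] example above.
#   newick = "(A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);"
#   tre = toytree.tree(newick, tree_format=0)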
def parse_network(net, disconnect=True, root=None):
"""
Parse network to extract the major topology.
This leaves the hybrid nodes in the tree and labels each with
.name="H{int}" and .gamma={float}.
root: list of tip names used to root the tree. If "None" then roots on a
random tip.
"""
# if net is a file then read the first line
if os.path.exists(net):
with open(net, 'r') as infile:
net = infile.readline()
# trim off loglik and anything after it (TODO: keep loglik)
if ";" in net:
net = net.split(";")[0] + ';'
# sub :xxx:: to be ::: b/c I don't care about admix edge bls
net = re.sub(r":\d.\w*::", ":::", net)
# change H nodes to proper format
while ",#" in net:
pre, post = net.split(",#", 1)
npre, npost = post.split(")", 1)
newpre = npre.split(":")[0] + "-" + npre.split(":")[-1]
net = pre + ")#" + newpre + npost
net = net.replace(":::", "-")
# parse cleaned newick and set empty gamma on all nodes
net = toytree.tree(net, tree_format=1)
# store admix data
admix = {}
# root on tips if provided by user -- otherwise pick a non-H root
if not root:
# if not rooted choose any non-H root
if not net.is_rooted():
net = net.root(
[i for i in net.get_tip_labels() if not i.startswith("#H")][0]
)
else:
net = net.root(root)
    # Traverse tree to find hybrid nodes. If a hybrid node is labeled as a
    # distinct branch in the tree then it is dropped from the tree and its
    # children are reconnected to its parent (when disconnect=True).
for node in net.treenode.traverse("postorder"):
# find hybrid nodes as internal nchild=1, or external with H in name
if (len(node.children) == 1) or node.name.startswith("#H"):
# assign name and gamma to hybrid nodes
aname, aprop = node.name.split("-")
aname = aname.lstrip("#")
node.name = aname
# assign hybrid to closest nodes up and down from edge
# node.children[0].hybrid = int(aname[1:])
# node.gamma = round(float(aprop), 3)
# node.up.hybrid = int(aname[1:])
# if root is a hybrid edge (ugh)
if node.up is None:
small, big = sorted(node.children, key=lambda x: len(x))
root = toytree.TreeNode.TreeNode(name='root')
node.children = [small]
small.up = node
node.up = root
big.up = root
root.children = [node, big]
net.treenode = root
# disconnect node by connecting children to parent
if disconnect:
# if tip is a hybrid
if not node.children:
# get sister node
sister = [i for i in node.up.children if i != node][0]
# connect sister to gparent
sister.up = node.up.up
node.up.up.children.remove(node.up)
node.up.up.children.append(sister)
# if hybrid is internal
else:
node.up.children.remove(node)
for child in node.children:
child.up = node.up
node.up.children.append(child)
# store admix data by descendants but remove hybrid tips
desc = node.get_leaf_names()
if aname in desc:
desc = [i for i in node.up.get_leaf_names() if i != aname]
desc = [i for i in desc if not i.startswith("#H")]
# put this node into admix
if aname not in admix:
admix[aname] = (desc, aprop)
            # matching edge in admix, now arrange into correct order by minor
else:
# this is the minor edge
if aprop < admix[aname][1]:
admix[aname] = (
admix[aname][0],
desc,
0.5,
{},
str(round(float(aprop), 3)),
)
# this is the major edge
else:
admix[aname] = (
desc,
admix[aname][0],
0.5,
{},
str(round(float(admix[aname][1]), 3)),
)
# update coords needed if node disconnection is turned back on.
net._coords.update()
net = net.ladderize()
return net, admix
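# Hypothetical usage (made-up SNaQ-style network string; gamma values
# follow ':::'):
#   net, admix = parse_network("((A,(B)#H1:::0.7),(#H1:::0.3,C));")
# net holds the major topology; admix maps hybrid names like 'H1' to their
# descendant lists and admixture proportions.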
class Annotator(object):
"""
|
Add annotations as a new
|
mark on top of an existing toytree mark.
"""
def __init__(self, tree, axes, mark):
self.tree = tree
self.axes = axes
self.mark = mark
def draw_clade_box(
self,
names=None,
regex=None,
wildcard=None,
yspace=None,
xspace=None,
**kwargs):
"""
Draw a rectangle aroun
|
Micronaet/micronaet-xmlrpc
|
xmlrpc_operation_invoice/__openerp__.py
|
Python
|
agpl-3.0
| 1,553
| 0.001288
|
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'XMLRPC Operation Invoice',
'version': '0.1',
'category': 'ETL',
'description': '''
XMLRPC Import invoice
''',
    'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'xmlrpc_base',
'account',
],
'init_xml': [],
'demo': [],
'data': [
'security/xml_groups.xml',
        #'operation_view.xml',
'invoice_view.xml',
'data/operation.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
datamade/yournextmp-popit
|
candidates/feeds.py
|
Python
|
agpl-3.0
| 1,820
| 0.001099
|
import re
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from .models import LoggedAction
lock_re = re.compile(r'^(?:Unl|L)ocked\s*constituency (.*) \((\d+)\)$')
class RecentChangesFeed(Feed):
site_name = Site.objects.get_current().name
title = _("{site_name} recent changes").format(site_name=site_name)
description = _("Changes to {site_name} candidates").format(site_name=site_name)
link = "/feeds/changes.xml"
feed_type = Atom1Feed
def items(self):
return LoggedAction.objects.order_by('-updated')[:50]
def item_title(self, item):
m = lock_re.search(item.source)
if m:
return u"{0} - {1}".format(
m.group(1),
item.action_type
)
else:
return u"{0} - {1}".format(
item.person_id,
item.action_type
)
def item_description(self, item):
updated = _(u"Updated at {0}").format(str(item.updated))
description = u"{0}\n\n{1}\n".format(item.source, updated)
return description
def item_link(self, item):
# As a hack for the moment, constituencies are just mentioned
# in the source message:
m = lock_re.search(item.source)
if m:
return reverse('constituency', kwargs={
'post_id': m.group(2),
'ignored_slug': slugify(m.group(1))
})
else:
if item.person_id:
return reverse('person-view', args=[item.person_id])
else:
return '/'
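# Example of the source strings lock_re targets (hypothetical data):
#   m = lock_re.search('Locked constituency Cambridge (65613)')
#   m.group(1), m.group(2)  # -> ('Cambridge', '65613')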
|
openslack/openslack-crawler
|
crawler/schedulers/kafka/scheduler.py
|
Python
|
apache-2.0
| 6,484
| 0.000617
|
import json
import random
import time
import urllib
import re
from scrapy.utils.misc import load_object
from scrapy.http import Request
from scrapy.conf import settings
import redis
from crawler.schedulers.redis.dupefilter import RFPDupeFilter
from crawler.schedulers.redis.queue import RedisPriorityQueue
try:
import cPickle as pickle
except ImportError:
import pickle
class DistributedScheduler(object):
'''
Scrapy request scheduler that utilizes Priority Queues
to moderate scrape requests within a distributed scrapy
cluster
'''
redis_conn = None # the redis connection
queue = None # the queue to use for crawling
spider = None # the spider using this scheduler
queue_class = None # the class to use for the queue
dupefilter = None # the redis dupefilter
item_retries = 0 # the number of extra tries to get an item
def __init__(self, server, persist, timeout, retries):
'''
Initialize the scheduler
'''
self.redis_conn = server
self.persist = persist
self.rfp_timeout = timeout
        self.item_retries = retries
def setup(self):
'''
Used to initialize things when using mock
spider.name is not set yet
'''
        self.queue = RedisPriorityQueue(self.redis_conn,
self.spider.name + ":queue")
@classmethod
def from_settings(cls, settings):
server = redis.Redis(host=settings.get('REDIS_HOST'),
port=settings.get('REDIS_PORT'))
persist = settings.get('SCHEDULER_PERSIST', True)
timeout = settings.get('DUPEFILTER_TIMEOUT', 600)
retries = settings.get('SCHEDULER_ITEM_RETRIES', 3)
return cls(server, persist, timeout, retries)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def open(self, spider):
self.spider = spider
self.setup()
self.dupefilter = RFPDupeFilter(self.redis_conn,
self.spider.name + ':dupefilter', self.rfp_timeout)
def close(self, reason):
if not self.persist:
self.dupefilter.clear()
self.queue.clear()
def is_blacklisted(self, appid, crawlid):
'''
Checks the redis blacklist for crawls that should not be propagated
either from expiring or stopped
@return: True if the appid crawlid combo is blacklisted
'''
key_check = '{appid}||{crawlid}'.format(appid=appid,
crawlid=crawlid)
redis_key = self.spider.name + ":blacklist"
return self.redis_conn.sismember(redis_key, key_check)
def enqueue_request(self, request):
'''
Pushes a request from the spider back into the queue
'''
if not request.dont_filter and self.dupefilter.request_seen(request):
return
req_dict = self.request_to_dict(request)
if not self.is_blacklisted(req_dict['meta']['appid'],
req_dict['meta']['crawlid']):
key = "{sid}:queue".format(sid=req_dict['meta']['spiderid'])
curr_time = time.time()
# insert if crawl never expires (0) or time < expires
if req_dict['meta']['expires'] == 0 or \
curr_time < req_dict['meta']['expires']:
self.queue.push(req_dict, req_dict['meta']['priority'])
def request_to_dict(self, request):
'''
Convert Request object to a dict.
modified from scrapy.utils.reqser
'''
req_dict = {
# urls should be safe (safe_string_url)
'url': request.url.decode('ascii'),
'method': request.method,
'headers': dict(request.headers),
'body': request.body,
'cookies': request.cookies,
'meta': request.meta,
'_encoding': request._encoding,
'priority': request.priority,
'dont_filter': request.dont_filter,
}
return req_dict
def find_item(self):
'''
Finds an item from the queue
'''
count = 0
while count <= self.item_retries:
item = self.queue.pop()
if item:
# very basic limiter
time.sleep(1)
return item
# we want the spiders to get slightly out of sync
# with each other for better performance
time.sleep(random.random())
count = count + 1
return None
def next_request(self):
'''
Logic to handle getting a new url request
'''
t = time.time()
item = self.find_item()
if item:
try:
req = Request(item['url'])
except ValueError:
# need absolute url
# need better url validation here
req = Request('http://' + item['url'])
if 'meta' in item:
item = item['meta']
# defaults
if "attrs" not in item:
item["attrs"] = {}
if "allowed_domains" not in item:
item["allowed_domains"] = ()
if "allow_regex" not in item:
item["allow_regex"] = ()
if "deny_regex" not in item:
item["deny_regex"] = ()
if "deny_extensions" not in item:
item["deny_extensions"] = None
if 'curdepth' not in item:
item['curdepth'] = 0
if "maxdepth" not in item:
item["maxdepth"] = 0
if "priority" not in item:
item['priority'] = 0
if "retry_times" not in item:
item['retry_times'] = 0
if "expires" not in item:
item['expires'] = 0
for key in ('attrs', 'allowed_domains', 'curdepth', 'maxdepth',
'appid', 'crawlid', 'spiderid', 'priority', 'retry_times',
'expires', 'allow_regex', 'deny_regex', 'deny_extensions'):
req.meta[key] = item[key]
return req
return None
def has_pending_requests(self):
'''
We never want to say we have pending requests
If this returns True scrapy sometimes hangs.
'''
return False
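# Wiring sketch (assumes REDIS_HOST/REDIS_PORT present in scrapy settings):
#   scheduler = DistributedScheduler.from_settings(settings)
#   scheduler.open(spider)          # binds queue and dupefilter to the spider
#   scheduler.enqueue_request(req)  # deduped, blacklist-checked, then queued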
|
jcrudy/sklearntools
|
sklearntools/test/test_kfold.py
|
Python
|
bsd-3-clause
| 809
| 0.003708
|
from sklearntools.kfold import ThresholdHybridCV
import numpy as np
from six.moves import reduce
from operator import __add__
from numpy.testing.utils import assert_array_equal
from nose.tools import assert_equal
def test_hybrid_cv():
X = np.random.normal(size=(100,10))
y = np.random.normal(size=100)
cv = ThresholdHybridCV(n_folds=10, upper=1.)
folds = list(cv._iter_test_masks(X, y))
assert_array_equal(reduce(__add__, folds), np.ones(100, dtype=int))
assert_equal(len(folds), cv.get_n_splits(X, y))
if __name__ == '__main__':
import sys
import nose
    # This code will run the test in this file.
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
                            module_name,
'-s', '-v'])
|
mitsuhiko/sentry
|
src/sentry/utils/email.py
|
Python
|
bsd-3-clause
| 15,463
| 0.000776
|
"""
sentry.utils.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import os
import subprocess
import tempfile
import time
from email.utils import parseaddr
from functools import partial
from operator import attrgetter
from random import randrange
from django.conf import settings
from django.core.mail import get_connection as _get_connection
from django.core.mail import send_mail as _send_mail
from django.core.mail import EmailMultiAlternatives
from django.core.mail.backends.base import BaseEmailBackend
from django.core.signing import BadSignature, Signer
from django.utils.crypto import constant_time_compare
from django.utils.encoding import force_bytes, force_str, force_text
from toronado import from_string as inline_css
from sentry import options
from sentry.logging import LoggingFormat
from sentry.models import (
Activity, Event, Group, GroupEmailThread, Project, User, UserOption
)
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
from sentry.utils.strings import is_valid_dot_atom
from sentry.web.helpers import render_to_string
# The maximum amount of recipients to display in human format.
MAX_RECIPIENTS = 5
logger = logging.getLogger('sentry.mail')
class _CaseInsensitiveSigner(Signer):
"""
Generate a signature that is comprised of only lowercase letters.
WARNING: Do not use this for anything that needs to be cryptographically
secure! This is losing entropy and has a much higher chance of collision
due to dropping to lowercase letters. For our purposes, this lack of entropy
is ok and doesn't pose a risk.
NOTE: This is needed strictly for signatures used in email addresses. Some
clients, coughAirmailcough, treat email addresses as being case-insensitive,
and sends the value as all lowercase.
"""
def signature(self, value):
sig = super(_CaseInsensitiveSigner, self).signature(value)
return sig.lower()
def unsign(self, signed_value):
# This unsign is identical to subclass except for the lowercasing
# See: https://github.com/django/django/blob/1.6.11/django/core/signing.py#L165-L172
signed_value = force_str(signed_value)
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig.lower(), self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
signer = _CaseInsensitiveSigner()
def email_to_group_id(address):
"""
Email address should be in the form of:
{group_id}+{signature}@example.com
"""
address = address.split('@', 1)[0]
signed_data = address.replace('+', ':')
return int(force_bytes(signer.unsign(signed_data)))
def group_id_to_email(group_id):
signed_data = signer.sign(str(group_id))
return '@'.join((
signed_data.replace(':', '+'),
options.get('mail.reply-hostname') or get_from_email_domain(),
))
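# Round-trip sketch (hypothetical values; requires the mail options above):
#   addr = group_id_to_email(42)   # e.g. '42+c0ffee@reply.example.com'
#   email_to_group_id(addr)        # -> 42, after verifying the signature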
def domain_from_email(email):
email = parseaddr(email)[1]
try:
return email.split('@', 1)[1]
except IndexError:
# The email address is likely malformed or something
return email
# Slightly modified version of Django's
# `django.core.mail.message:make_msgid` because we need
# to override the domain. If we ever upgrade to
# django 1.8, we can/should replace this.
def make_msgid(domain):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
    <[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
    pid = os.getpid()
randint = randrange(100000)
msgid = '<%s.%s.%s@%s>' % (utcdate, pid, randint, domain)
return msgid
# cache the domain_from_email calculation
# This is just a tuple of (email, email-domain)
_from_email_domain_cache = (None, None)
def get_from_email_domain():
global _from_email_domain_cache
from_ = options.get('mail.from')
if not _from_email_domain_cache[0] == from_:
_from_email_domain_cache = (from_, domain_from_email(from_))
return _from_email_domain_cache[1]
def get_email_addresses(user_ids, project=None):
pending = set(user_ids)
results = {}
if project:
queryset = UserOption.objects.filter(
project=project,
user__in=pending,
key='mail:email',
)
for option in (o for o in queryset if o.value):
results[option.user_id] = option.value
pending.discard(option.user_id)
if pending:
queryset = UserOption.objects.filter(
user__in=pending,
key='alert_email',
)
for option in (o for o in queryset if o.value):
results[option.user_id] = option.value
pending.discard(option.user_id)
if pending:
queryset = User.objects.filter(pk__in=pending, is_active=True)
for (user_id, email) in queryset.values_list('id', 'email'):
if email:
results[user_id] = email
pending.discard(user_id)
if pending:
logger.warning('Could not resolve email addresses for user IDs in %r, discarding...', pending)
return results
class ListResolver(object):
"""
Manages the generation of RFC 2919 compliant list-id strings from varying
objects types.
"""
class UnregisteredTypeError(Exception):
"""
        Error raised when attempting to build a list-id from an unregistered object type.
"""
def __init__(self, namespace, type_handlers):
assert is_valid_dot_atom(namespace)
# The list-id-namespace that will be used when generating the list-id
# string. This should be a domain name under the control of the
# generator (see RFC 2919.)
self.__namespace = namespace
# A mapping of classes to functions that accept an instance of that
# class, returning a tuple of values that will be used to generate the
# list label. Returned values must be valid RFC 2822 dot-atom-text
# values.
self.__type_handlers = type_handlers
def __call__(self, instance):
"""
Build a list-id string from an instance.
Raises ``UnregisteredTypeError`` if there is no registered handler for
the instance type. Raises ``AssertionError`` if a valid list-id string
cannot be generated from the values returned by the type handler.
"""
try:
handler = self.__type_handlers[type(instance)]
except KeyError:
raise self.UnregisteredTypeError(
'Cannot generate mailing list identifier for {!r}'.format(instance)
)
label = '.'.join(map(str, handler(instance)))
assert is_valid_dot_atom(label)
return '{}.{}'.format(label, self.__namespace)
default_list_type_handlers = {
Activity: attrgetter('project.slug', 'project.organization.slug'),
Project: attrgetter('slug', 'organization.slug'),
Group: attrgetter('project.slug', 'organization.slug'),
Event: attrgetter('project.slug', 'organization.slug'),
}
make_listid_from_instance = ListResolver(
options.get('mail.list-namespace'),
default_list_type_handlers,
)
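# Sketch: for a hypothetical project with slug 'backend' in org 'acme',
#   make_listid_from_instance(project)  # -> 'backend.acme.<mail.list-namespace>'
# Unregistered instance types raise ListResolver.UnregisteredTypeError.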
class MessageBuilder(object):
def __init__(self, subject, context=None, template=None, html_template=None,
body=None, html_body=None, headers=None, reference=None,
reply_reference=None, from_email=None, type=None):
assert not (body and template)
assert not (html_body and html_template)
assert context or not (template or html_template)
if he
|
hutcho66/imagerecognition
|
softmax_scripts/plot_softmax_results.py
|
Python
|
mit
| 636
| 0.003145
|
#!/usr/bin/env python
"""plot_softmax_results.py: Plot results of mnist softmax tests."""
from helper_scripts.mnist_read_log import plot_results
import matplotlib.pyplot as plt
# Produce cross entropy and accuracy plots for softmax models.
# Requires the training data for each of the models.
files = [r"""../mnist_softmax_models\softmax_alpha=0.1_keepprob=0.9\log\validation"""
]
scalar_names = ['accuracy_1', 'cross_entropy_1']
ylabels = ['Validation Accuracy', 'Cross Entropy (Validation Set)']
legend = [r'$\alpha=0.1, keep\_prob=0.9$']
plot_results(files, scalar_names, ylabels, legend, 'Softmax Models')
plt.show()
|
levental/visualizer
|
docs/conf.py
|
Python
|
bsd-3-clause
| 7,793
| 0.001155
|
# -*- coding: utf-8 -*-
#
# mfp documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mfp'
copyright = u'2014, Simcha Levental'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'visualizerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'visualizer.tex',
u'mfp Documentation',
u'Simcha Levental', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'visualizer', u'mfp Documentation',
[u'Simcha Levental'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'visualizer', u'mfp Documentation',
u'Simcha Levental', 'mfp',
'Mapping and data visualization toolkit.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
decarboxy/py_protein_utils
|
scripts/best_models.py
|
Python
|
mit
| 1,426
| 0.018934
|
#!/usr/bin/env python2.5
from optparse import OptionParser
from rosettautil.rosetta import rosettaScore
usage = "%prog [options] --term=scoreterm silent files"
parser=OptionParser(usage)
parser.add_option("--term",dest="term",help="score term to use")
(options,args) = parser.parse_args()
if len(args) < 1:
parser.error("you must specify at least one silent file")
#score_gen = scores.score_generator(options.term)
best_models = {} # key is a structure ID, value is a pair in form (tag,score)
for silent_file in args:
#file = silent_file
scores=rosettaScore.SilentScoreTable()
scores.add_file(silent_file)
    score_gen = scores.score_generator(options.term)
for tag,score in score_gen:
split_tag = tag.split("_")
model_id = "_".join(split_tag[0:len(split_tag)-1])
#file = scores.get_file_from_tag(tag)
try:
(current_file,current_best_tag,current_best_score) = best_models[model_id]
except KeyError:
best_models[model_id] = (silent_file,tag,score)
continue
if score < current_best_score:
#print "changed"
best_models[model_id] = (silent_file,tag,score)
#print best_models
#print silent_file
#print file,score , current_best_score
print "file","tag",options.term
for tag in best_models:
print best_models[tag][0],best_models[tag][1],best_models[tag][2]
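# Hypothetical invocation: keep the lowest-scoring tag per model id across
# several silent files.
#   best_models.py --term=total_score run1.silent run2.silent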
|
geneontology/go-site
|
scripts/sanity-check-users-and-groups.py
|
Python
|
bsd-3-clause
| 7,704
| 0.007269
|
####
#### Give a report on the "sanity" of the users and groups YAML
#### metadata files.
####
#### Example usage to analyze the usual suspects:
#### python3 sanity-check-users-and-groups.py --help
#### Get report of current problems:
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml
#### Attempt to repair file (note that we go through json2yaml as libyaml output does not seem compatible with kwalify):
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml --repair --output /tmp/output.json && json2yaml --depth 10 /tmp/output.json > /tmp/users.yaml
#### Check new yaml:
#### kwalify -E -f metadata/users.schema.yaml /tmp/users.yaml
#### Run report on new yaml.
#### reset && python3 ./scripts/sanity-check-users-and-groups.py --users /tmp/users.yaml --groups metadata/groups.yaml
import sys
import argparse
import logging
import yaml
import json
## Logger basic setup.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger('sanity')
LOGGER.setLevel(logging.WARNING)
## Make sure we exit in a way that will get Jenkins's attention.
DIED_SCREAMING_P = False
def die_screaming(string):
""" Die and take our toys home. """
global DIED_SCREAMING_P
LOGGER.error(string)
DIED_SCREAMING_P = True
#sys.exit(1)
def main():
## Deal with incoming.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='More verbose output')
parser.add_argument('-u', '--users',
help='The users.yaml file to act on')
parser.add_argument('-g', '--groups',
help='The groups.yaml file to act on')
parser.add_argument("-r", "--repair", action="store_true",
help="Attempt to repair groups and update old permissions")
parser.add_argument("-o", "--output",
help="The file to output internal structure to (if repairing)")
args = parser.parse_args()
if args.verbose:
LOGGER.setLevel(logging.INFO)
LOGGER.info('Verbose: on')
## Ensure targets.
if not args.users:
die_screaming('need a users argument')
LOGGER.info('Will operate on users: ' + args.users)
if not args.groups:
die_screaming('need a groups argument')
LOGGER.info('Will operate on groups: ' + args.groups)
## Read.
users = None
with open(args.users) as mhandle:
users = yaml.safe_load(mhandle.read())
groups_linear = None
with open(args.groups) as mhandle:
groups_linear = yaml.safe_load(mhandle.read())
## Switch linear groups to lookup by URI.
groups_lookup = {}
for group in groups_linear:
groups_lookup[group['id']] = group['label']
violations = {
"uri": [],
"groups": [],
}
## Cycle through users and see if we find any violations.
for index, user in enumerate(users):
nick = user.get('nickname', '???')
## Update old authorizations type.
if args.repair:
if user.get("authorizations", {}).get("noctua-go", False):
print('REPAIR?: Update perms for ' + nick)
auths = user["authorizations"]["noctua-go"]
del user["authorizations"]["noctua-go"] # delete old way
user["authorizations"]["noctua"] = {
"go": auths
}
users[index] = user # save new back into list
## Does the user have noctua perms?
if user.get('authorizations', False):
auth = user.get('authorizations', {})
if auth.get('noctua-go', False) or \
(auth.get('noctua', False) and auth['noctua'].get('go', False)):
#print('Has perms: ' + user.get('nickname', '???'))
## 1: If so, do they have a URI?
if not user.get('uri', False):
die_screaming(user.get('nickname', '???') +\
' has no "uri"')
#print(nick + ' has no "uri"')
violations["uri"].append(nick)
else:
## 2: Is it an ORCID?
if user.get('uri', 'NIL').find('orcid') == -1:
die_screaming(user.get('nickname', '???') +\
' "uri" is not an ORCID.')
#print(nick + ' "uri" is not an ORCID.')
violations["uri"].append(nick)
## 3: If so, do they have a populated groups?
if not user.get('groups', False) or len(user["groups"]) == 0:
die_screaming(user.get('nickname', '???') +\
' has no "groups"')
#print(nick + ' has no "groups"')
if user.get("organization", False):
org = user["organization"]
print(nick + " could try org {}".format(org))
matching_groups = list(filter(lambda g: org == g["label"] or org == g["shorthand"], groups_linear))
if len(matching_groups) > 0:
print("REPAIR?: Use group: {}".format(matching_groups[0]["id"]))
if args.repair:
user["groups"] = [matching_groups[0]["id"]]
users[index] = user
else:
violations["groups"].append(nick)
else:
## 4: If so, are all entries in groups?
for gid in user.get('groups'):
if not groups_lookup.get(gid, False):
die_screaming(user.get('nickname', '???') +\
' has mistaken group entry: ' + gid)
#print(nick + ' has mistaken group entry: ' + gid)
violates_both = set(violations["uri"]).intersection(violations["groups"])
just_uri = set(violations["uri"]).difference(violates_both)
just_groups = set(violations["groups"]).difference(violates_both)
## Check privs.
for index, user in enumerate(users):
if user["nickname"] in just_uri or user["nickname"] in just_groups:
# If we have an auth with noctua-go with allow-edit set to True
if user.get("authorizations", {}).get("noctua", {}).get("go", {}).get("allow-edit", False):
print("REPAIR?: Revoke {} noctua-go edit privileges.".format(user["nickname"]))
if args.repair:
del user["authorizations"]
users[index] = user
print("\nNo URI, or no ORCID:")
print("===================")
print("\n".join(just_uri))
print("\nNo Groups:")
print("===================")
print("\n".join(just_groups))
print("\nBoth Bad:")
print("===================")
print("\n".join(violates_both))
#print(json.dumps(users))
#print(yaml.dump(users, default_flow_style=False))
#yaml.dump(data, default_flow_style=False)
if args.output:
with open(args.output, 'w+') as fhandle:
fhandle.write(json.dumps(users, sort_keys=True, indent=4))
## TODO: implement hard checks above later.
if DIED_SCREAMING_P:
print('Errors happened, alert the sheriff.')
sys.exit(1)
else:
print('Non-failing run.')
## You saw it coming...
if __name__ == '__main__':
main()
|
Olamyy/Norman
|
Norman/api/base.py
|
Python
|
bsd-3-clause
| 846
| 0.002364
|
import requests
from Norman.errors import HttpMethodError
class BaseAPI(object):
"""
"""
_content_type = "application/json"
def __init__(self):
pass
def _json_parser(self, json_response):
        response = json_response.json()
        return response
def exec_request(self, method, url, data=None):
method_map = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'DELETE': requests.delete
}
        payload = data
request = method_map.get(method)
if not request:
raise HttpMethodError(
"Request method not recognised or implemented")
response = request(
url=url, json=payload, verify=True)
return response.content
base = BaseAPI()
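# Usage sketch (hypothetical endpoint):
#   body = base.exec_request('GET', 'https://api.example.com/v1/status')
# Unsupported verbs raise HttpMethodError before any request is made.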
|
apanda/modeling
|
mcnet/components/erroneous_aclfull_proxy.py
|
Python
|
bsd-3-clause
| 10,082
| 0.019242
|
from . import NetworkObject
import z3
class ErroneousAclWebProxy (NetworkObject):
"""A caching web proxy which enforces ACLs erroneously.
The idea here was to present something that is deliberately not path independent"""
def _init (self, node, network, context):
super(ErroneousAclWebProxy, self).init_fail(node)
self.proxy = node.z3Node
self.ctx = context
self.constraints = list ()
self.acls = list ()
network.SaneSend(self)
self._webProxyFunctions ()
self._webProxyConstraints ()
@property
def z3Node (self):
return self.proxy
def SetPolicy (self, policy):
"""Wrap add acls"""
self.AddAcls(policy)
def AddAcls(self, acls):
if not isinstance(acls, list):
acls = [acls]
self.acls.extend(acls)
@property
def ACLs (self):
return self.acls
def _addConstraints (self, solver):
self.constraints = list ()
self._webProxyFunctions ()
self._webProxyConstraints ()
solver.add(self.constraints)
def _webProxyConstraints (self):
eh = z3.Const('__webproxy_contraint_eh_%s'%(self.proxy), self.ctx.node)
eh2 = z3.Const('__webproxy_contraint_eh2_%s'%(self.proxy), self.ctx.node)
a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address)
i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort())
p = z3.Const('__webproxy_req_packet_%s'%(self.proxy), self.ctx.packet)
p2 = z3.Const('__webproxy_req_packet_2_%s'%(self.proxy), self.ctx.packet)
p3 = z3.Const('__webproxy_res_packet_%s'%(self.proxy), self.ctx.packet)
e1 = z3.Const('__webproxy_e1_%s'%(self.proxy), self.ctx.node)
e2 = z3.Const('__webproxy_e2_%s'%(self.proxy), self.ctx.node)
e3 = z3.Const('__webproxy_e3_%s'%(self.proxy), self.ctx.node)
e4 = z3.Const('__webproxy_e4_%s'%(self.proxy), self.ctx.node)
e5 = z3.Const('__webproxy_e5_%s'%(self.proxy), self.ctx.node)
e6 = z3.Const('__webproxy_e6_%s'%(self.proxy), self.ctx.node)
# \forall e, p: send(w, e, p) \Rightarrow hostHasAddr(w, p.src)
# \forall e_1, p_1: send(w, e, p_1) \Rightarrow \exists e_2, p_2: recv(e_2, w, p_2) \land
# p_2.origin == p_1.origin \land p_2.dest == p_1.dest \land hostHasAddr(p_2.origin, p_2.src)
self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p)))))
        cached_packet = z3.And(self.cached(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event) > \
self.ctime(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.packet.body(p) == self.cresp(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.packet.orig_body(p) == self.corigbody(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.packet.dest(p) == self.ctx.packet.src(p2), \
self.ctx.dest_port(p) == self.ctx.src_port(p2), \
self.ctx.src_port(p) == self.ctx.dest_port(p2), \
self.ctx.packet.options(p) == 0, \
self.ctx.packet.origin(p) == self.corigin(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)))
request_constraints = [z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.dest(p2))), \
self.ctx.packet.origin(p2) == self.ctx.packet.origin(p),
self.ctx.packet.dest(p2) == self.ctx.packet.dest(p), \
self.ctx.packet.body(p2) == self.ctx.packet.body(p), \
self.ctx.packet.orig_body(p2) == self.ctx.packet.orig_body(p), \
self.ctx.packet.options(p) == 0, \
self.ctx.packet.seq(p2) == self.ctx.packet.seq(p), \
self.ctx.hostHasAddr(self.ctx.packet.origin(p2), self.ctx.packet.src(p2)), \
self.ctx.dest_port(p2) == self.ctx.dest_port(p), \
self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p))]
if len(self.acls) != 0:
acl_constraint = map(lambda (s, d): \
z3.Not(z3.And(self.ctx.packet.src(p2) == s, \
self.ctx.packet.dest(p2) == d)), self.acls)
request_constraints.extend(acl_constraint)
self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
z3.Or(\
z3.Exists([p2, eh2], \
z3.And(self.ctx.recv(eh2, self.proxy, p2), \
z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
z3.And(request_constraints))), \
z3.Exists([p2, eh2], \
z3.And(self.ctx.recv(eh2, self.proxy, p2), \
z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
cached_packet))))))
cache_conditions = \
z3.ForAll([a, i], \
z3.Implies(self.cached(a, i), \
z3.And(\
z3.Not(self.ctx.hostHasAddr (self.proxy, a)), \
z3.Exists([e1, e2, e3, p, p2, p3], \
z3.And(\
self.ctx.recv(e1, self.proxy, p2), \
self.ctx.packet.dest(p2) == a, \
self.ctx.packet.body(p2) == i, \
self.ctx.packet.body(p) == i, \
self.ctx.packet.dest(p) == a, \
self.ctx.dest_port(p) == self.ctx.dest_port(p2), \
self.creqpacket(a, i) == p2, \
self.creqopacket(a, i) == p, \
self.ctime(a, i) > self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.send(self.proxy, e2, p), \
self.ctime(a, i) > self.ctx.etime(self.proxy, p, self.ctx.send_event), \
self.ctx.recv(e3, self.proxy, p3), \
self.crespacket(a, i) == p3, \
self.ctx.src_port(p3) == self.ctx.dest_port(p), \
self.ctx.dest_port(p3) == self.ctx.src_port(p), \
self.ctx.packet.src(p3) == self.ctx.packet.dest(p), \
self.ctx.packet.dest(p3) == self.ctx.packet.src(p), \
z3.Exists([e5, e6], \
z3.And(
self.ctx.hostHasAddr (e5, a), \
self.ctx.recv(e6, e5, p), \
z3.ForAll([e4], \
z3.Or(self.ctx.etime(e4, p3, self.ctx.send_event) == 0, \
self.ctx.etime(e4, p3, self.ctx.send_event) > self.ctx.etime(e5, p, self.ctx.recv_event))))), \
self.cresp(a, i) == self.ctx.packet.body(p3), \
self.corigbody(a, i) == self.ctx.packet.orig
|
karuppiah7890/Mail-for-Good
|
utility/generateEmailCsv.py
|
Python
|
bsd-3-clause
| 664
| 0.004518
|
import sys
import random, string
import os
numberOfEmailsToGenerate = sys.argv[1]
try:
int(numberOfEmailsToGenerate)
print('Generating a CSV with ' + numberOfEmailsToGenerate + ' random emails')
    print('This may take some time if the CSV is large ...')
except ValueError:
    sys.exit('Please pass a number as the first arg')
numberOfEmailsToGenerate = int(numberOfEmailsToGenerate)
# Delete ./generated.csv if it exists, then create it
if os.path.exists('./generated.csv'):
    os.remove('./generated.csv')
os.system('touch ./generated.csv')
for x in range(0, numberOfEmailsToGenerate):
    randomString = ''.join(random.choice(string.ascii_lowercase) for i in range(20))
    os.system('echo ' + randomString + '@email.com' ' >> ./generated.csv')
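# For large counts, spawning a shell per address is the bottleneck; a sketch
# of an equivalent pure-Python loop (same output format, illustrative only):
#
#     with open('./generated.csv', 'w') as fh:
#         for _ in range(numberOfEmailsToGenerate):
#             name = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
#             fh.write(name + '@email.com\n')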
|
kustodian/ansible
|
lib/ansible/module_utils/network/common/utils.py
|
Python
|
gpl-3.0
| 21,313
| 0.001314
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Networking tools for network modules only
import re
import ast
import operator
import socket
import json
from itertools import chain
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils import basic
from ansible.module_utils.parsing.convert_bool import boolean
# Backwards compatibility for 3rd party modules
# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
# up and have modules import directly themselves.
from ansible.module_utils.common.network import ( # noqa: F401
to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
)
try:
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
def to_list(val):
if isinstance(val, (list, tuple, set)):
return list(val)
elif val is not None:
return [val]
else:
return list()
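# Illustration of to_list semantics (not part of the original file):
#   to_list('show version')   -> ['show version']
#   to_list(('a', 'b'))       -> ['a', 'b']
#   to_list(None)             -> []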
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item).split('\n')
yield item
def transform_commands(module):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(),
prompt=dict(type='list'),
answer=dict(type='list'),
newline=dict(type='bool', default=True),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(module.params['commands'])
def sort_list(val):
if isinstance(val, list):
return sorted(val)
return val
class Entity(object):
"""Transforms a dict to with an argument spec
This class will take a dict and apply an Ansible argument spec to the
values. The resulting dict will contain all of the keys in the param
with appropriate values set.
Example::
argument_spec = dict(
command=dict(key=True),
display=dict(default='text', choices=['text', 'json']),
validate=dict(type='bool')
)
transform = Entity(module, argument_spec)
value = dict(command='foo')
result = transform(value)
        print(result)
{'command': 'foo', 'display': 'text', 'validate': None}
Supported argument spec:
* key - specifies how to map a single value to a dict
* read_from - read and apply the argument_spec from the module
* required - a value is required
* type - type of value (uses AnsibleModule type checker)
* fallback - implements fallback function
* choices - set of valid options
* default - default value
"""
def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
args = [] if args is None else args
self._attributes = attrs or {}
self._module = module
for arg in args:
self._attributes[arg] = dict()
if from_argspec:
self._attributes[arg]['read_from'] = arg
if keys and arg in keys:
self._attributes[arg]['key'] = True
self.attr_names = frozenset(self._attributes.keys())
_has_key = False
for name, attr in iteritems(self._attributes):
if attr.get('read_from'):
if attr['read_from'] not in self._module.argument_spec:
module.fail_json(msg='argument %s does not exist' % attr['read_from'])
spec = self._module.argument_spec.get(attr['read_from'])
for key, value in iteritems(spec):
if key not in attr:
attr[key] = value
if attr.get('key'):
if _has_key:
module.fail_json(msg='only one key value can be specified')
_has_key = True
attr['required'] = True
def serialize(self):
return self._attributes
def to_dict(self, value):
obj = {}
for name, attr in iteritems(self._attributes):
if attr.get('key'):
obj[name] = value
else:
obj[name] = attr.get('default')
return obj
def __call__(self, value, strict=True):
if not isinstance(value, dict):
value = self.to_dict(value)
if strict:
unknown = set(value).difference(self.attr_names)
if unknown:
self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
for name, attr in iteritems(self._attributes):
if value.get(name) is None:
value[name] = attr.get('default')
if attr.get('fallback') and not value.get(name):
fallback = attr.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
                        value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
except basic.AnsibleFallbackNotFound:
continue
if attr.get('required') and value.get(name) is None:
self._module.fail_json(msg='missing required attribute %s' % name)
if 'choices' in attr:
if value[name] not in attr['choices']:
self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
if value[name] is not None:
value_type = attr.get('type', 'str')
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(value[name])
elif value.get(name):
value[name] = self._module.params[name]
return value
class EntityCollection(Entity):
"""Extends ```Entity``` to handle a list of dicts """
def __call__(self, iterable, strict=True):
if ite
|
djkonro/client-python
|
kubernetes/test/test_v1beta1_storage_class.py
|
Python
|
apache-2.0
| 891
| 0.003367
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_storage_class import V1beta1StorageClass
class TestV1beta1StorageClass(unittest.TestCase):
""" V1beta1Storag
|
eClass unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1StorageClass(self):
"""
Test V1beta1StorageClass
"""
model = kubernetes.client.models.v1beta1_storage_class.V1beta1StorageClass()
if __name__ == '__main__':
unittest.main()
|
FCP-INDI/nipype
|
nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py
|
Python
|
bsd-3-clause
| 1,372
| 0.019679
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..histogrammatching import HistogramMatching
def test_HistogramMatching_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='%s',
position=-3,
),
numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d',
),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
referenceVolume=dict(argstr='%s',
position=-2,
),
terminal_output=dict(nohash=True,
),
    threshold=dict(argstr='--threshold ',
),
)
inputs = HistogramMatching.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_HistogramMatching_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = HistogramMatching.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
ecederstrand/exchangelib
|
exchangelib/services/get_folder.py
|
Python
|
bsd-2-clause
| 2,503
| 0.004395
|
from ..errors import ErrorFolderNotFound, ErrorInvalidOperation, ErrorNoPublicFolderReplicaAvailable
from ..util import MNS, create_element
from .common import EWSAccountService, folder_ids_element, parse_folder_elem, shape_element
class GetFolder(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getfolder-operation"""
SERVICE_NAME = "GetFolder"
element_container_name = f"{{{MNS}}}Folders"
    ERRORS_TO_CATCH_IN_RESPONSE = EWSAccountService.ERRORS_TO_CATCH_IN_RESPONSE + (
        ErrorFolderNotFound,
ErrorNoPublicFolderReplicaAvailable,
ErrorInvalidOperation,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.folders = [] # A hack to communicate parsing args to _elems_to_objs()
def call(self, folders, additional_fields, shape):
"""Take a folder ID and returns the full information for that folder.
:param folders: a list of Folder objects
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:return: XML elements for the folders, in stable order
"""
# We can't easily find the correct folder class from the returned XML. Instead, return objects with the same
# class as the folder instance it was requested with.
self.folders = list(folders) # Convert to a list, in case 'folders' is a generator. We're iterating twice.
return self._elems_to_objs(
self._chunked_get_elements(
self.get_payload,
items=self.folders,
additional_fields=additional_fields,
shape=shape,
)
)
def _elems_to_objs(self, elems):
for folder, elem in zip(self.folders, elems):
if isinstance(elem, Exception):
yield elem
continue
yield parse_folder_elem(elem=elem, folder=folder, account=self.account)
def get_payload(self, folders, additional_fields, shape):
payload = create_element(f"m:{self.SERVICE_NAME}")
payload.append(
shape_element(
tag="m:FolderShape", shape=shape, additional_fields=additional_fields, version=self.account.version
)
)
payload.append(folder_ids_element(folders=folders, version=self.account.version))
return payload
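# A usage sketch (illustrative only; in practice this service is normally
# driven through exchangelib's Account/Folder helpers, and 'account',
# 'inbox' and the shape value below are assumptions):
#
#     svc = GetFolder(account=account)
#     for folder in svc.call(folders=[inbox], additional_fields=[], shape='Default'):
#         print(folder)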
|
jkonecki/autorest
|
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/bar.py
|
Python
|
mit
| 1,021
| 0.000979
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Bar(Model):
"""
The URIs that are used to perform a retrieval of a public blob, queue or
table object.
:param recursive_point: Recursive Endpoints
:type recursive_point: :class:`Endpoints
<fixtures.acceptancetestsstoragemanagementclient.models.Endpoints>`
"""
_attribute_map = {
        'recursive_point': {'key': 'RecursivePoint', 'type': 'Endpoints'},
}
def __init__(self, recursive_point=None, **kwargs):
self.recursive_point = recursive_point
|
sam-m888/gprime
|
gprime/app/forms/nameform.py
|
Python
|
gpl-2.0
| 2,701
| 0.00074
|
#
# gPrime - a web-based genealogy program
#
# Copyright (c) 2015 Gramps Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from .forms import Form
class NameForm(Form):
"""
    A form for listing, viewing, and editing a person's name.
"""
table = "Person"
def __init__(self, handler, instance, handle, row):
super().__init__(handler, instance)
        self.tview = self._("Name")
self.view = "Name"
self.row = row
self.handle = handle
if int(row) == 1:
self.path = "primary_name"
else:
self.path = "alternate_name.%s" % (int(self.row) - 2)
self.edit_fields = []
if int(row) == 1:
for field in [
'primary_name.type',
'primary_name.first_name',
'primary_name.call',
'primary_name.nick',
'primary_name.famnick',
'primary_name.private',
'primary_name.date',
'primary_name.suffix',
'primary_name.title',
'primary_name.group_as',
'primary_name.sort_as',
'primary_name.display_as',
]:
self.edit_fields.append(field)
else:
for field in [
'alternate_names.%s.type',
'alternate_names.%s.first_name',
'alternate_names.%s.call',
'alternate_names.%s.nick',
'alternate_names.%s.famnick',
'alternate_names.%s.private',
'alternate_names.%s.date',
'alternate_names.%s.suffix',
'alternate_names.%s.title',
'alternate_names.%s.group_as',
'alternate_names.%s.sort_as',
'alternate_names.%s.display_as',
]:
self.edit_fields.append(field % (int(self.row) - 2))
|
mozilla/peekaboo
|
peekaboo/main/views.py
|
Python
|
mpl-2.0
| 17,096
| 0
|
import calendar
import os
import subprocess
import tempfile
import shutil
import stat
import datetime
import time
import csv
from collections import defaultdict
from io import StringIO
import pytz
from pyquery import PyQuery as pq
from django import http
from django.core.cache import cache
from django.utils.timezone import utc
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.conf import settings
from django.db import transaction
from django.contrib import messages
from funfactory.urlresolvers import reverse
from sorl.thumbnail import get_thumbnail
from . import forms
from .models import Visitor, Location, VisitorCount
from .utils import json_view, non_mortals_required
from peekaboo.base.utils import ajax_login_required
def robots_txt(request):
return http.HttpResponse(
'User-agent: *\n'
'%s: /' % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
mimetype='text/plain',
)
@login_required
def home(request):
context = {
'count_users': User.objects.all().count(),
'count_locations': Location.objects.all().count(),
}
return render(request, 'main/home.html', context)
@non_mortals_required
def log_start(request):
data = {}
return render(request, 'main/log-start.html', data)
@non_mortals_required
def log(request, location):
location = get_object_or_404(Location, slug=location)
data = {
'edit_form': forms.SignInForm(),
'location': location,
}
request.session['default-location'] = location.slug
return render(request, 'main/log.html', data)
@json_view
@non_mortals_required
@ajax_login_required
def log_entries(request, location):
data = {
'latest': None,
'created': [],
'modified': []
}
location = get_object_or_404(Location, slug=location)
thumbnail_geometry = request.GET.get('thumbnail_geometry', '100')
def format_date(dt):
dt_date = dt.strftime('%m/%d/%Y')
dt_time = dt.strftime('%H:%M')
dt_tz = dt.tzname() or 'UTC'
return ' '.join([dt_date, dt_time, dt_tz])
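    # Illustration (not from the original file):
    #   format_date(datetime.datetime(2014, 6, 1, 13, 37, tzinfo=utc))
    #   returns '06/01/2014 13:37 UTC'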
qs = Visitor.objects.filter(location=location)
if request.GET.get('latest'):
latest = datetime.datetime.utcfromtimestamp(
float(request.GET['latest'])
)
latest = latest.replace(tzinfo=utc)
# because latest is potentially lacking in microseconds
# add some to prevent fetching it again
latest += datetime.timedelta(seconds=1)
recently_created = qs.filter(created__gte=latest)
else:
latest = None
recently_created = qs
def make_row(visitor):
row = {
'id': visitor.pk,
'created': format_date(visitor.created),
'created_iso': visitor.created.isoformat(),
'modified_iso': visitor.modified.isoformat(),
'job_title': visitor.job_title,
'name': visitor.get_name(formal=True),
'thumbnail': None,
'visiting': visitor.visiting,
'company': visitor.company,
}
if visitor.picture and os.path.isfile(visitor.picture.path):
thumbnail = get_thumbnail(
visitor.picture,
thumbnail_geometry
)
row['thumbnail'] = {
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
}
row['picture_url'] = (
reverse('main:log_entry_picture', args=(visitor.pk,)) +
'?width=600&height=400'
)
return row
first = None
for visitor in recently_created.order_by('-created')[:100]:
row = make_row(visitor)
data['created'].append(row)
if first is None:
first = max(visitor.created, visitor.modified)
data['created'].reverse()
# now how about those recently updated
if latest:
recently_modified = qs.filter(
created__lt=latest,
modified__gt=latest
)
        for visitor in recently_modified.order_by('modified'):
row = make_row(visitor)
assert row not in data['created']
data['modified'].append(row)
first = visitor.modified
if first:
data['latest'] = calendar.timegm(first.utctimetuple())
# from time import sleep
# sleep(1)
# from pprint import pprint
# pprint(data)
return data
@json_view
@csrf_exempt
@non_mortals_required
def log_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
thumbnail_geometry = request.GET.get('thumbnail_geometry', '100')
if request.method == 'POST':
form = forms.SignInEditForm(request.POST, instance=visitor)
if form.is_valid():
form.save()
data = form.cleaned_data
data['name'] = visitor.get_name(formal=True)
else:
return {'errors': form.errors}
else:
data = {
'first_name': visitor.first_name,
'last_name': visitor.last_name,
'job_title': visitor.job_title,
'company': visitor.company,
'visiting': visitor.visiting,
'thumbnail_url': None,
}
if visitor.picture:
thumbnail = get_thumbnail(
visitor.picture,
thumbnail_geometry
)
data['thumbnail'] = {
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
}
return data
@json_view
@csrf_exempt
@non_mortals_required
def log_entry_picture(request, pk, format):
visitor = get_object_or_404(Visitor, pk=pk)
if not visitor.picture:
return http.HttpResponseBadRequest("Doesn't have a picture")
geometry = (
'%sx%s' %
(request.GET.get('width', 600),
         request.GET.get('height', 500))
)
picture = get_thumbnail(
visitor.picture,
geometry
)
return http.HttpResponse(picture.read(), mimetype='image/jpeg')
@login_required
@json_view
@require_POST
@non_mortals_required
def delete_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
visitor.delete()
# XXX delete all images too??
return {'deleted': True}
@non_mortals_required
def print_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
data = {
'visitor': visitor,
'print': request.GET.get('print', False)
}
response = render(request, 'main/print-entry.html', data)
if request.GET.get('iframe'):
response['X-Frame-Options'] = 'SAMEORIGIN'
response["Access-Control-Allow-Origin"] = "*"
return response
@non_mortals_required
def print_entry_pdf(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
data = {
'visitor': visitor,
}
response = render(request, 'main/print-entry.pdf.html', data)
html = response.content
tmp_dir = os.path.join(
tempfile.gettempdir(),
'peekaboopdfs'
)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
input_file = os.path.join(tmp_dir, 'visitor-%s.html' % visitor.pk)
output_file = os.path.join(tmp_dir, 'visitor-%s.debug.pdf' % visitor.pk)
if os.path.isfile(output_file):
os.remove(output_file)
dom = pq(html)
copied_media_files = []
for img in dom('img'):
src = img.attrib['src']
if settings.STATIC_URL in src:
source = os.path.join(
settings.STATIC_ROOT,
src.replace(settings.STATIC_URL, '')
)
else:
source = os.path.join(
settings.MEDIA_ROOT,
src.replace(settings.MEDIA_URL, '')
)
if not os.path.isfile(source):
raise IOError("Couldn't find %s (Tried: %s)" % (
img.attrib['src'],
source
))
filename = os.path.basename(source)
|
stvstnfrd/edx-platform
|
lms/djangoapps/course_home_api/progress/v1/serializers.py
|
Python
|
agpl-3.0
| 3,620
| 0.000276
|
"""
Progress Tab Serializers
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
class GradedTotalSerializer(serializers.Serializer):
earned = serializers.FloatField()
possible = serializers.FloatField()
class SubsectionSerializer(serializers.Serializer):
display_name = serializers.CharField()
due = serializers.DateTimeField()
format = serializers.CharField()
graded = serializers.BooleanField()
graded_total = GradedTotalSerializer()
# TODO: override serializer
percent_graded = serializers.FloatField()
problem_scores = serializers.SerializerMethodField()
show_correctness = serializers.CharField()
show_grades = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
def get_url(self, subsection):
relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location])
request = self.context['request']
return request.build_absolute_uri(relative_path)
def get_problem_scores(self, subsection):
problem_scores = [
{
'earned': score.earned,
'possible': score.possible,
}
for score in subsection.problem_scores.values()
]
return problem_scores
def get_show_grades(self, subsection):
return subsection.show_grades(self.context['staff_access'])
class ChapterSerializer(serializers.Serializer):
"""
Serializer for chapters in coursewaresummary
"""
display_name = serializers.CharField()
subsections = SubsectionSerializer(source='sections', many=True)
class CertificateDataSerializer(serializers.Serializer):
cert_status = serializers.CharField()
cert_web_view_url = serializers.CharField()
download_url = serializers.CharField()
msg = serializers.CharField()
title = serializers.CharField()
class CreditRequirementSerializer(serializers.Serializer):
"""
Serializer for credit requirement objects
"""
display_name = serializers.CharField()
min_grade = serializers.SerializerMethodField()
status = serializers.CharField()
status_date = serializers.DateTimeField()
def get_min_grade(self, requirement):
if requirement['namespace'] == 'grade':
return requirement['criteria']['min_grade'] * 100
else:
return None
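# Illustration (not from the original file): a requirement of
# {'namespace': 'grade', 'criteria': {'min_grade': 0.8}} serializes
# min_grade as 80.0; any other namespace serializes it as None.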
class CreditCourseRequirementsSerializer(serializers.Serializer):
"""
Serializer for credit_course_requirements
"""
dashboard_url = serializers.SerializerMethodField()
eligibility_status = serializers.CharField()
requirements = CreditRequirementSerializer(many=True)
def get_dashboard_url(self, _):
relative_path = reverse('dashboard')
        request = self.context['request']
return request.build_absolute_uri(relative_path)
class VerificationDataSerializer(serializers.Serializer):
"""
Serializer for verification data object
"""
    link = serializers.URLField()
status = serializers.CharField()
status_date = serializers.DateTimeField()
class ProgressTabSerializer(serializers.Serializer):
"""
Serializer for progress tab
"""
certificate_data = CertificateDataSerializer()
credit_course_requirements = CreditCourseRequirementsSerializer()
credit_support_url = serializers.URLField()
courseware_summary = ChapterSerializer(many=True)
enrollment_mode = serializers.CharField()
studio_url = serializers.CharField()
user_timezone = serializers.CharField()
verification_data = VerificationDataSerializer()
|
mobarski/sandbox
|
parallel/p7ex4.py
|
Python
|
mit
| 2,120
| 0.032075
|
## p7.py - parallel processing microframework
## (c) 2017 by mobarski (at) gmail (dot) com
## licence: MIT
## version: ex4 (simple fan-in of subprocess outputs)
from __future__ import print_function
# CONFIG ###################################################################################
HEAD_LEN_IN = 2
HEAD_LEN_OUT = 100
BUFSIZE = 4096
CMD = "python -c 'import sys; sys.stdout.write(sys.stdin.read())'"
N = 4
# END OF CONFIG ############################################################################
import subprocess
import threading
import shlex
import sys
from select import select
from time import time
IN = sys.stdin
OUT = sys.stdout
OUT = open('test/out.txt','wb')
LOG = sys.stderr
ctx = {}
args = shlex.split(CMD)
PIPE = subprocess.PIPE
for i in range(N):
ctx[i] = {}
proc = subprocess.Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=BUFSIZE)
ctx[i]['proc'] = proc
# metadata
ctx[i]['pid'] = proc.pid
ctx[i]['t_start'] = time()
ctx[i]['head_cnt_in'] = 0
ctx[i]['head_cnt_out'] = 0
def pump_input():
while True:
for i in range(N):
p = ctx[i]['proc']
head = IN.read(HEAD_LEN_IN)
p.stdin.write(head)
ctx[i]['head_cnt_in'] += 1
if len(head)<HEAD_LEN_IN: # End Of File
break
tail = IN.readline()
p.stdin.write(tail)
else: continue # not EOF
# EOF -> close all input streams
		for i in range(N):
ctx[i]['proc'].stdin.close()
break
def pump_output():
done = set()
while True:
for i in range(N):
if i in done: continue
p = ctx[i]['proc']
head = p.stdout.read(HEAD_LEN_OUT)
OUT.write(head)
ctx[i]['head_cnt_out'] += 1
if len(head)<HEAD_LEN_OUT: # End Of File
done.add(i)
p.wait() # End Of Process
				ctx[i]['t_stop'] = time()
ctx[i]['run_time'] = ctx[i]['t_stop'] - ctx[i]['t_start']
continue
tail = p.stdout.readline()
OUT.write(tail)
if len(done)==N:
return
# RUN DATA PUMPS
input_pump = threading.Thread(target=pump_input)
output_pump = threading.Thread(target=pump_output)
input_pump.start()
output_pump.start()
input_pump.join()
output_pump.join()
from pprint import pprint
pprint(ctx)
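## Example invocation (a sketch; the input path is an assumption, while
## test/out.txt is hardcoded above):
##   python p7ex4.py < test/in.txt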
|
hziling/template
|
template.py
|
Python
|
mit
| 11,977
| 0.002004
|
# -*- coding: utf-8 -*-
"""
flango.template
~~~~~~~~~~~~~~
    template module provides a simple template system that compiles
    templates to Python code, much like the django and tornado template
    modules.
Usage
-----
    Well, you can view the tests file under tests/ directly for usage examples.
Basically::
>>> import template
>>> template.Template('Hello, {{ name }}').render(name='flango')
Hello, flango
If, else, for...::
>>> template.Template('''
... {% for i in l %}
... {% if i > 3 %}
... {{ i }}
... {% else %}
... less than 3
... {% endif %}
        ... {% endfor %}
... ''' ).render(l=[2, 4])
less than 3
4
    Then, user-defined class objects also work well::
>>> class A(object):
...
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> o = A("I am o.a", [1, 2, 3])
>>> template.Template('''
... {{ o.a }}
... {% for i in o.b %}
... {{ i }}
... {% endfor %}
... ''').render(o=o)
I am o.a
1
2
3
    and wow, functions may surprise you::
>>> template.Template('{{ abs(-3) }}').render()
'3'
        >>> template.Template('{{ len([1, 2, 3]) }}').render()
'3'
        >>> template.Template('{{ [1, 2, 3].index(2) }}').render()
'1'
    and complex functions like lambda expressions work as well::
>>> template.Template('{{ list(map(lambda x: x * 2, [1, 2, 3])) }}').render()
'[2, 4, 6]'
    and lastly, template inheritance via extends and include::
{% extends 'base.html' %}
{% include 'included.html' %}
Hacking with fun and joy.
"""
import re
import os
import collections
# LRU Cache capacity:
_CACHE_CAPACITY = 128
class Scanner(object):
""" Scanner is a inner class of Template which provide
custom template source reading operations.
"""
def __init__(self, source):
# pattern for variable, function, block, statement.
self.pattern = re.compile(r'''
{{\s*(?P<var>.+?)\s*}} # variable: {{ name }} or function like: {{ abs(-2) }}
| # or
{%\s*(?P<endtag>end(if|for|while|block))\s*%} # endtag: {% endfor %}
| # or
{%\s*(?P<statement>(?P<keyword>\w+)\s*(.+?))\s*%} # statement: {% for i in range(10) %}
''', re.VERBOSE)
# the pre-text before token.
self.pretext = ''
# the remaining text which have not been processed.
self.remain = source
def next_token(self):
""" Get the next token which match the pattern semantic.
return `None` if there is no more tokens, otherwise,
return matched regular expression group of token `t`, get
the pre-text and the remain text at the same time.
"""
t = self.pattern.search(self.remain)
if not t:
return None
self.pretext = self.remain[:t.start()]
self.remain = self.remain[t.end():]
return t
@property
def empty(self):
""" Return `True` if the source have been processed."""
return self.remain == ''
class BaseNode(object):
""" Base abstract class for nodes.
    Subclasses of BaseNode must implement the 'generate' interface to
    emit Python intermediate code.
"""
def __init__(self, text, indent, block):
self.text = text
self.indent = indent
self.block = block
def generate(self):
raise NotImplementedError()
class TextNode(BaseNode):
""" Node for normal text. """
def generate(self):
return '{0}_stdout.append(\'\'\'{1}\'\'\')\n'.format(' '*self.indent, self.text)
class VariableNode(BaseNode):
""" Node for variables: such as {{ name }}. """
def generate(self):
return '{0}_stdout.append({1})\n'.format(' '*self.indent, self.text)
class KeyNode(BaseNode):
""" Node for keywords like if else... """
def generate(self):
return '{0}{1}\n'.format(' '*self.indent, self.text)
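# For illustration (not part of the original file), the generated
# intermediate code looks like:
#   TextNode('hi', 4, None).generate()       -> "    _stdout.append('''hi''')\n"
#   VariableNode('name', 0, None).generate() -> "_stdout.append(name)\n"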
class TemplateException(Exception):
pass
class Template(object):
""" Main class for compiled template instance.
A initialized template instance will parse and compile
all the template source to Python intermediate code,
and instance function `render` will use Python builtin function
`exec` to execute the intermediate code in Python
runtime.
As function `exec` own very strong power and the ability to
execute all the python code in the runtime with given
namespace dict, so this template engine can perform all
the python features even lambda function. But, function
`exec` also has a huge problem in security, so be careful
and be serious, and I am very serious too.
"""
def __init__(self, source, path='', autoescape=False):
if not source:
raise ValueError('Invalid parameter')
self.scanner = Scanner(source)
# path for extends and include
self.path = path
self.nodes = []
# parent template
self.parent = None
self.autoescape = autoescape
self._parse()
# compiled intermediate code.
self.intermediate = self._compile()
def _parse(self):
python_keywords = ['if', 'for', 'while', 'try', 'else', 'elif', 'except', 'finally']
indent = 0
block_stack = []
def block_stack_top():
return block_stack[-1] if block_stack else None
while not self.scanner.empty:
token = self.scanner.next_token()
if not token:
self.nodes.append(TextNode(self.scanner.remain, indent, block_stack_top()))
break
# get the pre-text before token.
if self.scanner.pretext:
self.nodes.append(TextNode(self.scanner.pretext, indent, block_stack_top()))
variable, endtag, tag, statement, keyword, suffix = token.groups()
if variable:
node_text = 'escape(str({0}))'.format(variable) if self.autoescape else variable
self.nodes.append(VariableNode(node_text, indent, block_stack_top()))
elif endtag:
if tag != 'block':
indent -= 1
continue
# block placeholder in parent template nodes
if not self.parent:
node_text = 'endblock%{0}'.format(block_stack_top())
self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
block_stack.pop()
elif statement:
if keyword == 'include':
filename = re.sub(r'\'|\"', '', suffix)
nodes = Loader(self.path).load(filename).nodes
for node in nodes:
node.indent += indent
self.nodes.extend(nodes)
elif keyword == 'extends':
if self.nodes:
raise TemplateException('Template syntax error: extends tag must be '
'at the beginning of the file.')
filename = re.sub(r'\'|\"', '', suffix)
self.parent = Loader(self.path).load(filename)
elif keyword == 'block':
block_stack.append(suffix)
if not self.parent:
node_text = 'block%{0}'.format(suffix)
self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
elif keyword in python_keywords:
node_text = '{0}:'.format(statement)
if keyword in ['else', 'elif', 'except', 'finally']:
key_indent = indent - 1
else:
k
|
rdegges/django-sslify
|
sslify/middleware.py
|
Python
|
unlicense
| 2,315
| 0.001296
|
"""Django middlewares."""
try:
# Python 2.x
from urlparse import urlsplit, urlunsplit
except ImportError:
# Python 3.x
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
try:
# Django 1.10
from django.utils.deprecation import MiddlewareMixin
except ImportError:
# Django <1.10
class MiddlewareMixin(object):
def __init__(self, get_response=None):
self.get_response = get_response
super(MiddlewareMixin, self).__init__()
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
class SSLifyMiddleware(MiddlewareMixin):
"""Force all requests to use HTTPs. If we get an HTTP request, we'll just
force a redirect to HTTPs.
.. note::
You can also disable this middleware when testing by setting
``settings.SSLIFY_DISABLE`` to True.
"""
@staticmethod
def process_request(request):
# If the user has explicitly disabled SSLify, do nothing.
if getattr(settings, 'SSLIFY_DISABLE', False):
return None
# Evaluate callables that can disable SSL for the current request
per_request_disables = getattr(settings, 'SSLIFY_DISABLE_FOR_REQUEST', [])
for should_disable in per_request_disables:
if should_disable(request):
return None
# If we get here, proceed as normal.
if not request.is_secure():
url = request.build_absolute_uri(request.get_full_path())
url_split = urlsplit(url)
scheme = 'https' if url_split.scheme == 'http' else url_split.scheme
ssl_port = getattr(settings, 'SSLIFY_PORT', 443)
            url_secure_split = (scheme, "%s:%d" % (url_split.hostname or '', ssl_port)) + url_split[2:]
secure_url = urlunsplit(url_secure_split)
return HttpResponsePermanentRedirect(secure_url)
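# A sketch of the per-request opt-out hook handled above (goes in
# settings.py; the helper name and path are illustrative):
#
#     def _is_healthcheck(request):
#         return request.path == '/healthz'
#
#     SSLIFY_DISABLE_FOR_REQUEST = [_is_healthcheck]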
|
dopuskh3/confluence-publisher
|
conf_publisher/page_dumper.py
|
Python
|
mit
| 1,397
| 0.003579
|
import argparse
import codecs
import sys
from .auth import parse_authentication
from .confluence_api import create_confluence_api
from .confluence import ConfluencePageManager
from .constants import DEFAULT_CONFLUENCE_API_VERSION
def main():
parser = argparse.ArgumentParser(description='Dumps Confluence page in storage format')
parser.add_argument('page_id', type=str, help='Configuration file')
parser.add_argument('-u', '--url', type=str, help='Confluence Url')
auth_group = parser.add_mutually_exclusive_group(required=True)
    auth_group.add_argument('-a', '--auth', type=str, help='Base64 encoded user:password string')
auth_group.add_argument('-U', '--user', type=str, help='Username (prompt password)')
parser.add_argument('-o', '--output', type=str, help='Output file|stdout|stderr', default='stdout')
args = parser.parse_args()
auth = parse_authentication(args.auth, args.user)
confluence_api = create_confluence_api(DEFAULT_CONFLUENCE_API_VERSION, args.url, auth)
page_manager = ConfluencePageManager(confluence_api)
page = page_manager.load(args.page_id)
if args.output.lower() == 'stdout':
f = sys.stdout
elif args.output.lower() == 'stderr':
f = sys.stderr
else:
f = codecs.open(args.output, 'w', encoding='utf-8')
with f:
f.write(page.body)
if __name__ == '__main__':
main()
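# Example invocation, assuming the module is run directly (URL, page id and
# username are placeholders):
#   python -m conf_publisher.page_dumper 123456 -u https://wiki.example.com -U alice -o page.xml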
|
kerckasha/ACE3
|
tools/search_privates.py
|
Python
|
gpl-2.0
| 4,384
| 0.021908
|
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def get_private_declare(content):
priv_declared = []
srch = re.compile('private.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
    srch = re.compile('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
    priv_declared += priv_split;
srch = re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*')
priv_local = srch.findall(content)
priv_local_declared = sorted(set(priv_local))
priv_declared += priv_local_declared;
return priv_declared
def check_privates(filepath):
bad_count_file = 0
def pushClosing(t):
closingStack.append(closing.expr)
closing << Literal( closingFor[t[0]] )
def popClosing():
closing << closingStack.pop()
with open(filepath, 'r') as file:
content = file.read()
        priv_use = []
# Regex search privates
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]')
priv_use = srch.findall(content)
priv_use = sorted(set(priv_use))
# Private declaration search
priv_declared = get_private_declare(content)
if '_this' in priv_declared: priv_declared.remove('_this')
if '_this' in priv_use: priv_use.remove('_this')
if '_x' in priv_declared: priv_declared.remove('_x')
if '_x' in priv_use: priv_use.remove('_x')
if '_forEachIndex' in priv_declared: priv_declared.remove('_forEachIndex')
if '_forEachIndex' in priv_use: priv_use.remove('_forEachIndex')
if '_foreachIndex' in priv_declared: priv_declared.remove('_foreachIndex')
if '_foreachIndex' in priv_use: priv_use.remove('_foreachIndex')
if '_foreachindex' in priv_declared: priv_declared.remove('_foreachindex')
if '_foreachindex' in priv_use: priv_use.remove('_foreachindex')
missing = []
for s in priv_use:
if s.lower() not in map(str.lower,priv_declared):
if s.lower() not in map(str.lower,missing):
missing.append(s)
if len(missing) > 0:
print (filepath)
private_output = 'private[';
first = True
for bad_priv in missing:
if first:
first = False
private_output = private_output + '"' + bad_priv
else:
private_output = private_output + '", "' + bad_priv
private_output = private_output + '"];';
print (private_output)
for bad_priv in missing:
print ('\t' + bad_priv)
bad_count_file = bad_count_file + 1
return bad_count_file
def main():
print("#########################")
print("# Search your Privates #")
print("#########################")
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
args = parser.parse_args()
for root, dirnames, filenames in os.walk('../addons' + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
bad_count = bad_count + check_privates(filename)
print ("Bad Count {0}".format(bad_count))
if __name__ == "__main__":
main()
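# Example invocation (a sketch; run from the tools/ directory, per the
# relative '../addons' path above, and the module name is illustrative):
#   python3 search_privates.py -m common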
|
helfertool/helfertool
|
src/registration/models/helpershift.py
|
Python
|
agpl-3.0
| 1,614
| 0.002478
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class HelperShift(models.Model):
"""
n-m relation between helper and shift.
This model then can be used by other apps to "attach" more data with OneToOne fields and signals.
The fields `present` and `manual_presence` belong to the gifts app. They are directly inserted here as it
would be too complicated to add another model for just two booleans. Additionally, this has the advantage
that the `present` flag can directly used by other apps.
Columns:
:helper: The helper
:shift: The shift
:timestamp: Timestamp when the helper registered for this shift
:present: Flag set when the helper is there (manually or automatically)
:manual_presence: `present` flag was manually set
"""
class Meta:
unique_together = ('helper', 'shift',)
helper = models.ForeignKey(
'Helper',
        on_delete=models.CASCADE,
)
shift = models.ForeignKey(
'Shift',
on_delete=models.CASCADE,
)
timestamp = models.DateTimeField(
auto_now_add=True,
verbose_name=_("Registration time for this shift")
)
present = models.BooleanField(
default=False,
verbose_name=_("Present"),
help_text=_("Helper was at shift")
)
manual_presence = models.BooleanField(
        default=False,
editable=False,
verbose_name=_("Presence was manually set"),
)
def __str__(self):
return "{} - {} - {}".format(self.helper.event, self.helper, self.shift)
|
ciarand/exhausting-search-homework
|
test/test_euclidean_mst.py
|
Python
|
isc
| 2,000
| 0.0055
|
""" Tests the implementation of the solution to the Euclidean Minimum Spanning
Tree (EMST) problem """
import pytest
from exhaustive_search.point import Point
from exhaustive_search.euclidean_mst import solve, edist
def compare_solutions(actual, expected):
assert len(actual) == len(expected), "expected %d to equal %d" % (len(actual), len(expected))
assert sorted(actual, key=keyfunc) == sorted(expected, key=keyfunc)
def keyfunc(tpl):
left, right = tpl
return edist(left, right)
def test_empty_mst_list():
""" the (E)MST solution to an empty list is an empty list """
assert solve([]) == [], __doc__
def test_non_list():
""" this function should reject non-lists (invalid inputs) by raising
a TypeError """
with pytest.raises(TypeError):
solve(True)
def test_list_of_one():
""" the (E)MST solution to a list of one is an empty list """
assert solve([Point(0, 0)]) == [], __doc__
def test_list_of_two():
""" the (E)MST solution to a list of two points (i.e. [a, b]) is a list
containing a tuple of points (i.e. [(a, b)]) """
one, two = Point(3, 1), Point(1, 3)
actual = solve([one, two])
compare_solutions(actual, [(one, two)])
def test_triangle():
""" Given a list of points L:
    L = [Point(0, 0), Point(3, 0), Point(6, 0)]
The solution is:
[(Point(0, 0), Point(3, 0)), (Point(3, 0), Point(6, 0))]
"""
graph = [Point(0, 0), Point(3, 0), Point(6, 0)]
actual = solve(graph)
    compare_solutions(actual, [(Point(0, 0), Point(3, 0)), (Point(3, 0), Point(6, 0))])
for result in actual:
left, right = result
if left == Point(0, 0) or left == Point(6, 0):
assert right == Point(3, 0), \
"expected right (%s) to == %s (left is %s)" % (right, Point(3, 0), left)
else:
assert right == Point(0, 0) or right == Point(6, 0), \
"expected right (%s) to == %s or %s" % (right, Point(0, 0), Point(6, 0))
|
sunqm/pyscf
|
pyscf/agf2/test/test_c_agf2.py
|
Python
|
apache-2.0
| 4,413
| 0.004759
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver J. Backhouse <[email protected]>
# George H. Booth <[email protected]>
#
import unittest
import numpy as np
from pyscf.agf2 import aux, _agf2
class KnownValues(unittest.TestCase):
@classmethod
def setUpClass(self):
self.nmo = 100
self.nocc = 20
self.nvir = 80
self.naux = 400
np.random.seed(1)
@classmethod
def tearDownClass(self):
del self.nmo, self.nocc, self.nvir, self.naux
np.random.seed()
def test_c_ragf2(self):
xija = np.random.random((self.nmo, self.nocc, self.nocc, self.nvir))
gf_occ = aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc))
gf_vir = aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir))
vv1, vev1 = _agf2.build_mats_ragf2_outcore(xija, gf_occ.energy, gf_vir.energy)
vv2, vev2 = _agf2.build_mats_ragf2_incore(xija, gf_occ.energy, gf_vir.energy)
        self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_dfragf2(self):
qxi = np.random.random((self.naux, self.nmo*self.nocc)) / self.naux
qja = np.random.random((self.naux, self.nocc*self.nvir)) / self.naux
gf_occ = aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc))
        gf_vir = aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir))
vv1, vev1 = _agf2.build_mats_dfragf2_outcore(qxi, qja, gf_occ.energy, gf_vir.energy)
vv2, vev2 = _agf2.build_mats_dfragf2_incore(qxi, qja, gf_occ.energy, gf_vir.energy)
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_uagf2(self):
xija = np.random.random((2, self.nmo, self.nocc, self.nocc, self.nvir))
gf_occ = (aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)),
aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)))
gf_vir = (aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)),
aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)))
vv1, vev1 = _agf2.build_mats_uagf2_outcore(xija, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
vv2, vev2 = _agf2.build_mats_uagf2_incore(xija, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_dfuagf2(self):
qxi = np.random.random((2, self.naux, self.nmo*self.nocc)) / self.naux
qja = np.random.random((2, self.naux, self.nocc*self.nvir)) / self.naux
gf_occ = (aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)),
aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)))
gf_vir = (aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)),
aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)))
vv1, vev1 = _agf2.build_mats_dfuagf2_outcore(qxi, qja, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
vv2, vev2 = _agf2.build_mats_dfuagf2_incore(qxi, qja, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
if __name__ == '__main__':
print('AGF2 C implementations')
unittest.main()
|
GNOME/orca
|
test/keystrokes/gnome-clocks/timer_flat_review.py
|
Python
|
lgpl-2.1
| 3,398
| 0.000896
|
#!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("F10"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("space"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"1. Review current line",
["BRAILLE LINE: 'Start $l'",
" VISIBLE: 'Start $l', cursor=1",
"SPEECH OUTPUT: 'Start'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("space"))
sequence.append(utils.AssertPresentationAction(
"2. Activate timer",
["BRAILLE LINE: 'gnome-clocks application Clocks frame Pause push button'",
" VISIBLE: 'Pause push button', cursor=1",
"BRAILLE LINE: 'gnome-clocks application Clocks frame Pause push button'",
" VISIBLE: 'Pause push button', cursor=1",
"SPEECH OUTPUT: 'Clocks frame'",
"SPEECH OUTPUT: 'Pause push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"3. Review current line",
["BRAILLE LINE: 'Pause Reset $l'",
" VISIBLE: 'Pause Reset $l', cursor=1",
"SPEECH OUTPUT: 'Pause Reset'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"4. Review previous line",
["BRAILLE LINE: '00 ∶ 04 ∶ 5[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 5[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 5[0-9]'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"5. Review previous line",
["BRAILLE LINE: '& y World & y Alarm & y Stopwatch &=y Timer $l'",
" VISIBLE: '& y World & y Alarm & y Stopwatc', cursor=1",
"SPEECH OUTPUT: 'not selected World not selected Alarm not selected Stopwatch selected Timer'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_9"))
sequence.append(utils.AssertPresentationAction(
"6. Review next line",
["BRAILLE LINE: '00 ∶ 04 ∶ 4[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 4[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 4[0-9]'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"7. Review current line",
["BRAILLE LINE: '00 ∶ 04 ∶ 3[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 3[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 3[0-9]'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"8. Review current line",
["BRAILLE LINE: '00 ∶ 04 ∶ 2[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 2[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 2[0-9]'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
tnemis/staging-server
|
baseapp/views/class_studying_views.py
|
Python
|
mit
| 3,912
| 0.005624
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from baseapp.models import Class_Studying
from django.contrib import auth, messages
class Class_StudyingView(object):
model = Class_Studying
def get_template_names(self):
"""Nest templates within class_studying directory."""
tpl = super(Class_StudyingView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'class_studying'
self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
return [self.template_name]
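    # For example, a ListView's default template name
    # 'baseapp/class_studying_list.html' is rewritten to
    # 'baseapp/class_studying/class_studying_list.html'.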
class Class_StudyingDateView(Class_StudyingView):
date_field = 'created_date'
month_format = '%m'
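    # With month_format='%m', month-archive URLs take numeric months
    # (e.g. /2014/03/) instead of Django's default '%b' abbreviated
    # month names (e.g. /2014/mar/).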
class Class_StudyingBaseListView(Class_StudyingView):
paginate_by = 10
class Class_StudyingArchiveIndexView(
Class_StudyingDateView, Class_StudyingBaseListView, ArchiveIndexView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingCreateView(Class_StudyingView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
messages.add_message(
self.request,
messages.SUCCESS,"Successfully created."
)
return reverse('baseapp_class_studying_list')
class Class_StudyingDateDetailView(Class_StudyingDateView, DateDetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDayArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, DayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDeleteView(Class_StudyingView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDetailView(Class_StudyingView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingListView(Class_StudyingBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingMonthArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, MonthArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingTodayArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, TodayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingUpdateView(Class_StudyingView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingWeekArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, WeekArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingYearArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, YearArchiveView):
make_object_list = True
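# A minimal URL-wiring sketch for the views above (editor's illustration:
# the project's real URLconf is not shown here, so every route and name
# other than 'baseapp_class_studying_list' is an assumption). It uses the
# pre-Django-2.0 django.conf.urls API, consistent with the
# django.core.urlresolvers imports above.
from django.conf.urls import url
urlpatterns = [
    url(r'^class-studying/$', Class_StudyingListView.as_view(),
        name='baseapp_class_studying_list'),
    url(r'^class-studying/add/$', Class_StudyingCreateView.as_view(),
        name='baseapp_class_studying_create'),  # assumed name
    url(r'^class-studying/(?P<pk>\d+)/$', Class_StudyingDetailView.as_view(),
        name='baseapp_class_studying_detail'),  # assumed name
]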
|
GeosoftInc/gxpy
|
geosoft/gxapi/GXRA.py
|
Python
|
bsd-2-clause
| 5,242
| 0.00744
|
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXRA(gxapi_cy.WrapRA):
"""
GXRA class.
The `GXRA <geosoft.gxapi.GXRA>` class is used to access ASCII files sequentially or
by line number. The files are opened in read-only mode, so no
write operations are defined
"""
def __init__(self, handle=0):
super(GXRA, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXRA <geosoft.gxapi.GXRA>`
:returns: A null `GXRA <geosoft.gxapi.GXRA>`
:rtype: GXRA
"""
return GXRA()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def create(cls, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>`
:param file: Name of the file
:type file: str
:returns: `GXRA <geosoft.gxapi.GXRA>` Object
:rtype: GXRA
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapRA._create(GXContext._get_tls_geo(), file.encode())
return GXRA(ret_val)
@classmethod
def create_sbf(cls, sbf, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>` on an `GXSBF <geosoft.gxapi.GXSBF>`
:param sbf: Storage
:param file: Name of the file
:type sbf: GXSBF
:type file: str
:returns: `GXRA <geosoft.gxapi.GXRA>` Object
:rtype: GXRA
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This method allows you to open an `GXRA <geosoft.gxapi.GXRA>` in a structured file
storage (an `GXSBF <geosoft.gxapi.GXSBF>`). SBFs can be created inside other data
containers, such as workspaces, maps, images and databases.
This lets you store application specific information together
with the data to which it applies.
.. seealso::
sbf.gxh
"""
ret_val = gxapi_cy.WrapRA._create_sbf(GXContext._get_tls_geo(), sbf, file.encode())
return GXRA(ret_val)
def gets(self, strbuff):
"""
Get next full line from `GXRA <geosoft.gxapi.GXRA>`
:param strbuff: Buffer in which to place string
:type strbuff: str_ref
:returns: 0 - Ok
1 - End of file
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val, strbuff.value = self._gets(strbuff.value.encode())
return ret_val
def len(self):
"""
Returns the total number of lines in `GXRA <geosoft.gxapi.GXRA>`
:returns: # of lines in the `GXRA <geosoft.gxapi.GXRA>`.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._len()
return ret_val
def line(self):
"""
Returns current line #, 0 is the first
:returns: The current read line location.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This will be the next line read.
"""
ret_val = self._line()
return ret_val
def seek(self, line):
"""
Position next read to specified line #
:param line: Line #, 0 is the first.
:type line: int
        :returns: 0 if the requested line is within the range of lines,
1 if outside range, line pointer will not be moved.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._seek(line)
return ret_val
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
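# --- Usage sketch (editor's illustration, not generated code) ---
# Assumes a Geosoft context has already been initialized (e.g. via
# geosoft.gxpy) and that 'data.txt' is a placeholder file name; str_ref
# is already imported at the top of this module.
def _example_read_all_lines(file_name='data.txt'):
    """Read every line of an ASCII file through GXRA, then rewind."""
    ra = GXRA.create(file_name)      # files are opened read-only
    buf = str_ref()
    lines = []
    while ra.gets(buf) == 0:         # gets() returns 0 = Ok, 1 = end of file
        lines.append(buf.value)
    ra.seek(0)                       # reposition the next read to line 0
    return lines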
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.