| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
TomConlin/dipper | dipper/utils/TestUtils.py | Python | bsd-3-clause | 2,394 | 0.000418 |
import logging
import io
from pathlib import Path
from rdflib import URIRef, RDF
from dipper.graph.RDFGraph import RDFGraph
LOG = logging.getLogger(__name__)
class TestUtils:
@staticmethod
def test_graph_equality(turtlish, graph):
"""
:param turtlish: file path or string of triples in turtle
format without prefix header
:param graph: Graph object to test against
:return: Boolean, True if graphs contain same
set of triples
"""
turtle_graph = RDFGraph()
turtle_graph.bind_all_namespaces()
prefixes = "\n".join(
["@prefix {}: <{}> .".format(
n[0], n[1]) for n in turtle_graph.namespace_manager.namespaces()]
|
)
headless_ttl = ''
try:
if Path(turtlish).exists():
headless_ttl = Path(turtlish).read_text()
|
else:
raise OSError
except OSError:
if isinstance(turtlish, str):
headless_ttl = turtlish
else:
raise ValueError("turtlish must be filepath or string")
turtle_string = prefixes + headless_ttl
mock_file = io.StringIO(turtle_string)
turtle_graph.parse(mock_file, format="turtle")
TestUtils.remove_ontology_axioms(graph)
turtle_triples = set(list(turtle_graph))
ref_triples = set(list(graph))
equality = turtle_triples == ref_triples
if not equality:
LOG.warning(
"Triples do not match\n"
"\tLeft hand difference: %s\n"
"\tRight hand difference: %s",
sorted(turtle_triples - ref_triples),
sorted(ref_triples - turtle_triples)
)
return equality
@staticmethod
def remove_ontology_axioms(graph):
"""
Given an rdflib graph, remove any triples
connected to an ontology node:
{} a owl:Ontology
:param graph: RDFGraph
:return: None
"""
ontology_iri = URIRef("http://www.w3.org/2002/07/owl#Ontology")
for subject in graph.subjects(RDF.type, ontology_iri):
for predicate, obj in graph.predicate_objects(subject):
graph.remove((subject, predicate, obj))
graph.remove((subject, RDF.type, ontology_iri))
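# Illustrative usage sketch (hypothetical IRIs; assumes RDFGraph exposes the plain
# rdflib Graph API, as the calls above imply):
# expected = '<http://example.org/a> a <http://example.org/B> .'  # headerless turtle
# graph = RDFGraph()
# graph.add((URIRef('http://example.org/a'), RDF.type, URIRef('http://example.org/B')))
# TestUtils.test_graph_equality(expected, graph)  # True when both hold the same triples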
|
nitzmahone/ansible | test/sanity/code-smell/ansible-requirements.py | Python | gpl-3.0 | 898 | 0.002227 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
def read_file(path):
try:
with open(path, 'r') as f:
return f.read()
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: unable to read required file %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return None
def main():
ORIGINAL_FILE = 'requirements.txt'
VENDORED_COPY = 'test/lib/ansible_test/_data/requirements/ansible.txt'
|
original_requirements = read_file(ORIGINAL_FILE)
vendored_requirements = read_file(VENDORED_COPY)
if original_requirements is not None and vendored_requirements is not None:
|
if original_requirements != vendored_requirements:
print('%s:%d:%d: must be identical to %s' % (VENDORED_COPY, 0, 0, ORIGINAL_FILE))
if __name__ == '__main__':
main()
|
henrymp/coursebuilder | modules/dashboard/unit_lesson_editor.py | Python | apache-2.0 | 31,883 | 0.000157 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr ([email protected])'
import cgi
import logging
import urllib
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
import filer
import messages
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'
# The editor has severe limitations for editing nested lists of objects. First,
# it does not allow one to move a lesson from one unit to another. We need a way
# of doing that. Second, JSON schema specification does not seem to support a
# type-safe array, which has objects of different types. We also want that
# badly :). All in all - using generic schema-based object editor for editing
# nested arrayable polymorphic attributes is a pain...
def create_status_annotation():
return oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', DRAFT_TEXT,
|
PUBLISHED_TEXT, class_name='split-from-main-group')
|
class CourseOutlineRights(object):
"""Manages view/edit rights for course outline."""
@classmethod
def can_view(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
"""An editor for the unit and lesson titles."""
def get_import_course(self):
"""Shows setup form for course import."""
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
if not annotations:
template_values['main_content'] = 'No courses to import from.'
self.render_page(template_values)
return
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
ImportCourseRESTHandler.SCHEMA_JSON,
annotations,
None, rest_url, exit_url,
auto_return=True,
save_button_caption='Import',
required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit_lesson(self):
"""Shows editor for the list of unit and lesson titles."""
key = self.request.get('key')
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
UnitLessonTitleRESTHandler.SCHEMA_JSON,
UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,
key, rest_url, exit_url,
required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Course Outline')
template_values[
'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def post_add_lesson(self):
"""Adds new lesson to a first unit of the course."""
course = courses.Course(self)
first_unit = None
for unit in course.get_units():
if unit.type == verify.UNIT_TYPE_UNIT:
first_unit = unit
break
if first_unit:
lesson = course.add_lesson(first_unit)
course.save()
# TODO(psimakov): complete 'edit_lesson' view
self.redirect(self.get_action_url(
'edit_lesson', key=lesson.lesson_id,
extra_args={'is_newly_created': 1}))
else:
self.redirect('/dashboard')
def post_add_unit(self):
"""Adds new unit to a course."""
course = courses.Course(self)
unit = course.add_unit()
course.save()
self.redirect(self.get_action_url(
'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))
def post_add_link(self):
"""Adds new link to a course."""
course = courses.Course(self)
link = course.add_link()
course.save()
self.redirect(self.get_action_url(
'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))
def post_add_assessment(self):
"""Adds new assessment to a course."""
course = courses.Course(self)
assessment = course.add_assessment()
course.save()
self.redirect(self.get_action_url(
'edit_assessment', key=assessment.unit_id,
extra_args={'is_newly_created': 1}))
def _render_edit_form_for(
self, rest_handler_cls, title, annotations_dict=None,
delete_xsrf_token='delete-unit', page_description=None):
"""Renders an editor form for a given REST handler class."""
if not annotations_dict:
annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT
key = self.request.get('key')
extra_args = {}
if self.request.get('is_newly_created'):
extra_args['is_newly_created'] = 1
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(rest_handler_cls.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(rest_handler_cls.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(delete_xsrf_token))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self,
rest_handler_cls.SCHEMA_JSON,
annotations_dict,
key, rest_url, exit_url,
extra_args=extra_args,
delete_url=delete_url, delete_method='delete',
read_only=not filer.is_editable_fs(self.app_context),
required_modules=rest_handler_cls.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit %s' % title)
if page_description:
template_values['page_description'] = page_description
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit(self):
"""Shows unit editor."""
self._render_edit_form_for(
UnitRESTHandler, 'Unit',
page_description=messages.UNIT_EDITOR_DESCRIPTION)
def get_edit_link(self):
"""Shows link editor."""
self._render_edit_form_for(
LinkRESTHandler, 'Link',
page_description=messages.LINK_EDITOR_DESCRIPTION)
def get_edit_assessment(self):
"""Shows assessment editor."""
self._render_edit_form_for(
AssessmentRESTHandler, 'Assessment',
page_description=messages.ASSESSMENT_EDITOR_DESCRIPTIO
|
krocat/ToonHA | toon/switch.py | Python | apache-2.0 | 2,141 | 0 |
"""
Support for Eneco Slimmer stekkers (Smart Plugs).
This provides controls for the z-wave smart plugs Toon can control.
"""
|
import logging
|
from homeassistant.components.switch import SwitchDevice
import custom_components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup discovered Smart Plugs."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
switch_items = []
for plug in _toon_main.toon.smartplugs:
switch_items.append(EnecoSmartPlug(hass, plug))
add_devices_callback(switch_items)
class EnecoSmartPlug(SwitchDevice):
"""Representation of a Smart Plug."""
def __init__(self, hass, plug):
"""Initialize the Smart Plug."""
self.smartplug = plug
self.toon_data_store = hass.data[toon_main.TOON_HANDLE]
@property
def should_poll(self):
"""No polling needed with subscriptions."""
return True
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.smartplug.device_uuid
@property
def name(self):
"""Return the name of the switch if any."""
return self.smartplug.name
@property
def current_power_w(self):
"""Current power usage in W."""
return self.toon_data_store.get_data('current_power', self.name)
@property
def today_energy_kwh(self):
"""Today total energy usage in kWh."""
return self.toon_data_store.get_data('today_energy', self.name)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.toon_data_store.get_data('current_state', self.name)
@property
def available(self):
"""True if switch is available."""
return self.smartplug.can_toggle
def turn_on(self, **kwargs):
"""Turn the switch on."""
return self.smartplug.turn_on()
def turn_off(self):
"""Turn the switch off."""
return self.smartplug.turn_off()
def update(self):
"""Update state."""
self.toon_data_store.update()
|
xiaoqiangwang/CSDataQuick | tools/fixup_qtcreator.py | Python | gpl-2.0 | 9,584 | 0.003861 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script reconstructs the Qt Creator installation to include Qt libraries, plugins, QtQuick.
Windows:
|-- bin
| |-- qt.conf -> Prefix=..
| |-- qt dlls
| |-- csdataquick executibles
| |-- csdataquick dlls
| |-- qt-creator executibles
| |-- qt-creator dlls
|
|-- lib
| |-- qtcreator
| |-- plugins
|
|-- plugins
| |-- qt plugins
| |-- csdaquick plguins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
Linux:
|-- bin
| |-- qt.conf -> Prefix=..
| |-- csdataquick executibles
| |-- qtcreator
|
|-- lib
| |-- qt shared libraries
| |-- csdataquick shared libraries
| |-- qtcreator
| |-- qtcreator shared libraries
| |-- plugins
|
|-- libexec
| |-- qtcreator
| |-- qt.conf -> Prefix=../..
| |-- qml2puppet
|
|-- plugins
| |-- csdaquick plguins
| |-- qt plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
macOS:
|-- bin
| |-- csdataquick app bundles
| | |-- Contents
| | |-- Resources
| | |-- qt.conf -> Prefix=../../../..
| |-- Qt Creator.app
| |-- Contents
| |-- Resources
| |-- qt.conf -> Prefix=../../../..
| |-- qmldesigner
| |-- qt.conf -> Prefix=../../../../..
|
|-- lib
| |-- qt frameworks
| |-- csdataquick shared libraries
|
|-- plugins
| |-- qt plugins
| |-- csdataquick plugins
|
|-- qml
|-- CSDataQuick
|-- QtQuick
"""
import argparse
import glob
import os
import platform
import re
import sys
import shutil
import subprocess
if sys.hexversion < 0x03000000:
if sys.hexversion < 0x02070000:
subprocess.getoutput = lambda cmd: subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
else:
subprocess.getoutput = lambda cmd: subprocess.check_output(cmd, shell=True)
|
parser = argparse.ArgumentParser(description='Fixup Qt and Qt Creator for packaging')
parser.add_argument('--target', required=True, help='target path')
parser.add_argument('--qtcreator', help='qt creator path')
|
parser.add_argument('--qmake', required=True, help='qmake file path')
args = parser.parse_args(sys.argv[1:])
qtcreator_path = args.qtcreator
target_path = args.target
qmake = args.qmake
bin_dir = os.path.join(target_path, 'bin')
lib_dir = os.path.join(target_path, 'lib')
libexec_dir = os.path.join(target_path, 'libexec')
plugins_dir = os.path.join(target_path, 'plugins')
qml_dir = os.path.join(target_path, 'qml')
def smartCopy(src, dst, follow_symlinks=True, ignore=None):
"""
same as shell cp command. If *src* is a file, it is copied into *dst* if dst is an existing directory
or as file *dst*. If *src* is a directory, it is copied recursively into *dst* if dst is an existing
directory or as as directory *dst*.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
if os.path.isdir(src):
shutil.copytree(src, dst, symlinks=not follow_symlinks, ignore=ignore)
else:
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return dst
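# Behaviour sketch with hypothetical paths, mirroring the shell "cp" semantics
# described in the docstring above:
# smartCopy('/tmp/libfoo.so', '/opt/pkg/lib')    # file into existing dir -> /opt/pkg/lib/libfoo.so
# smartCopy('/tmp/plugins', '/opt/pkg/plugins')  # directory source -> recursive copytree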
def deployQtLibraries():
libs = ['Core', 'Gui', 'Widgets', 'Concurrent', 'Network', 'PrintSupport', 'Script',
'Qml', 'Quick', 'QuickWidgets', 'QuickControls2', 'QuickTemplates2', 'QuickParticles',
'Xml', 'Svg', 'Sql', 'Help']
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_LIBS' % qmake).strip()
dst_dir = lib_dir
lib_pattern = 'libQt5%s.so*'
ignore_pattern = None
if platform.system() == 'Darwin':
lib_pattern = 'Qt%s.framework'
ignore_pattern = shutil.ignore_patterns('Headers', '*_debug', '*.prl')
elif platform.system() == 'Windows':
qtlibs_dir = subprocess.getoutput('%s -query QT_INSTALL_BINS' % qmake).strip()
dst_dir = bin_dir
lib_pattern = 'Qt5%s.dll'
elif platform.system() == 'Linux':
libs += ['XcbQpa', 'DBus']
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for lib in libs:
for file in glob.glob(os.path.join(qtlibs_dir, lib_pattern%lib)):
smartCopy(file, dst_dir, follow_symlinks=False, ignore=ignore_pattern)
if platform.system() == 'Windows':
for lib in ['libEGL.dll', 'libGLESv2.dll']:
smartCopy(os.path.join(qtlibs_dir, lib), dst_dir)
def deployQtPlugins():
plugins = ['bearer', 'designer', 'iconengines', 'imageformats',
'platforms', 'sqldrivers']
qtplugins_dir = subprocess.getoutput('%s -query QT_INSTALL_PLUGINS' % qmake).strip()
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
if platform.system() == 'Linux':
plugins += ['xcbglintegrations']
for plugin in plugins:
if not os.path.exists(os.path.join(qtplugins_dir, plugin)):
print('plugin "%s" does not exist' % plugin)
continue
shutil.copytree(os.path.join(qtplugins_dir, plugin),
os.path.join(plugins_dir, plugin),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*.pdb'))
# remove debug version on windows
if platform.system() == 'Windows':
# After sorting the debug version "<pluginname>d.dll" will be
# immediately after the release version "<pluginname>.dll".
# It is then quick to remove every 2nd file from this list.
dlls = sorted(os.listdir(os.path.join(plugins_dir, plugin)))[1::2]
for dll in dlls:
os.remove(os.path.join(plugins_dir, plugin, dll))
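# Illustration of the [1::2] trick above, with hypothetical plugin names:
# sorted(['qgif.dll', 'qgifd.dll', 'qico.dll', 'qicod.dll'])[1::2]
# -> ['qgifd.dll', 'qicod.dll'], i.e. only the debug copies get removed.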
def deployQtQuick():
qtqml_dir = subprocess.getoutput('%s -query QT_INSTALL_QML' % qmake).strip()
if not os.path.exists(qml_dir):
os.makedirs(qml_dir)
for qml in ['Qt', 'QtQml', 'QtGraphicalEffects', 'QtQuick', 'QtQuick.2']:
if not os.path.exists(os.path.join(qtqml_dir, qml)):
print('qml module "%s" does not exist' % qml)
continue
shutil.copytree(os.path.join(qtqml_dir, qml),
os.path.join(qml_dir, qml),
symlinks=True,
ignore=shutil.ignore_patterns('*_debug.dylib', '*.dylib.dSYM', '*plugind.dll','*.pdb'))
def deployQt():
# Copy Qt libraries
deployQtLibraries()
# Copy Qt plugins
deployQtPlugins()
# Copy QtQuick modules
deployQtQuick()
def restruct_macos():
bundle_name = os.path.basename(qtcreator_path)
if not bundle_name.endswith('.app'):
print('Not a valid app bundle')
return
# Copy the app bundle to bin
if not os.path.exists(bin_dir):
os.makedirs(bin_dir)
shutil.copytree(qtcreator_path, os.path.join(bin_dir, bundle_name), symlinks=True)
# Fix rpath
for root, dirs, files in os.walk(os.path.join(bin_dir, bundle_name)):
for file in files:
fname = os.path.join(root, file)
if os.path.islink(fname):
continue
if file == 'qml2puppet' or os.path.basename(root) == 'MacOS' or os.path.splitext(file)[1] == '.dylib':
cmd = 'install_name_tool -add_rpath "@loader_path/%s" "%s"' % (os.path.relpath(lib_dir, root), fname)
subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
# Fix qt.conf
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../..\n')
open(os.path.join(bin_dir, bundle_name, 'Contents', 'Resources', 'qmldesigner', 'qt.conf'), 'w').write('[Paths]\nPrefix = ../../../../..\n')
def restruct_windows():
# Copy the entire directory
for d in ['bin', 'lib', 'share']:
shutil.copytree(os.path.join(qtcreator_path, d), os.path.join(target_path,
|
Keeper-Security/Commander | unit-tests/helper.py | Python | mit | 1,017 | 0 |
from data_vault import VaultEnvironment
class KeeperApiHelper:
_expected_commands = []
_vault_env = VaultEnvironment()
@staticmethod
def communicate_expect(actions):
# type: (list) -> None
KeeperApiHelper._expected_commands.clear()
KeeperApiHelper._expected_commands.extend(actions)
@staticmethod
def is_expect_empty():
# type: () -> bool
return len(KeeperApiHelper._expected_commands) == 0
@staticmethod
def communicate_command(_, request):
# type: (any, dict) -> dict
rs = {
'result': 'success',
|
'result_code': '',
'message': ''
}
action = KeeperApiHelper._expected_commands.pop(0)
if callable(action):
props = action(request)
|
if type(props) == dict:
rs.update(props)
return rs
if type(action) == str:
if action == request['command']:
return rs
raise Exception()
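# Illustrative test flow (hypothetical command name), following the expectation
# queue logic above:
# KeeperApiHelper.communicate_expect(['sync_down'])
# rs = KeeperApiHelper.communicate_command(None, {'command': 'sync_down'})  # {'result': 'success', ...}
# assert KeeperApiHelper.is_expect_empty()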
|
aleksclark/replfs | nosetests/basic_operations_tests.py | Python | bsd-3-clause | 469 | 0.004264 |
import os
import shutil
class BasicOperations_TestClass:
TEST_ROOT = '__test_root__'
def setUp(self):
self.regenerate_root()
print(self.TEST_ROOT)
assert os.path.isdir(self.TEST_ROOT)
def tearDown(self):
|
return True
def test_test(self):
assert self.bar == 1
def regenerate_root(self):
if os.path.isdir(self.TEST_ROOT):
|
shutil.rmtree(self.TEST_ROOT)
os.makedirs(self.TEST_ROOT)
|
Nikea/VisTrails | vistrails/packages/rpy/__init__.py | Python | bsd-3-clause | 2,002 | 0.021978 |
###############################################################################
|
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
|
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
name = "R"
identifier = "org.vistrails.vistrails.rpy"
version = "0.1.2"
old_identifiers = ["edu.utah.sci.vistrails.rpy"]
|
ttm/socialLegacy | tests/legacy/testAnim2.py | Python | mit | 714 | 0.004202 |
# Import everything needed to edit video clips
from moviepy.editor import *
|
# Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60
clip = VideoFileClip("myHolidays.mp4").subclip(50,60)
# Reduce the audio volume (volume x 0.8)
clip = clip.volumex(0.8)
# Generate a text clip. You can customize the font, color, etc.
txt_clip = TextClip("My Holidays 2013",fontsize=70,color='white')
# Say that you want it to appear 10s at the center of the screen
txt_clip = txt_clip.set_pos('center').set_duration(10)
# Overlay the text clip on the first video clip
|
video = CompositeVideoClip([clip, txt_clip])
# Write the result to a file (many options available !)
video.write_videofile("myHolidays_edited.webm")
|
nmarley/dash | test/functional/bip9-softforks.py | Python | mit | 12,924 | 0.004643 |
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from io import BytesIO
import shutil
import time
import itertools
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
class BIP9SoftForksTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
self.setup_clean_chain = True
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = self.mocktime + 1
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
# check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
|
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
|
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
# check counting stats and "possible" flag before last block
|
hammerhorn/hammerhorn-jive | igo/cjh/files.py | Python | gpl-2.0 | 2,267 | 0.003088 |
#!/usr/bin/python
import glob, os
from cjh.cli import Cli, ListPrompt
from cjh.lists import ItemList
class Fileman(object):
@classmethod
def pwd(cls, getstr=False):
"""
Emulate 'pwd' command
"""
string = os.getcwd()
if getstr:
return string
else: print(string)
@classmethod
def mc(cls):
list_prompt = ListPrompt(['..'] + cls.ls(opts=['B'], get_list=True))
if len(list_prompt) > Cli.height():
Cli.less(str(list_prompt))
response = Cli.make_page(header=cls.pwd(getstr=True), func=list_prompt.input)
if response == 1:
os.chdir(list_prompt[response - 1])
cls.mc()
elif list_prompt[response - 1].endswith('/'):
os.chdir(list_prompt[response - 1][:-1])
cls.mc()
else: return list_prompt[response - 1]
@staticmethod
def ls(*args, **kwargs):
"""
Emulate 'ls' command
"""
if len(args) == 0:
cwd = os.getcwd()
file_list = os.listdir(cwd)
else:
file_list = []
for arg in args:
file_list += glob.glob(arg)
if 'opts' in kwargs and 'B' in kwargs['opts']:
file_list = [
file_ for file_ in file_list if not file_.endswith('~')
]
file_list.sort(key=str.lower)
dir_list = []
if 'opts' in kwargs and 'F' in kwargs['opts']:
for index, file_ in enumerate(file_list):
if os.path.isdir(file_):
dir_list.append(file_ + '/')
del file_list[index]
elif os.access(file_, os.X_OK):
file_list[index] = file_ + '*'
if 'get_list' not in kwargs or kwargs['get_list'] is not True:
string = ''
for dir_ in dir_list:
string += (dir_ + '\n')
for file_ in file_list:
string += (file_ + '\n')
|
if len(dir_list) + len(file_list) + 1 > Cli.height():
|
Cli.less(string)
else: Cli.write(string.strip())
else:
return dir_list + file_list
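# Usage sketch, reflecting the options handled above:
# Fileman.ls()                        # list the cwd via Cli
# Fileman.ls(opts=['B', 'F'])         # skip '~' backups, mark dirs with '/' and executables with '*'
# names = Fileman.ls(get_list=True)   # return dir_list + file_list instead of printing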
|
peri-source/peri | peri/runner.py | Python | mit | 29,237 | 0.001163 |
"""
Basically I'm trying to cover 4 options, one of each:
(Previously featured state?, Use previous positions?)
---------------------------------------------------------
(no, no) = get_initial_featuring
(yes, no) = get_particle_featuring
(yes, yes) = translate_featuring
(no, yes) = feature_from_pos_rad
These do:
(use globals to start, start from nothing)
(use positions to start, start from trackpy)
"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
import os
try:
import tkinter as tk
import tkinter.filedialog as tkfd
except ImportError:
import Tkinter as tk
import tkFileDialog as tkfd
import numpy as np
import peri
from peri import initializers, util, models, states, logger
from peri.comp import ilms
import peri.opt.optimize as opt
import peri.opt.addsubtract as addsub
RLOG = logger.log.getChild('runner')
def locate_spheres(image, feature_rad, dofilter=False, order=(3 ,3, 3),
trim_edge=True, **kwargs):
"""
Get an initial featuring of sphere positions in an image.
Parameters
-----------
image : :class:`peri.util.Image` object
Image object which defines the image file as well as the region.
feature_rad : float
Radius of objects to find, in pixels. This is a featuring radius
and not a real radius, so a better value is frequently smaller
than the real radius (half the actual radius is good). If ``use_tp``
is True, then the twice ``feature_rad`` is passed as trackpy's
``diameter`` keyword.
dofilter : boolean, optional
Whether to remove the background before featuring. Doing so can
often greatly increase the success of initial featuring and
decrease later optimization time. Filtering functions by fitting
the image to a low-order polynomial and featuring the residuals.
In doing so, this will change the mean intensity of the featured
image and hence the good value of ``minmass`` will change when
``dofilter`` is True. Default is False.
order : 3-element tuple, optional
If `dofilter`, the 2+1D Leg Poly approximation to the background
illumination field. Default is (3,3,3).
Other Parameters
----------------
invert : boolean, optional
Whether to invert the image for featuring. Set to True if the
image is dark particles on a bright background. Default is True
minmass : Float or None, optional
The minimum mass/masscut of a particle. Default is None, which
calculates internally.
use_tp : Bool, optional
Whether or not to use trackpy. Default is False, since trackpy
cuts out particles at the edge.
Returns
--------
positions : np.ndarray [N,3]
Positions of the particles in order (z,y,x) in image pixel units.
Notes
-----
Optionally filters the image by fitting the image I(x,y,z) to a
polynomial, then subtracts this fitted intensity variation and uses
centroid methods to find the particles.
"""
# We just want a smoothed field model of the image so that the residuals
# are simply the particles without other complications
m = models.SmoothFieldModel()
I = ilms.LegendrePoly2P1D(order=order, constval=image.get_image().mean())
s = states.ImageState(image, [I], pad=0, mdl=m)
if dofilter:
opt.do_levmarq(s, s.params)
pos = addsub.feature_guess(s, feature_rad, trim_edge=trim_edge, **kwargs)[0]
return pos
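# Usage sketch (hypothetical file name; util.RawImage is used the same way in
# get_initial_featuring below):
# im = util.RawImage('colloids.tif')
# pos = locate_spheres(im, feature_rad=5, dofilter=True)  # (N, 3) array of (z, y, x) positions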
def get_initial_featuring(statemaker, feature_rad, actual_rad=None,
|
im_name=None, tile=None, invert=True, desc='', use_full_path=False,
featuring_params={}, statemaker_kwargs={}, **kwargs):
|
"""
Completely optimizes a state from an image of roughly monodisperse
particles.
The user can interactively select the image. The state is periodically
saved during optimization, with different filename for different stages
of the optimization.
Parameters
----------
statemaker : Function
A statemaker function. Given arguments `im` (a
:class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),
and any additional `statemaker_kwargs`, must return a
:class:`~peri.states.ImageState`. There is an example function in
scripts/statemaker_example.py
feature_rad : Int, odd
The particle radius for featuring, as passed to locate_spheres.
actual_rad : Float, optional
The actual radius of the particles. Default is feature_rad
im_name : string, optional
The file name of the image to load. If not set, it is selected
interactively through Tk.
tile : :class:`peri.util.Tile`, optional
The tile of the raw image to be analyzed. Default is None, the
entire image.
invert : Bool, optional
Whether to invert the image for featuring, as passed to trackpy.
Default is True.
desc : String, optional
A description to be inserted in saved state. The save name will
be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''
use_full_path : Bool, optional
Set to True to use the full path name for the image. Default
is False.
featuring_params : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
``get_initial_featuring``, such as ``'use_tp'`` or ``'minmass'``.
Default is ``{}``.
statemaker_kwargs : Dict, optional
kwargs-like dict of any additional keyword arguments to pass to
the statemaker function. Default is ``{}``.
Other Parameters
----------------
max_mem : Numeric
The maximum additional memory to use for the optimizers, as
passed to optimize.burn. Default is 1e9.
min_rad : Float, optional
The minimum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius smaller than this are identified
as fake and removed. Default is 0.5 * actual_rad.
max_rad : Float, optional
The maximum particle radius, as passed to addsubtract.add_subtract.
Particles with a fitted radius larger than this are identified
as fake and removed. Default is 1.5 * actual_rad, however you
may find better results if you make this more stringent.
rz_order : int, optional
If nonzero, the order of an additional augmented rscl(z)
parameter for optimization. Default is 0; i.e. no rscl(z)
optimization.
zscale : Float, optional
The zscale of the image. Default is 1.0
Returns
-------
s : :class:`peri.states.ImageState`
The optimized state.
See Also
--------
feature_from_pos_rad : Using a previous state's globals and
user-provided positions and radii as an initial guess,
completely optimizes a state.
get_particle_featuring : Using a previous state's globals and
positions as an initial guess, completely optimizes a state.
translate_featuring : Use a previous state's globals and
centroids methods for an initial particle guess, completely
optimizes a state.
Notes
-----
Proceeds by centroid-featuring the image for an initial guess of
particle positions, then optimizing the globals + positions until
termination as called in _optimize_from_centroid.
The ``Other Parameters`` are passed to _optimize_from_centroid.
"""
if actual_rad is None:
actual_rad = feature_rad
_, im_name = _pick_state_im_name('', im_name, use_full_path=use_full_path)
im = util.RawImage(im_name, tile=tile)
pos = locate_spheres(im, feature_rad, invert=invert, **featuring_params)
if np.size(pos) == 0:
msg = 'No particles found. Try using a smaller `feature_rad`.'
|
maurov/xraysloth | sloth/collects/datagroup_xan.py | Python | bsd-3-clause | 767 | 0.009126 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""DataGroupXanes: work with XANES data sets
============================================
- DataGroup
- DataGroup1D
- DataGroupXanes
"""
from .datagroup import MODNAME
from .datagroup1D import DataGroup1D
class DataGroupXanes(DataGroup1D):
"""DataGroup for XANES scans"""
def __init__(self, kwsd=None, _larch=None):
super(DataGroupXanes, self).__init__(kwsd=kwsd, _larch=_larch)
### LARCH ###
def datagroup_xan(kwsd=None, _larch=None):
|
"""utility to perform wrapped operations on a list of XANES data
|
groups"""
return DataGroupXanes(kwsd=kwsd, _larch=_larch)
def registerLarchPlugin():
return (MODNAME, {'datagroup_xan' : datagroup_xan})
if __name__ == '__main__':
pass
|
quattor/aquilon | lib/aquilon/aqdb/model/address_assignment.py | Python | apache-2.0 | 6,715 | 0.000596 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Assign Addresses to interfaces """
from datetime import datetime
import re
from sqlalchemy import (Column, Integer, DateTime, ForeignKey, Sequence,
UniqueConstraint, Index)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relation, backref, deferred, validates
from sqlalchemy.sql import and_
from aquilon.exceptions_ import InternalError
from aquilon.aqdb.column_types import IP, AqStr, EmptyStr
from aquilon.aqdb.model import Base, Interface, ARecord, Network
from aquilon.aqdb.model.a_record import dns_fqdn_mapper
_TN = 'address_assignment'
_ABV = 'addr_assign'
class AddressAssignment(Base):
"""
Assignment of IP addresses to network interfaces.
It's kept as an association map to model the linkage, since we need to
have maximum ability to provide potentially complex configuration
scenarios, such as advertising certain VIP addresses from some, but not
all of the network interfaces on a machine (to be used for backup
servers, cluster filesystem servers, NetApp filers, etc.). While in
most cases we can assume VIPs are broadcast out all interfaces on the
box we still need to have the underlying model as the more complex
many to many relationship implemented here.
"""
__tablename__ = _TN
_label_check = re.compile('^[a-z0-9]{0,16}$')
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
assignment_type = Column(AqStr(32), nullable=False)
interface_id = Column(ForeignKey(Interface.id, ondelete='CASCADE'),
nullable=False)
label = Column(EmptyStr(16), nullable=False)
ip = Column(IP, nullable=False)
network_id = Column(ForeignKey(Network.id), nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
interface = relation(Interface, innerjoin=True,
backref=backref('assignments', order_by=[label],
cascade='all, delete-orphan'))
# Setting viewonly is very important here as we do not want the removal of
# an AddressAssignment record to change the linked DNS record(s)
# Can't use backref or back_populates due to the different mappers
dns_records = relation(dns_fqdn_mapper,
primaryjoin=and_(network_id == dns_fqdn_mapper.c.network_id,
ip == dns_fqdn_mapper.c.ip),
foreign_keys=[dns_fqdn_mapper.c.ip,
dns_fqdn_mapper.c.network_id],
viewonly=True)
fqdns = association_proxy('dns_records', 'fqdn')
|
network = relation(Network, innerjoin=True,
backref=backref('assignments', passive_deletes=True,
order_by=[ip]))
__table_args__ = (UniqueConstraint(interface_id, ip),
UniqueConstraint(interface_id, label),
Index("%s_network_ip_idx" % _ABV, network_id, ip))
__mapper_args__ = {'polymorphic_on': assignment_type,
|
'polymorphic_identity': 'standard'}
@property
def logical_name(self):
"""
Compute an OS-agnostic name for this interface/address combo.
BIG FAT WARNING: do _NOT_ assume that this name really exist on the
host!
There are external systems like DSDB that can not handle having multiple
addresses on the same interface. Because of that this function generates
an unique name for every interface/address tuple.
"""
# Use the Linux naming convention because people are familiar with that
# and it is easy to parse if needed
name = self.interface.name
if self.label:
name += ":%s" % self.label
return name
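# Illustration: an interface named "eth0" with label "mgmt" yields "eth0:mgmt";
# with an empty label the logical name is simply "eth0".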
@property
def is_shared(self):
return False
def __init__(self, label=None, network=None, **kwargs):
if not label:
label = ""
elif not self._label_check.match(label): # pragma: no cover
raise ValueError("Illegal address label '%s'." % label)
# Right now network_id is nullable due to how refresh_network works, so
# verify the network here
if not network: # pragma: no cover
raise InternalError("AddressAssignment needs a network")
super(AddressAssignment, self).__init__(label=label, network=network,
**kwargs)
def __repr__(self):
return "<Address %s on %s/%s>" % (self.ip,
self.interface.hardware_entity.label,
self.logical_name)
# Assigned to external classes here to avoid circular dependencies.
Interface.addresses = association_proxy('assignments', 'ip')
# Can't use backref or back_populates due to the different mappers
# This relation gives us the two other sides of the triangle mentioned above
# Do NOT consider the DNS environment here - whether the IP is used or not does
# not depend on its visibility in DNS
ARecord.assignments = relation(
AddressAssignment,
primaryjoin=and_(AddressAssignment.network_id == ARecord.network_id,
AddressAssignment.ip == ARecord.ip),
foreign_keys=[AddressAssignment.ip, AddressAssignment.network_id],
viewonly=True)
class SharedAddressAssignment(AddressAssignment):
priority = Column(Integer)
# As priority is an additional col we cannot make it non-null
@validates('priority')
def _validate_priority(self, key, value): # pylint: disable=W0613
if not value:
raise ValueError("Shared addresses require a priority")
return value
@property
def is_shared(self):
return True
__mapper_args__ = {'polymorphic_identity': 'shared'}
|
johnbelamaric/themis | vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/trivial/trivial_client.py | Python | apache-2.0 | 1,218 | 0 |
from __future__ import print_function
import time
import argparse
import grpc
from jaeger_client import Config
from grpc_opentracing import open_tracing_client_interceptor
from grpc_opentracing.grpcext import intercept_channel
import command_line_pb2
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
|
'param': 1,
},
'logging': True,
},
service_name='trivial-client')
tracer = config.initialize_tracer()
tracer_interceptor = open_tracing_client_interceptor(
tracer, log_payloads=args.log_payloads)
channel = grpc.insecure_channel('localhost:50051')
channel = intercept_channel(channel, tracer_interceptor)
stub = command_line_pb2.CommandLineStub(channel)
response = stub.Echo(command_line_pb2.CommandRequest(text='Hello, hello'))
|
print(response.text)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
run()
|
knuu/competitive-programming | yukicoder/yuki279.py | Python | mit | 91 | 0 |
from collections import Counter
|
c = Counter(input())
print(min(c['t'], c['r'], c['e']//2))
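# Presumably this counts how many copies of the word "tree" can be formed: each
# copy needs one 't', one 'r' and two 'e's, hence the c['e']//2 term.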
| |
AndroidOpenDevelopment/android_external_chromium_org | build/linux/install-arm-sysroot.py | Python | bsd-3-clause | 2,718 | 0.008462 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to install ARM root image for cross building of ARM chrome on linux.
This script can be run manually but is more often run as part of gclient
hooks. When run from hooks this script should be a no-op on non-linux
platforms.
The sysroot image could be constructed from scratch based on the current
state or precise/arm but for consistency we currently use a pre-built root
image which was originally designed for building trusted NaCl code. The image
|
will normally need to be rebuilt every time chrome's build dependencies are
changed.
Steps to rebuild the arm sysroot image:
- cd $SRC/native_client
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
|
UpdatePackageLists
- ./tools/trusted_cross_toolchains/trusted-toolchain-creator.armel.precise.sh \
BuildJail $SRC/out/arm-sysroot.tar.gz
- gsutil cp -a public-read $SRC/out/arm-sysroot.tar.gz \
nativeclient-archive2/toolchain/$NACL_REV/sysroot-arm-trusted.tgz
"""
import os
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://storage.googleapis.com'
URL_PATH = 'nativeclient-archive2/toolchain'
REVISION = 13035
TARBALL = 'sysroot-arm-trusted.tgz'
def main(args):
if '--linux-only' in args:
# This argument is passed when run from the gclient hooks.
# In this case we return early on non-linux platforms
# or if GYP_DEFINES doesn't include target_arch=arm
if not sys.platform.startswith('linux'):
return 0
if "target_arch=arm" not in os.environ.get('GYP_DEFINES', ''):
return 0
src_root = os.path.dirname(os.path.dirname(SCRIPT_DIR))
sysroot = os.path.join(src_root, 'arm-sysroot')
url = "%s/%s/%s/%s" % (URL_PREFIX, URL_PATH, REVISION, TARBALL)
stamp = os.path.join(sysroot, ".stamp")
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
print "ARM root image already up-to-date: %s" % sysroot
return 0
print "Installing ARM root image: %s" % sysroot
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, TARBALL)
curl = ['curl', '--fail', '-L', url, '-o', tarball]
if os.isatty(sys.stdout.fileno()):
curl.append('--progress')
else:
curl.append('--silent')
subprocess.check_call(curl)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
samihuc/PolyglotDB | polyglotdb/corpus/__init__.py | Python | mit | 328 | 0 |
from .context import CorpusContext
from .audio import AudioContext
from .importable import ImportContext
from .lexical import LexicalContext
from .pause import PauseContext
|
from .utterance import UtteranceContext
from .structured import StructuredContext
from .syllabic import SyllabicContext
|
from .spoken import SpokenContext
|
iakov/RHVoice | site_scons/site_tools/newlines.py | Python | gpl-3.0 | 816 | 0.035539 |
from SCons.Script import *
def exists(env):
return (env["PLATFORM"]=="win32")
def ConvertNewlines(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
def ConvertNewlinesB(target,source,env):
for t,s in zip(target,source):
f_in=open(str(s),"rb")
f_out=open(str(t),"wb")
f_out.write("\xef\xbb\xbf")
f_out.write(f_in.read().replace("\n","\r\n"))
f_out.close()
f_in.close()
return None
|
def generate(env):
env["BUILDERS"]["ConvertNewlines"]=Builder(action=ConvertNewlines,suffix=".txt")
|
env["BUILDERS"]["ConvertNewlinesB"]=Builder(action=ConvertNewlinesB,suffix=".txt")
|
City-of-Helsinki/smbackend | services/views.py | Python | agpl-3.0 | 1,229 | 0 |
import requests
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
|
from django.views.decorators.http import require_http_methods
@csrf_exempt
@require_http_methods(["POST"])
def post_service_request(request):
payload = request.POST.copy()
outgoing = payload.dict()
if outgoing.get("internal_feedback", False):
if "internal_feedback" in outgoing:
del outgoing["internal_feedback"]
api_key = settings.OPEN311["INTERNAL_FEEDBACK_API_KEY"]
else:
api_key = settings.OPEN311["API_KEY"]
|
outgoing["api_key"] = api_key
url = settings.OPEN311["URL_BASE"]
session = requests.Session()
# Modify parameters for request in case of City of Turku
if "smbackend_turku" in settings.INSTALLED_APPS:
outgoing.pop("service_request_type")
outgoing.pop("can_be_published")
outgoing["address_string"] = "null"
outgoing["service_code"] = settings.OPEN311["SERVICE_CODE"]
r = session.post(url, data=outgoing)
if r.status_code != 200:
return HttpResponseBadRequest()
return HttpResponse(r.content, content_type="application/json")
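# Behaviour sketch: the view relays the POSTed form fields to the configured
# Open311 endpoint and maps any non-200 upstream status to HttpResponseBadRequest,
# e.g. via Django's test client (hypothetical URL):
# client.post('/services/post_service_request/', {'description': 'Broken bench'})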
|
dimagi/commcare-hq | corehq/apps/ota/tests/test_claim.py | Python | bsd-3-clause | 4,929 | 0.00142 |
from uuid import uuid4
from django.test import TestCase
from casexml.apps.case.cleanup import claim_case, get_first_claim
|
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.util import post_case_blocks
from corehq.apps.case_search.models import CLAIM_CASE_TYPE
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.ota.utils import get_restore_user
|
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.models import CommCareCase
DOMAIN = 'test_domain'
USERNAME = '[email protected]'
PASSWORD = 'hemato-encephalic'
# https://en.wikipedia.org/wiki/Lina_Stern
def index_to_dict(instance):
keys = ('identifier', 'referenced_type', 'referenced_id', 'relationship')
return {k: str(getattr(instance, k)) for k in keys}
class CaseClaimTests(TestCase):
def setUp(self):
super(CaseClaimTests, self).setUp()
self.domain = create_domain(DOMAIN)
self.user = CommCareUser.create(DOMAIN, USERNAME, PASSWORD, None, None)
self.restore_user = get_restore_user(DOMAIN, self.user, None)
self.host_case_id = uuid4().hex
self.host_case_name = 'Dmitri Bashkirov'
self.host_case_type = 'person'
self.create_case()
def tearDown(self):
self.user.delete(self.domain.name, deleted_by=None)
self.domain.delete()
super(CaseClaimTests, self).tearDown()
def create_case(self):
case_block = CaseBlock.deprecated_init(
create=True,
case_id=self.host_case_id,
case_name=self.host_case_name,
case_type=self.host_case_type,
owner_id='in_soviet_russia_the_case_owns_you',
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
def assert_claim(self, claim=None, claim_id=None):
if claim is None:
claim_ids = CommCareCase.objects.get_case_ids_in_domain(DOMAIN, CLAIM_CASE_TYPE)
self.assertEqual(len(claim_ids), 1)
claim = CommCareCase.objects.get_case(claim_ids[0], DOMAIN)
if claim_id:
self.assertEqual(claim.case_id, claim_id)
self.assertEqual(claim.name, self.host_case_name)
self.assertEqual(claim.owner_id, self.user.user_id)
self.assertEqual([index_to_dict(i) for i in claim.indices], [{
'identifier': 'host',
'referenced_type': 'person',
'referenced_id': self.host_case_id,
'relationship': 'extension',
}])
def test_claim_case(self):
"""
claim_case should create an extension case
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self.assert_claim(claim_id=claim_id)
def test_claim_case_id_only(self):
"""
claim_case should look up host case details if only ID is passed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id)
self.assert_claim(claim_id=claim_id)
def test_first_claim_one(self):
"""
get_first_claim should return one claim
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assert_claim(claim, claim_id)
def test_first_claim_none(self):
"""
get_first_claim should return None if not found
"""
claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(claim)
def test_closed_claim(self):
"""
get_first_claim should return None if claim case is closed
"""
claim_id = claim_case(DOMAIN, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
self._close_case(claim_id)
first_claim = get_first_claim(DOMAIN, self.user.user_id, self.host_case_id)
self.assertIsNone(first_claim)
def test_claim_case_other_domain(self):
malicious_domain = 'malicious_domain'
domain_obj = create_domain(malicious_domain)
self.addCleanup(domain_obj.delete)
claim_id = claim_case(malicious_domain, self.restore_user, self.host_case_id,
host_type=self.host_case_type, host_name=self.host_case_name)
with self.assertRaises(CaseNotFound):
CommCareCase.objects.get_case(claim_id, malicious_domain)
def _close_case(self, case_id):
case_block = CaseBlock.deprecated_init(
create=False,
case_id=case_id,
close=True
).as_xml()
post_case_blocks([case_block], {'domain': DOMAIN})
|
hvdwolf/pyExifToolGUI | scripts/petgfunctions.py | Python | gpl-3.0 | 115,680 | 0.00593 |
# -*- coding: utf-8 -*-
# petgfunctions.py - This python "helper" script holds a lot of functions
# Copyright (c) 2012-2014 Harry van der Wolf. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence as published
# by the Free Software Foundation, either version 2 of the Licence, or
# version 3 of the Licence, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public Licence for more details.
# This file is part of pyexiftoolgui.
# pyexiftoolgui is a pySide script program that reads and writes
# gps tags from/to files. It can use a "reference" image to write the
# gps tags to a multiple set of files that are taken at the same
# location.
# pyexiftoolgui is a graphical frontend for the open source
# command line tool exiftool by Phil Harvey, but it's not
# a complete exiftool gui: not at all.
import os, sys, platform, shlex, subprocess, time, re, string, datetime, math
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
import programinfo
import programstrings
import petgfilehandling
from ui_create_args import Ui_Dialog_create_args
from ui_export_metadata import Ui_Dialog_export_metadata
from ui_remove_metadata import Ui_Dialog_remove_metadata
from ui_modifydatetime import Ui_DateTimeDialog
from ui_syncdatetime import Ui_SyncDateTimeTagsDialog
#------------------------------------------------------------------------
# All kind of functions
###################################################################################################################
# Start of Startup checks
###################################################################################################################
def remove_workspace( self ):
# Remove our temporary workspace
# try:
# fls = os.remove(self.tmpworkdir + "/*")
# except:
# print("No files in " + self.tmpworkdir + " or no folder at all")
# try:
# fldr = os.rmdir(self.tmpworkdir)
# except:
# print("Couldn't remove folder")
print(self.tmpworkdir)
if self.OSplatform == "Windows":
self.tmpworkdir = self.tmpworkdir.replace("/", "\\")
command_line = "rmdir /S /Q " + self.tmpworkdir
else:
command_line = "rm -rf " + self.tmpworkdir
p = os.system(command_line)
#args = shlex.split(command_line)
#print args
#p = subprocess.call(args, shell=True)
if p == 0:
print(("Removed " + self.tmpworkdir + " and it contents."))
else:
print(("Error removing " + self.tmpworkdir + " and it contents."))
def is_executable(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def check_for_program(program):
exists = False
for path in os.environ["PATH"].split(os.pathsep):
#program = program.replace("\"", "")
path_plus_program = os.path.join(path, program)
#print("path_plus_program " + str(path_plus_program))
if is_executable(path_plus_program):
#print "program " + program + " found"
exists = True
return exists
# End of function check_for_program and is_executable (mini sub for check_for_program)
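# Illustrative usage sketch (not part of the original file): these helpers only report
# whether an executable of the given name exists somewhere on the PATH, e.g.
#   if not check_for_program("exiftool"):
#       print("exiftool was not found on the PATH")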
def exiftool_version_level_text(self):
if float(self.exiftoolversion) < 9.07:
self.statusbar.showMessage("I will disable the GPano options as exiftool >=9.07 is required. You have " + str(self.exiftoolversion))
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . You need >=9.07 to write to images.\n"
exiftoolleveltext += "Exiftool and therefore pyExifToolGUI can read the tags. See the View Data tab."
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
elif float(self.exiftoolversion) < 9.09:
#else:
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . Tags marked with * are obligatory. "
exiftoolleveltext += "\"Pose Heading Degrees\" is necessary to make it also function in Google Maps.\n Tags marked with *** are only writable with exiftool >= 9.09"
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
self.statusbar.showMessage("Your exiftoolversion is " + str(self.exiftoolversion))
else:
exiftoolleveltext = "Your exiftool version is " + str(self.exiftoolversion) + " . Tags marked with * are obligatory. "
exiftoolleveltext += "\"Pose Heading Degrees\" is necessary to make it also function in Google Maps. Tags marked with *** are only writable with exiftool >= 9.09"
self.lbl_exiftool_leveltext.setText(exiftoolleveltext)
self.statusbar.showMessage("Your exiftoolversion is " + str(self.exiftoolversion))
#print "exiftoolversion : " + self.exiftoolversion
def find_on_path(tool):
""" Find the first occurrence of a tool on the path."""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
path = os.path.join(path, tool)
if os.path.exists(path):
return path
def tool_check( self ):
# We need this startup check as long as we don't have a package
# that deals with dependencies
if self.alternate_exiftool == True:
self.exiftoolprog = self.exiftooloption.text()
else:
self.exiftoolprog = "exiftool"
if (self.OSplatform in ("Windows", "win32")):
self.exiftoolprog = find_on_path("exiftool.exe")
elif self.OSplatform == "Darwin":
self.exiftoolprog = find_on_path("exiftool")
#else:
# self.exiftoolprog = find_on_path("exiftool")
# Check for exiftool, based on the setting or no setting above
if (self.OSplatform in ("Windows", "win32")):
if ("exiftool.exe" in self.exif
|
toolprog) or ("Exiftool.exe" in self.exiftoolprog) or not self.exiftoolprog:
#self.exiftool_dir = os.path.join(self.realfile_dir, "exiftool", "exiftool.exe")
#self.exiftoolprog = self.exiftool_dir + "\exiftool.exe"
if not os.path.isfile(self.exiftoolprog):
configure_message = "exiftool is missing or incorrectly configured in Preferences!\n"
configure_message += "This tool is an absolute must have!\nPlease set the correct location or install exiftool first.\n\n"
configure_message += "If your exiftool is named \"exiftool(-k).exe\", rename it to \"exiftool.exe\""
ret = QMessageBox.critical(self, "exiftool is missing or incorrectly configured", configure_message)
result = self.select_exiftool()
#print str(result)
if result == "":
ret = QMessageBox.critical(self, "Canceled exiftool selection", "You canceled the exiftool selection.\nThe program will quit!\nFirst install exiftool or restart this program and select the correct exiftool.\nI will now (try to) open the exiftool website.")
try:
webbrowser.open("http://www.sno.phy.queensu.ca/~phil/exiftool/")
finally:
sys.exit()
else:
self.exiftoolprog = result
#Check exiftool version
args = '"' + self.exiftoolprog + '" -ver'
self.exiftoolversion = subprocess.check_output(args, shell=True)
# now check for the supported languages
args = '"' + self.exiftoolprog + '" -lang'
self.exiftoollanguages = subprocess.check_output(args, shell=True)
else:
if not check_for_program(self.exiftoolprog):
configure_message = "exiftool is missing or incorrectly configured in Preferences!\n"
configure_message += "This tool is an absolute must have!\nPlease set the correct location or install exiftool first."
ret = QMessageBox.critical(self, "exiftool is missing or incorrectly configured", configure_message)
result = self.select_exiftool()
#print
|
Faraaz54/python_training_problems
|
basic_python/write_file.py
|
Python
|
mit
| 123
| 0.00813
|
text = 'this is a sample file\nnew line'
savefile = open('newtext', 'w')
savefile.write(text)
savefile.close()
|
dersphere/plugin.video.moviemazer
|
default.py
|
Python
|
gpl-2.0
| 19,585
| 0.00337
|
# Moviemazer XBMC Addon
# written by Tristan Fischer (sphere)
#
# If you have suggestions or problems: write me.
#
# Mail: [email protected]
#
# Special Thanks to the website www.moviemaze.de
# Import Python stuff
import urllib
import urllib2
import re
import os
import sys
import time
from shutil import copyfile
# Import XBMC Stuff
import xbmcplugin
import xbmcgui
import xbmcaddon
# Creating some default variables and objects
Addon = xbmcaddon.Addon('plugin.video.moviemazer')
MAIN_URL = 'http://www.moviemaze.de'
ADDON_ID = Addon.getAddonInfo('id')
CACHE_DIR = 'special://profile/addon_data/%s/cache/' % ADDON_ID
IMAGE_DIR = 'special://home/addons/%s/resources/images/' % ADDON_ID
GetSetting = Addon.getSetting
SetSetting = Addon.setSetting
Language = Addon.getLocalizedString
Handle = int(sys.argv[1])
ProgressDialog = xbmcgui.DialogProgress()
# Functions for getting a list of dicts containing movie headers like ID and title
def get_top_ten_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtopten = re.compile('<tr><td valign="top" align="right"><b>([0-9]+)</b></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for rank, movieid, urlend, title, trailerkind in matchtopten:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '%s. ' % rank,
'date': ''}
returnmovies.append(movie)
return returnmovies
def get_recent_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtrecentupdates = re.compile('<td(?: valign="top" style="text-align:left;"><b style="white-space: nowrap;">([^<]*)</b)?></td><td width=100% style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a> <span class="small_grey">\(([^<]+)\)</span></td></tr>').findall(link)
for date, movieid, urlend, title, trailerkind in matchtrecentupdates:
if date != '':
lastdate = date
else:
date = lastdate
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4r', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = ' (%s%02d.%s)' % (datearray[0], months_de_short.index(datearray[1]), '2011') # Fixme: dirty hack :(
except:
date = ''
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': date}
returnmovies.append(movie)
return returnmovies
def get_current_movies():
returnmovies = []
fullurl = '%s/media/trailer/' % MAIN_URL
link = get_cached_url(fullurl, 'mainpage.cache', GetSetting('cache_movies_list'))
matchtacttrailers = re.compile('<tr><td(?: valign="top"><b>[A-Z0-9]</b)?></td><td style="text-align:left;"><a href="/media/trailer/([0-9]+),(?:[0-9]+?,)?([^",]+?)">([^<]+)</a></td></tr>').findall(link)
for movieid, urlend, title in matchtacttrailers:
movie = {'movieid': movieid,
'title': title,
'urlend': urlend,
'rank': '',
'date': ''}
returnmovies.append(movie)
return returnmovies
# Function to get a dict of detailed movie information like coverURL, plot and genres
def get_movie_infos(movieid, urlend='movie.html'):
returnmovie = {'movieid': movieid,
'title': '',
'otitle': '',
'coverurl': '',
'plot': '',
'genres': '',
'date': ''}
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
titlematch = re.compile('<h1>(.+?)</h1>.*<h2>\((.+?)\)</h2>', re.DOTALL).findall(link)
for title, otitle in titlematch:
returnmovie.update({'title': title, 'otitle': otitle})
covermatch = re.compile('src="([^"]+?)" width="150"').findall(link)
for coverurl in covermatch:
if coverurl != '/filme/grafiken/kein_poster.jpg':
returnmovie.update({'coverurl': MAIN_URL + coverurl})
plotmatch = re.compile('WERDEN! -->(.+?)</span>').findall(link)
for plot in plotmatch:
plot = re.sub('<[^<]*?/?>', '', plot)
returnmovie.update({'plot': plot})
releasedatematch = re.compile('Dt. Start:</b> ([0-9]+.+?)<img').findall(link)
for releasedateugly in releasedatematch:
datearray = releasedateugly.split(' ')
months_de_long = ['', 'Januar', 'Februar', 'M\xe4rz', 'April', 'Mai', 'Juni', 'Juli', 'August', 'September', 'Oktober', 'November', 'Dezember']
date = ' (%s%02d.%s)' % (datearray[0], months_de_long.index(datearray[1]), '2011') # Fixme: dirty hack :(
returnmovie.update({'date': date})
genresmatch = re.compile('<b style="font-weight:bold;">Genre:</b> (.+?)<br />', re.DOTALL).findall(link)
for allgenres in genresmatch:
returnmovie.update({'genres': allgenres})
return returnmovie
# Function to get a list of dicts which contains trailer- URL, resolution, releasedate
def get_movie_trailers(movieid, urlend='movie.html'):
returntrailers = []
fullurl = '%s/media/trailer/%s,15,%s' % (MAIN_URL,
movieid,
urlend)
cachefile = 'id%s.cache' % movieid
link = get_cached_url(fullurl, cachefile, GetSetting('cache_movie_info'))
matchtrailerblock = re.compile('<table border=0 cellpadding=0 cellspacing=0 align=center width=100%><tr><td class="standard">.+?<b style="font-weight:bold;">(.+?)</b><br />\(([0-9:]+) Minuten\)(.+?</td></tr></table><br /></td></tr></table><br />)', re.DOTALL).findall(link)
for trailername, duration, trailerblock in matchtrailerblock:
matchlanguageblock = re.compile('alt="Sprache: (..)">(.+?)>([^<]+)</td></tr></table></td>', re.DOTALL).findall(trailerblock)
for language, languageblock, date in matchlanguageblock:
datearray = date.split(' ')
months_de_short = ['', 'Jan', 'Feb', 'M\xe4rz', 'Apr', 'Mai', 'Juni', 'Juli', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
try:
date = datearray[0] + str(months_de_short.index(datearray[1])).zfill(2) + '.2011' # fixme: this could be made better, no idea how :)
except:
date = ''
            matchtrailer = re.compile('generateDownloadLink\("([^"]+_([0-9]+)\.(?:mov|mp4)\?down=1)"\)').findall(languageblock)
|
for trailerurl, resolution in matchtrailer:
trailer = {'trailername': trailername,
'duration': duration,
'language': language,
'resolution': resolution,
'date': date,
'trailerurl': MAIN_URL + trailerurl}
returntrailers.append(trailer)
return returntrailers
# Functions to get the informations for xbmc
def show_categories():
add_dir(Language(30003), 3, os.path.join(IMAGE_DIR, 'database.png')) # Current
add_dir(Language(30001), 1, os.path.join(IMAGE_DIR, 'ranking.png')) # TopTen
add_dir(Language(30002), 2, os.path.join(IMAGE_DIR, 'schedule.png')) # Recent
end_dir()
def show_top_ten_movies():
toptenmovies = get_top_ten_movies()
show_movies(toptenmovies)
end_dir()
def show_recent_movies():
recentmovies = get_recent_movies()
show_movies(recentmovies)
end_dir()
def show_current_movies():
currentmovies = get_current_movies()
show_movies(currentmovies)
end_dir()
# Functions to show the informations in xbmc
def sho
|
Dudy/newsletterman
|
src/EmailHandlerV1.py
|
Python
|
apache-2.0
| 1,058
| 0.014178
|
#!/usr/bin/env python
import webapp2
import logging
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.ext import ndb
from MailMessage import MailMessage
# the email domain of this app is @pomis-newsletterman.appspotmail.com
class EmailHandlerV1(InboundMailHandler):
def receive(self, mail_message):
logging.info(mail_message.to_mime_message())
# store message
service_id = mail_message.to.split('@')[0]
if '<' in service_id:
service_id = service_id.split('<')[1]
mime_message = str(mail_message.to_mime_message())
service_key = ndb.Key(MailMessage, service_id)
        new_id = ndb.Model.allocate_ids(size = 1, parent = service_key)[0]
mail_message_key = ndb.Key(MailMessage, new_id, parent = service_key)
persistent_mail_message = MailMessage(parent = mail_message_key, mime_message = mime_message)
persistent_mail_message.put()
app = webapp2.WSGIApplication([EmailHandlerV1.mapping()], debug=True)
|
DylanMcCall/rhythmbox-songinfo-context-menu
|
plugins/artsearch/musicbrainz.py
|
Python
|
gpl-2.0
| 4,454
| 0.014594
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Jonathan Matthew <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import xml.dom.minidom as dom
import urllib.parse
import rb
from gi.repository import RB
# musicbrainz URLs
MUSICBRAINZ_RELEASE_URL = "http://musicbrainz.org/ws/2/release/%s?inc=artists"
MUSICBRAINZ_RELEASE_PREFIX = "http://musicbrainz.org/release/"
MUSICBRAINZ_RELEASE_SUFFIX = ".html"
MUSICBRAINZ_SEARCH_QUERY = "artist:\"%s\" AND release:\"%s\""
MUSICBRAINZ_SEARCH_URL = "http://musicbrainz.org/ws/2/release/?query=%s&limit=1"
# musicbrainz IDs
MUSICBRAINZ_VARIOUS_ARTISTS = "89ad4ac3-39f7-470e-963a-56509c546377"
# Amazon URL bits
AMAZON_IMAGE_URL = "http://images.amazon.com/images/P/%s.01.LZZZZZZZ.jpg"
class MusicBrainzSearch(object):
def get_release_cb (self, data, args):
(key, store, callback, cbargs) = args
if data is None:
print("musicbrainz release request returned nothing")
callback(*cbargs)
return
try:
parsed = dom.parseString(data)
storekey = RB.ExtDBKey.create_storage('album', key.get_field('album'))
# check that there's an artist that isn't 'various artists'
artist_tags = parsed.getElementsByTagName('artist')
if len(artist_tags) > 0:
artist_id = artist_tags[0].attributes['id'].firstChild.data
if artist_id != MUSICBRAINZ_VARIOUS_ARTISTS:
# add the artist name (as album-artist) to the storage key
nametags = artist_tags[0].getElementsByTagName('name')
if len(nametags) > 0:
artistname = nametags[0].firstChild.data
print("got musicbrainz artist name %s" % artistname)
storekey.add_field('artist', artistname)
# look for an ASIN tag
asin_tags = parsed.getElementsByTagName('asin')
if len(asin_tags) > 0:
asin = asin_tags[0].firstChild.data
print("got ASIN %s" % asin)
image_url = AMAZON_IMAGE_URL % asin
store.store_uri(storekey, RB.ExtDBSourceType.SEARCH, image_url)
else:
print("no ASIN for this release")
callback(*cbargs)
except Exception as e:
print("exception parsing musicbrainz response: %s" % e)
callback(*cbargs)
def try_search_artist_album (self, key, store, callback, *args):
album = key.get_field("album")
artist = key.get_field("artist")
if not album or not artist:
print("artist or album information missing")
callback(*args)
return
query = MUSICBRAINZ_SEARCH_QUERY % (artist.lower(), album.lower())
url = MUSICBRAINZ_SEARCH_URL % (urllib.parse.quote(query, safe=':'),)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
def search(self, key, last_time, store, callback, *args):
key = key.copy() # ugh
album_id = key.get_info("musicbrainz-albumid")
if album_id is None:
print("no musicbrainz release ID for this track")
self.try_search_artist_album(key, store, callback, args)
return
if album_id.startswith(MUSICBRAINZ_RELEASE_PREFIX):
album_id = album_id[len(MUSICBRAINZ_RELEASE_PREFIX):]
if album_id.endswith(MUSICBRAINZ_RELEASE_SUFFIX):
album_id = album_id[:-len(MUSICBRAINZ_RELEASE_SUFFIX)]
print("stripped release ID: %s" % album_id)
url = MUSICBRAINZ_RELEASE_URL % (album_id)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/instances/11_2_wikiflow_1sh_1s_annot/sessioncompute_2/SessionCompute_2.py
|
Python
|
gpl-3.0
| 2,784
| 0.001796
|
#!/usr/bin/env python
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_SessionCompute_2
import pprint
# connector and config
client = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
config = ConfigDB_SessionCompute_2
# according to config
dataList = client.getData() # return an array of docs (like a csv reader)
output = []
ONE_HOUR_IN_SECONDS = 3600
if(dataList):
for i in dataList:
contributor_username = i[config.COLUMN]
current_user = contributor_username
start_time = None
end_time = None
duration = None
last_start_timestamp = None
count = 1
if contributor_username:
print "\n\n"
print contributor_username.encode('utf-8')
while True:
doc = i['data'].next()
if doc is None:
break;
print doc["timestamp"]
if start_time is None:
start_time = float(doc["timestamp"])
if end_time is None:
                    end_time = start_time + ONE_HOUR_IN_SECONDS
else:
if float(doc["timestamp"]) <= end_time:
end_time = float(doc["timestamp"]) + ONE_HOUR_IN_SECONDS
count += 1
else:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
start_time = float(doc["timestamp"])
end_time = start_time + ONE_HOUR_IN_SECONDS
count = 1
if start_time:
new_doc = {}
new_doc["start time"] = start_time
new_doc["end time"] = end_time
new_doc["duration"] = (end_time - start_time)
new_doc["edition_counts"] = count
new_doc["contributor_username"] = contributor_username
output.append(new_doc)
pprint.pprint(output)
clientOutput = DataStoreClient("mongodb", ConfigDB_SessionCompute_2)
clientOutput.saveData(output)
# import datetime
# print(
# datetime.datetime.fromtimestamp(
# int("1176585742")
# ).strftime('%Y-%m-%d %H:%M:%S')
# )
# {
# start time:
# end time:
# duration:
# user:
# }
# import time
# timestamp2 = time.mktime(d.timetuple()) # DO NOT USE IT WITH UTC DATE
# datetime.fromtimestamp(timestamp2)
# datetime.datetime(2011, 1, 1, 0, 0)
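# Worked example (illustrative, not from the original script): for one contributor with
# edits at t = 0, 1800 and 7200 seconds, the first two edits share a session because
# 1800 <= 0 + 3600; the session end is then pushed to 1800 + 3600 = 5400, so the edit at
# 7200 > 5400 closes that session (duration 5400, edition_counts 2) and opens a new one.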
|
RyanHope/AutobahnPython
|
examples/asyncio/wamp/rpc/decorators/frontend.py
|
Python
|
mit
| 2,365
| 0
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
from os import environ
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component calling the different backend procedures.
"""
@asyncio.coroutine
def onJoin(self, details):
procs = [u'com.mathservice.add2',
u'com.mathservice.mul2',
u'com.mathservice.div2']
try:
for proc in procs:
res = yield from self.call(proc, 2, 3)
print("{}: {}".format(proc, res))
except Exception as e:
print("Something went wrong: {}".format(e))
self.leave()
def onDisconnect(self):
asyncio.get_event_loop().stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug=False, # optional; log even more details
)
runner.run(Component)
|
timpalpant/KaggleTSTextClassification
|
scripts/plot_feature_distributions.py
|
Python
|
gpl-3.0
| 2,179
| 0.005048
|
#!/usr/bin/env python
'''
Plot distribution of each feature,
conditioned on its bfeature type
'''
import argparse
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from common import *
from information import utils
from scipy.stats import itemfreq
nbins = 100
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('features', type=load_npz,
help='Training data features (npz)')
parser.add_argument('output',
help='Output file with plots (pdf)')
return parser
if __name__ == "__main__":
args = opts().parse_args()
pdf = PdfPages(args.output)
dfs = args.features['ifeatures']
cfs = args.features['ffeatures']
print "Plotting float features"
bfs = args.features['bfeatures']
u = utils.unique_rows(bfs)
indices = [np.all(bfs==ui, axis=-1) for ui in u]
    for j, f in enumerate(cfs.T):
print "...ffeature %d" % j
        fig = plt.figure()
h = np.zeros(nbins)
not_nan = f[np.logical_not(np.isnan(f))]
f_min = not_nan.min()
f_max = not_nan.max()
x = np.linspace(f_min, f_max, nbins)
dx = (f_max - f_min) / nbins
for idx in indices:
h_new, bins = np.histogram(f[idx], range=(f_min, f_max), bins=nbins)
plt.bar(x, h_new, bottom=h, width=dx)
h += h_new
plt.xlim(f_min, f_max)
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('FFeature %d. # NaN = %d' % (j, np.sum(np.isnan(f))))
pdf.savefig(fig)
plt.close()
print "Plotting integer features"
for j, x in enumerate(dfs.T):
print "...dfeature %d" % j
freq = itemfreq(x)
fig = plt.figure()
xu = np.sort(np.unique(x))
h = np.zeros_like(xu)
for idx in indices:
f = itemfreq(x[idx])
h_new = np.zeros_like(h)
h_new[f[:,0]] = f[:,1]
plt.bar(xu, h_new, bottom=h)
h += h_new
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('DFeature %d' % j)
pdf.savefig(fig)
plt.close()
pdf.close()
|
eljost/pysisyphus
|
tests_staging/test_matched_rmsd/test_matched_rmsd.py
|
Python
|
gpl-3.0
| 816
| 0
|
#!/usr/bin/env python3
import os
from pathlib import Path
import numpy as np
from pysisyphus.helpers import geom_from_xyz_file
from pysisyphus.stocastic.align import matched_rmsd
THIS_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def test_matched_rmsd():
geom1 = geom_from_xyz_file(THIS_DIR / "eins.xyz")
# Calling with the identical geometries should return RMSD of 0.
min_rmsd, (geom1_matched, geom2_matched) = matched_rmsd(geom1, geom1)
np.testing.assert_allclose(min_rmsd, 0.0, atol=1e-10)
np.testing.assert_allclose(geom1_matched.coords, geom2_matched.coords)
geom2 = geom_from_xyz_file(THIS_DIR / "zwei.xyz")
min_rmsd, _ = matched_rmsd(geom1, geom2)
np.testing.assert_allclose(min_rmsd, 0.057049, atol=1e-5)
if __name__ == "__main__":
test_matched_rmsd()
|
nansencenter/nansat
|
nansat/tests/test_nansat.py
|
Python
|
gpl-3.0
| 34,081
| 0.00355
|
# ------------------------------------------------------------------------------
# Name: test_nansat.py
# Purpose: Test the Nansat class
#
# Author: Morten Wergeland Hansen, Asuka Yamakawa, Anton Korosov
#
# Created: 18.06.2014
# Last modified:24.08.2017 14:00
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------------------------
from __future__ import unicode_literals, absolute_import
import os
import logging
import unittest
import warnings
import datetime
from mock import patch, PropertyMock, Mock, MagicMock, DEFAULT
import numpy as np
try:
if 'DISPLAY' not in os.environ:
import matplotlib; matplotlib.use('Agg')
    import matplotlib
import matplotlib.pyplot as plt
except ImportError:
MATPLOTLIB_IS_INSTALLED = False
else:
MATPLOTLIB_IS_INSTALLED = True
from nansat import Nansat, Domain, NSR
from nansat.utils import gdal
import nansat.nansat
from nansat.exceptions import NansatGDALError, WrongMapperError, NansatReadError
from nansat.tests.nansat_test_base import NansatTestBase
warnings.simplefilter("always", UserWarning)
class NansatTest(NansatTestBase):
def test_open_gcps(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
self.assertEqual(type(n), Nansat)
self.assertEqual(n.vrt.dataset.GetProjection(), '')
self.assertTrue((n.vrt.dataset.GetGCPProjection().startswith('GEOGCS["WGS 84",')))
self.assertEqual(n.vrt.dataset.RasterCount, 3)
self.assertEqual(n.filename, self.test_file_gcps)
self.assertIsInstance(n.logger, logging.Logger)
self.assertEqual(n.name, os.path.split(self.test_file_gcps)[1])
self.assertEqual(n.path, os.path.split(self.test_file_gcps)[0])
def test_that_only_mappers_with_mapper_in_the_module_name_are_imported(self):
mappers = nansat.nansat._import_mappers()
for mapper in mappers:
self.assertTrue('mapper' in mapper)
def test_get_time_coverage_start_end(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.set_metadata('time_coverage_start', '2016-01-20')
n.set_metadata('time_coverage_end', '2016-01-21')
self.assertEqual(type(n.time_coverage_start), datetime.datetime)
self.assertEqual(type(n.time_coverage_end), datetime.datetime)
def test_from_domain_array(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
n = Nansat.from_domain(d, np.random.randn(500, 500), {'name': 'band1'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n[1].shape, (500, 500))
self.assertEqual(n.filename, '')
self.assertIsInstance(n.logger, logging.Logger)
self.assertEqual(n.name, '')
self.assertEqual(n.path, '')
def test_from_domain_nansat(self):
n1 = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n2 = Nansat.from_domain(n1, n1[1])
self.assertEqual(type(n2), Nansat)
self.assertEqual(len(n2.bands()), 1)
self.assertEqual(type(n2[1]), np.ndarray)
def test_add_band(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_band(arr, {'name': 'band1'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n[1].shape, (500, 500))
def test_add_band_twice(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_band(arr, {'name': 'band1'})
n.add_band(arr, {'name': 'band2'})
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(type(n[2]), np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n.get_metadata('name', 2), 'band2')
self.assertEqual(n[1].shape, (500, 500))
self.assertEqual(n[2].shape, (500, 500))
def test_add_bands(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_bands([arr, arr],
[{'name': 'band1'}, {'name': 'band2'}])
self.assertIsInstance(n, Nansat)
self.assertEqual(n.vrt.vrt.vrt, None)
self.assertIsInstance(n[1], np.ndarray)
self.assertIsInstance(n[2], np.ndarray)
self.assertEqual(n.get_metadata('name', 1), 'band1')
self.assertEqual(n.get_metadata('name', 2), 'band2')
def test_add_bands_no_parameter(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n = Nansat.from_domain(d, log_level=40)
n.add_bands([arr, arr])
self.assertEqual(type(n), Nansat)
self.assertEqual(type(n[1]), np.ndarray)
self.assertEqual(type(n[2]), np.ndarray)
def test_add_subvrts_only_to_one_nansat(self):
d = Domain(4326, "-te 25 70 35 72 -ts 500 500")
arr = np.random.randn(500, 500)
n1 = Nansat.from_domain(d, log_level=40)
n2 = Nansat.from_domain(d, log_level=40)
n1.add_band(arr, {'name': 'band1'})
self.assertEqual(type(n1.vrt.band_vrts), dict)
self.assertTrue(len(n1.vrt.band_vrts) > 0)
self.assertEqual(n2.vrt.band_vrts, {})
def test_bands(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
bands = n.bands()
self.assertEqual(type(bands), dict)
self.assertTrue(1 in bands)
self.assertTrue('name' in bands[1])
self.assertEqual(bands[1]['name'], 'L_645')
def test_has_band_if_name_matches(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
hb = n.has_band('L_645')
self.assertTrue(hb)
def test_has_band_if_standard_name_matches(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
hb = n.has_band('surface_upwelling_spectral_radiance_in_air_emerging_from_sea_water')
self.assertTrue(hb)
def test_write_fig_tif(self):
n = Nansat(self.test_file_arctic, mapper=self.default_mapper)
tmpfilename = os.path.join(self.tmp_data_path,
'nansat_write_fig_tif.tif')
n.write_figure(tmpfilename)
nn = Nansat(tmpfilename, mapper=self.default_mapper)
# Asserts that the basic georeference (corners in this case) is still
# present after opening the image
self.assertTrue(np.allclose(n.get_corners(), nn.get_corners()))
def test_resize_by_pixelsize(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(pixelsize=500, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_factor(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(0.5, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_width(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(width=100, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_by_height(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(height=500, resample_alg=1)
self.assertEqual(type(n[1]), np.ndarray)
def test_resize_resize(self):
n = Nansat(self.test_file_gcps, log_level=40, mapper=self.default_mapper)
n.resize(0.1)
n.resize(10)
tmpfilename
|
dnarvaez/sourcestamp
|
setup.py
|
Python
|
apache-2.0
| 1,161
| 0
|
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from setuptools import setup, Extension
classifiers = ["Licen
|
se :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Build Tools"]
setup(name="sourcestamp",
version="0.1",
description="Compute timestamp for a source code tree",
author="Daniel Narvaez",
author_email="[email protected]",
url="http://github.com/dnarva
|
ez/sourcestamp",
classifiers=classifiers,
ext_modules=[Extension("sourcestamp", ["src/sourcestamp.c"])])
|
tseaver/google-cloud-python
|
talent/google/cloud/talent_v4beta1/gapic/tenant_service_client_config.py
|
Python
|
apache-2.0
| 1,782
| 0
|
config = {
"interfaces": {
"google.cloud.talent.v4beta1.TenantService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"CreateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetTenant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateTenant": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteTenant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListTenants": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
},
}
}
}
|
yamstudio/Codeforces
|
200/281A - Word Capitalization.py
|
Python
|
gpl-3.0
| 43
| 0
|
x = raw_input()
print x[0].upper() + x[1:]
|
|
ruuk/script.web.viewer2
|
lib/webviewer/bs4/element.py
|
Python
|
gpl-2.0
| 61,200
| 0.001307
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
    def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
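# Illustrative behaviour of the rewrite above (not part of the original source): only the
# charset portion of the content value is replaced, e.g.
#   ContentMetaAttributeValue('text/html; charset=euc-jp').encode('utf8')
#   returns 'text/html; charset=utf8'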
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_
|
filias/django
|
tests/forms_tests/field_tests/test_uuidfield.py
|
Python
|
bsd-3-clause
| 971
| 0.00103
|
from __future__ import unicode_literals
import uuid
from django.forms import UUIDField, ValidationError
from django.test import SimpleTestCase
class UUIDFieldTest(SimpleTestCase):
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
        self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
    def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
xiaoslzhang/pyyingyu
|
yingyu/yingyu_baidu.py
|
Python
|
apache-2.0
| 1,682
| 0.003567
|
# coding=utf-8
import asyncio
import random
import json
import hashlib
import aiohttp
import async_timeout
import sys
class BaiduTranslate:
lang_auto = 'auto'
lang_zh = 'zh'
lang_en = 'en'
timeout = 20
api_addr = 'http://fanyi-api.baidu.com/api/trans/vip/translate'
def __init__(self, loop=None):
self.appid = '20171009000086968'
self.secret = 'vZ36FjnZ91FoLJwe5NrF'
if loop is None:
            self.use_async = False  # renamed from "async", which is a reserved word on Python 3.7+
self.loop = asyncio.get_event_loop()
else:
            self.use_async = True
self.loop = loop
def translate(self, text, from_lang, to_lang):
        if self.use_async:
return self._request(text, from_lang, to_lang)
else:
return self.loop.run_until_complete(self._request(text, from_lang, to_lang))
async def _request(self, text, from_lang, to_lang):
salt = random.randint(0, 2147483647)
sign = self.appid + text + str(salt) + self.secret
sign = hashlib.md5(sign.encode('utf-8')).hexdigest()
params = {'q': text, 'from': from_lang, 'to': to_lang, 'appid': self.appid, 'salt': salt, 'sign': sign}
async with aiohttp.ClientSession(loop=self.loop) as session:
with async_timeout.timeout(self.timeout, loop=self.loop):
async with session.post(self.api_addr,
data=params) as resp:
body = await resp.read()
res = json.loads(body.decode('utf-8'))
if 'error_code' in res and res['error_code'] != '52000':
raise RuntimeError(res['error_msg'])
return res['trans_result'][0]['dst']
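# Illustrative usage sketch (assumes network access and valid API credentials; not part of
# the original source):
#   translator = BaiduTranslate()
#   print(translator.translate('hello world', BaiduTranslate.lang_en, BaiduTranslate.lang_zh))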
|
realizeapp/realize-core
|
plugins/fitbit/models.py
|
Python
|
agpl-3.0
| 1,974
| 0.007092
|
from core.plugins.lib.proxies import MetricProxy, SourceProxy
from core.plugins.lib.models import PluginDataModel
from core.plugins.lib.fields import Field, ListField, DateTimeField, FloatField, IntegerField
from core.plugins.lib.scope import Scope, ZonePerm, BlockPerm
class BaseFitbitModel(PluginDataModel):
metric_proxy = MetricProxy(name="newsfeed")
source_proxy = SourceProxy(name="fitbit")
date = DateTimeField()
value = FloatField()
class StepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="steps")
class DistanceModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="distance")
class TimeInBedModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="time_in_bed")
class MinutesAsleepModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="minutes_asleep")
class WeightModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="weight")
class SleepEfficiencyModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_efficiency")
class ActivityCaloriesModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="activity_calories")
class SleepStartTimeModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="sleep_start_time")
value = DateTimeField()
class CaloriesInModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="calories_in")
class CaloriesModel(BaseFitbitModel):
    metric_proxy = MetricProxy(name="calories")
class WaterModel(BaseFitbitModel):
metric_proxy = MetricProxy(name="water")
MODEL_DICT = {
"activities/steps": StepModel,
"activities/distance": DistanceModel,
"sleep/timeInBed": TimeInBedModel,
"sleep/minutesAsleep": MinutesAsleepModel,
"body/weight": WeightModel,
"sleep/efficiency": SleepEfficie
|
ncyModel,
"activities/activityCalories": ActivityCaloriesModel,
"sleep/startTime": SleepStartTimeModel,
"foods/log/caloriesIn": CaloriesInModel,
"activities/calories": CaloriesModel,
"foods/log/water": WaterModel
}
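# Illustrative lookup (an assumption about how the mapping is consumed by the plugin):
#   model_cls = MODEL_DICT["activities/steps"]   # -> StepModel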
|
carlgao/lenga
|
images/lenny64-peon/usr/share/python-support/python-mysqldb/MySQLdb/cursors.py
|
Python
|
mit
| 16,782
| 0.003158
|
"""MySQLdb Cursors
This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
import re
insert_values = re.compile(r"\svalues\s*(\(((?<!\\)'.*?\).*(?<!\\)?'|.)+?\))", re.IGNORECASE)
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
class BaseCursor(object):
"""A base for Cursor classes. Useful attributes:
description
A tuple of DB API 7-tuples describing the columns in
the last executed query; see PEP-249 for details.
description_flags
Tuple of column flags for last query, one entry per column
in the result set. Values correspond to those in
MySQLdb.constants.FLAG. See MySQL documentation (C API)
for more information. Non-standard extension.
arraysize
default number of rows fetchmany() will fetch
"""
from _mysql_exceptions import MySQLError, Warning, Error, InterfaceError, \
DatabaseError, DataError, OperationalError, IntegrityError, \
InternalError, ProgrammingError, NotSupportedError
_defer_warnings = False
def __init__(self, connection):
from weakref import proxy
self.connection = proxy(connection)
self.description = None
self.description_flags = None
self.rowcount = -1
self.arraysize = 1
self._executed = None
self.lastrowid = None
self.messages = []
self.errorhandler = connection.errorhandler
self._result = None
self._warnings = 0
self._info = None
self.rownumber = None
def __del__(self):
self.close()
self.errorhandler = None
self._result = None
def close(self):
"""Close the cursor. No further queries will be possible."""
if not self.connection: return
while self.nextset(): pass
self.connection = None
def _check_executed(self):
if not self._executed:
self.errorhandler(self, ProgrammingError, "execute() first")
def _warning_check(self):
from warnings import warn
if self._warnings:
warnings = self._get_db().show_warnings()
if warnings:
# This is done in two loops in case
# Warnings are set to raise exceptions.
for w in warnings:
self.messages.append((self.Warning, w))
for w in warnings:
warn(w[-1], self.Warning, 3)
elif self._info:
self.messages.append((self.Warning, self._info))
warn(self._info, self.Warning, 3)
def nextset(self):
"""Advance to the next result set.
Returns None if there are no more result sets.
"""
if self._executed:
self.fetchall()
del self.messages[:]
db = self._get_db()
nr = db.next_result()
if nr == -1:
return None
self._do_get_result()
self._post_get_result()
self._warning_check()
return 1
def _post_get_result(self): pass
def _do_get_result(self):
db = self._get_db()
self._result = self._get_result()
self.rowcount = db.affected_rows()
self.rownumber = 0
self.description = self._result and self._result.describe() or None
self.description_flags = self._result and self._result.field_flags() or None
self.lastrowid = db.insert_id()
self._warnings = db.warning_count()
self._info = db.info()
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _get_db(self):
if not self.connection:
self.errorhandler(self, ProgrammingError, "cursor closed")
return self.connection
def execute(self, query, args=None):
"""Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any
"""
from types import ListType, TupleType
from sys import exc_info
del self.messages[:]
db = self._get_db()
charset = db.character_set_name()
if isinstance(query, unicode):
query = query.encode(charset)
if args is not None:
query = query % db.literal(args)
try:
r = self._query(query)
except TypeError, m:
if m.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, m.args[0]))
self.errorhandler(self, ProgrammingError, m.args[0])
else:
self.messages.append((TypeError, m))
self.errorhandler(self, TypeError, m)
except:
exc, value, tb = exc_info()
del tb
self.messages.append((exc, value))
self.errorhandler(self, exc, value)
self._executed = query
if not self._defer_warnings: self._warning_check()
return r
def executemany(self, query, args):
"""Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
del self.messages[:]
db = self._get_db()
if not args: return
charset = db.character_set_name()
if isinstance(query, unicode): query = query.encode(charset)
m = insert_values.search(query)
if not m:
r = 0
for a in args:
r = r + self.execute(query, a)
return r
p = m.start(1)
e = m.end(1)
qv = m.group(1)
try:
q = [ qv % db.literal(a) for a in args ]
except TypeError, msg:
if msg.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, msg.args[0]))
self.errorhandler(self, ProgrammingError, msg.args[0])
else:
self.messages.append((TypeError, msg))
self.errorhandler(self, TypeError, msg)
except:
from sys import exc_info
exc, value, tb = exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
        is the parameter above and n is the position of the parameter
(from zero). Once all result
|
dannybrowne86/django-holidays
|
holidays/holidays/admin.py
|
Python
|
mit
| 720
| 0.009722
|
from django.contrib import admin
from holidays.models import (Holiday, StaticHoliday,
NthXDayHoliday, NthXDayAfterHoliday, CustomHoliday)
class HolidayAdmin(admin.ModelAdmin):
pass
class StaticHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayAfterHolidayAdmin(admin.ModelAdmin):
pass
class CustomHolidayAdmin(admin.ModelAdmin):
pass
admin.site.register(Holiday, HolidayAdmin)
admin.site.register(StaticHoliday, StaticHolidayAdmin)
admin.site.register(NthXDayHoliday, NthXDayHolidayAdmin)
admin.site.register(NthXDayAfterHoliday, NthXDayAfterHolidayAdmin)
admin.site.register(CustomHoliday, CustomHolidayAdmin)
|
potatolondon/centaur
|
views.py
|
Python
|
bsd-3-clause
| 4,150
| 0.002892
|
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from django.utils.importlib import import_module
from djangae.core.paginator import EmptyPage, PageNotAnInteger
from djangae.core.paginator import DatastorePaginator as Paginator
from google.appengine.ext import db
from google.appengine.ext.deferred import defer
from .models import Error, Event
import calendar
def get_permission_decorator():
    if getattr(settings, 'CENTAUR_PERMISSION_DECORATOR', None):
module, decorator = settings.CENTAUR_PERMISSION_DECORATOR.rsplit('.', 1)
return getattr(import_module(module), decorator)
return user_passes_test(lambda u: u.is_superuser)
permission_decorator = get_permission_decorator()
def timestamp(datetime):
""" Returns UTC timestamp, this is included in python3 but not 2"""
return calendar.timegm(datetime.timetuple())
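# Illustrative example (not part of the original source): for a naive UTC datetime,
#   timestamp(datetime.datetime(1970, 1, 1)) == 0   # the UTC epoch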
@permission_decorator
def index(request):
errors = Error.objects.all()
# Filter by user email
if request.GET.get('user', None):
errors_pks = [e.error.pk for e in Event.objects.filter(logged_in_user_email=request.GET.get('user'))]
errors = errors.filter(pk__in=errors_pks)
errors = errors.order_by("-last_event")
page = request.GET.get('page', 1)
paginator = Paginator(errors, 20)
try:
errors = paginator.page(page)
except PageNotAnInteger:
errors = paginator.page(1)
except EmptyPage:
errors = paginator.page(paginator.num_pages)
return render(request, "centaur/index.html", {"errors": errors})
@permission_decorator
def error(request, error_id, limit=200):
error = get_object_or_404(Error, pk=error_id)
events = error.events.all().order_by("-created")[:limit]
series = [
timestamp(event.created.replace(minute=0, second=0, microsecond=0))
for event in events
]
page = request.GET.get('page', 1)
paginator = Paginator(events, 1)
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render(request, "centaur/error.html", {
"error": error,
"events": events,
"series": series,
})
CLEANUP_QUEUE = getattr(settings, 'QUEUE_FOR_EVENT_CLEANUP', 'default')
@permission_decorator
def clear_old_events(request):
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
return HttpResponse("OK. Cleaning task deferred.")
EVENT_BATCH_SIZE = 400
ERROR_UPDATE_BATCH_SIZE = 50
def _update_error_count(error_id, events_removed):
@db.transactional(xg=True)
def txn():
_error = Error.objects.get(pk=error_id)
_error.event_count -= events_removed
_error.save()
txn()
def _clear_old_events():
from google.appengine.api.datastore import Query, Delete, Get
query = Query("centaur_event", keys_only=True)
query["created <= "] = timezone.now() - timedelta(days=30)
old_event_keys = list(query.Run(limit=EVENT_BATCH_SIZE))
old_events = filter(None, Get(old_event_keys))
errors = {}
for event in old_events:
data = errors.setdefault(event['error_id'], {'count': 0, 'event_keys':[]})
data['count'] += 1
data['event_keys'].append(event.key())
to_delete = []
for error_id, data in errors.items()[:ERROR_UPDATE_BATCH_SIZE]:
# Each event might be for a different error and while we can delete hundreds of events, we
# probably don't want to defer hundreds of tasks, so we'll only delete events from a handful of distinct events.
defer(_update_error_count, error_id, data['count'], _queue=CLEANUP_QUEUE)
to_delete.extend(data['event_keys'])
Delete(to_delete)
if len(old_event_keys) == EVENT_BATCH_SIZE or len(to_delete) < len(old_events):
# In case we didn't clear everything, run again to find more old events.
defer(_clear_old_events, _queue=CLEANUP_QUEUE)
|
greenoaktree/pfp
|
pfp/native/__init__.py
|
Python
|
mit
| 1,639
| 0.026236
|
import functools
import pfp.interp
def native(name, ret, interp=None, send_interp=False):
"""Used as a decorator to add the decorated function to the
pfp interpreter so that it can be used from within scripts.
:param str name: The name of the function as it will be exposed in template scripts.
:param pfp.fields.Field ret: The return type of the function (a class)
:param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
:param bool send_interp: If the current interpreter should be passed to the function.
Examples:
The example below defines a ``Sum`` function that will return the sum of
all parameters passed to the function: ::
from pfp.fields import PYVAL
@native(name="Sum", ret=pfp.fields.Int64)
def sum_numbers(params, ctxt, scope, stream, coord):
res = 0
for param in params:
res += PYVAL(param)
return res
The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it
requires that the interpreter be sent as a parameter: ::
@native(name="Int3", ret=pfp.fields.Void, send_interp=True)
def int3(params, ctxt, scope, stream, coord, interp):
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop()
"""
def native_decorator(func):
@functools.wraps(func)
def native_wrapper(*args, **kwargs):
return func(*args, **kwargs)
pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
return native_wrapper
return native_decorator
def predefine(template):
|
pfp.interp.PfpInterp.add_predefine(template)
|
sanja7s/SR_Twitter
|
src_graph/plot_degree_assortativity.py
|
Python
|
mit
| 1,447
| 0.032481
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
plot the results from the files igraph_degree_assort_study and degree_assortativity
'''
from igraph import *
import os
import numpy as np
import matplotlib.pyplot as plt
#########################
IN_DIR = '/home/sscepano/Projects7s/Twitter-workspace/ALL_SR'
img_out_plot = "7MOda_unweighted.png"
#########################
#########################
# read from a file the res
#########################
def read_in_res():
f = open('7MODeg_assort_study.weighted_edge_list', 'r')
DA = []
TH = []
for line in f:
if line.startswith('stats for'):
th = float(line.split()[-1])
TH.append(th)
if line.startswith('The network is'):
da = float(line.split()[-1])
DA.append(da)
th_last = th
f2 = open('plot_da_0.2.txt', 'r')
for line in f2:
(th, da) = line.split()
th = float(th)
if th < th_last:
continue
da = float(da)
TH.append(th)
DA.append(da)
f3 = open('DA_SR_th.tab', 'w')
for i in range(len(TH)):
f3.write(str(TH[i]) + '\t' + str(DA[i]) + '\n')
return TH, DA
def plot_DA(xaxis, da):
x = np.array(xaxis)
y = np.array(da)
plt.plot(x, y, 'c')
plt.grid(True)
plt.title('SR network')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.ylabel('degree assortativity')
plt.xlabel('SR threshold')
plt.savefig(img_out_plot,format='png',dpi=200)
def main():
os.chdir(IN_DIR)
x, DA = read_in_res()
plot_DA(x, DA)
main()
|
int3l/dungeons-and-pythons
|
main.py
|
Python
|
mit
| 155
| 0.019355
|
from dungeon.dungeon import Dungeon
def main():
testdungeon = Dungeon('level1.txt')
print(testdungeon)
if __name__ == '__main__':
main()
|
oubiga/respect
|
respect/main.py
|
Python
|
bsd-3-clause
| 2,305
| 0.000434
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from getpass import getpass
from datetime import datetime
import pprint
from docopt import docopt
import requests
from .spelling import spellchecker
from .dispatch import dispatch
from .utils import login, validate_username
from .exceptions import ConnectionErrorException
PY3 = sys.version > '3'
if PY3:
pass
else:
input = raw_input
if sys.version < '3':
from urlparse import urljoin
else:
from urllib.parse import urljoin
GITHUB_USERS = 'https://api.github.com/users/'
def parse_respect_args(args):
'''
Respect
Usage:
respect <username> [--repos=<rep>] [--followers=<foll>] [--language=<lang>]
respect <username> bio
respect <username> stars [--verbose]
respect <username> repos [--verbose] [--language=<lang>]
respect -h | --help
Options:
-h, --help Shows this help information.
-v, --verbose Prints detailed information.
-r <rep> --repos <rep> Number of repositories [default: ].
-f <foll> --followers <foll> Number of followers [default: ].
-l <lang> --language <lang> Language name [default: ].
'''
args = docopt(parse_respect_args.__doc__, argv=args)
return args
def main():
"""
Main entry point for the `respect` command.
"""
args = parse_respect_args(sys.argv[1:])
if validate_username(args['<username>']):
print("processing...")
else:
print("@"+args['<username>'], "is not a valid username.")
print("Username may only contain alphanumeric ASCII characters or "
"dashes and cannot begin with a dash.")
return
try:
r = requests.get(urljoin(GITHUB_USERS, args['<username>']))
except ConnectionErrorException as e:
print('Connection Error from requests. Request again, please.')
print(e)
if r.status_code == 404 or r.status_code == 403:
session = login(401, args=args)
return dispatch(args, r, session)
elif r.status_code == 200:
return dispatch(args, response=r)
else:
raise UnknownStausCodeException
if __name__ == '__main__':
main()
|
mrcslws/nupic.research
|
src/nupic/research/frameworks/pytorch/hooks/hook_manager.py
|
Python
|
agpl-3.0
| 5,818
| 0.001375
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import TrackStatsHookBase
class ModelHookManager:
"""
This class registers and manages a set of hooks subclassed from
`TrackStatsHookBase`. The given hook is registered on all modules within
'named_modules'.
Tracking is started and stopped for all hooks via `self.start_tracking()` and
`self.stop_tracking()`. Alternatively, this class can be used a context manager to
automate these calls. For example,
```
with hook_manager as hooks:
... # Train here
stats = hooks.get_statistics()
```
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function that
takes a name and module as inputs and then outputs a dictionary of
arguments to pass to the hook
"""
def __init__(
self,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
assert hook_type in ["forward", "backward", "pre_forward"]
assert issubclass(hook_class, TrackStatsHookBase)
# Register the hooks via class method.
tracked_vals = self.register_storage_hooks(named_modules,
hook_class=hook_class,
hook_type=hook_type,
hook_args=hook_args)
# These are the functions that are called every forward or backward pass.
self.hooks = tracked_vals[0]
# These are handles to the hooks; PyTorch lets the user unregister
# hooks through these handles.
self._hook_handles = tracked_vals[1]
# These are the filtered modules that will be tracked.
self.tracked_modules = tracked_vals[2]
# Keep track of whether tracking is on.
self._tracking = False
@property
def tracking(self):
return self._tracking
def __enter__(self):
"""Start tracking when `with` is called."""
self.start_tracking()
return self
def __exit__(self, *args):
"""Stop tracking when `with` block is left."""
self.stop_tracking()
@classmethod
def register_storage_hooks(
cls,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
"""
Register hook on each module in 'named_modules'.
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function
that takes a name and module as inputs and then outputs a
dictionary of arguments to pass to the hook
"""
assert hook_type in ["forward", "backward", "pre_forward"]
hooks = []
handles = []
tracked_modules = dict()
# Register hooks on the modules.
for n, m in named_modules.items():
if callable(hook_args):
args = hook_args(n, m)
else:
args = hook_args or {}
hook = hook_class(name=n, **args)
if hook_type == "forward":
handle = m.register_forward_hook(hook)
elif hook_type == "pre_forward":
handle = m.register_forward_pre_hook(hook)
else:
handle = m.register_backward_hook(hook)
hooks.append(hook)
handles.append(handle)
tracked_modules[n] = m
return hooks, handles, tracked_modules
def start_tracking(self):
self._tracking = True
for hook in self.hooks:
hook.start_tracking()
def stop_tracking(self):
self._tracking = False
for hook in self.hooks:
hook.stop_tracking()
def get_statistics(self):
"""
This returns a generator with elements
`(name, module, statistic_0, ..., statistic_n)`.
"""
return (
(name, module, *hook.get_statistics())
for (name, module), hook in zip(self.tracked_modules.items(), self.hooks)
)
def remove_hooks(self):
"""
Remove all hooks from the model and stop tracking statistics.
"""
for handle in self._hook_handles:
handle.remove()
self.hooks = []
self._hook_handles = []
self.tracked_modules = dict()
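# Hypothetical usage sketch (not part of this module; `model`, `inputs` and the
# TrackStatsHookBase subclass `MyStatsHook` are assumptions). It illustrates the
# callable form of `hook_args` described in the class docstring:
#
#   named_modules = dict(model.named_modules())
#   manager = ModelHookManager(named_modules,
#                              hook_class=MyStatsHook,
#                              hook_type="forward",
#                              hook_args=lambda name, module: {"max_samples": 100})
#   with manager as hooks:
#       model(inputs)  # statistics are collected during this pass
#   for name, module, *stats in hooks.get_statistics():
#       print(name, stats)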
|
Kotaimen/georest
|
georest/view/utils.py
|
Python
|
bsd-2-clause
| 1,423
| 0.000703
|
# -*- encoding: utf-8 -*-
__author__ = 'pp'
__date__ = '6/25/14'
"""
georest.view.utils
~~~~~~~~~~~~~~~~~
helper/mixin things for views
"""
import sys
from functools import wraps
from flask import request
from .exceptions import InvalidRequest
from ..geo import GeoException
def get_json_content():
"""check content type and return raw text instrad of json data"""
if request.mimetype != 'application/json':
raise InvalidRequest('Only "application/json" supported')
try:
data = request.data.decode('utf-8')
# data = request.get_data().decode('utf-8')
except UnicodeError:
raise InvalidRequest('Cannot decode content with utf-8')
return data
def get_if_match():
"""get if_match etag from request"""
etag = None
if request.if_match and not request.if_match.star_tag:
try:
etag, = request.if_match.as_set() # only 1 allowed
except ValueError:
raise InvalidRequest('Cannot process if_match %s' % request.if_match)
return etag
def catcher(f):
"""catching uncatched errors, and filling the traceback"""
@wraps(f)
def decorator(*args, **kwargs):
try:
return f(*args, **kwargs)
except GeoException as e:
if not e.traceback:
e.traceback = sys.exc_info()[2]
raise
|
return decorator
|
joe-eklund/cs460
|
bene/lab2/src/transfer.py
|
Python
|
gpl-3.0
| 4,724
| 0.013124
|
import sys
from sim import Sim
from node import Node
from link import Link
from transport import Transport
from tcp import TCP
from network import Network
import optparse
import os
import subprocess
class AppHandler(object):
def __init__(self,filename, directory):
self.filename = filename
self.directory = directory
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.f = open("%s/%s" % (self.directory,self.filename),'w')
def receive_data(self,data):
Sim.trace('AppHandler',"application got %d bytes" % (len(data)))
self.f.write(data)
self.f.flush()
class Main(object):
def __init__(self):
self.iterations = 1 #set from flags
self.out_directory = '../output/received'
self.in_directory = '../data'
self.parse_options()
print self.filename
# self.total = 0.0;
for i in range(0, self.iterations):
self.run()
self.diff()
# for windowSize in [1000]:#, 2000, 5000, 10000, 15000, 20000]:
# print "--Results with window size " + str(windowSize)
#self.window = windowSize
# self.run()
# print "Average over " + str(iterations) + " iterations: " + str(self.total / float(iterations))
def parse_options(self):
parser = optparse.OptionParser(usage = "%prog [options]",
version = "%prog 0.1")
parser.add_option("-f","--filename",type="str",dest="filename",
default='test.txt',
help="filename to send")
parser.add_option("-l","--loss",type="float",dest="loss",
default=0.0,
help="random loss rate")
parser.add_option("-w","--window",type="int",dest="window",
default=1000,
help="transmission window size")
parser.add_option("-i","--iterations",type="int",dest="iterations",
default=1,
help="number of iterations to run")
(options,args) = parser.parse_args()
self.filename = options.filename
self.loss = options.loss
self.window = options.window
self.iterations = options.iterations
def diff(self):
args = ['diff','-u',self.in_directory + '/' + self.filename,self.out_directory+'/'+self.filename]
result = subprocess.Popen(args,stdout = subprocess.PIPE).communicate()[0]
print
if not result:
print "File transfer correct!"
else:
print "File transfer failed. Here is the diff:"
print
print result
sys.exit()
def run(self):
# parameters
Sim.scheduler.reset()
Sim.set_debug('AppHandler')
Sim.set_debug('TCP')
# setup network
net = Network('../networks/setup.txt')
net.loss(self.loss)
# setup routes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])
# setup transport
t1 = Transport(n1)
t2 = Transport(n2)
# setup application
a = AppHandler(self.filename, self.out_directory)
# setup connection
c1 = TCP(t1,n1.get_address('n2'),1,n2.get_address('n1'),1,a,window=self.window)
c2 = TCP(t2,n2.get_address('n1'),1,n1.get_address('n2'),1,a,window=self.window)
# send a file
with open(self.in_directory + '/' + self.filename,'r') as f:
while True:
data = f.read(10000)
if not data:
break
Sim.scheduler.add(delay=0, event=data, handler=c1.send)
# run the simulation
Sim.scheduler.run()
# print str(self.window) + " & " + \
# str(Sim.scheduler.current_time()) + " & " + \
# str(4116160.0 / float(Sim.scheduler.current_time())) + " & " + \
# str(c2.totalQueueingDelay / float(c1.totalPacketsSent)) + " \\\\"
# print str(self.window) + "," + str(4116160.0 / float(Sim.scheduler.current_time()))
print str(self.window) + "," + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Ave Queueing Delay: " + str(c2.totalQueueingDelay / float(c1.totalPacketsSent))
# print "Throughput: " + str(4116160.0 / float(Sim.scheduler.current_time()))
# self.total += Sim.scheduler.current_time()
if __name__ == '__main__':
m = Main()
|
SRabbelier/Melange
|
thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py
|
Python
|
apache-2.0
| 14,205
| 0.017247
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
import array
import string
import re
from google.pyglib.gexcept import AbstractMethod
import httplib
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
'ProtocolBufferDecodeError',
'ProtocolBufferEncodeError',
'ProtocolBufferReturnError']
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
def __init__(self, contents=None):
raise AbstractMethod
def Clear(self):
raise AbstractMethod
def IsInitialized(self, debug_strs=None):
raise AbstractMethod
def Encode(self):
try:
return self._CEncode()
except AbstractMethod:
e = Encoder()
self.Output(e)
return e.buffer().tostring()
def SerializeToString(self):
return self.Encode()
def SerializePartialToString(self):
try:
return self._CEncodePartial()
except (AbstractMethod, AttributeError):
e = Encoder()
self.OutputPartial(e)
return e.buffer().tostring()
def _CEncode(self):
raise AbstractMethod
def _CEncodePartial(self):
raise AbstractMethod
def ParseFromString(self, s):
self.Clear()
self.MergeFromString(s)
def ParsePartialFromString(self, s):
self.Clear()
self.MergePartialFromString(s)
def MergeFromString(self, s):
self.MergePartialFromString(s)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
def MergePartialFromString(self, s):
try:
self._CMergeFromString(s)
except AbstractMethod:
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d)
def _CMergeFromString(self, s):
raise AbstractMethod
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise AbstractMethod
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise AbstractMethod
def ParseASCII(self, ascii_string):
raise AbstractMethod
def ParseASCIIIgnoreUnknown(self, ascii_string):
raise AbstractMethod
def Equals(self, other):
raise AbstractMethod
def __eq__(self, other):
if other.__class__ is self.__class__:
return self.Equals(other)
return NotImplemented
def __ne__(self, other):
if other.__class__ is self.__class__:
return not self.Equals(other)
return NotImplemented
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise AbstractMethod
def OutputPartial(self, e):
raise AbstractMethod
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise AbstractMethod
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
def MergeFrom(self, pb):
raise AbstractMethod
def lengthVarInt32(self, n):
return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
def lengthString(self, n):
return self.lengthVarInt32(n) + n
def DebugFormat(self, value):
return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -20000000000000 or value >= 20000000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
def DebugFormatFixed32(self, value):
if (value < 0): value += (1L<<32)
return "0x%x" % value
def DebugFormatFixed64(self, value):
if (value < 0): value += (1L<<64)
return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
class Encoder:
NUMERIC = 0
DOUBLE = 1
STRING = 2
STARTGROUP = 3
ENDGROUP = 4
FLOAT = 5
MAX_TYPE = 6
def __init__(self):
self.buf = array.array('B')
return
def buffer(self):
return self.buf
def put8(self, v):
if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
self.buf.append(v & 255)
return
def put16(self, v):
if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
return
def put32(self, v):
if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
return
def put64(self, v):
if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
self.buf.append((v >> 32) & 255)
self.buf.append((v >> 40) & 255)
self.buf.append((v >> 48) & 255)
self.buf.append((v >> 56
|
ruiminshen/yolo-tf
|
parse_darknet_yolo2.py
|
Python
|
lgpl-3.0
| 6,342
| 0.002841
|
"""
Copyright (C) 2017, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import time
import shutil
import argparse
import configparser
import operator
import itertools
import struct
import numpy as np
|
import pandas as pd
import tensorflow as tf
import model.yolo2.inference as inference
import utils
def transpose_weights(weights, num_anchors):
ksize1, ksize2, channels_in, _ = weights.shape
weights = weights.reshape([ksize1, ksize2, channels_in, num_anchors, -1])
coords = weights[:, :, :, :, 0:4]
iou = np.expand_dims(weights[:, :, :, :, 4], -1)
classes = weights[:, :, :, :, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([ksize1, ksize2, channels_in, -1])
def transpose_biases(biases, num_anchors):
biases = biases.reshape([num_anchors, -1])
coords = biases[:, 0:4]
iou = np.expand_dims(biases[:, 4], -1)
classes = biases[:, 5:]
return np.concatenate([iou, coords, classes], -1).reshape([-1])
def transpose(sess, layer, num_anchors):
v = next(filter(lambda v: v.op.name.endswith('weights'), layer))
sess.run(v.assign(transpose_weights(sess.run(v), num_anchors)))
v = next(filter(lambda v: v.op.name.endswith('biases'), layer))
sess.run(v.assign(transpose_biases(sess.run(v), num_anchors)))
def main():
model = config.get('config', 'model')
cachedir = utils.get_cachedir(config)
with open(os.path.join(cachedir, 'names'), 'r') as f:
names = [line.strip() for line in f]
width, height = np.array(utils.get_downsampling(config)) * 13
anchors = pd.read_csv(os.path.expanduser(os.path.expandvars(config.get(model, 'anchors'))), sep='\t').values
func = getattr(inference, config.get(model, 'inference'))
with tf.Session() as sess:
image = tf.placeholder(tf.float32, [1, height, width, 3], name='image')
func(image, len(names), len(anchors))
tf.contrib.framework.get_or_create_global_step()
tf.global_variables_initializer().run()
prog = re.compile(r'[_\w\d]+\/conv(\d*)\/(weights|biases|(BatchNorm\/(gamma|beta|moving_mean|moving_variance)))$')
variables = [(prog.match(v.op.name).group(1), v) for v in tf.global_variables() if prog.match(v.op.name)]
variables = sorted([[int(k) if k else -1, [v for _, v in g]] for k, g in itertools.groupby(variables, operator.itemgetter(0))], key=operator.itemgetter(0))
assert variables[0][0] == -1
variables[0][0] = len(variables) - 1
variables.insert(len(variables), variables.pop(0))
with tf.name_scope('assign'):
with open(os.path.expanduser(os.path.expandvars(args.file)), 'rb') as f:
major, minor, revision, seen = struct.unpack('4i', f.read(16))
tf.logging.info('major=%d, minor=%d, revision=%d, seen=%d' % (major, minor, revision, seen))
for i, layer in variables:
tf.logging.info('processing layer %d' % i)
total = 0
for suffix in ['biases', 'beta', 'gamma', 'moving_mean', 'moving_variance', 'weights']:
try:
v = next(filter(lambda v: v.op.name.endswith(suffix), layer))
except StopIteration:
continue
shape = v.get_shape().as_list()
cnt = np.multiply.reduce(shape)
total += cnt
tf.logging.info('%s: %s=%d' % (v.op.name, str(shape), cnt))
p = struct.unpack('%df' % cnt, f.read(4 * cnt))
if suffix == 'weights':
ksize1, ksize2, channels_in, channels_out = shape
p = np.reshape(p, [channels_out, channels_in, ksize1, ksize2]) # Darknet format
p = np.transpose(p, [2, 3, 1, 0]) # TensorFlow format (ksize1, ksize2, channels_in, channels_out)
sess.run(v.assign(p))
tf.logging.info('%d parameters assigned' % total)
remaining = os.fstat(f.fileno()).st_size - f.tell()
transpose(sess, layer, len(anchors))
saver = tf.train.Saver()
logdir = utils.get_logdir(config)
if args.delete:
tf.logging.warn('delete logging directory: ' + logdir)
shutil.rmtree(logdir, ignore_errors=True)
os.makedirs(logdir, exist_ok=True)
model_path = os.path.join(logdir, 'model.ckpt')
tf.logging.info('save model into ' + model_path)
saver.save(sess, model_path)
if args.summary:
path = os.path.join(logdir, args.logname)
summary_writer = tf.summary.FileWriter(path)
summary_writer.add_graph(sess.graph)
tf.logging.info('tensorboard --logdir ' + logdir)
if remaining > 0:
tf.logging.warn('%d bytes remaining' % remaining)
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('file', help='Darknet .weights file')
parser.add_argument('-c', '--config', nargs='+', default=['config.ini'], help='config file')
parser.add_argument('-d', '--delete', action='store_true', help='delete logdir')
parser.add_argument('-s', '--summary', action='store_true')
parser.add_argument('--logname', default=time.strftime('%Y-%m-%d_%H-%M-%S'), help='the name of TensorBoard log')
parser.add_argument('--level', default='info', help='logging level')
return parser.parse_args()
if __name__ == '__main__':
args = make_args()
config = configparser.ConfigParser()
utils.load_config(config, args.config)
if args.level:
tf.logging.set_verbosity(args.level.upper())
main()
|
polypmer/scrape
|
new-york-times/nytimes-scrape.py
|
Python
|
mit
| 1,833
| 0.004364
|
model_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.response-format?" + \
"[q=search term&" + \
"fq=filter-field:(filter-term)&additional-params=values]" + \
"&api-key=9key"
"""http://api.nytimes.com/svc/search/v2/articlesearch.json?q=terrorism+OR+terrorist
&begin_date=19900102&end_date=19900103&sort=newest&api-key=
key"""
search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json?" + \
"[q=terror]" + \
"&api-key=key"
precise_search = "http://api.nytimes.com/svc/search/v2/" + \
"articlesearch.json"
terms = "?q=terrorism+OR+terrorist"
api = "&api-key=key"
print(precise_search+terms+dates+api)
"""
aggressive for looping in order to overcome the ten article limit. instead search each key word per day, and then concat the jsons into a nice pandas dataframe, and then eventually a csv.
"""
months_list = ["%.2d" % i for i in range(1,2)]
days_list = ["%.2d" % i for i in range(1,32)]
json_files = []
print(months_list)
for x in months_list:
month_s = x
month_e = x
for y in days_list:
day_s = y
day_e = str(int(y)+1).zfill(2)
year_s = "1990"
year_e = "1990"
start = year_s + month_s + day_s
end = year_e + month_e + day_e
dates = "&begin_date="+start+"&end_date="+end+"&sort=newest"
#print(start + " "+end + "\n" +dates)
r = requests.get(precise_search+terms+dates+api)
original_json = json.loads(r.text)
response_json = original_json['response']
json_file = response_json['docs']
json_files.append(json_file)
frames = []
for x in json_files:
df = pd.DataFrame.from_dict(x)
frames.append(df)
#print(frames)
result = pd.concat(frames)
result
|
danielquinn/paperless
|
src/documents/templatetags/customisation.py
|
Python
|
gpl-3.0
| 865
| 0
|
import os
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag()
def custom_css():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"overrides.css"
)
|
if os.path.exists(theme_path):
return mark_safe(
'<link rel="stylesheet" type="text/css" href="{}" />'.format(
os.path.join(settings.MEDIA_URL, "overrides.css")
)
)
return ""
@register.simple_tag()
def custom_js():
theme_path = os.path.join(
settings.MEDIA_ROOT,
"overrides.js"
)
if os.path.exists(theme_path):
return mark_safe(
'<script src="{}"></script>'.format(
os.path.join(settings.MEDIA_URL, "overrides.js")
)
)
return ""
|
queirozfcom/spam-filter
|
case3_naive_normal.py
|
Python
|
mit
| 8,514
| 0.009396
|
import numpy as np
import scipy.stats as stats
import sys
# lib is our library created for this assignment
import lib.naive_bayes as nb
import lib.preprocessing as prep
import lib.validation as valid
import lib.normalization as normal
from config.constants import *
def case3(output=True):
accuracy_in_each_turn = list()
precision_in_each_turn_spam = list()
recall_in_each_turn_spam = list()
precision_in_each_turn_ham = list()
recall_in_each_turn_ham = list()
m = np.loadtxt(open("resources/normalized_data.csv","rb"),delimiter=',')
shuffled = np.random.permutation(m)
valid.validate_cross_validation(NUMBER_OF_ROUNDS,TRAIN_TEST_RATIO)
# equiprobable priors
prior_spam = 0.5
prior_ham = 0.5
for i in xrange(NUMBER_OF_ROUNDS):
# we're using cross-validation so each iteration we take a different
# slice of the data to serve as test set
train_set,test_set = prep.split_sets(shuffled,TRAIN_TEST_RATIO,i)
#parameter estimation
#but now we take ALL attributes into consideration
sample_means_word_spam = list()
sample_means_word_ham = list()
sample_variances_word_spam = list()
sample_variances_word_ham = list()
# all but the last one
for attr_index in xrange(57):
sample_means_word_spam.append(nb.take_mean_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_means_word_ham.append(nb.take_mean_ham(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_spam.append(nb.take_variance_spam(train_set,attr_index,SPAM_ATTR_INDEX))
sample_variances_word_ham.append(nb.take_variance_ham(train_set,attr_index,SPAM_ATTR_INDEX))
#sample standard deviations from sample variances
sample_std_devs_spam = map(lambda x: x ** (1/2.0), sample_variances_word_spam)
sample_std_devs_ham = map(lambda x: x ** (1/2.0), sample_variances_word_ham)
hits = 0.0
misses = 0.0
#number of instances correctly evaluated as spam
correctly_is_spam = 0.0
#total number of spam instances
is_spam = 0.0
#total number of instances evaluated as spam
guessed_spam = 0.0
#number of instances correctly evaluated as ham
correctly_is_ham = 0.0
#total number of ham instances
is_ham = 0.0
#total number of instances evaluated as ham
guessed_ham = 0.0
# now we test the hypothesis against the test set
for row in test_set:
# i.e., the product of all the conditional probabilities of the words given the class
# I know this is a bit confusing, but if you look carefully it's neat to do it all in a single line! =)
product_of_all_conditional_probs_spam = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_spam[cur], sample_std_devs_spam[cur]).pdf(row[CASE_2_ATTRIBUTE_INDEXES[cur]]) , xrange(10), 1)
# no need to divide by the normalization term since we only want to know which one is larger!
posterior_spam = prior_spam * product_of_all_conditional_probs_spam
product_of_all_conditional_probs_ham = reduce(lambda acc,cur: acc * stats.norm(sample_means_word_ham[cur], sample_std_devs_ham[cur]).pdf(row[CASE_2_ATTRIBUTE_INDEXES[cur]]) , xrange(10), 1)
posterior_ham = prior_ham * product_of_all_conditional_probs_ham
# whichever is greater - that will be our prediction
if posterior_spam > posterior_ham:
guess = 1
else:
guess = 0
if(row[SPAM_ATTR_INDEX] == guess):
hits += 1
else:
misses += 1
# we'll use these to calculate metrics
if (row[SPAM_ATTR_INDEX] == 1 ):
is_spam += 1
if guess == 1:
guessed_spam += 1
correctly_is_spam += 1
else:
guessed_ham += 1
else:
is_ham += 1
if guess == 1:
guessed_spam += 1
else:
guessed_ham += 1
correctly_is_ham += 1
#accuracy = number of correctly evaluated instances/
# number of instances
#
#
accuracy = hits/(hits+misses)
#precision_spam = number of correctly evaluated instances as spam/
# number of spam instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_spam == 0):
precision_spam = 0
else:
precision_spam = correctly_is_spam/is_spam
#recall_spam = number of correctly evaluated instances as spam/
# number of evaluated instances as spam
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_spam == 0):
recall_spam = 0
else:
recall_spam = correctly_is_spam/guessed_spam
#precision_ham = number of correctly evaluated instances as ham/
# number of ham instances
#
#
# in order to avoid divisions by zero in case nothing was found
if(is_ham == 0):
precision_ham = 0
else:
precision_ham = correctly_is_ham/is_ham
#recall_ham = number of correctly evaluated instances as ham/
# number of evaluated instances as ham
#
#
# in order to avoid divisions by zero in case nothing was found
if(guessed_ham == 0):
recall_ham = 0
else:
recall_ham = correctly_is_ham/guessed_ham
accuracy_in_each_turn.append(accuracy)
precision_in_each_turn_spam.append(precision_spam)
recall_in_each_turn_spam.append(recall_spam)
precision_in_each_turn_ham.append(precision_ham)
recall_in_each_turn_ham.append(recall_ham)
# calculation of means for each metric at the end
mean_accuracy = np.mean(accuracy_in_each_turn)
std_dev_accuracy = np.std(accuracy_in_each_turn)
variance_accuracy = np.var(accuracy_in_each_turn)
mean_precision_spam = np.mean(precision_in_each_turn_spam)
std_dev_precision_spam = np.std(precision_in_each_turn_spam)
variance_precision_spam = np.var(precision_in_each_turn_spam)
mean_recall_spam = np.mean(recall_in_each_turn_spam)
std_dev_recall_spam = np.std(recall_in_each_turn_spam)
variance_recall_spam = np.var(recall_in_each_turn_spam)
mean_precision_ham = np.mean(precision_in_each_turn_ham)
std_dev_precision_ham = np.std(precision_in_each_turn_ham)
variance_precision_ham = np.var(precision_in_each_turn_ham)
mean_recall_ham = np.mean(recall_in_each_turn_ham)
std_dev_recall_ham = np.std(recall_in_each_turn_ham)
variance_recall_ham = np.var(recall_in_each_turn_ham)
if output:
print "\033[1;32m"
print '============================================='
print 'CASE 3 - ALL ATTRIBUTES - USING NORMAL MODEL'
print '============================================='
print "\033[00m"
print 'MEAN ACCURACY: '+str(round(mean_accuracy,5))
print 'STD. DEV. OF ACCURACY: '+str(round(std_dev_accuracy,5))
print 'VARIANCE OF ACCURACY: '+str(round(variance_accuracy,8))
print ''
print 'MEAN PRECISION FOR SPAM: '+str(round(mean_precision_spam,5))
print 'STD. DEV. OF PRECISION FOR SPAM: '+str(round(std_dev_precision_spam,5))
print 'VARIANCE OF PRECISION FOR SPAM: '+str(round(variance_precision_spam,8))
print ''
print 'MEAN RECALL FOR SPAM: '+str(round(mean_recall_spam,5))
print 'STD. DEV. OF RECALL FOR SPAM: '+str(round(std_dev_recall_spam,5))
print 'VARIANCE OF RECALL FOR SPAM: '+str(round(variance_recall_spam,8))
print ''
print 'MEAN PRECISION FOR HAM: '+str(round(mean_precision_ham,5))
print 'STD. DEV. OF PRECISION FOR HAM: '+str(round(std_dev_precision_ham,5))
|
hryamzik/ansible
|
lib/ansible/modules/packaging/os/rhsm_repository.py
|
Python
|
gpl-3.0
| 7,912
| 0.00316
|
#!/usr/bin/python
# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhsm_repository
short_description: Manage RHSM repositories using the subscription-manager command
description:
- Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
Management entitlement platform using the C(subscription-manager) command.
version_added: '2.5'
author: Giovanni Sciortino (@giovannisciortino)
notes:
- In order to manage RHSM repositories the system must be already registered
to RHSM manually or using the Ansible C(redhat_subscription) module.
requirements:
- subscription-manager
options:
state:
description:
- If state is equal to present or disabled, indicates the desired
repository state.
choices: [present, enabled, absent, disabled]
required: True
default: "present"
name:
description:
- The ID of repositories to enable.
- To operate on several repositories this can accept a comma separated
list or a YAML list.
required: True
'''
EXAMPLES = '''
- name: Enable a RHSM repository
rhsm_repository:
name: rhel-7-server-rpms
- name: Disable all RHSM repositories
rhsm_repository:
name: '*'
state: disabled
- name: Enable all repositories starting with rhel-6-server
rhsm_repository:
name: rhel-6-server*
state: enabled
- name: Disable all repositories except rhel-7-server-rpms
rhsm_repository:
name: "{{ item }}"
state: disabled
with_items: "{{
rhsm_repository.repositories |
map(attribute='id') |
difference(['rhel-7-server-rpms']) }}"
'''
RETURN = '''
repositories:
description:
- The list of RHSM repositories with their states.
- When this module is used to change the repositories states, this list contains the updated states after the changes.
returned: success
type: list
'''
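# Illustrative shape of one element of the `repositories` return value documented
# above (values are invented; the keys mirror the dicts built in
# get_repository_list() below):
#   {"id": "rhel-7-server-rpms",
#    "name": "Red Hat Enterprise Linux 7 Server (RPMs)",
#    "url": "https://cdn.example.com/content/dist/rhel/server/7/os",
#    "enabled": True}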
import re
import os
from fnmatch import fnmatch
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
def run_subscription_manager(module, arguments):
# Execute subscription-manager with arguments and manage common errors
rhsm_bin = module.get_bin_path('subscription-manager')
if not rhsm_bin:
module.fail_json(msg='The executable file subscription-manager was not found in PATH')
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
module.fail_json(msg='This system has no repositories available through subscriptions')
elif rc == 1:
module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
else:
return rc, out, err
def get_repository_list(module, list_parameter):
# Generate RHSM repository list and return a list of dict
if list_parameter == 'list_enabled':
rhsm_arguments = ['repos', '--list-enabled']
elif list_parameter == 'list_disabled':
rhsm_arguments = ['repos', '--list-disabled']
elif list_parameter == 'list':
rhsm_arguments = ['repos', '--list']
rc, out, err = run_subscription_manager(module, rhsm_arguments)
skip_lines = [
'+----------------------------------------------------------+',
' Available Repositories in /etc/yum.repos.d/redhat.repo'
]
repo_id_re_str = r'Repo ID: (.*)'
repo_name_re_str = r'Repo Name: (.*)'
repo_url_re_str = r'Repo URL: (.*)'
repo_enabled_re_str = r'Enabled: (.*)'
repo_id = ''
repo_name = ''
repo_url = ''
repo_enabled = ''
repo_result = []
for line in out.split('\n'):
if line in skip_lines:
continue
repo_id_re = re.match(repo_id_re_str, line)
if repo_id_re:
repo_id = repo_id_re.group(1)
continue
repo_name_re = re.match(repo_name_re_str, line)
if repo_name_re:
repo_name = repo_name_re.group(1)
continue
repo_url_re = re.match(repo_url_re_str, line)
if repo_url_re:
repo_url = repo_url_re.group(1)
continue
repo_enabled_re = re.match(repo_enabled_re_str, line)
if repo_enabled_re:
repo_enabled = repo_enabled_re.group(1)
repo = {
"id": repo_id,
"name": repo_name,
"url": repo_url,
"enabled": True if repo_enabled == '1' else False
}
repo_result.append(repo)
return repo_result
def repository_modify(module, state, name):
name = set(name)
current_repo_list = get_repository_list(module, 'list')
updated_repo_list = deepcopy(current_repo_list)
matched_existing_repo = {}
for repoid in name:
matched_existing_repo[repoid] = []
for idx, repo in enumerate(current_repo_list):
if fnmatch(repo['id'], repoid):
matched_existing_repo[repoid].append(repo)
# Update current_repo_list to return it as result variable
updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
changed = False
results = []
diff_before = ""
diff_after = ""
rhsm_arguments = ['repos']
for repoid in matched_existing_repo:
if len(matched_existing_repo[repoid]) == 0:
results.append("%s is not a valid repository ID" % repoid)
module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
for repo in matched_existing_repo[repoid]:
if state in ['disabled', 'absent']:
if repo['enabled']:
changed = True
|
diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
results.append("Repository '%s' is disabled for this system" % repo['id'])
rhsm_arguments += ['--disable', repo['id']]
elif state in ['enabled', 'present']:
if not repo['enabled']:
changed = True
|
diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
results.append("Repository '%s' is enabled for this system" % repo['id'])
rhsm_arguments += ['--enable', repo['id']]
diff = {'before': diff_before,
'after': diff_after,
'before_header': "RHSM repositories",
'after_header': "RHSM repositories"}
if not module.check_mode:
rc, out, err = run_subscription_manager(module, rhsm_arguments)
results = out.split('\n')
module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', required=True),
state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
),
supports_check_mode=True,
)
name = module.params['name']
state = module.params['state']
repository_modify(module, state, name)
if __name__ == '__main__':
main()
|
rtnpro/test-your-code
|
test_your_code/examples/factorial/test_factorial.py
|
Python
|
gpl-2.0
| 386
| 0.005181
|
import unittest
from factorial import fact
class TestFactorial(unittest.TestCase):
"""
Our basic test class
"""
def test_fact(self):
"""
The actual test.
Any method which starts with ``test_`` will be considered as a test case.
"""
res = fact(5)
self.assertEqual(res, 120)
if __name__ == '__main__':
unittest.main()
|
funkring/fdoo
|
addons-funkring/at_account/wizard/invoice_attachment_wizard.py
|
Python
|
agpl-3.0
| 2,242
| 0.005352
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.report_aeroo import report_aeroo
from openerp.addons.at_base import util
from openerp.osv import fields, osv
from openerp.tools.translate import _
class inovice_attachment_wizard(osv.TransientModel):
_name = "account.invoice.attachment.wizard"
_description = "Invoice Attachment Wizard"
def action_import(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0])
invoice_id = util.active_id(context, "account.invoice")
if not invoice_id:
raise osv.except_osv(_("Error!"), _("No invoice found"))
report_obj = self.pool.get("ir.actions.report.xml")
data=base64.decodestring(wizard.document)
data = report_aeroo.fixPdf(data)
if not data:
raise osv.except_osv(_("Error!"), _("PDF is corrupted and unable to fix!"))
if not report_obj.write_attachment(cr, uid, "account.invoice", invoice_id, report_name="account.report_invoice", datas=base64.encodestring(data), context=context, origin="account.invoice.attachment.wizard"):
raise osv.except_osv(_("Error!"), _("Unable to import document (check if invoice is validated)"))
return { "type" : "ir.actions.act_window_close" }
_columns = {
"document" : fields.binary("Document")
}
|
andreamartire/gmpy
|
test3/gmpy_test_thr.py
|
Python
|
lgpl-3.0
| 3,614
| 0.010238
|
# partial unit test for gmpy2 threaded mpz functionality
# relies on Tim Peters' "doctest.py" test-driver
import gmpy2 as _g, doctest, sys, operator, gc, queue, threading
from functools import reduce
__test__={}
def _tf(N=2, _K=1234**5678):
"""Takes about 100ms on a first-generation Macbook Pro"""
for i in range(N): assert (_g.mpz(1234)**5678)==_K
a=_g.mpz(123)
b=_g.mpz(456)
c=_g.mpz(123456789123456789)
def factorize(x=c):
r'''
(Takes about 25ms, on c, on a first-generation Macbook Pro)
>>> factorize(a)
[3, 41]
>>> factorize(b)
[2, 2, 2, 3, 19]
>>>
'''
import gmpy2 as _g
savex=x
prime=2
x=_g.mpz(x)
factors=[]
while x>=prime:
newx,mult=x.remove(prime)
if mult:
factors.extend([int(prime)]*mult)
x=newx
prime=_g.next_prime(prime)
for factor in factors: assert _g.is_prime(factor)
from operator import mul
assert reduce(mul, factors)==savex
return factors
def elemop(N=1000):
r'''
(Takes about 40ms on a first-generation Macbook Pro)
'''
for i in range(N):
assert a+b == 579
assert a-b == -333
assert b*a == a*b == 56088
assert b%a == 87
assert divmod(a, b) == (0, 123)
assert divmod(b, a) == (3, 87)
assert -a == -123
assert pow(a, 10) == 792594609605189126649
assert pow(a, 7, b) == 99
assert cmp(a, b) == -1
assert '7' in str(c)
assert '0' not in str(c)
assert a.sqrt() == 11
assert _g.lcm(a, b) == 18696
assert _g.fac(7) == 5040
assert _g.fib(17) == 1597
assert _g.divm(b, a, 20) == 12
assert _g.divm(4, 8, 20) == 3
assert _g.divm(4, 8, 20) == 3
assert _g.mpz(20) == 20
assert _g.mpz(8) == 8
assert _g.mpz(4) == 4
assert a.invert(100) == 87
def _test(chat=None):
if chat:
print("Unit tests for gmpy2 (threading)")
print(" on Python %s" % sys.version)
print("Testing gmpy2 {0}".format(_g.version()))
print(" Mutliple-precision library: {0}".format(_g.mp_version()))
print(" Floating-point library: {0}".format(_g.mpfr_version()))
print(" Complex library: {0}".format(_g.mpc_version()))
print(" Caching Values: (Number) {0}".format(_g.get_cache()[0]))
print(" Caching Values: (Size, limbs) {0}".format(_g.get_cache()[1]))
thismod = sys.modules.get(__name__)
doctest.testmod(thismod, report=0)
if chat: print("Repeating tests, with caching disabled")
_g.set_cache(0,128)
sav = sys.stdout
class _Dummy:
def write(self,*whatever):
pass
try:
sys.stdout = _Dummy()
doctest.testmod(thismod, report=0)
finally:
sys.stdout = sav
if chat:
print()
print("Overall results for thr:")
return doctest.master.summarize(chat)
class DoOne(threading.Thread):
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while True:
task = self.q.get()
if task is None: break
task()
def _test_thr(Ntasks=5, Nthreads=1):
q = queue.Queue()
funcs = (_tf, 1), (factorize, 4), (elemop, 2)
for i in range(Ntasks):
for f, n in funcs:
for x in range(n):
q.put(f)
for i in range(Nthreads):
q.put(None)
thrs = [DoOne(q) for i in range(Nthreads)]
|
for t in thrs: t.start()
for t in thrs: t.join()
if __name__=='__main__':
_test(1)
|
Mlieou/leetcode_python
|
leetcode/python/ex_244.py
|
Python
|
mit
| 791
| 0.002528
|
class WordDistance(object):
def __init__(self, words):
"""
initialize your data structure here.
:type words: List[str]
"""
self.word_dict = {}
for idx, w in enumerate(words):
self.word_dict[w] = self.word_dict.get(w, []) + [idx]
def shortest(self, word1, word2):
"""
Returns the shortest distance between word1 and word2.
:type word1: str
:type word2: str
:rtype: int
"""
return min(abs(i - j) for i in self.word_dict[word1] for j in self.word_dict[word2])
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2")
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa.py
|
Python
|
apache-2.0
| 2,675
| 0.003364
|
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud iam service account'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
name=dict(default=None, type='str'),
display_name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
gcloud = GcloudIAMServiceAccount(module.params['name'], module.params['display_name'])
state = module.params['state']
api_rval = gcloud.list_service_accounts()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.delete_service_account()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = gcloud.create_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# update
elif gcloud.needs_update():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = gcloud.update_service_account()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present|update")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
sharad1126/owtf
|
framework/db/resource_manager.py
|
Python
|
bsd-3-clause
| 3,759
| 0.004256
|
from framework.db import models
from framework.config import config
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ResourceInterface
from framework.lib.general import cprint
import os
import logging
from framework.utils import FileOperations
class ResourceDB(BaseComponent, ResourceInterface):
COMPONENT_NAME = "resource"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db_config = self.get_component("db_config")
self.target = self.get_component("target")
self.db = self.get_component("db")
self.LoadResourceDBFromFile(self.config.get_profile_path("RESOURCES_PROFILE"))
def LoadResourceDBFromFile(self, file_path): # This needs to be a list instead of a dictionary to preserve order in python < 2.7
logging.info("Loading Resources from: " + file_path + " ..")
resources = self.GetResourcesFromFile(file_path)
# Delete all old resources which are not edited by user
# because we may have updated the resource
self.db.session.query(models.Resource).filter_by(dirty=False).delete()
# resources = [(Type, Name, Resource), (Type, Name, Resource),]
|
for Type, Name, Resource in resources:
self.db.session.add(models.Resource(resource_type=Type, resource_name=Name, resource=Resource))
self.db.session.commit()
def GetResourcesFromFile(self, resource_file):
resources = set()
ConfigFile = FileOperations.open(resource_file, 'r').read().splitlines() # To remove stupid '\n' at the end
for line in ConfigFile:
if '#' == line[0]:
continue # Skip comment lines
try:
Type, Name, Resource = line.split('_____')
# Resource = Resource.strip()
resources.add((Type, Name, Resource))
except ValueError:
cprint("ERROR: The delimiter is incorrect in this line at Resource File: "+str(line.split('_____')))
return resources
def GetReplacementDict(self):
configuration = self.db_config.GetReplacementDict()
configuration.update(self.target.GetTargetConfig())
configuration.update(self.config.GetReplacementDict())
return configuration
def GetRawResources(self, ResourceType):
filter_query = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter_by(resource_type = ResourceType)
# Sorting is necessary for working of ExtractURLs, since it must run after main command, so order is imp
sort_query = filter_query.order_by(models.Resource.id)
raw_resources = sort_query.all()
return raw_resources
def GetResources(self, ResourceType):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResources(ResourceType)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
def GetRawResourceList(self, ResourceList):
raw_resources = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter(models.Resource.resource_type.in_(ResourceList)).all()
return raw_resources
def GetResourceList(self, ResourceTypeList):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResourceList(ResourceTypeList)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
|
icyflame/batman
|
scripts/template.py
|
Python
|
mit
| 14,372
| 0.000348
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Very simple script to replace a template with another one.
It also converts the old MediaWiki boilerplate format to the new format.
Syntax: python template.py [-remove] [xml[:filename]] oldTemplate [newTemplate]
Specify the template on the command line. The program will pick up the template
page, and look for all pages using it. It will then automatically loop over
them, and replace the template.
Command line options:
-remove Remove every occurrence of the template from every article
-subst Resolves the template by putting its text directly into the
article. This is done by changing {{...}} or {{msg:...}} into
{{subst:...}}
-assubst Replaces the first argument as old template with the second
argument as new template but substitutes it like -subst does.
Using both options -remove and -subst in the same command line has
the same effect.
-xml retrieve information from a local dump
(https://download.wikimedia.org). If this argument isn't given,
info will be loaded from the maintenance page of the live wiki.
argument can also be given as "-xml:filename.xml".
-user: Only process pages edited by a given user
-skipuser: Only process pages not edited by a given user
-timestamp: (With -user or -skipuser). Only check for a user where his edit is
                not older than the given timestamp. Timestamp must be written in
MediaWiki timestamp format which is "%Y%m%d%H%M%S"
If this parameter is missed, all edits are checked but this is
restricted to the last 100 edits.
-summary: Lets you pick a custom edit summary. Use quotes if edit summary
contains spaces.
-always Don't bother asking to confirm any of the changes, Just Do It.
-addcat: Appends the given category to every page that is edited. This is
useful when a category is being broken out from a template
parameter or when templates are being upmerged but more information
must be preserved.
other: First argument is the old template name, second one is the new
name.
If you want to address a template which has spaces, put quotation
marks around it, or use underscores.
Examples:
If you have a template called [[Template:Cities in Washington]] and want to
change it to [[Template:Cities in W
|
ashington state]], start
python pwb.py templa
|
te "Cities in Washington" "Cities in Washington state"
Move the page [[Template:Cities in Washington]] manually afterwards.
If you have a template called [[Template:test]] and want to substitute it only
on pages in the User: and User talk: namespaces, do:
python pwb.py template test -subst -namespace:2 -namespace:3
Note that -namespace: is a global Pywikibot parameter
This next example substitutes the template lived with a supplied edit summary.
It only performs substitutions in main article namespace and doesn't prompt to
start replacing. Note that -putthrottle: is a global Pywikibot parameter.
python pwb.py template -putthrottle:30 -namespace:0 lived -subst -always \
-summary:"BOT: Substituting {{lived}}, see [[WP:SUBST]]."
This next example removes the templates {{cfr}}, {{cfru}}, and {{cfr-speedy}}
from five category pages as given:
python pwb.py template cfr cfru cfr-speedy -remove -always \
-page:"Category:Mountain monuments and memorials" \
-page:"Category:Indian family names" \
-page:"Category:Tennis tournaments in Belgium" \
-page:"Category:Tennis tournaments in Germany" \
-page:"Category:Episcopal cathedrals in the United States" \
-summary:"Removing Cfd templates from category pages that survived."
This next example substitutes templates test1, test2, and space test on all
pages:
python pwb.py template test1 test2 "space test" -subst -always
"""
#
# (C) Daniel Herding, 2004
# (C) Rob W.W. Hooft, 2003-2005
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
from warnings import warn
import pywikibot
from pywikibot import i18n, pagegenerators, xmlreader, Bot
from pywikibot.exceptions import ArgumentDeprecationWarning
from scripts.replace import ReplaceRobot as ReplaceBot
class XmlDumpTemplatePageGenerator(object):
"""
Generator which yields Pages that transclude a template.
These pages will be retrieved from a local XML dump file
(cur table), and may not still transclude the template.
"""
def __init__(self, templates, xmlfilename):
"""
Constructor.
Arguments:
        * templates - A list of Page objects representing the searched
          templates
* xmlfilename - The dump's path, either absolute or relative
"""
self.templates = templates
self.xmlfilename = xmlfilename
def __iter__(self):
"""Yield page objects until the entire XML dump has been read."""
mysite = pywikibot.Site()
dump = xmlreader.XmlDump(self.xmlfilename)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
templatePatterns = []
for template in self.templates:
templatePattern = template.title(withNamespace=False)
if mysite.namespaces[10].case == 'first-letter':
templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
templatePattern[0].lower(),
templatePattern[1:])
templatePattern = re.sub(' ', '[_ ]', templatePattern)
templatePatterns.append(templatePattern)
templateRegex = re.compile(
r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
% '|'.join(templatePatterns))
for entry in dump.parse():
if templateRegex.search(entry.text):
page = pywikibot.Page(mysite, entry.title)
yield page
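# Illustrative sketch (made-up sample strings): for a template named 'Vfd'
# the expression built above reduces to the pattern below, which accepts the
# usual wikitext spellings.
def _demo_template_regex():
    pattern = re.compile(
        r'\{\{ *([mM][sS][gG]:)?(?:[Vv]fd) *(?P<parameters>\|[^}]+|) *}}')
    assert pattern.search('some text {{vfd}} more text')
    assert pattern.search('{{msg:Vfd|reason=test}}')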
class TemplateRobot(ReplaceBot):
"""This bot will replace, remove or subst all occurrences of a template."""
def __init__(self, generator, templates, **kwargs):
"""
Constructor.
@param generator: the pages to work on
@type generator: iterable
@param templates: a dictionary which maps old template names to
their replacements. If remove or subst is True, it maps the
names of the templates that should be removed/resolved to None.
@type templates: dict
"""
self.availableOptions.update({
'subst': False,
'remove': False,
'summary': None,
'addedCat': None,
})
Bot.__init__(self, generator=generator, **kwargs)
self.templates = templates
# get edit summary message if it's empty
if not self.getOption('summary'):
comma = self.site.mediawiki_message('comma-separator')
params = {'list': comma.join(self.templates.keys()),
'num': len(self.templates)}
site = self.site
if self.getOption('remove'):
self.options['summary'] = i18n.twntranslate(
site, 'template-removing', params)
elif self.getOption('subst'):
self.options['summary'] = i18n.twntranslate(
site, 'template-substituting', params)
else:
self.options['summary'] = i18n.twntranslate(
site, 'template-changing', params)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
# The group 'parameters' will eithe
|
CanaimaGNULinux/canaimagnulinux.wizard
|
canaimagnulinux/wizard/browser/socialnetwork.py
|
Python
|
gpl-2.0
| 3,378
| 0.000296
|
# -*- coding: utf-8 -*-
from canaimagnulinux.wizard.interfaces import IChat
from canaimagnulinux.wizard.interfaces import ISocialNetwork
from canaimagnulinux.wizard.utils import CanaimaGnuLinuxWizardMF as _
from collective.beaker.interfaces import ISession
from collective.z3cform.wizard import wizard
from plone import api
from plone.z3cform.fieldsets import group
from z3c.form import field
try:
from zope.browserpage import viewpagetemplatefile
except ImportError:
# Plone < 4.1
from zope.app.pagetemplate import viewpagetemplatefile
import logging
logger = logging.getLogger(__name__)
class ChatGroup(group.Group):
prefix = 'chats'
label = _(u'Chats Information')
fields = field.Fields(IChat)
class SocialNetworkGroup(group.Group):
prefix = 'socialnetwork'
label = _(u'Social Network Information')
fields = field.Fields(ISocialNetwork)
class SocialNetworkStep(wizard.GroupStep):
prefix = 'Social'
label = _(u'Social Network accounts')
description = _(u'Input your social networks details')
template = viewpagetemplatefile.ViewPageTemplateFile('templates/socialnetwork.pt')
fields = field.Fields()
groups = [ChatGroup, SocialNetworkGroup]
def __init__(self, context, request, wizard):
        # Use collective.beaker for session management
session = ISession(request, None)
self.sessionmanager = session
super(SocialNetworkStep, self).__init__(context, request, wizard)
def load(self, context):
member = api.user.get_current()
data = self.getContent()
# Chats group
if not data.get('irc', None):
irc = member.getProperty('irc')
if type(irc).__name__ == 'object':
irc = None
data['irc'] = irc
if not data.get('telegram', None):
telegram = member.getProperty('telegram')
if type(telegram).__name__ == 'object':
telegram = None
data['telegram'] = telegram
if not data.get('skype', None):
skype = member.getProperty('skype')
if type(skype).__name__ == 'object':
skype = None
data['skype'] = skype
# Social Network group
|
if not data.get('twitter', None):
twitter = member.getProperty('twitter')
if type(twitter).__name__ == 'object':
twitter = None
data['twitter'] = twitter
if not data.get('instagram', None):
instagram = member.getProperty('instagram')
if type(instagram).__name__ == 'object':
instagram = None
data['instagram'] = instagram
if not data.get('facebook', None):
|
facebook = member.getProperty('facebook')
if type(facebook).__name__ == 'object':
facebook = None
data['facebook'] = facebook
def apply(self, context, initial_finish=False):
data = self.getContent()
return data
def applyChanges(self, data):
member = api.user.get_current()
member.setMemberProperties(mapping={
'irc': data['irc'],
'telegram': data['telegram'],
'skype': data['skype'],
'twitter': data['twitter'],
'instagram': data['instagram'],
'facebook': data['facebook']}
)
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/macpath.py
|
Python
|
gpl-3.0
| 6,978
| 0.005589
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"realpath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
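# Illustrative sketch (made-up volume and file names): classic Mac paths use
# ':' as separator and a leading ':' marks a relative path.
def _demo_mac_paths():
    assert isabs('Macintosh HD:Documents:note.txt')
    assert not isabs(':Documents:note.txt')
    assert split(':foo:bar:baz') == (':foo:bar', 'baz')
    p = 'Macintosh HD:Documents:note.txt'
    assert join(*split(p)) == p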
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
    Always false on the Mac, until we understand Aliases."""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
|
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix
|
)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
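# Illustrative sketch (made-up paths): a path without any colon becomes
# relative, and '::' steps up one directory level.
def _demo_normpath():
    assert normpath('Documents') == ':Documents'
    assert normpath('Volume:dir::file') == 'Volume:file'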
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
|
asedunov/intellij-community
|
python/testData/resolve/ReferenceInDocstring.py
|
Python
|
apache-2.0
| 114
| 0.017544
|
from datetime import datetime
def foo(p):
"""Foo
:param datetime p: a datetime
|
<ref>
""
|
"
|
chilleo/ALPHA
|
raxmlOutputWindows/matplotlibCustomBackend/customFormlayout.py
|
Python
|
mit
| 20,667
| 0.000919
|
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# History:
# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
# 1.0.7: added support for "Apply" button
# 1.0.6: code cleaning
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__version__ = '1.0.10'
__license__ = __doc__
DEBUG = False
import copy
import datetime
import warnings
import six
from matplotlib import colors as mcolors
from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore
BLACKLIST = set(["title", "label"])
class ColorButton(QtWidgets.QPushButton):
"""
Color choosing push button
"""
colorChanged = QtCore.Signal(QtGui.QColor)
def __init__(self, parent=None):
QtWidgets.QPushButton.__init__(self, parent)
self.setFixedSize(20, 20)
self.setIconSize(QtCore.QSize(12, 12))
self.clicked.connect(self.choose_color)
self._color = QtGui.QColor()
def choose_color(self):
color = QtWidgets.QColorDialog.getColor(
self._color, self.parentWidget(), "",
QtWidgets.QColorDialog.ShowAlphaChannel)
if color.isValid():
self.set_color(color)
def get_color(self):
return self._color
@QtCore.Slot(QtGui.QColor)
def set_color(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit(self._color)
pixmap = QtGui.QPixmap(self.iconSize())
pixmap.fill(color)
self.setIcon(QtGui.QIcon(pixmap))
color = QtCore.Property(QtGui.QColor, get_color, set_color)
def to_qcolor(color):
"""Create a QColor from a matplotlib color"""
qcolor = QtGui.QColor()
try:
rgba = mcolors.to_rgba(color)
except ValueError:
warnings.warn('Ignoring invalid color %r' % color)
return qcolor # return invalid QColor
qcolor.setRgbF(*rgba)
return qcolor
class ColorLayout(QtWidgets.QHBoxLayout):
"""Color-specialized QLineEdit layout"""
def __init__(self, color, parent=None):
QtWidgets.QHBoxLayout.__init__(self)
assert isinstance(color, QtGui.QColor)
self.lineedit = QtWidgets.QLineEdit(
mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)
self.lineedit.editingFinished.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
self.colorbtn.color = color
self.colorbtn.colorChanged.connect(self.update_text)
self.addWidget(self.colorbtn)
def update_color(self):
color = self.text()
qcolor = to_qcolor(color)
self.colorbtn.color = qcolor # defaults to black if not qcolo
|
r.isValid()
def update_text(self, color):
self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))
def text(self):
return self.lineedit.text()
def font_is_installed(font):
"""Check if font is installed"""
return [fam for fam in QtGui.QFontDatabase().families()
if six.tex
|
t_type(fam) == font]
def tuple_to_qfont(tup):
"""
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
"""
if not (isinstance(tup, tuple) and len(tup) == 4
and font_is_installed(tup[0])
and isinstance(tup[1], int)
and isinstance(tup[2], bool)
and isinstance(tup[3], bool)):
return None
font = QtGui.QFont()
family, size, italic, bold = tup
font.setFamily(family)
font.setPointSize(size)
font.setItalic(italic)
font.setBold(bold)
return font
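# Illustrative note: fonts travel through formlayout as 4-tuples such as
# ('Arial', 10, False, True), i.e. (family, point size, italic, bold);
# tuple_to_qfont() returns None when the value is malformed or the family is
# not installed, which is how callers detect that a value is not a font.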
def qfont_to_tuple(font):
return (six.text_type(font.family()), int(font.pointSize()),
font.italic(), font.bold())
class FontLayout(QtWidgets.QGridLayout):
"""Font selection"""
def __init__(self, value, parent=None):
QtWidgets.QGridLayout.__init__(self)
font = tuple_to_qfont(value)
assert font is not None
# Font family
self.family = QtWidgets.QFontComboBox(parent)
self.family.setCurrentFont(font)
self.addWidget(self.family, 0, 0, 1, -1)
# Font size
self.size = QtWidgets.QComboBox(parent)
self.size.setEditable(True)
sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
size = font.pointSize()
if size not in sizelist:
sizelist.append(size)
sizelist.sort()
self.size.addItems([str(s) for s in sizelist])
self.size.setCurrentIndex(sizelist.index(size))
self.addWidget(self.size, 1, 0)
# Italic or not
self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent)
self.italic.setChecked(font.italic())
self.addWidget(self.italic, 1, 1)
# Bold or not
self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent)
self.bold.setChecked(font.bold())
self.addWidget(self.bold, 1, 2)
def get_font(self):
font = self.family.currentFont()
font.setItalic(self.italic.isChecked())
font.setBold(self.bold.isChecked())
font.setPointSize(int(self.size.currentText()))
return qfont_to_tuple(font)
def is_edit_valid(edit):
text = edit.text()
state = edit.validator().validate(text, 0)[0]
return state == QtGui.QDoubleValidator.Acceptable
class FormWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, data, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.data = copy.deepcopy(data)
self.widgets = []
self.formlayout = QtWidgets.QFormLayout(self)
if comment:
self.formlayout.addRow(QtWidgets.QLabel(comment))
self.formlayout.addRow(QtWidgets.QLabel(" "))
if DEBUG:
print("\n"+("*"*80))
print("DATA:", self.data)
print("*"*80)
print("COMMENT:", comment)
print("*"*80)
def get_dialog(self):
"""Return FormDialog instance"""
dialog = self.parent()
while not isinstance(dialog, QtWidgets.QDialog):
dialog = dialog.parent()
return dialog
def setup(self):
# self.formlayout.setFieldGrowthPolicy(1)
for label, value in self.data:
if DEBUG:
print("value:", value)
if label is None and value is None:
# Separator: (None, None)
self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" "))
self.widgets.append(None)
continue
elif label is None:
# Comment
self.formlayout.addRow(QtWidgets.QLabel(value))
self.widgets.append(None)
continue
elif tuple_to_qfont(value) is not None:
field = FontLayout(value, self)
|
andoniaf/DefGrafana.py
|
graf2png.py
|
Python
|
gpl-3.0
| 1,737
| 0.000576
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image
def graf2png(weburl, username, password, timeout, imgname, hwin, wwin, onlypanel):
driver = webdriver.PhantomJS()
driver.set_window_size(hwin, wwin)
driver.get(weburl)
    # Enter the username
in_user = driver.find_element_by_name('username')
in_user.clear()
in_user.send_keys(username)
    # Enter the password
in_pass = driver.find_element_by_id('inputPassword')
in_pass.clear()
in_pass.send_keys(password)
in_pass.send_keys(Keys.ENTER)
    # Wait for the query to load
    time.sleep(timeout)
    # Timestamp to avoid overwriting screenshots
currtime = time.strftime("%y%m%d%H%M%S", time.localtime())
imgname = imgname + currtime + '.png'
    # Take the screenshot
driver.save_screenshot(imgname)
print("Screen guardada como: " + imgname)
    # Crop the panel(?)
    # Only works with panels whose class
|
 is 'panel-fullscreen',
    # this is the default class that panels have when you
    # generate a link to share them. (Share Panel > Link > Copy)
if (onlypanel):
panel = driver.find_element_by_cl
|
ass_name('panel-fullscreen')
plocation = panel.location
psize = panel.size
left = plocation['x']
top = plocation['y']
right = plocation['x'] + psize['width']
bottom = plocation['y'] + psize['height']
pimg = Image.open(imgname)
pimg = pimg.crop((left, top, right, bottom))
pimgname = 'panel_' + imgname
pimg.save(pimgname)
print("Panel recortado guardado como: " + pimgname)
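# Illustrative usage sketch: the dashboard URL, credentials, window size and
# output prefix below are placeholders.
if __name__ == '__main__':
    graf2png('https://grafana.example.org/dashboard/db/demo?panelId=1&fullscreen',
             'admin', 'secret', 15, 'dashboard_', 1920, 1080, True)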
|
jnsebgosselin/WHAT
|
gwhat/brf_mod/kgs_brf.py
|
Python
|
gpl-3.0
| 4,831
| 0.000414
|
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# ---- Standard library imports
import os
import csv
import sys
# ---- Third party imports
import numpy as np
from xlrd import xldate_as_tuple
# ---- Local imports
from gwhat.brf_mod import __install_dir__
def produce_BRFInputtxt(well, time, wl, bp, et):
comment = 'No comment men'
wlu = 'feet'
bpu = 'feet'
etu = 'NONE'
sampleinterval = time[1]-time[0]
timeunits = 'days'
N = len(time)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[0], 0)
dst = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
yr, mth, day, hr, mn, sec = xldate_as_tuple(time[-1], 0)
det = '%02d/%02d/%d, %02d:%02d:%02d' % (yr, mth, day, hr, mn, sec)
fcontent = []
fcontent.append(['Comment: %s' % comment])
fcontent.append(['Well: %s' % well])
fcontent.append(['WL Units: %s' % wlu])
fcontent.append(['BP Units: %s' % bpu])
fcontent.append(['ET Units: %s' % etu])
fcontent.append(['Sample Interval: %f' % sampleinterval])
fcontent.append(['Time Units: %s' % timeunits])
fcontent.append(['Data Start Time: %s' % dst])
fcontent.append(['Data End Time: %s' % det])
fcontent.append(['Number of Data: %d' % N])
fcontent.append(['Time WL BP ET'])
# Add the data to the file content.
wl = (100 - wl) * 3.28084
bp = bp * 3.28084
t = time - time[0]
fcontent.extend([[time[i], wl[i], bp[i], et[i]] for i in range(N)])
filename = os.path.join(__install_dir__, 'BRFInput.txt')
with open(filename, 'w', encoding='utf8') as f:
        writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(fcontent)
def produce_par_file(lagBP, lagET, detrend_waterlevels=True,
correct_waterlevels=True):
"""
    Create the parameter file required by the KGS_BRF program.
"""
brfinput = os.path.join(__install_dir__, 'BRFInput.txt')
brfoutput = os.path.join(__install_dir__, 'BRFOutput.txt')
wlcinput = os.path.join(__install_dir__, 'WLCInput.txt')
wlcoutput = os.path.join(__install_dir__, 'WLCOutput.txt')
detrend = 'Yes' if detrend_waterlevels else 'No'
correct = 'Yes' if correct_waterlevels else 'No'
par = []
par.append(['BRF Option (C[ompute] or R[ead]): Compute'])
par.append(['BRF Input Data File: %s' % brfinput])
par.append(['Number of BP Lags: %d' % lagBP])
par.append(['Number of BP ET: %d' % lagET])
par.append(['BRF Output Data File: %s' % brfoutput])
par.append(['Detrend data? (Y[es] or N[o]): %s' % detrend])
par.append(['Correct WL? (Y[es] or N[o]): %s' % correct])
par.append(['WLC Input Data File: %s' % wlcinput])
par.append(['WLC Output Data File: %s' % wlcoutput])
filename = os.path.join(__install_dir__, 'kgs_brf.par')
with open(filename, 'w', encoding='utf8') as f:
writer = csv.writer(f, delimiter='\t', lineterminator='\n')
writer.writerows(par)
def run_kgsbrf():
exename = os.path.join(__install_dir__, 'kgs_brf.exe')
parname = os.path.join(__install_dir__, 'kgs_brf.par')
if os.path.exists(exename) and os.path.exists(parname):
if os.name == 'nt':
os.system('""%s" < "%s""' % (exename, parname))
def read_brf_output():
"""
Read the barometric response function from the output file produced
by kgs_brf.exe.
"""
filename = os.path.join(__install_dir__, 'BRFOutput.txt')
with open(filename, 'r') as f:
reader = list(csv.reader(f))
header = []
for row in reader:
header.append(row)
if 'LagNo Lag A sdA SumA sdSumA B sdB SumB sdSumB' in row[0]:
break
# well = header[2][0].split()[-1]
# date0 = header[8][0].split()[-1]
# date1 = header[9][0].split()[-1]
data = reader[len(header):]
dataf = []
count = 1
for row in data:
if count == 1:
dataf.append([float(i) for i in row[0].split()])
|
count += 1
elif count in [2, 3]:
dataf[-1].extend([float(i) for i in row[0].split()])
count += 1
elif count == 4:
|
dataf[-1].extend([float(i) for i in row[0].split()])
count = 1
# Remove non valid data.
dataf = [row for row in dataf if row[4] > -999]
# Format data into numpy arrays
dataf = np.array(dataf)
lag = dataf[:, 1]
A = dataf[:, 4]
err = dataf[:, 5]
return lag, A, err
if __name__ == "__main__":
# plt.close('all')
# produce_par_file()
run_kgsbrf()
    lag, A, err = read_brf_output()
# plt.show()
|
powervm/pypowervm
|
pypowervm/tasks/hdisk/_fc.py
|
Python
|
apache-2.0
| 19,741
| 0
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tasks around VIOS-backed 'physical' fibre channel disks."""
import itertools
from lxml import etree
from oslo_log import log as logging
from pypowervm import const as c
import pypowervm.entities as ent
import pypowervm.exceptions as pexc
from pypowervm.i18n import _
import pypowervm.tasks.storage as tsk_stg
import pypowervm.utils.transaction as tx
from pypowervm.wrappers import job as pvm_job
from pypowervm.wrappers import virtual_io_server as pvm_vios
LOG = logging.getLogger(__name__)
_LUA_CMD_VERSION = '3'
_LUA_VERSION = '2.0'
_LUA_RECOVERY = 'LUARecovery'
_RM_HDISK = 'RemoveDevice'
_MGT_CONSOLE = 'ManagementConsole'
class LUAType(object):
"""LUA Vendors."""
IBM = "IBM"
EMC = "EMC"
NETAPP = "NETAPP"
HDS = "HDS"
HP = "HP"
OTHER = "OTHER"
class LUAStatus(object):
"""LUA Recovery status codes."""
DEVICE_IN_USE = '1'
ITL_NOT_RELIABLE = '2'
DEVICE_AVAILABLE = '3'
STORAGE_NOT_INTEREST = '4'
LUA_NOT_INTEREST = '5'
INCORRECT_ITL = '6'
FOUND_DEVICE_UNKNOWN_UDID = '7'
FOUND_ITL_ERR = '8'
def normalize_lun(scsi_id):
"""Normalize the lun id to Big Endian
:param scsi_id: Volume lun id
:return: Converted LUN id in Big Endian as per the RFC 4455
"""
# PowerVM keeps LUN identifiers in hex format.
lun = '%x' % int(scsi_id)
    # For drivers that support complex LUA lun-ids exceeding 2 bytes we
    # append 8 zeros; otherwise we append 12 zeros, so that an 8-byte
    # lun-id is passed.
if len(lun) == 8:
lun += "00000000"
else:
lun += "000000000000"
return lun
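# Illustrative sketch of the padding rule above: a short hex id gets 12
# trailing zeros, an 8-character hex id gets 8.
def _demo_normalize_lun():
    assert normalize_lun(2) == '2' + '0' * 12
    assert normalize_lun(0x1a2b3c4d) == '1a2b3c4d' + '0' * 8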
class ITL(object):
"""The Nexus ITL.
See SCSI ITL. This is the grouping of the SCSI initiator, target and
LUN.
"""
def __init__(self, initiator, target, lun):
"""Create the ITL.
:param initiator: The initiator WWPN.
:param target: The target WWPN.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will
be formatted from a generic integer LUN ID to match
PowerVM's LUN Identifier format.
"""
self.initiator = initiator.lower().replace(':', '')
self.target = target.lower().replace(':', '')
self.lun = normalize_lun(lun)
def __eq__(self, other):
if other is None or not isinstance(other, ITL):
return False
return (self.initiator == other.initiator and
self.target == other.target and
self.lun == other.lun)
def __hash__(self):
return hash(self.initiator) ^ hash(self.target) ^ hash(self.lun)
def __ne__(self, other):
return not self.__eq__(other)
def good_discovery(status, device_name):
"""Checks the hdisk discovery results for a good discovery.
Acceptable LUA discovery statuses are :-
DEVICE_AVAILABLE: hdisk discovered on all the ITL paths and available.
DEVICE_IN_USE: hdisk discovered on all the ITL paths and is in-use by
the server.
FOUND_ITL_ERR: hdisk is discovered on some of the ITL paths and available.
    This can happen when multiple ITL nexus paths are passed and the hdisk is
    discovered on only some of them, for example if multiple target wwpns and
    vios wwpns exist but only a few are connected. If the hdisk can be
    discovered on ANY of the paths it is considered a good discovery.
"""
return device_name is not None and status in [
LUAStatus.DEVICE_AVAILABLE, LUAStatus.DEVICE_IN_USE,
LUAStatus.FOUND_ITL_ERR]
def build_itls(i_wwpns, t_wwpns, lun):
"""This method builds the list of ITLs for all of the permutations.
An ITL is specific to an initiator, target, and LUN. However, with multi
pathing, there are several scenarios where a given LUN will have many ITLs
because of multiple initiators or targets.
The initiators should be tied to a given Virtual I/O Server (or perhaps
specific WWPNs within a VIOS).
:param i_wwpns: List or set of initiator WWPNs.
:param t_wwpns: List or set of target WWPNs.
:param lun: The LUN identifier. Ex. 2 (an int). The identifier will be
formatted from a generic integer LUN ID to match PowerVM's
LUN Identifier format.
:return: List of all the ITL permutations.
"""
return [ITL(i, t, lun) for i, t in itertools.product(i_wwpns, t_wwpns)]
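# Illustrative sketch (made-up WWPNs): two initiators and two targets yield
# the four ITL permutations for the same LUN.
def _demo_build_itls():
    itls = build_itls(['c0507606d56e0016', 'c0507606d56e0018'],
                      ['500507680245cac0', '500507680245cac1'], 2)
    assert len(itls) == 4
    assert all(itl.lun == normalize_lun(2) for itl in itls)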
def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Attempt to discover a hard disk attached to a Virtual I/O Server.
See lua_recovery. This method attempts that call and analyzes the
results. On certain failure conditions (see below), this method will find
stale LPARs, scrub storage artifacts associated with them, and then retry
lua_recovery. The retry is only attempted once; that result is returned
regardless.
The main objective of this method is to resolve errors resulting from
incomplete cleanup of previou
|
s LPARs. The stale LPAR's storage mappings
can cause hdisk discovery to fail because it thinks the hd
|
isk is already in
use.
Retry conditions: The scrub-and-retry will be triggered if:
o dev_name is None; or
o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR. (The
latter is acceptable because it means we discovered some, but not all, of
the ITLs. This is okay as long as dev_name is set.)
:param adapter: The pypowervm adapter.
:param vios_uuid: The Virtual I/O Server UUID.
:param itls: A list of ITL objects.
:param vendor: The vendor for the LUN. See the LUAType.* constants.
:param device_id: The device ID parameter in the LUA input XML.
Typically the base 64 encoded pg83 value.
:return status: The status code from the discover process.
See LUAStatus.* constants.
:return dev_name: The name of the discovered hdisk.
:return udid: The UDID of the device.
"""
# First attempt
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor, device_id=device_id)
# Do we need to scrub and retry?
if not good_discovery(status, devname):
vwrap = pvm_vios.VIOS.get(adapter, uuid=vios_uuid,
xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))
scrub_ids = tsk_stg.find_stale_lpars(vwrap)
if scrub_ids:
# Detailed warning message by _log_lua_status
LOG.warning(_("hdisk discovery failed; will scrub stale storage "
"for LPAR IDs %s and retry."), scrub_ids)
# Scrub from just the VIOS in question.
scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
scrub_task.execute()
status, devname, udid = lua_recovery(adapter, vios_uuid, itls,
vendor=vendor,
device_id=device_id)
return status, devname, udid
def lua_recovery(adapter, vios_uuid, itls, vendor=LUAType.OTHER,
device_id=None):
"""Logical Unit Address Recovery - discovery of a FC-attached hdisk.
When a new disk is created externally (say on a block device), the Virtual
I/O Server may or may not discover it immediately. This method forces a
discovery on a given Virtual I/O Server.
:param adapter: The pypowervm adapter
|
maelnor/cinder
|
cinder/openstack/common/rpc/amqp.py
|
Python
|
apache-2.0
| 25,306
| 0.00004
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
from cinder.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue',
default=False,
help='Enable a fast single reply queue if using AMQP based '
'RPC like RabbitMQ or Qpid.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new
Connection, or one from a pool. The function will also catch
when an instance of this class is to be deleted. With that
we can return Connections to the pool on exceptions and so
forth without making the caller be responsible for catching
them. If possible the function makes sure to return a
connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool"""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
i
|
f pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
|
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self"""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
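# Illustrative usage sketch (placeholders: conf, MyConnection, topic):
# ConnectionContext is meant to be used as a context manager so a pooled
# connection goes back to the pool even on error.
#
#     pool = get_connection_pool(conf, MyConnection)
#     with ConnectionContext(conf, pool) as conn:
#         conn.create_consumer('volume', proxy, fanout=False)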
class ReplyProxy(ConnectionContext):
""" Connection class for RPC replies / callbacks """
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('no calling threads waiting for msg_id : %s'
', message : %s') % (msg_id, message_data))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshhold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_
|
chetan51/nupic
|
tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py
|
Python
|
gpl-3.0
| 14,616
| 0.003626
|
# ----------------------------------------------------------------------
# Copyright (C) 2012 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SE
|
NSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnos
|
tic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
#
|
ThibaultGigant/Crosswords
|
run.py
|
Python
|
gpl-3.0
| 101
| 0
|
# -*- coding: utf-8 -*-
from ihm.main_window import launch
i
|
f __name__ == '__main__':
|
launch()
|
ewandor/home-assistant
|
homeassistant/components/cover/abode.py
|
Python
|
apache-2.0
| 1,326
| 0
|
"""
This component provides HA cover support for Abode Security System.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.abode/
"""
import logging
from homeassistant.components.abode import AbodeDevice, DOMAIN as ABODE_DOMAIN
from homeassistant.components.cover import CoverDevice
DEPENDENCIES = ['abode']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Abode cover devices."""
import abodepy.helpers.constants as CONST
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
if data.is_excluded(device):
continue
devices.append(AbodeCover(data, device))
data.devices.extend(devices)
add_devices(devices)
class AbodeCover(Abo
|
deDevice, CoverDevice):
"""Representation of an Abode cover."""
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs):
"""Issue cl
|
ose command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self._device.open_cover()
|
gravitee-io/jenkins-scripts
|
src/main/python/package_bundles.py
|
Python
|
apache-2.0
| 19,869
| 0.003372
|
import os
import re
import shutil
import zipfile
import requests
import json
from shutil import copy2
from urllib.request import urlretrieve, urlopen
# Input parameters
version_param = os.environ.get('RELEASE_VERSION')
is_latest_param = True if version_param == "master" else False
# build constants
m2repo_path = '/m2repo'
tmp_path = './tmp/%s' % version_param
policies_path = "%s/policies" % tmp_path
resources_path = "%s/resources" % tmp_path
fetchers_path = "%s/fetchers" % tmp_path
services_path = "%s/services" % tmp_path
reporters_path = "%s/reporters" % tmp_path
repositories_path = "%s/repositories" % tmp_path
connectors_path = "%s/connectors" % tmp_path
snapshotPattern = re.compile('.*-SNAPSHOT')
def clean():
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
os.makedirs(tmp_path, exist_ok=True)
os.makedirs(policies_path, exist_ok=True)
os.makedirs(fetchers_path, exist_ok=True)
os.makedirs(resources_path, exist_ok=True)
os.makedirs(services_path, exist_ok=True)
os.makedirs(reporters_path, exist_ok=True)
os.makedirs(repositories_path, exist_ok=True)
os.makedirs(connectors_path, exist_ok=True)
def get_policies(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-policy-.*')
policies = []
for component in components:
if search_pattern.match(component['name']) and 'gravitee-policy-api' != component['name']:
policies.append(component)
if "gravitee-policy-ratelimit" == component['name']:
policies.append({"name": "gravitee-policy-quota", "version": component['version']})
if int(component['version'].replace(".", "").replace("-SNAPSHOT", "")) >= 1100:
policies.append({"name": "gravitee-policy-spikearrest", "version": component['version']})
return policies
def get_resources(release_json):
components_name = [
"gravitee-resource-cache",
"gravitee-resource-oauth2-provider-generic",
"gravitee-resource-oauth2-provider-am"
]
resources = []
for component_name in components_name:
resources.append(get_component_by_name(release_json, component_name))
return resources
def get_fetchers(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-fetcher-.*')
fetchers = []
for component in components:
if search_pattern.match(component['name']) and 'gravitee-fetcher-api' != component['name']:
fetchers.append(component)
return fetchers
def get_reporters(release_json):
components_name = [
"gravitee-reporter-file",
"gravitee-reporter-tcp",
"gravitee-elasticsearch"
]
reporters = []
for component_name in components_name:
reporters.append(get_component_by_name(release_json, component_name))
return reporters
def get_repositories(release_json):
components_name = [
"gravitee-repository-mongodb",
"gravitee-repository-jdbc",
"gravitee-elasticsearch",
"gravitee-repository-gateway-bridge-http"
]
repositories = []
for component_name in components_name:
repositories.append(get_component_by_name(release_json, component_name))
return repositories
def get_services(release_json):
components_name = [
"gravitee-service-discovery-consul"
]
components = release_json['components']
search_pattern = re.compile('gravitee-policy-ratelimit')
services = []
for component in components:
if search_pattern.match(component['name']):
service = component.copy()
service['name'] = 'gravitee-gateway-services-ratelimit'
services.append(service)
break
for component_name in components_name:
services.append(get_component_by_name(release_json, component_name))
return services
def get_connectors(release_json):
components = release_json['components']
search_pattern = re.compile('gravitee-.*-connectors-ws')
connectors = []
for component in components:
if search_pattern.match(component['name']):
connectors.append(component)
return connectors
def get_component_by_name(release_json, component_name):
components = release_json['components']
search_pattern = re.compile(component_name)
for component in components:
if search_pattern.match(component['name']):
return component
def get_download_url(group_id, artifact_id, version, t):
m2path = "%s/%s/%s/%s/%s-%s.%s" % (m2repo_path, group_id.replace(".", "/"), artifact_id, version, artifact_id, version, t)
if os.path.exists(m2path):
return m2path
else:
sonatypeUrl = "https://oss.sonatype.org/service/local/artifact/maven/redirect?r=%s&g=%s&a=%s&v=%s&e=%s" % (
("snapshots" if snapshotPattern.match(version) else "releases"), group_id.replace(".", "/"), artifact_id, version, t)
f = urlopen(sonatypeUrl)
return f.geturl()
def get_suffix_path_by_name(name):
if name.find("policy") == -1:
suffix = name[name.find('-') + 1:name.find('-', name.find('-') + 1)]
if suffix == "gateway":
return "services"
if suffix == "repository":
return "repositories"
if suffix == "cockpit":
return "connectors"
return suffix + "s"
else:
return "policies"
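# Illustrative sketch of the name-to-directory mapping above, on a few
# (partly made-up) component names.
def _demo_suffix_paths():
    assert get_suffix_path_by_name('gravitee-policy-cache') == 'policies'
    assert get_suffix_path_by_name('gravitee-fetcher-git') == 'fetchers'
    assert get_suffix_path_by_name('gravitee-repository-mongodb') == 'repositories'
    assert get_suffix_path_by_name('gravitee-gateway-services-ratelimit') == 'services'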
def download(name, filename_path, url):
    print('\nDownloading %s\n%s' % (name, url))
if url.startswith("http"):
filename_path = tmp_path + "/" + get_suffix_path_by_name(name) + url[url.rfind('/'):]
urlretrieve(url, filename_path)
else:
copy2(url, filename_path)
    print('\nDownloaded in %s' % filename_path)
return filename_path
def unzip(files):
unzip_dirs = []
dist_dir = get_dist_dir_name()
for file in files:
with zipfile.ZipFile(file) as zip_file:
zip_file.extractall("%s/%s" % (tmp_path, dist_dir))
unzip_dir = "%s/%s/%s" % (tmp_path, dist_dir, sorted(zip_file.namelist())[0])
unzip_dirs.append(unzip_dir)
preserve_permissions(unzip_dir)
return sorted(unzip_dirs)
def preserve_permissions(d):
search_bin_pattern = re.compile(".*/bin$")
    search_gravitee_pattern = re.compile(r"gravitee(\.bat)?")
perm = 0o0755
for dirname, subdirs, files in os.walk(d):
if search_bin_pattern.match(dirname):
for file in files:
if search_gravitee_pattern.match(file):
file_path = "%s/%s" % (dirname, file)
print(" set permission %o to %s" % (perm, file_path))
os.chmod(file_path, perm)
def copy_files_into(src_dir, dest_dir, exclude_pattern=None):
if exclude_pattern is None:
exclude_pattern = []
filenames = [os.path.join(src_dir, fn) for fn in next(os.walk(src_dir))[2]]
print(" copy")
print(" %s" % filenames)
print(" into")
print(" %s" % dest_dir)
for file in filenames:
to_exclude = False
for pattern in exclude_pattern:
search_pattern = re.compile(pattern)
if search_pattern.match(file):
to_exclude = True
break
if to_exclude:
print("[INFO] %s is excluded from files." % file)
continue
copy2(file, dest_dir)
def download_policies(policies):
paths = []
for policy in policies:
if policy
|
['name'] != "gravitee-policy-core":
url = get_download_url("io.gravitee.policy", policy['name'], policy['version'], "zip")
paths.append(
download(policy['name'], '%s/%s-%s.zip' % (policies_path, policy['name'], policy['version']), url))
return paths
def download_management_api(mgmt_api, default_version):
v = default_version if 'version' not in mgmt_api else mgmt_api['version']
url = get_download_url("io.gravitee.management.standalone", "gravitee-managem
|
ent-api-standalone-distribution-zip",
v, "zip")
return download(mgmt_
|
botswana-harvard/edc-rdb
|
bcpp_rdb/dataframes/ccc.py
|
Python
|
gpl-2.0
| 1,163
| 0.00086
|
import pandas as pd
from sqlalchemy import create_engine
from bcpp_rdb.private_settings import Rdb
class CCC(object):
"""CDC data for close clinical cohort."""
def __init__(self):
self.engine = create_engine('postgresql://{user}:{password}@{host}/{db}'.format(
user=Rdb.user, password=Rdb.password, host=Rdb.host, db=Rdb.name),
connect_args={})
with self.engine.connect() as conn, conn.begin():
            self.df_enrolled = pd.read_sql_query(self.sql_enrolled(), conn)
def sql_enrolled(self):
"""
        * If the patient is from the BCPP survey, oc_study_id is a BHP identifier.
        * ssid is the CDC-allocated identifier of format NNN-NNNN.
"""
|
return """select ssid as cdcid, oc_study_id as subject_identifier,
appt_date from dw.oc_crf_ccc_enrollment"""
def sql_refused(self):
"""
        * If the patient is from the BCPP survey, oc_study_id is a BHP identifier.
        * ssid is the CDC-allocated identifier of format NNN-NNNN.
"""
return """select ssid as cdcid, oc_study_id as subject_identifier,
|
appt_date from dw.oc_crf_ccc_enrollment"""
|
evasilchenko/castle
|
core/info.py
|
Python
|
mit
| 536
| 0.001866
|
class Information(object):
|
def __init__(self, pygame):
self.pygame = pygame
self.display_fps = False
def _show_fps(self, clock, screen):
font = self.pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("fps: {0:.2f}".format(clock.get_fps()), True, (0, 0, 0))
screen.blit(text, [0, 0])
def show_fps(self, clock, screen):
if self.display_fps:
self._show_fps(clock, screen)
def toggle_fps(self):
self.display_fps = not self.disp
|
lay_fps
|
smorante/continuous-goal-directed-actions
|
guided-motor-primitives/src/roman14_primitive.py
|
Python
|
mit
| 5,788
| 0.023151
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import linalg
import csv
import codecs
import copy
def comparison(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
dist=0
for i in range(min(len(trajOne), len(trajTwo))):
dist = dist + np.linalg.norm(segmentOne[i,2:]-segmentTwo[i,2:])
return dist
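# Illustrative note: columns 2-4 of each segment are rebased so the first sample sits
# at the origin, then the Euclidean distance between corresponding samples (columns 2
# onwards) is accumulated over the shorter of the two segments; identical shapes give 0.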
def plotTraj(jointTrajectory):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=28)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
# ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory')
ax.plot(jointTrajectory[:,2], jointTrajectory[:,3], jointTrajectory[:,4], lw=2,color='red',label='Human-Guided Random Trajectory .')
ax.legend(prop={'size':30})
plt.show()
def plotDistances(trajOne, trajTwo):
segmentOne=np.array(trajOne)
segmentTwo=np.array(trajTwo)
for i in range(2,5):
segmentOne[:,i]= segmentOne[:,i] - segmentOne[0,i]
segmentTwo[:,i]= segmentTwo[:,i] - segmentTwo[0,i]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.tick_params(labelsize=30)
ax.set_xlabel("$\Theta_{1}$ [deg]", size=30)
ax.set_ylabel("$\Theta_{2}$ [deg]", size=30)
ax.set_zlabel("$\Theta_{3}$ [deg]", size=30)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
for i in range(len(segmentOne)):
if i==0:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue',label='Distances')
else:
ax.plot([segmentOne[i,2], segmentTwo[i,2]],[ segmentOne[i,3], segmentTwo[i,3]], [segmentOne[i,4], segmentTwo[i,4]], lw=2,color='blue')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=3,color='red',label='Segment 1')
ax.plot(segmentTwo[:,2], segmentTwo[:,3], segmentTwo[:,4], lw=3,color='green',label='Segment 2')
ax.legend(prop={'size':30})
plt.show()
def plotSingle(trajOne):
segmentOne=np.array(trajOne)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
ax.plot(segmentOne[:,2], segmentOne[:,3], segmentOne[:,4], lw=5,color='red',label='Segment 1')
plt.show()
def plot2DXiP():
x = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170]
y = [94, 86, 72, 58, 46, 41, 38, 31, 27, 22, 13, 10, 8, 6, 5, 3, 2, 1]
plt.plot(x, y, linewidth=4)
plt.ylim([0,94])
plt.xlim([0,170])
plt.xlabel(u"\u03BE", fontsize=30)
plt.ylabel('Number of Primitives', fontsize=30)
plt.tick_params(labelsize=25)
#plt.title('Relation between ' + u'\u03BE' + ' and primitives', fontsize=30)
plt.grid(True)
plt.show()
def s
|
aveToCSV(primitives, tau, xi):
fileName='db_var/db_
|
'+ 'tau' +str(tau) + '_xi'+ str(xi)
# to write in CSV
with open(fileName, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_MINIMAL)
for k in range(len(primitives)):
wr.writerow(primitives[k])
# to replace '"'
contents = codecs.open(fileName, encoding='utf-8').read()
s = contents.replace('"', '')
with open(fileName, "wb") as f:
f.write(s.encode("UTF-8"))
def main():
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42 # type 42 fonts (truetype) for IEEE papercept system
#325, 350
tau=0.5 # length in seconds
xi=50
jointTrajectory= np.loadtxt('../../main/app/record/recordedTeo/state/data.log')
x=list(jointTrajectory)
plotTraj(jointTrajectory)
numberOfSlices=0
realDataMatrix=[]
slicesList=[]
temp0=np.array(x).astype('float')
#slices by time
newTimeValue= np.ceil(temp0[-1][1] - temp0[0][1])
numberOfSlices = int(newTimeValue/tau)
X = np.array(x)
## OVERLOAD
for h in range(numberOfSlices):
initial=(X.shape[0]/numberOfSlices)*h
final=(X.shape[0]/numberOfSlices)*(h+1)
if X[initial:final].shape[0] == 0:
print 'NO POINTS IN THIS SET. PROBABLY NUMBEROFPOINTS < NUMBEROFSLICES'
else:
slicesList.append(X[initial:final].tolist())
plotDistances(slicesList[20],slicesList[50])
primitives=[]
primitives.append(slicesList[0])
for i in range(numberOfSlices):
for k in range(len(primitives)):
jay = comparison(slicesList[i],primitives[k])
if jay < xi:
#print 'Similar to primitive', k,'in database. Jay', jay
jay=-1
break
if jay !=-1:
#print 'Adding segment', i,'to database'
primitives.append(slicesList[i])
print 'xi:', xi
print 'tau:', tau
print 'number of primitives:', len(primitives)
# making a copy to be modified
relativePrims=copy.deepcopy(primitives)
# making them relative
for a in range(len(primitives)):
for b in range(len(primitives[a])):
for c in range(2,5):
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][b-1][c]
if b==0:
relativePrims[a][b][c]= primitives[a][b][c] - primitives[a][0][c]
#saveToCSV(relativePrims, tau, xi)
if __name__ == '__main__':
main()
|
omoju/Fundamentals
|
Data/twitterDataAnalysis/info_gain.py
|
Python
|
gpl-3.0
| 3,353
| 0.037578
|
import os
import sys
import numpy as np
import math
def findBinIndexFor(aFloatValue, binsList):
#print "findBinIndexFor: %s" % aFloatValue
returnIndex = -1
for i in range(len(binsList)):
thisBin = binsList[i]
if (aFloatValue >= thisBin[0]) and (aFloatValue < thisBin[1]):
returnIndex = i
break
return returnIndex
def compute_joint_prob(joint_list, vals1, vals2, bins1=None, bins2=None, asFreq=False):
returnDict = {}
for rec in joint_list:
val1 = rec[0]
val2 = rec[1]
#Find name by which first val should appear
dictName1 = val1
if bins1 is not None:
dictName1 = findBinIndexFor(val1, bins1)
#Find name by which second val should appear
dictName2 = val2
if bins2 is not None:
dictName2 = findBinIndexFor(val2, bins2)
#If first name is not present in dict,
#then initialize it
if dictName1 not in returnDict:
returnDict[dictName1] = {}
for val in vals2:
#Determine name under which
#y-values should appear (i.e. as bin names
#or as given names)
asDictName = val
if bins2 is not None:
asDictName = findBinIndexFor(val, bins2)
returnDict[dictName1][asDictName] = 0
returnDict[dictName1][dictName2]+=1
if not asFreq:
#Normalize values
for key in returnDict:
for secondKey in returnDict[key]:
returnDict[key][secondKey] = float(returnDict[key][secondKey]) / len(joint_list)
return returnDict
def getXForFixedY(joint_prob_dist, yVal):
returnList = []
for key in joint_prob_dist:
returnList.append( joint_prob_dist[key][yVal])
return returnList
def compute_h(floatsList):
returnFloat = None
acc = 0
for f in floatsList:
if f != 0:
acc = acc - f * math.log(f, 2)
returnFloat = acc
return returnFlo
|
at
# Computes the conditional entropy H(X|Y) from the
# joint distribution P(X,Y) and the marginal P(Y)
def conditional_entropy(joint_prob_dist, xVals, yVals):
returnFloat = None
h_acc = 0
margin
|
al_y_dist = getYMarginalDist(joint_prob_dist)
for x in xVals:
for y in yVals:
joint_xy = 0
marginal_y = 0
if not x in joint_prob_dist or y not in joint_prob_dist[x]:
joint_xy = 0
else:
joint_xy = joint_prob_dist[x][y]
if not y in marginal_y_dist:
marginal_y = 0
else:
marginal_y = marginal_y_dist[y]
if joint_xy!=0 and marginal_y!=0:
h_acc-=joint_xy*math.log(joint_xy/marginal_y, 2)
# for yVal in yVals:
# new_xDist = getXForFixedY(joint_prob_dist, yVal)
# h_yVal = compute_h(new_xDist)
# p_yVal = reduce(lambda x, y: x+y, new_xDist)
# h_acc+=p_yVal * h_yVal
returnFloat = h_acc
return returnFloat
def getYMarginalDist(joint_prob_dist):
returnDict = {}
for xKey in joint_prob_dist:
for yKey in joint_prob_dist[xKey]:
if not yKey in returnDict:
returnDict[yKey] = 0
returnDict[yKey]+=joint_prob_dist[xKey][yKey]
return returnDict
def getXMarginalDist(joint_prob_dist):
returnDict = {}
for key in joint_prob_dist:
yVals = joint_prob_dist[key]
marginalVal = reduce(lambda x,y: x+y, [yVals[e] for e in yVals])
returnDict[key] = marginalVal
return returnDict
def entropy_loss(joint_prob_dist, xVals, yVals):
returnFloat = None
priorsDict = getXMarginalDist(joint_prob_dist)
priors = priorsDict.values()
h_prior = compute_h(priors)
h_conditional = conditional_entropy(joint_prob_dist, xVals, yVals)
returnFloat = h_prior - h_conditional
return returnFloat
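# Illustrative sketch (hypothetical values): entropy_loss returns the information
# gain H(X) - H(X|Y). For a joint sample where X is fully determined by Y:
#   joint = compute_joint_prob([(0, 0), (0, 0), (1, 1), (1, 1)], [0, 1], [0, 1])
#   entropy_loss(joint, [0, 1], [0, 1])  # -> 1.0, since H(X) = 1 and H(X|Y) = 0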
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/models/model_34_price_history_autoencoder.py
|
Python
|
agpl-3.0
| 22,721
| 0.004181
|
from __future__ import division
import numpy as np
import tensorflow as tf
from cost_functions.huber_loss import huber_loss
from data_providers.data_provider_33_price_history_autoencoder import PriceHistoryAutoEncDataProvider
from interfaces.neural_net_model_interface import NeuralNetModelInterface
from mylibs.batch_norm import BatchNormer, batchNormWrapper, fully_connected_layer_with_batch_norm_and_l2, \
fully_connected_layer_with_batch_norm
from mylibs.jupyter_notebook_helper import DynStats, getRunTime
from tensorflow.contrib import rnn
from collections import OrderedDict
from mylibs.py_helper import merge_dicts
from mylibs.tf_helper import generate_weights_var, fully_connected_layer
from os import system
from fastdtw import fastdtw
from matplotlib import pyplot as plt
from plotter.price_hist import renderRandomMultipleTargetsVsPredictions
# DYNAMIC SEQUENCES - HEAVY MODEL
class PriceHistoryAutoencoder(NeuralNetModelInterface):
"""
NECESSARY FOR MULTIPLE SEQS:
- Make it with dynamic inputs
IDEAS FOR IMPROVEMENT:
0) introduce extra layers
1) Add the mobile attributes per instance
2) MAKE OUTPUT BE DEPENDED ON PREVIOUS OUTPUT
3) use EOS
4) Add dropout
*) Make also input be depende on previous input ??
"""
DATE_FEATURE_LEN = 6
INPUT_FEATURE_LEN = DATE_FEATURE_LEN + 1
TS_INPUT_IND = 0 # if feature len is multi
TARGET_FEATURE_LEN = 1
ADAM_DEFAULT_LEARNING_RATE = 1e-3
SEED = 16011984
DEFAULT_KEEP_PROB = 1.
DEFAULT_LAMDA2 = 0.
DEFAULT_ARR_LAMDA2 = [DEFAULT_LAMDA2] * 3
BATCH_NORM_ENABLED_BY_DEFAULT = True
DIM_REDUCTION = 2
class DECODER_FIRST_INPUT(object):
PREVIOUS_INPUT = "PREVIOUS_INPUT"
ZEROS = "ZEROS"
def __init__(self, rng, dtype, config):
super(PriceHistoryAutoencoder, self).__init__()
self.rng = rng
self.dtype = dtype
self.config = config
self.train_data = None
self.valid_data = None
self.init = None
self.error = None
self.inputs = None
self.predictions = None
self.train_step = None
self.is_training = None
self.decoder_extra_inputs = None
self.keep_prob_rnn_out = None
self.keep_prob_readout = None
self.twod = None
self.sequence_lens = None
self.sequence_len_mask = None
@staticmethod
def DEFAULT_ACTIVATION_RNN():
return tf.nn.tanh # tf.nn.elu
def run(self, npz_path, epochs, batch_size, enc_num_units, dec_num_units, ts_len,
hidden_enc_num_units,
hidden_enc_dim,
hidden_dec_dim,
hidden_dec_num_units,
learning_rate=ADAM_DEFAULT_LEARNING_RATE,
preds_gather_enabled=True,
):
graph = self.getGraph(batch_size=batch_size, verbose=False, enc_num_units=enc_num_units,
dec_num_units=dec_num_units, ts_len=ts_len,
learning_rate=learning_rate, hidden_enc_num_units=hidden_enc_num_units,
hidden_enc_dim=hidden_enc_dim,
hidden_dec_dim=hidden_dec_dim,
hidden_dec_num_units=hidden_dec_num_units)
# input_keep_prob=input_keep_prob, hidden_keep_prob=hidden_keep_prob,
train_data = PriceHistoryAutoEncDataProvider(npz_path=npz_path, batch_size=batch_size, rng=self.rng,
which_set='train')
# during cross validation we execute our experiment multiple times and we get a score at the end
# so this means that we need to retrain the model one final time in order to output the predictions
# from this training procedure
preds_dp = PriceHistoryAutoEncDataProvider(npz_path=npz_path, batch_size=batch_size, rng=self.rng,
shuffle_order=False,
which_set='test',
) if preds_gather_enabled else None
self.__print_hyperparams(learning_rate=learning_rate, epochs=epochs, enc_num_units=enc_num_units,
dec_num_units=dec_num_units)
return self.train_validate(train_data=train_data, valid_data=None, graph=graph, epochs=epochs,
preds_gather_enabled=preds_gather_enabled, preds_dp=preds_dp,
batch_size=batch_size)
def train_validate(self, train_data, valid_data, **kwargs):
graph = kwargs['graph']
epochs = kwargs['epochs']
batch_size = kwargs['batch_size']
verbose = kwargs['verbose'] if 'verbose' in kwargs.keys() else True
preds_dp = kwargs['preds_dp'] if 'preds_dp' in kwargs.keys() else None
preds_gather_enabled = kwargs['preds_gather_enabled'] if 'preds_gather_enabled' in kwargs.keys() else True
test_error = None
preds_dict = None
twod_dict = None
with tf.Session(graph=graph, config=self.config) as sess:
sess.run(self.init) # sess.run(tf.initialize_all_variables())
dynStats = DynStats(validation=valid_data is not None)
for epoch in range(epochs):
train_error, runTime = getRunTime(
lambda:
self.trainEpoch(
sess=sess,
data_provider=train_data,
extraFeedDict={
self.is_training: True,
}
)
)
if np.isnan(train_error):
raise Exception('do something with your learning rate because it is extremely high')
if valid_data is None:
if verbose:
# print 'EndEpoch%02d(%.3f se
|
cs):err(train)=%.4f,acc(train)=%.2f,err(valid)=%.2f,acc(valid)=%.2f, ' % \
# (epoch + 1, runTime, train_error, train_accuracy, valid_error, valid_accuracy)
print 'End Epoch %02d (%.3f secs): err(train) = %.6f' % (
|
epoch + 1, runTime, train_error)
dynStats.gatherStats(train_error=train_error)
else:
# if (epoch + 1) % 1 == 0:
valid_error = self.validateEpoch(
sess=sess,
data_provider=valid_data,
extraFeedDict={self.is_training: False},
)
if np.isnan(valid_error):
raise Exception('do something with your learning rate because it is extremely high')
if verbose:
print 'End Epoch %02d (%.3f secs): err(train) = %.6f, err(valid)=%.6f' % (
epoch + 1, runTime, train_error, valid_error)
dynStats.gatherStats(train_error=train_error, valid_error=valid_error)
preds_dict, test_error, twod_dict = self.getPredictions(batch_size=batch_size, data_provider=preds_dp,
sess=sess) if preds_gather_enabled else (
None, None, None)
if verbose:
if preds_gather_enabled:
print "total test error: {}".format(test_error)
print
if preds_gather_enabled:
return dynStats, self.trimPredsDict(preds_dict,
data_provider=preds_dp), preds_dp.get_targets_dict_trimmed(), twod_dict
else:
return dynStats
def getGraph(self,
batch_size,
enc_num_units,
hidden_enc_num_units,
hidden_enc_dim,
hidden_dec_dim,
hidden_dec_num_units,
dec_num_units,
ts_len,
learning_rate=ADAM_DEFAULT_LEARNING_RATE, # default of Adam is 1e-3
verbose=True):
# momentum = 0.5
# tf.
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_demoDeeplearning.py
|
Python
|
apache-2.0
| 1,322
| 0.04236
|
import sys
sys.path.insert(1,"../../../")
import h2o, tests
def deepLearningDemo():
# Training data
train_data
|
= h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
train_data = train_data.drop('Site')
train_data
|
['Angaus'] = train_data['Angaus'].asfactor()
print train_data.describe()
train_data.head()
# Testing data
test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
test_data['Angaus'] = test_data['Angaus'].asfactor()
print test_data.describe()
test_data.head()
# Run GBM
gbm = h2o.gbm(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
ntrees=100,
distribution="bernoulli")
gbm.show()
# Run DeepLearning
dl = h2o.deeplearning(x = train_data[1:],
y = train_data['Angaus'],
validation_x= test_data [1:] ,
validation_y= test_data ['Angaus'],
loss = 'CrossEntropy',
epochs = 1000,
hidden = [20, 20, 20])
dl.show()
if __name__ == "__main__":
tests.run_test(sys.argv, deepLearningDemo)
|
zvezdan/pip
|
tests/functional/test_download.py
|
Python
|
mit
| 17,722
| 0
|
import os
import textwrap
import pytest
from pip._internal.status_codes import ERROR
from tests.lib.path import Path
def fake_wheel(data, wheel_path):
data.packages.join(
'simple.dist-0.1-py2.py3-none-any.whl'
).copy(data.packages.join(wheel_path))
@pytest.mark.network
def test_download_if_requested(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip(
'download', '-d', 'pip_downloads', 'INITools==0.1', expect_error=True
)
assert Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz' \
in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_setuptools(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip('download', 'setuptools')
setuptools_prefix = str(Path('scratch') / 'setuptools')
assert any(
path.startswith(setuptools_prefix) for path in result.files_created
)
def test_download_wheel(script, data):
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
'download',
'--no-index',
'-f', data.packages,
'-d', '.', 'meta'
)
assert (
Path('scratch') / 'meta-1.0-py2.py3-none-any.whl'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
@pytest.mark.network
def test_single_download_from_requirements_file(script):
"""
It should support download (in the scratch path) from PyPi from a
requirements file
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
"""
It should download dependencies (in the scratch path)
"""
result = script.pip(
'download', 'Paste[openid]==1.7.5.1', '-d', '.', expect_error=True,
)
assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in result.files_created
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert script.site_packages / 'openid' not in result.files_created
def test_download_wheel_archive(script, data):
"""
It should download a wheel archive path
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--no-deps'
)
assert Path('scratch') / wheel_filename in result.files_created
def test_download_should_download_wheel_deps(script, data):
"""
It should download dependencies for wheels(in the scratch path)
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
dep_filename = 'translationstring-1.1.tar.gz'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--find-links', data.find_links, '--no-index'
)
assert Path('scratch') / wheel_filename in result.files_created
assert Path('scratch') / dep_filename in result.files_created
@pytest.mark.network
def test_download_should_skip_existing_files(script):
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
# adding second package to test-req.txt
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
INITools==0.1
python-openid==2.2.5
"""))
# only the second package should be downloaded
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
expect_error=True,
)
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created
assert script.site_packages / 'initools' not in result.files_created
assert script.site_packages / 'openid' not in result.files_created
@pytest.mark.network
def test_download_vcs_link(script):
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
)
assert (
Path('scratch') / 'pip-test-package-0.1.1.zip'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '
|
--no-index', '--find-links', data.find_links,
'--no-deps',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
|
)
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--no-binary=fake',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') /
|
romanvm/WsgiBoostServer
|
benchmarks/test_app.py
|
Python
|
mit
| 1,229
| 0.003255
|
from bottle import route, default_app
app = default_app()
data = {
"id": 78874,
"seriesName": "Firefly",
"aliases": [
"Serenity"
],
"banner": "graphical/78874-g3.jpg",
"seriesId": "7097",
"status": "Ended",
"firstAired": "2002-09-20",
"network": "FOX (US)",
"networkId": "",
"runtime": "45",
"genre": [
"Drama",
"Science-Fiction"
],
"overview": "In the far-distant future, Captain Malcolm \"Mal\" Reynolds is a renegade former brown-coat sergeant, now turned smuggler & rogue, "
"who is the commander of a small spacecraft, with a loyal hand-picked crew made up of the first mate, Zoe Warren; the pilot Hoban \"Wash\" Washburn; "
"the gung-ho grunt Jayne Cobb; the engineer Kaylee Frye; the fugitives Dr. Simon Tam and his psychic sister River. "
"Together, they travel the far reaches of space in search of food, money, and anything to live on.",
"lastUpdated": 1486759680,
"airsDayOfWeek": "",
"airsTime": "",
"rating": "TV-14",
"imdbId": "tt0303461",
"zap2it
|
Id": "EP00524463",
"added": "",
"addedBy": None,
"siteRating": 9.5,
"siteRatingCount": 472,
}
@route('/api')
def api():
|
return data
|
jdf76/plugin.video.youtube
|
resources/lib/youtube_plugin/kodion/constants/__init__.py
|
Python
|
gpl-2.0
| 526
| 0
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.v
|
ideo.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from . import const_settings as setting
from . import const_localize as localize
from . import const_sort_methods as sort_method
from . import const_content_types as content_type
from . import const_paths as paths
__all__ =
|
['setting', 'localize', 'sort_method', 'content_type', 'paths']
|
iptvgratis/TUPLAY
|
resources/tools/epg_formulatv.py
|
Python
|
gpl-3.0
| 24,492
| 0.007035
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# beta.1 EPG FórmulaTV.com
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
#------------------------------------------------------------
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import time
from datetime import datetime
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
tmp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', ''))
LIST = "list"
THUMBNAIL = "thumbnail"
MOVIES = "movies"
TV_SHOWS = "tvshows"
SEASONS = "seasons"
EPISODES = "episodes"
FANART = "fanart"
OTHER = "other"
MUSIC = "music"
def epg_ftv(title):
plugintools.log('[%s %s].epg_ftv %s' % (addonName, addonVersion, title))
channel = title.lower()
channel = channel.replace("Opción 1", "").replace("HD", "").replace("720p", "").replace("1080p", "").replace("SD", "").replace("HQ", "").replace("LQ", "").strip()
channel = channel.replace("Opción 2", "")
channel = channel.replace("Opción 3", "")
channel = channel.replace("Op. 1", "")
channel = channel.replace("Op. 2", "")
channel = channel.replace("Op. 3", "")
plugintools.log("Canal: "+channel)
params = plugintools.get_params()
params["url"]='http://www.formulatv.com/programacion/'
if channel == "la 1" or channel == "la 1 hd":
channel = "la 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la 2":
channel = "la 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "antena 3" or channel == "antena 3 hd":
channel = "antena 3 televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cuatro" or channel == "cuatro hd":
channel = "cuatro"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "telecinco hd" or channel == "telecinco":
channel == "telecinco"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la sexta" or channel == "la sexta hd":
channel = "lasexta"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+1" or channel == "canal+ 1" or channel == "canal plus" or channel == "canal+ hd":
channel = "canal+1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+2" or channel == "canal+ 2" or channel == "canal plus 2" or channel == "canal+ 2 hd":
channel = "canal+ 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 1 ...30" or channel == "canal+ 1... 30":
channel = "canal+ 1 ...30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ series":
channel = "canal+ series"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "goltv" or channel == "golt":
channel = "gol televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "40 TV":
channel = "40 tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal sur" or channel == "andalucia tv":
channel = "canal sur"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return
|
epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn white":
channel = "axn white"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "xtrm":
channel = "xtrm"
epg_channel = epg_formulatv(params, chann
|
el)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "calle 13" or channel == "calle 13 hd":
channel = "calle 13"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "amc" or channel == "amc españa":
channel = "amc (españa)"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "barça tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "andalucía tv" or channel == "andalucia tv":
channel = "andalucia-tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "canal barca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 30" or channel == "canal+ ...30" or channel == "canal plus 30":
channel = "canal+ 1... 30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ accion" or channel == "canal+ acción" or channel=="canal plus accion":
channel = "canal+ acción"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ comedia" or channel == "canal plus comedia":
channel = "canal+ comedia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ decine" or channel == "canal plus decine":
channel = "canal+ dcine"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ deporte" or channel == "canal plus deporte":
channel = "canal+ deporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ futbol" or channel == "canal+ fútbol" or channel == "canal plus fútbol" or channel == "canal plus futbol":
channel = "canal+ fútbol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ liga":
channel = "canal+ liga"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ golf" or channel == "canal plus golf":
channel = "golf+"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ toros" or channel == "canal plus toros":
channel = "canal+ toros"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ extra" or channel=="canal+ xtra":
channel = "canal+ xtra"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif chan
|
pycket/pycket
|
pycket/prims/general.py
|
Python
|
mit
| 75,231
| 0.004878
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
#import struct
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.cont import continuation, loop_label, call_cont
from pycket.arity import Arity
from pycket import values_parameter
from pycket import values_struct
from pycket import values_regex
from pycket import vector as values_vector
from pycket.error import SchemeException, UserException
from pycket.foreign import W_CPointer, W_CType
from pycket.hash.equal import W_EqualHashTable
from pycket.hash.base import W_HashTable
from pycket.hash.simple import (W_EqImmutableHashTable, W_EqvImmutableHashTable, W_EqMutableHashTable, W_EqvMutableHashTable, make_simple_immutable_table)
from pycket.prims.expose import (unsafe, default, expose, expose_val, prim_env,
procedure, define_nyi, subclass_unsafe, make_procedure)
from pycket.prims.primitive_tables import *
from pycket.prims import string
from pycket.racket_paths import racket_sys_paths
from pycket.env import w_global_config
from rpython.rlib import jit, objectmodel, unroll, rgc
from rpython.rlib.rsre import rsre_re as re
# import for side effects
from pycket.prims import control
from pycket.prims import continuation_marks
from pycket.prims import char
from pycket.prims import box
from pycket.prims import equal as eq_prims
from pycket.prims import foreign
from pycket.prims import hash
from pycket.prims import impersonator
from pycket.prims import input_output
from pycket.prims import logging
from pycket.prims import numeric
from pycket.prims import parameter
from pycket.prims import random
from pycket.prims import regexp
from pycket.prims import string
from pycket.prims import struct_structinfo
from pycket.prims import undefined
from pycket.prims import vector
from rpython.rlib import jit
def make_pred(name, cls):
@expose(name, [values.W_Object], simple=True)
def predicate_(a):
return values.W_Bool.make(isinstance(a, cls))
predicate_.__name__ += cls.__name__
def make_dummy_char_pred(name):
@expose(name, [values.W_Character], simple=True)
def predicate_(a):
return values.w_false
predicate_.__name__ += name
def make_pred_eq(name, val):
typ = type(val)
@expose(name, [values.W_Object], simple=True)
def pred_eq(a):
return values.W_Bool.make(a is val)
for args in [
("output-port?", values.W_OutputPort),
("pair?", values.W_Cons),
("mpair?", values.W_MCons),
("number?", values.W_Number),
("complex?", values.W_Number),
("fixnum?", values.W_Fixnum),
("flonum?", values.W_Flonum),
("vector?", values.W_MVector),
("string?", values_string.W_String),
("symbol?", values.W_Symbol),
("boolean?", values.W_Bool),
("inspector?", values_struct.W_StructInspector),
("struct-type?", values_struct.W_StructType),
("struct-constructor-procedure?", values_struct.W_StructConstructor),
("struct-predicate-procedure?", values_struct.W_StructPredicate),
("struct-type-property?", values_struct.W_StructProperty),
("struct-type-property-accessor-procedure?",
values_struct.W_StructPropertyAccessor),
("box?", values.W_Box),
("variable-reference?", values.W_VariableReference),
("thread-cell?", values.W_ThreadCell),
("thread-cell-values?", values.W_ThreadCellValues),
("semaphore?", values.W_Semaphore),
("semaphore-peek-evt?", values.W_SemaphorePeekEvt),
("path?", values.W_Path),
("bytes?", values.W_Bytes),
("pseudo-random-generator?", values.W_PseudoRandomGenerator),
("char?", values.W_Character),
("continuation?", values.W_Continuation),
("continuation-mark-set?", values.W_ContinuationMarkSet),
("continuation-mark-key?", values.W_ContinuationMarkKey),
("primitive?", values.W_Prim),
("keyword?", values.W_Keyword),
("weak-box?", values.W_WeakBox),
("ephemeron?", values.W_Ephemeron),
("placeholder?", values.W_Placeholder),
("hash-placeholder?", values.W_HashTablePlaceholder),
("module-path-index?", values.W_ModulePathIndex),
("resolved-module-path?", values.W_ResolvedModulePath),
("impersonator-property-accessor-procedure?",
imp.W_ImpPropertyAccessor),
("impersonator-property?", imp.W_ImpPropertyDescriptor),
("parameter?", values_parameter.W_BaseParameter),
("parameterization?", values_
|
parameter.W_Parameterization),
("hash?", W_HashTable),
("cpointer?", W_CPointer),
("ctype?", W_CType),
("continuation-prompt-tag?", values.W_ContinuationPromptTag),
("logger?", values.W_Logger),
("log-receiver?", values.W_LogReciever),
("evt?", values.W_Evt),
("unquoted-printing-string?", values.W_UnquotedPrintingString)
|
,
("port?", values.W_Port),
("security-guard?", values.W_SecurityGuard),
# FIXME
("will-executor?", values.W_WillExecutor),
("bytes-converter?", values.W_Impossible),
("fsemaphore?", values.W_Impossible),
("thread-group?", values.W_Impossible),
("udp?", values.W_Impossible),
("extflonum?", values.W_ExtFlonum),
("custodian-box?", values.W_Impossible),
("custodian?", values.W_Impossible),
("future?", values.W_Impossible),
]:
make_pred(*args)
for args in [
("void?", values.w_void),
("false?", values.w_false),
("null?", values.w_null),
]:
make_pred_eq(*args)
@expose("hash-weak?", [values.W_Object], simple=True)
def hash_weah_huh(obj):
# FIXME
return values.w_false
@expose("hash-strong?", [values.W_Object], simple=True)
def hash_strong_huh(obj):
# FIXME: /pypy/rpython/rlib/rweakref.py
return values.W_Bool.make(isinstance(obj, W_HashTable))
@expose("hash-ephemeron?", [values.W_Object], simple=True)
def hash_strong_huh(obj):
# FIXME
return values.w_false
@expose("hash-equal?", [values.W_Object], simple=True)
def hash_eq(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
return values.W_Bool.make(isinstance(inner, W_EqualHashTable))
@expose("hash-eq?", [values.W_Object], simple=True)
def hash_eq(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eq_mutable = isinstance(inner, W_EqMutableHashTable)
eq_immutable = isinstance(inner, W_EqImmutableHashTable)
return values.W_Bool.make(eq_mutable or eq_immutable)
@expose("hash-eqv?", [values.W_Object], simple=True)
def hash_eqv(obj):
inner = obj
if isinstance(obj, imp.W_ImpHashTable) or isinstance(obj, imp.W_ChpHashTable):
inner = obj.get_proxied()
eqv_mutable = isinstance(inner, W_EqvMutableHashTable)
eqv_immutable = isinstance(inner, W_EqvImmutableHashTable)
return values.W_Bool.make(eqv_mutable or eqv_immutable)
def struct_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return (w_in is not None) or (w_out is not None)
def struct_port_prop_huh(w_struct):
w_type = w_struct.struct_type()
in_property = out_property = None
for property in w_type.properties:
w_property, w_value = property
if w_property is values_struct.w_prop_input_port:
in_property = w_value
elif w_property is values_struct.w_prop_output_port:
out_property = w_value
return in_property, out_property
def struct_input_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_in is not None
def struct_output_port_huh(w_struct):
w_in, w_out = struct_port_prop_huh(w_struct)
return w_out is not None
@expose("input-port?", [values.W_Object], simple=True)
def input_port_huh(a):
if isinstance(a, values.W_InputPort):
return values.w_true
elif isinstance(a, values_struct.W_Struct):
if struct_input_port_huh(a):
|
ProjectSWGCore/NGECore2
|
scripts/static_spawns/naboo/theed.py
|
Python
|
lgpl-3.0
| 1,499
| 0.02068
|
import sys
from resources.datatables import Options
from resources.datatables import StateStatus
def addPlanetSpawns(core, planet):
stcSvc = core.staticService
objSvc = core.ob
|
jectService
#junkdealer
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5694), float(6.5), float(4182), float(0.707), float(-0.707))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5717), float(6.5), float(4159), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5086), float(6), float(4142),
|
float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5147), float(6.5), float(4158), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5114), float(6.5), float(4161), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5222), float(6), float(4217), float(0.71), float(-0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5127), float(6), float(4239), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5761), float(6.6), float(4234), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5475), float(6), float(4105), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-4999), float(6), float(4119), float(0.71), float(0.71))
stcSvc.spawnObject('junkdealer', 'naboo', long(0), float(-5883), float(6), float(4214), float(0.71), float(0.71))
return
|
timercrack/pydatacoll
|
pydatacoll/utils/__init__.py
|
Python
|
apache-2.0
| 755
| 0
|
#!/usr/bin/env pytho
|
n
#
# Copyright 2016 timercrack
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def str_to_number(s):
try:
if not isinstance(s, str):
return s
return int(s)
except ValueError:
return float(s)
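# Examples (illustrative): str_to_number("42") -> 42, str_to_number("3.14") -> 3.14,
# and non-string inputs such as str_to_number(7) are returned unchanged.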
|
thisisshi/cloud-custodian
|
c7n/resources/ebs.py
|
Python
|
apache-2.0
| 57,596
| 0.00026
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from collections import Counter
import logging
import itertools
import json
import time
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from dateutil.parser import parse as parse_date
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
CrossAccountAccessFilter, Filter, AgeFilter, ValueFilter,
ANNOTATION_KEY)
from c7n.filters.health import HealthEventFilter
from c7n.filters.related import RelatedResourceFilter
from c7n.manager import resources
from c7n.resources.kms import ResourceKmsKeyAlias
from c7n.resources.securityhub import PostFinding
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import Tag, coalesce_copy_user_tags
from c7n.utils import (
camelResource,
chunks,
get_retry,
local_session,
select_keys,
set_annotation,
type_schema,
QueryParser,
)
from c7n.resources.ami import AMI
log = logging.getLogger('custodian.ebs')
@resources.register('ebs-snapshot')
class Snapshot(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ec2'
arn_type = 'snapshot'
enum_spec = (
'describe_snapshots', 'Snapshots', None)
id = 'SnapshotId'
id_prefix = 'snap-'
filter_name = 'SnapshotIds'
filter_type = 'list'
name = 'SnapshotId'
date = 'StartTime'
default_report_fields = (
'SnapshotId',
'VolumeId',
'tag:InstanceId',
'VolumeSize',
'StartTime',
'State',
)
def resources(self, query=None):
qfilters = SnapshotQueryParser.parse(self.data.get('query', []))
query = query or {}
if qfilters:
query['Filters'] = qfilters
if query.get('OwnerIds') is None:
query['OwnerIds'] = ['self']
if 'MaxResults' not in query:
query['MaxResults'] = 1000
return super(Snapshot, self).resources(query=query)
def get_resources(self, ids, cache=True, augment=True):
if cache:
resources = self._get_cached_resources(ids)
if resources is not None:
return resources
while ids:
try:
return self.source.get_resources(ids)
except ClientError as e:
bad_snap = ErrorHandler.extract_bad_snapshot(e)
if bad_snap:
ids.remove(bad_snap)
continue
raise
return []
class ErrorHandler:
@staticmethod
def remove_snapshot(rid, resource_set):
found = None
for r in resource_set:
if r['SnapshotId'] == rid:
found = r
break
if found:
resource_set.remove(found)
@staticmethod
def extract_bad_snapshot(e):
"""Handle various client side errors when describing snapshots"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_snap_id = None
if error == 'InvalidSnapshot.NotFound':
e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
log.warning("Snapshot not found %s" % e_snap_id)
elif error == 'InvalidSnapshotID.Malformed':
e_snap_id = msg[msg.find('"') + 1:msg.rfind('"')]
log.warning("Snapshot id malformed %s" % e_snap_id)
return e_snap_id
@staticmethod
def extract_bad_volume(e):
"""Handle various client side errors when describing volumes"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_vol_id = None
if error == 'InvalidVolume.NotFound':
e_vol_id = msg[msg.find("'") + 1:msg.rfind("'")]
log.warning("Volume not found %s" % e_vol_id)
elif error == 'InvalidVolumeID.Malformed':
e_vol_id = msg[msg.find('"') + 1:msg.rfind('"')]
log.warning("Volume id malformed %s" % e_vol_id)
return e_vol_id
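    # Illustrative note (hypothetical message): for an error message such as
    # "The snapshot 'snap-0123' does not exist", the slice between the first and
    # last quote recovers "snap-0123" so callers can drop the bad id and retry.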
class SnapshotQueryParser(QueryParser):
QuerySchema = {
'description': str,
'owner-alias': ('amazon', 'amazon-marketplace', 'microsoft'),
'owner-id': str,
'progress': str,
'snapshot-id': str,
'start-time': str,
'status': ('pending', 'completed', 'error'),
'tag': str,
'tag-key': str,
'volume-id': str,
'volume-size': str,
}
type_name = 'EBS'
@Snapshot.action_registry.register('tag')
class SnapshotTag(Tag):
permissions = ('ec2:CreateTags',)
def process_resource_set(self, client, resource_set, tags):
while resource_set:
try:
return super(SnapshotTag, self).process_resource_set(
client, resource_set, tags)
except ClientError as e:
bad_snap = ErrorHandler.extract_bad_snapshot(e)
if bad_snap:
ErrorHandler.remove_snapshot(bad_snap, resource_set)
continue
raise
@Snapshot.filter_registry.register('age')
class SnapshotAge(AgeFilter):
"""EBS Snapshot Age Filter
Filters an EBS snapshot based on the age of the snapshot (in days)
:example:
.. code-block:: yaml
policies:
- name: ebs-snapshots-week-old
resource: ebs-snapshot
filters:
- type: age
days: 7
op: ge
"""
schema = type_schema(
'age',
days={'type': 'number'},
op={'$ref': '#/definitions/filters_common/comparison_operators'})
date_attribute = 'StartTime'
def _filter_ami_snapshots(self, snapshots):
if not self.data.get('value', True):
return snapshots
        # Try the cache first to get a listing of all AMI snapshots and compare resources to that list.
# This will populate the cache.
amis = self.manager.get_resource_manager('ami').resources()
ami_snaps = []
for i in amis:
for dev in i.get('BlockDeviceMappings'):
if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
ami_snaps.append(dev['Ebs']['SnapshotId'])
|
matches = []
f
|
or snap in snapshots:
if snap['SnapshotId'] not in ami_snaps:
matches.append(snap)
return matches
@Snapshot.filter_registry.register('cross-account')
class SnapshotCrossAccountAccess(CrossAccountAccessFilter):
permissions = ('ec2:DescribeSnapshotAttribute',)
def process(self, resources, event=None):
self.accounts = self.get_accounts()
results = []
client = local_session(self.manager.session_factory).client('ec2')
with self.executor_factory(max_workers=3) as w:
futures = []
for resource_set in chunks(resources, 50):
futures.append(w.submit(
self.process_resource_set, client, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception checking cross account access \n %s" % (
f.exception()))
continue
results.extend(f.result())
return results
def process_resource_set(self, client, resource_set):
results = []
for r in resource_set:
attrs = self.manager.retry(
client.describe_snapshot_attribute,
SnapshotId=r['SnapshotId'],
Attribute='createVolumePermission')['CreateVolumePermissions']
shared_accounts = {
g.get('Group') or g.get('UserId') for g in attrs}
delta_accounts = shared_accounts.difference(self.accounts)
if delta_accounts:
r['c7n:CrossAccountViolations'] = list(delta_accounts)
results.append(r)
return results
@Snapshot.filter_registry.register('unused')
class SnapshotUnusedFilter(Filter):
"""Filters snapshots based on usage
true: s
|
stefantalpalaru/django-bogofilter
|
bogofilter/forms.py
|
Python
|
bsd-3-clause
| 216
| 0.009259
|
from django_comments.forms import CommentForm
from bogofilter.mo
|
dels import BogofilterComment
import time
class BogofilterCommentForm(CommentForm):
def get_comment_model(self):
|
return BogofilterComment
|
cloud-engineering/xfc-email-notifier
|
snippets/snippet_notfication.py
|
Python
|
mit
| 939
| 0.015974
|
import subprocess
import pynotify
import time
def notify_with_subprocess(title, message):
subprocess.Popen(['notify-send', title, message])
return
def notify_with_pynotify(title, message):
pynotify.init("Test")
notice = pynotify.Notification(title, message)
notice.show()
return
def update_with_pynotify():
pynotify.init("app_name")
n = pynotify.Notification("", "message A", icon='some_icon')
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_timeout(10)
n.show()
n.update("","message B")
n.show()
def callback_func
|
tion(notification=Non
|
e, action=None, data=None):
print "It worked!"
pynotify.init("app_name")
n = pynotify.Notification("Title", "body")
n.set_urgency(pynotify.URGENCY_NORMAL)
n.set_timeout(100)
n.show()
#n.add_action("clicked","Button text", callback_function, None)
#n.update("Notification", "Update for you")
#n.show()
#update_with_pynotify()
|
AdamDynamic/TwitterMetrics
|
CreateJson.py
|
Python
|
gpl-2.0
| 2,191
| 0.016431
|
#!/usr/bin/env python
# Creates and saves a JSON file to update the D3.js graphs
import MySQLdb
import MySQLdb.cursors
import json
import Reference as r
import logging
def CreateSentimentIndex(NegativeWords, PositiveWords, TotalWords):
''' Creates a sentiment value for the word counts'''
if TotalWords != 0:
Sentiment = ((PositiveWords - NegativeWords)/float(TotalWords))
return Sentiment
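# Worked example (hypothetical counts): 30 positive and 10 negative words out of
# 200 total words give a sentiment index of (30 - 10) / 200 = 0.1.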
def CreateJsonData(QueryResults):
''' Creates a list of dictionaries containing the dates and sentiment indexes'''
Output = []
for Row in QueryResults:
RowDate = Row['DateTime'].strftime('%Y-%m-%d %H:%M:%S')
RowSentiment = CreateSentimentIndex(Row['Negative'], Row['Positive'], Row['TotalWords'])
Output.append({"date" : RowDate, "index" : RowSentiment})
return Output
|
def OutputJsonFile(InputDictionary):
'''Saves a dictionary to an output file in a JSON format'''
JsonOutput = json.dumps(InputDictionary)
OutputFileName = 'json/twittermetric
|
s_sentiment.js'
FileOutput = open(OutputFileName,'w')
print >> FileOutput, JsonOutput
return True
def CreateJsonFile():
'''Extracts data from the database and saves a JSON file to the server'''
FN_NAME = "CreateJsonFile"
dbDict = MySQLdb.connect(
host=r.DB_HOST,
user=r.DB_USER,
passwd=r.DB_PASSWORD,
db=r.DB_NAME,
cursorclass=MySQLdb.cursors.DictCursor
)
curDict = dbDict.cursor()
Query = "SELECT " + r.KR_FIELD_TOTALWORDS + ", " + r.KR_FIELD_POSITIVE + ", " + r.KR_FIELD_NEGATIVE + ", " + r.KR_FIELD_DATETIME + " FROM " + r.DB_TABLE_KEYWORDSRESULTS + ";"
logging.debug(FN_NAME, Query)
curDict.execute(Query)
QueryResults = curDict.fetchall()
Output = CreateJsonData(QueryResults)
ProcessResult = OutputJsonFile(Output)
logging.info('%s - JSON file created and saved to server with result %s', FN_NAME, ProcessResult)
    dbDict.close()
return ProcessResult
|
jkonecny12/anaconda
|
pyanaconda/modules/payloads/source/repo_files/repo_files_interface.py
|
Python
|
gpl-2.0
| 1,434
| 0.001395
|
#
# DBus interface for payload Repo files image source.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Th
|
is copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABIL
|
ITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from pyanaconda.modules.common.constants.interfaces import PAYLOAD_SOURCE_REPO_FILES
from pyanaconda.modules.payloads.source.source_base_interface import PayloadSourceBaseInterface
@dbus_interface(PAYLOAD_SOURCE_REPO_FILES.interface_name)
class RepoFilesSourceInterface(PayloadSourceBaseInterface):
"""Interface for the payload Repo files image source."""
pass
|
iskandr/brainmets
|
data.py
|
Python
|
apache-2.0
| 7,077
| 0.046489
|
import pandas as pd
import numpy as np
import sklearn.preprocessing
from sklearn.linear_model import LinearRegression, LogisticRegression
FILENAME = 'BrainMets.xlsx'
MONTHS_TO_LIVE = 9
N_TRAIN = 250
def categorical_indices(values):
"""
When we have a categorical feature like 'cancer type', we want to transform its unique values
to indices in some range [0, ..., n-1] where n is the number of categories
"""
unique = values.unique()
indices = np.zeros(len(values), dtype=int)
for (i, v) in enumerate(sorted(unique)):
indices[np.array(values == v)] = i
return indices
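# Illustrative usage (hypothetical values): categories are indexed in sorted order,
# e.g. categorical_indices(pd.Series(['lung', 'breast', 'lung', 'melanoma']))
# returns array([1, 0, 1, 2]).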
def load_dataframe(filename = FILENAME):
df = pd.read_excel(filename, 'DATA', header=1)
df['cancer type'] = df['cancer type'].str.lower().str.strip()
# df['cancer type'] = categorical_indices(cancer_type)
df['Brain Tumor Sx'] = df['Brain Tumor Sx'].astype('float')
|
# df['Brain Tumor Sx'] = categorical_indices(brain_tumor_sx)
return df
def get_expert_predictions(df):
expert_predictions = {}
experts = [
'Prediction(Cleveland Clinic)',
' Prediction (Lanie Francis)',
'Prediction(Flickinger)',
'Prediction(Loefler',
'Prediction(Knisely)',
'Prediction(Lunsford)',
'Prediction (Tahrini)',
'Prediction (Sheehan)',
'Prediction (Linskey)',
'Prediction(friedman)',
'Prediction(Stupp)',
'Prediction(Rakfal)',
'Prediction(Rush)',
' Prediction( Kondziolka)'
]
for expert in experts:
expert_predictions[expert] = df[expert]
return expert_predictions
def feature_selection(df, Y, training_set_mask):
Y_training = Y[training_set_mask]
df_training = df.ix[training_set_mask]
fields = []
n_tumors = df['# of tumors']
n_tumors_training = n_tumors[training_set_mask]
def impute(X, df, name, model, postprocess = lambda x: x, maxval = None):
Y = df[name]
missing = np.array(Y.isnull())
X_train = X[~(missing)]
Y_train = Y[~missing]
X_test = X[missing]
model.fit(X_train, Y_train)
Y_test = model.predict(X_test)
Y_test = postprocess(Y_test)
if maxval:
Y_test = np.minimum(Y_test, maxval)
Y_filled = Y.copy()
Y_filled[missing] = Y_test
df[name] = Y_filled
def impute_missing_features(df):
input_fields = df[[
'Brain Tumor Sx',
'RPA',
'ECOG',
'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]]
X = np.array(input_fields)
missing = df['Extracranial Disease Status'].isnull()
impute(X, df, 'Extracranial Disease Status', LogisticRegression())
impute(X, df, 'K Score', LinearRegression(), lambda x: 10*(x.astype('int')/10), maxval = 100)
return df
def extract_features(df, binarize_categorical):
df = df.copy()
df['log_age']= np.log2(df['age'])
df = impute_missing_features(df)
df['# of tumors > 1'] = df['# of tumors'] > 1
df['# of tumors > 4'] = df['# of tumors'] > 4
df['# of tumors > 10'] = df['# of tumors'] > 10
df['age <45'] = df['age'] < 45
df['age 45-55'] = (df['age'] >= 45) & (df['age'] < 55)
df['age 55-65'] = (df['age'] >= 55) & (df['age'] < 65)
df['age 65-75'] = (df['age'] >= 65) & (df['age'] < 75)
df['age >=75'] = (df['age'] >= 75)
df['age <40'] = df['age'] < 40
df['age 40-50'] = (df['age'] >= 40) & (df['age'] < 50)
df['age 50-60'] = (df['age'] >= 50) & (df['age'] < 60)
df['age 50-70'] = (df['age'] >= 50) & (df['age'] < 70)
df['age 60-70'] = (df['age'] >= 60) & (df['age'] < 70)
df['age 70-80'] = (df['age'] >= 70) & (df['age'] < 80)
df['age >=80'] = (df['age'] >= 80)
df['age >=70'] =df['age'] >= 70
df['age 45-60'] = (df['age'] >= 45) & (df['age'] < 60)
df['Normalized K Score'] = df['K Score'] / 100.0
continuous_fields = [
'# of tumors > 1',
'age 50-70',
'age >=70',
'Normalized K Score',
]
binary_fields = [
'Prior WBRT',
'Diagnosis of Primary at the same time as Brain tumor'
]
# 9, 12, 14, 15, 16, 18, 20, 22, 25
categorical_fields = [
'Extracranial Disease Status',
'cancer type',
'Brain Tumor Sx',
'RPA',
'ECOG',
]
vectors = []
for field in continuous_fields + binary_fields:
v = np.array(df[field]).astype('float')
vectors.append(v)
for field in categorical_fields:
values = df[field]
if binarize_categorical:
unique = np.unique(values)
print "Expanding %s into %d indicator variables: %s" % (field, len(unique), unique)
for i, v in enumerate(sorted(unique)):
print len(vectors), field, v, np.sum(values == v)
vec = np.zeros(len(values), dtype='float')
vec[np.array(values == v)] = 1
vectors.append(vec)
else:
vectors.append(categorical_indices(values))
X = np.vstack(vectors).T
print X.dtype, X.shape
return X
def make_dataset(df, binarize_categorical):
"""
Load dataset with continuous outputs
"""
dead = np.array(df['Dead'] == 1)
Y = np.array(np.array(df['SurvivalMonths']))
expert_predictions = get_expert_predictions(df)
test_set_mask = np.zeros(len(df), dtype=bool)
# training set is any data point for which we have no expert
# predictions
for expert_Y in expert_predictions.values():
test_set_mask |= ~expert_Y.isnull()
X = extract_features(df, binarize_categorical)
return X, Y, dead, expert_predictions, test_set_mask
def make_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
X, Y_continuous, dead, expert_predictions, test_set_mask = make_dataset(df, binarize_categorical)
# get rid of patients for whom we don't have a long enough history
mask = np.array(dead | (Y_continuous >= months_to_live))
X = X[mask]
Y = dead[mask] & (Y_continuous[mask] < months_to_live)
return X, Y
# TODO: fill in missing cancer types
def annotate_5year_survival(df):
five_year_survival = {
'breast': 25,
'nsclc': 4,
'sclc' : None,
'rcc' : 12.1,
'melanoma' : 16.1,
'carcinoid' : None,
'endometrial' : 17.5,
'sarcoma' : None,
'colon' : 12.9,
'rectal' : None,
'prostate' : 28,
'uterine' : None ,
'nasopharyngeal' : None,
'thyroid' : 54.7,
}
def load_dataset(filename = FILENAME, binarize_categorical = True):
df = load_dataframe(filename)
return make_dataset(df, binarize_categorical = binarize_categorical)
def load_labeled_dataset(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, binarize_categorical = True):
df = load_dataframe(filename)
return make_labeled_dataset(df, months_to_live, binarize_categorical = binarize_categorical)
def split_labeled_dataset(df, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN, binarize_categorical = True, shuffle = True, verbose = True):
X, y = make_labeled_dataset(df, months_to_live = months_to_live, binarize_categorical = binarize_categorical)
if shuffle:
idx = np.arange(len(y))
np.random.shuffle(idx)
y = y[idx]
X = X[idx]
Xtrain = X[:n_train]
Ytrain = y[:n_train]
Xtest = X[n_train:]
Ytest = y[n_train:]
if verbose:
print Xtest[[0,1,2], :]
print Ytest[[0,1,2]]
print np.mean(Ytrain)
print np.mean(Ytest)
print Xtrain.shape
print Xtest.shape
return Xtrain, Ytrain, Xtest, Ytest
def load_dataset_splits(filename = FILENAME, months_to_live = MONTHS_TO_LIVE, n_train = N_TRAIN):
df = load_dataframe(filename)
return split_labeled_dataset(df, months_to_live, n_train)
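# Hedged usage sketch (not part of the original module; the classifier choice
# is an assumption for illustration): a typical experiment with these helpers
# might look like the following.
# Xtrain, Ytrain, Xtest, Ytest = split_labeled_dataset(load_dataframe())
# model = LogisticRegression().fit(Xtrain, Ytrain)
# print model.score(Xtest, Ytest)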
|
wimmuskee/ontolex-db
|
format/namespace.py
|
Python
|
mit
| 343
| 0
|
# -*- coding: utf-8 -*-
from rdflib import Namespace
ONTOLEX = Namespace("http://www.w3.org/ns/lemon/ontolex#")
LEXINFO = Namespace("http://www.lexinfo.net/ontology/2.0/lexinfo#")
DECOMP = Namespace("http://www.w3.org/ns/lemon/decomp#")
ISOCAT = Namespace("http://www.isocat.org/datcat/")
LIME = Namespace("http://www.w3.org/ns/lemon/lime#")
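# Illustrative note (not in the original file): rdflib Namespace objects build
# URIRefs via attribute or item access, so downstream code can write e.g.
#   ONTOLEX.LexicalEntry   or   LEXINFO['partOfSpeech']
# to obtain rdflib.URIRef('http://www.w3.org/ns/lemon/ontolex#LexicalEntry'), etc.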
|
k3yavi/alevin
|
testing/src-py/Utilities.py
|
Python
|
gpl-3.0
| 3,040
| 0.000987
|
'''
'''
import sys
import os
import gzip
import regex
# 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def openFile(filename, mode="r", create_dir=False):
'''open file called *filename* with mode *mode*.
gzip - compressed files are recognized by the
suffix ``.gz`` and opened transparently.
Note that there are differences in the file
like objects returned, for example in the
ability to seek.
Arguments
---------
filename : string
mode : string
File opening mode
create_dir : bool
If True, the directory containing filename
will be created if it does not exist.
Returns
-------
File or file-like object in case of gzip compressed files.
'''
_, ext = os.path.splitext(filename)
if create_dir:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if ext.lower() in (".gz", ".z"):
if sys.version_info.major >= 3:
if mode == "r":
return gzip.open(filename, 'rt', encoding="ascii")
elif mode == "w":
return gzip.open(filename, 'wt', encoding="ascii")
else:
raise NotImplementedError(
"mode '{}' not implemented".format(mode))
else:
return gzip.open(filename, mode)
else:
return open(filename, mode)
def checkError(barcode, whitelist, limit=1):
near_matches = set()
comp_regex = regex.compile("(%s){e<=1}" % barcode)
comp_regex2 = regex.compile("(%s){e<=1}" % barcode[:-1])
b_length = len(barcode)
for whitelisted_barcode in whitelist:
w_length = len(whitelisted_barcode)
if barcode == whitelisted_barcode:
continue
if (max(b_length, w_length) > (min(b_length, w_length) + 1)):
continue
if comp_regex.match(whitelisted_barcode) or comp_regex2.match(whitelisted_barcode):
near_matches.add(whitelisted_barcode)
if len(near_matches) > limit:
return near_matches
return near_matches
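# Illustrative sketch (hypothetical values): checkError collects whitelist
# barcodes within one error of the query, skipping the exact match itself, e.g.
#   checkError('AAAA', {'AAAA', 'AAAT', 'CCCC'})
# would return {'AAAT'}; with limit=1 the loop returns early as soon as more
# than one near match has been collected.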
# partially 'borrowed' from CGAT - we may not need this functionality
# ultimately. When finalised, if req., make clear source
def FastqIterator(infile):
'''iterate over contents of fastq file.'''
while 1:
line1 = infile.readline()
if not line1:
break
if not line1.startswith('@'):
raise ValueError("parsing error: expected '@' in line %s" % line1)
line2 = infile.readline()
line3 = infile.readline()
if not line3.startswith('+'):
raise ValueError("parsing error: expected '+' in line %s" % line3)
line4 = infile.readline()
# incomplete entry
if not line4:
raise ValueError("incomplete entry for %s" % line1)
read_id, seq, qualities = line1[:-1], line2[:-1], line4[:-1]
yield ("", read_id, seq, qualities)
|
jodal/comics
|
comics/comics/partiallyclips.py
|
Python
|
agpl-3.0
| 375
| 0
|
from comics.aggregator.crawler import CrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "PartiallyClips"
language = "en"
url = "ht
|
tp://partiallyclips.com/"
start_date = "2002-01-01"
rights = "Robert T. Balder"
active = False
class Crawler(CrawlerBase):
def crawl(self, pub_date):
pass
|
bitcraft/pyglet
|
contrib/scene2d/tests/scene2d/SPRITE_OVERLAP.py
|
Python
|
bsd-3-clause
| 2,162
| 0
|
#!/usr/bin/env python
"""Testing a sprite.
The ball should bounce off the sides of the window. You may resize the
window.
This test should just run without failing.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import unittest
from pyglet.gl import glClear
import pyglet.window
import pyglet.window.event
from pyglet import clock
from scene2d import Sprite, Image2d, FlatView
from scene2d.image import TintEffect
from scene2d.camera import FlatCamera
ball_png = os.path.join(os.path.dirname(__file__), 'ball.png')
class BouncySprite(Sprite):
def update(self):
# move, check bounds
p = self.properties
self.x += p['dx']
self.y += p['dy']
if self.left < 0:
self.left = 0
p['dx'] = -p['dx']
|
elif self.right > 320:
self.right = 320
p['dx'] = -p['dx']
if self.bottom < 0:
self.bottom = 0
p['dy'] = -p['dy']
elif self.top > 320:
self.top = 320
p['dy'] = -p['dy']
class SpriteOverlapTest(unittest.TestCase):
def test_sprite(self):
w = pyglet.window.Window(width=320, height=320)
image = Image2d.load(ball_png)
ball1 = BouncySprite(0, 0, 64, 64, image, properties=dict(dx=10, dy=5))
ball2 = BouncySprite(288, 0, 64, 64, image,
properties=dict(dx=-10, dy=5))
view = FlatView(0, 0, 320, 320, sprites=[ball1, ball2])
view.fx, view.fy = 160, 160
clock.set_fps_limit(60)
e = TintEffect((.5, 1, .5, 1))
while not w.has_exit:
clock.tick()
w.dispatch_events()
ball1.update()
ball2.update()
if ball1.overlaps(ball2):
if 'overlap' not in ball2.properties:
ball2.properties['overlap'] = e
ball2.add_effect(e)
elif 'overlap' in ball2.properties:
ball2.remove_effect(e)
del ball2.properties['overlap']
view.clear()
view.draw()
w.flip()
w.close()
unittest.main()
|
valasek/taekwondo
|
manage.py
|
Python
|
gpl-3.0
| 801
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "itf.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to
|
activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
arseneyr/essentia
|
test/src/unittest/standard/test_scale.py
|
Python
|
agpl-3.0
| 2,054
| 0.003408
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestScale(TestCase):
def testRegression(self):
inputSize = 1024
input = range(inputSize)
factor = 0.5
expected = [factor * n for n in input]
output = Scale(factor=factor, clipping=False)(input)
self.assertEqualVector(output, expected)
def testZero(self):
inputSize = 1024
input = [0] * inputSize
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testEmpty(self):
input = []
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testClipping(self):
inputSize = 1024
maxAbsValue= 10
factor = 1
input = [n + maxAbsValue for n in range(inputSize)]
expected = [maxAbsValue] * inputSize
output = Scale(factor=factor, clipping=True, maxAbsValue=maxAbsValue)(input)
self.assertEqualVector(output, expected)
def testInvalidParam(self):
self.assertConfigureFails(Scale(), { 'maxAbsValue': -1 })
suite = allTests(TestScale)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
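# Illustrative note (not part of the original test, values assumed): the cases
# above imply that Scale multiplies each sample by `factor` and, with clipping
# enabled, limits the absolute value to `maxAbsValue`, e.g.
#   Scale(factor=2, clipping=True, maxAbsValue=5)([1, 2, 4]) -> [2, 4, 5]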
|
simon-r/PyParticles
|
pyparticles/pset/rebound_boundary.py
|
Python
|
gpl-3.0
| 2,289
| 0.042813
|
# PyParticles : Particles simulation in python
# Copyright (C) 2012 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pyparticles.pset.boundary as bd
class ReboundBoundary( bd.Boundary ):
def __init__( self , bound=(-1,1) , dim=3 ):
self.set_boundary( bound , dim )
self.set_normals()
def set_normals( self ):
self.__N = np.zeros( ( 2*self.dim , self.dim ) )
#print( self.__N )
if self.dim >= 2 :
self.__N[0,:2] = np.array( [1,0] )
self.__N[1,:2] = np.array( [-1,0] )
self.__N[2,:2] = np.array( [0,1] )
self.__N[3,:2] = np.array( [0,-1] )
if self.dim == 3 :
self.__N[4,:] = np.array( [0,0,1] )
self.__N[5,:] = np.array( [0,0,-1] )
def boundary( self , p_set ):
v_mi = np.zeros((3))
v_mx = np.zeros((3))
for i in range( self.dim ) :
j = 2*i
v_mi[:] = 0.0
v_mx[:] = 0.0
#delta = self.bound[i,1] - self.bound[i,0]
b_mi = p_set.X[:,i] < self.bound[i,0]
b_mx = p_set.X[:,i] > self.bound[i,1]
v_mi[i] = self.bound[i,0]
v_mx[i] = self.bound[i,1]
p_set.X[b_mi,:] = p_set.X[b_mi,:] + 2.0 * self.__N[j,:] * ( v_mi - p_set.X[b_mi,:] )
p_set.X[b_mx,:] = p_set.X[b_mx,:] + 2.0 * self.__N[j,:] * ( v_mx - p_set.X[b_mx,:] )
p_set.V[b_mi,i] = -p_set.V[b_mi,i]
p_set.V[b_mx,i] = -p_set.V[b_mx,i]
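# Worked example (illustrative, not from the original file): with the default
# bound (-1, 1), a particle that ends a step at x = 1.2 is mirrored back to
# x = 1.2 + 2 * (1.0 - 1.2) = 0.8 and its velocity component along that axis
# is negated, which is exactly what the boundary() updates above compute.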
|
hammurabi13th/haopy
|
hao.py
|
Python
|
mit
| 12,161
| 0.068167
|
#!/usr/bin/python3
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from argparse import ArgumentParser
from threading import Thread
import re
class HackAlunoOnline:
def __init__( self , matricula , full_search = False ):
# Default output: matricula/name/course/status/period/CRA
# full_search fetches the remaining information
# Main url
self.aluno_online_url = 'https://www.alunoonline.uerj.br'
# parameters
self.matricula = matricula
self.full_search = full_search
# Main html
self.main_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'SinteseFormacao' } )
# Main data
self.nome = self._extract_nome()
self.cra = self._extract_cra()
self.curso = self._extract_curso()
self.situacao = self._extract_situacao()
self.periodo = self._extract_periodo()
# get and extract personal data
if ( self.full_search ):
# contact data
self.dados_contato_html = self._get_aluno_online_html( '/recadastramento_dados_contato/recadastramento_dados_contato.php' )
self.telefone = self._extract_telefone()
self.email = self._extract_email()
self.endereco = self._extract_endereco()
self.cep = self._extract_cep()
# personal data
self.dados_pessoais_html = self._get_aluno_online_html( '/recadastramento_dados_pessoais/recadastramento_dados_pessoais.php' )
self.nascimento = self._extract_nascimento()
self.sexo = self._extract_sexo()
self.estado_civil = self._extract_estado_civil()
self.naturalidade = self._extract_naturalidade()
self.nacionalidade = self._extract_nacionalidade()
self.pai = self._extract_pai()
self.mae = self._extract_mae()
self.cpf = self._extract_cpf()
self.rg = self._extract_rg() # Number, Issuing Agency, UF, Country, Issue Date, Expiration Date
self.titulo_eleitor = self._extract_titulo_eleitor() # Number, Electoral Zone, Section, UF, Issue Date
self.certificado_reservista = self._extract_certificado_reservista() # Number, Serial No., Issuing Agency, Type, Issue Date, UF
self.ensino_medio = self._extract_ensino_medio() # School Name, Country, UF, Type of Education, Completion Date
# courses taken
self.disciplinas_realizadas_html = self._get_aluno_online_html( '/requisicaoaluno/requisicao.php' , { 'requisicao': 'DisciplinasRealizadas' } )
self.disciplinas = self._extract_disciplinas()
def _get_aluno_online_html( self , endpoint , parameters = {} ):
result = None
try:
parameters.update( { 'matricula': self.matricula } )
data = urlencode( parameters )
request = Request( self.aluno_online_url + endpoint , data.encode( 'ascii' ) )
response = urlopen( request )
result = BeautifulSoup( response.read() , 'html.parser' )
except:
pass
return result
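# Hedged usage sketch (the matricula below is a made-up placeholder): the
# class is driven entirely from __init__, so typical use is simply
#   aluno = HackAlunoOnline('201500000000', full_search=False)
#   print(aluno.nome, aluno.curso, aluno.cra)
# with full_search=True also populating the contact and personal-data fields.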
def _extract_nome( self ):
try:
nome = self.main_html.find( id = "table_cabecalho_rodape" ).find_all( 'font' )[2].string[15:]
except:
nome = ''
return nome
def _extract_cra( self ):
try:
cra = float( self.main_html.find_all( 'div' )[7].text[16:].replace( ',' , '.' ) )
except:
cra = ''
return cra
def _extract_curso( self ):
try:
curso = self.main_html.find_all( 'div' )[6].text[8:]
except:
curso = ''
return curso
def _extract_situacao( self ):
try:
situacao = self.main_html.find_all( 'div' )[4].text[11:]
except:
situacao = ''
return situacao
def _extract_periodo( self ):
try:
for element in self.main_html.select( 'div > b' ):
if ( element.text == "Períodos Utilizados/Em Uso para Integralização Curricular:" ):
periodo = int( element.parent.text[59:] )
except:
periodo = ''
return periodo
def _format_telefone( self , ddd , tel , ramal ):
return '({0}) {1} [{2}]'.format( ddd , tel[:4] + '-' + tel[4:] , ( 'Sem Ramal' if not ramal else ( 'Ramal ' + ramal ) ) )
def _extract_telefone( self ):
telefone = []
# Tel 1..2
for i in range( 1 , 3 ):
try:
ddd = self.dados_contato_html.find( 'input' , { 'name': 'num_ddd_' + str( i ) + '_pag' } ).get( 'value' )
tel = self.dados_contato_html.find( 'input' , { 'name': 'num_tel_' + str( i ) + '_pag' } ).get( 'value' )
ramal = self.dados_contato_html.find( 'input' , { 'name': 'num_ramal_' + str( i ) + '_pag' } ).get( 'value' )
telefone.append( self._format_telefone( ddd , tel , ramal ) )
except:
pass
return telefone
def _extract_email( self ):
try:
email = self.dados_contato_html.find( 'input' , { 'name': 'dsc_email_pag' } ).get( 'value' )
except:
email = ''
return email
def _extract_endereco( self ):
try:
endereco = self.dados_contato_html.find( 'input' , { 'name': 'txt_end_pag' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.find( 'input' , { 'name': 'cod_bairro_input' } ).get( 'value' )
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_munic_pag"] option[selected]' )[0].text
endereco += ', ' + self.dados_contato_html.select( 'select[name="cod_uf_pag"] option[selected]' )[0].text
except:
endereco = ''
return endereco
def _extract_cep( self ):
try:
cep = self.dados_contato_html.find( 'input' , { 'name': 'num_cep_pag' } ).get( 'value' )
cep = cep[:5] + '-' + cep[5:]
except:
cep = ''
return cep
def _extract_nascimento( self ):
try:
nascimento = self.dados_pessoais_html.find_all( 'div' )[2].text[15:]
except:
nascimento = ''
return nascimento
def _extract_sexo( self ):
try:
sexo = self.dados_pessoais_html.find_all( 'div' )[3].text[6:]
except:
sexo = ''
return sexo
def _extract_estado_civil( self ):
try:
civil = self.dados_pessoais_html.find_all( 'div' )[4].text[12:]
except:
civil = ''
return civil
def _extract_naturalidade( self ):
try:
naturalidade = self.dados_pessoais_html.find_all( 'div' )[5].text[14:]
except:
naturalidade = ''
return naturalidade
def _extract_nacionalidade( self ):
try:
nacionalidade = self.dados_pessoais_html.find_all( 'div' )[6].text[15:]
except:
nacionalidade = ''
return nacionalidade
def _extract_pai( self ):
try:
pai = self.dados_pessoais_html.find_all( 'div' )[7].text[13:]
except:
pai = ''
return pai
def _extract_mae( self ):
try:
mae = self.dados_pessoais_html.find_all( 'div' )[8].text[13:]
except:
mae = ''
return mae
def _extract_cpf( self ):
try:
cpf = self.dados_pessoais_html.find_all( 'font' )[10].text
cpf = cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
except:
cpf = ''
return cpf
def _extract_dados_pessoais_divs( self , start , end , cut ):
arrayReturn = []
try:
array = self.dados_pessoais_html.find_all( 'div' )[start:end]
arrayReturn.append( array[0].text[cut:] )
for data in array[1:]:
text = data.text.strip()
if ( ( not 'Não Informado' in text ) and ( not '__/__/____' in text ) ):
arrayReturn.append( text )
except:
arrayReturn = ''
return arrayReturn
def _extract_rg( self ):
return self._extract_dados_pessoais_divs( 9 , 14 , 8 )
def _extract_titulo_eleitor( self ):
return self._extract_dados_pessoais_divs( 15 , 19 , 8 )
def _extract_certificado_reservista( self ):
return self._extract_dados_pessoais_divs( 20 , 25 , 8 )
def _extract_ensino_medio( self ):
return self._extract_dados_pessoais_divs( 26 , 31 , 25 )
def _extract_disciplinas( self ):
disciplinas = []
try:
for linha in self.disciplinas_realizadas_html.find_all( 'div' , style = re.compile( '^width:100%;font-size=12px;' ) ):
conteudoLinha = []
for coluna in linha.children:
conteudoColuna = coluna.string.strip()
if ( conteudoColuna and not re.match( '\\d{4}/\\d' , conteudoColuna ) ):
conteudoLinh
|