Dataset columns (name, type, observed range):

  repo_name   stringlengths   5 to 100
  path        stringlengths   4 to 231
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int64           6 to 947k
  score       float64         0 to 0.34
  prefix      stringlengths   0 to 8.16k
  middle      stringlengths   3 to 512
  suffix      stringlengths   0 to 8.17k

Each row below lists these fields in order (repo_name, path, language, license, size, score), followed by the file text (prefix + middle + suffix).
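The prefix/middle/suffix columns look like a fill-in-the-middle split of each source file; the sketch below assumes that reading. The helper name and the abridged row values are illustrative, not taken from the dataset.

    # Minimal sketch, assuming prefix/middle/suffix are a fill-in-the-middle
    # split of each file: concatenating the three fields reproduces the text.
    # The row values below are abridged placeholders, not a real dataset row.
    def reassemble(row):
        """Rebuild the full source text of one dataset row."""
        return row["prefix"] + row["middle"] + row["suffix"]

    row = {
        "repo_name": "akimo12345/AndroidViewClient",
        "path": "src/com/dtmilano/android/viewclient.py",
        "prefix": "# -*- coding: utf-8 -*-\n__version__ = ",
        "middle": "'10.6.1'",
        "suffix": "\n",
    }
    print(reassemble(row))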
akimo12345/AndroidViewClient
src/com/dtmilano/android/viewclient.py
Python
apache-2.0
183,943
0.007245
# -*- coding: utf-8 -*- ''' Copyright (C) 2012-2015 Diego Torres Milano Created on Feb 2, 2012 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Diego Torres Milano ''' __version__ = '10.6.1' import sys import warnings if sys.executable: if 'monkeyrunner' in sys.executable: warnings.warn( ''' You should use a 'python' interpreter, not 'monkeyrunner' for this module ''', RuntimeWarning) import subprocess import re import socket import os import types import time import signal import copy import pickle import platform import xml.parsers.expat import unittest from com.dtmilano.android.common import _nd, _nh, _ns, obtainPxPy, obtainVxVy,\ obtainVwVh, obtainAdbPath from com.dtmilano.android.window import Window from com.dtmilano.android.adb import adbclient from com.dtmilano.android.uiautomator.uiautomatorhelper import UiAutomatorHelper DEBUG = False DEBUG_DEVICE = DEBUG and False DEBUG_RECEIVED = DEBUG and False DEBUG_TREE = DEBUG and False DEBUG_GETATTR = DEBUG and False DEBUG_CALL = DEBUG and False DEBUG_COORDS = DEBUG and False DEBUG_TOUCH = DEBUG and False DEBUG_STATUSBAR = DEBUG and False DEBUG_WINDOWS = DEBUG and False DEBUG_BOUNDS = DEBUG and False DEBUG_DISTANCE = DEBUG and False DEBUG_MULTI = DEBUG and False DEBUG_VIEW = DEBUG and False DEBUG_VIEW_FACTORY = DEBUG and False DEBUG_CHANGE_LANGUAGE = DEBUG and False WARNINGS = False VIEW_SERVER_HOST = 'localhost' VIEW_SERVER_PORT = 4939 ADB_DEFAULT_PORT = 5555 OFFSET = 25 ''' This assumes the smallest touchable view on the screen is approximately 50px x 50px and touches it at M{(x+OFFSET, y+OFFSET)} ''' USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES = True ''' Use C{AdbClient} to obtain the needed properties. If this is C{False} then C{adb shell getprop} is used ''' USE_PHYSICAL_DISPLAY_INFO = True ''' Use C{dumpsys display} to obtain display properties. If this is C{False} then C{USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES} is used ''' SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED = False ''' Skips some classes related with the Action Bar and the PhoneWindow$DecorView in the coordinates calculation @see: L{View.getXY()} ''' VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED = False ''' Under some conditions the touch event should be longer [t(DOWN) << t(UP)]. 
C{True} enables a workaround to delay the events.''' # some device properties VERSION_SDK_PROPERTY = 'ro.build.version.sdk' VERSION_RELEASE_PROPERTY = 'ro.build.version.release' # some constants for the attributes ID_PROPERTY = 'mID' ID_PROPERTY_UI_AUTOMATOR = 'uniqueId' TEXT_PROPERTY = 'text:mText' TEXT_PROPERTY_API_10 = 'mText' TEXT_PROPERTY_UI_AUTOMATOR = 'text' WS = u"\xfe" # the whitespace replacement char for TEXT_PROPERTY TAG_PROPERTY = 'getTag()' LEFT_PROPERTY = 'layout:mLeft' LEFT_PROPERTY_API_8 = 'mLeft' TOP_PROPERTY = 'layout:mTop' TOP_PROPERTY_API_8 = 'mTop' WIDTH_PROPERTY = 'layout:getWidth()' WIDTH_PROPERTY_API_8 = 'getWidth()' HEIGHT_PROPERTY = 'layout:getHeight()' HEIGHT_PROPERTY_API_8 = 'getHeight()' GET_VISIBILITY_PROPERTY = 'getVisibility()' LAYOUT_TOP_MARGIN_PROPERTY = 'layout:layout_topMargin' IS_FOCUSED_PROPERTY_UI_AUTOMATOR = 'focused' IS_FOCUSED_PROPERTY = 'focus:isFocused()' # visibility VISIBLE = 0x0 INVISIBLE = 0x4 GONE = 0x8 RegexType = type(re.compile('')) IP_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}$') ID_RE = re.compile('id/([^/]*)(/(\d+))?') class ViewNotFoundException(Exception): ''' ViewNotFoundException is raised when a View is not found. ''' def __init__(self, attr, value, root): if isinstance(value, RegexType): msg = "Couldn't find View with %s that matches '%s' in tree with root=%s" % (attr, value.pattern, root) else: msg = "Couldn't find View with %s='%s' in tree with root=%s" % (attr, value, root) super(Exception, self).__init__(msg) class View: ''' View class ''' @staticmethod def factory(arg1, arg2, version=-1, forceviewserveruse=False, windowId=None): ''' View factory @type arg1: ClassType or dict @type arg2: View instance or AdbClient ''' if DEBUG_VIEW_FACTORY: print >> sys.stderr, "View.factory(%s, %s, %s, %s)" % (arg1, arg2, version, forceviewserveruse) if type(arg1) == types.ClassType: cls = arg1 attrs = None else: cls = None attrs = arg1 if isinstance(arg2, View): view = arg2 device = None else: device = arg2 view = None if attrs and attrs.has_key('class'): clazz = attrs['class'] if DEBUG_VIEW_FACTORY: print >> sys.stderr, " View.factory: creating View with specific class: %s" % clazz if clazz == 'android.widget.TextView': return TextView(attrs, device, version, forceviewserveruse, windowId) elif clazz == 'android.widget.EditText': return EditText(attrs, device, version, forceviewserveruse, windowId) elif clazz == 'android.widget.ListView': return ListView(attrs, device, version, forceviewserveruse, windowId) else: return View(attrs, device, version, forceviewserveruse, windowId) elif cls: if view: return cls.__copy(view) else: return cls(attrs, device, version, forceviewserveruse, windowId) elif view: return copy.copy(view) else: if DEBUG_VIEW_FACTORY: print >> sys.stderr, " View.factory: creating generic View" return View(attrs, device, version, forceviewserveruse, windowId) @classmethod def __copy(cls, view): ''' Copy constructor ''' return cls(view.map, view.device, view.version, view.forceviewserveruse, view.windowId) def __init__(self, _map, device, version=-1, forceviewserveruse=False, windowId=None): ''' Constructor @type _map: map @param _map: the map containing the (attribute, value) pairs @type device: AdbClient @param device: the device containing this View @type version: int @param version: the Android SDK version number of the platform where this View belongs. If this is C{-1} then the Android SDK version will be obtained in this constructor. 
@type forceviewserveruse: boolean @param forceviewserveruse: Force the use of C{ViewServer} even if the conditions were given to use C{UiAutomator}. ''' if DEBUG_VIEW: print >> sys.stderr, "View.__init__(%s, %s, %s, %s)" % ("map" if _map is not None else None, device, version, forceviewserveruse) if _map: print >> sys.stderr, " map:", type(_map) for attr, val in _map.iteritems(): if len(val) > 50: val = val[:50] + "..." print >> sys.stderr, " %s=%s" % (attr, val) self.map = _map ''' The map that contains t
he C{attr},C{value} pairs ''' self.device = device ''' The AdbClient ''' self.children = [] ''' The children of this View ''' self.parent = None ''' The parent of this View ''' self.windows = {} self.currentFocus = None ''' The current focus ''' self.windowId = windowId ''' The window this view resides '''
self.build = {} ''' Build properties ''' self.version = version
instituteofdesign/django-lms
apps/springboard/views.py
Python
bsd-3-clause
1,051
0.005709
from django.conf import settings
from django.db.models import Q
from libs.django_utils import render_to_response
from django.views.generic import ListView
from springboard.models import IntranetApplication
from django.contrib.auth.decorators import login_required
from alerts.models import Alert


class SpringBoard(ListView):
    context_object_name = "applications"
    template_name = "springboard/springboard.html"

    def get_queryset(self):
        # Check the groups the user is allowed to see
        return IntranetApplication.objects.filter(
            Q(groups__in=self.request.user.groups.all()) | Q(groups__isnull=True)
        ).distinct()

    def get_context_data(self, **kwargs):
        # Temporary message for testing
        from django.contrib import messages
        # Call the base implementation first to get a context
        context = super(SpringBoard, self).get_context_data(**kwargs)
        # Get all the alerts for the user
        context['alerts'] = Alert.objects.filter(sent_to=self.request.user)
        return context
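login_required is imported above but never applied to the class-based view; below is a minimal urlconf sketch showing one conventional way to wire it up. The URL pattern, module path, and Django-version assumptions are illustrative, not taken from the project.

    # Hypothetical urls.py sketch (not from the project): wrap the class-based
    # view in login_required when routing it. Assumes a Django version that
    # accepts a plain urlpatterns list and provides django.conf.urls.url.
    from django.conf.urls import url
    from django.contrib.auth.decorators import login_required

    from springboard.views import SpringBoard

    urlpatterns = [
        url(r'^$', login_required(SpringBoard.as_view()), name='springboard'),
    ]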
saurabh6790/google_integration
google_integration/google_connect/doctype/google_app_setup/google_app_setup.py
Python
mit
280
0.007143
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals

import frappe
from frappe.model.document import Document


class GoogleAppSetup(Document):
    pass
brajput24/fabric-bolt
fabric_bolt/utils/runner.py
Python
mit
1,251
0.000799
from logan.runner import run_app, configure_app

import sys
import base64
import os

KEY_LENGTH = 40

CONFIG_TEMPLATE = """
from fabric_bolt.core.settings.base import *

CONF_ROOT = os.path.dirname(__file__)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(CONF_ROOT, 'fabric-bolt.db'),
        'USER': 'sqlite3',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

SECRET_KEY = %(default_key)r
"""


def generate_settings():
    output = CONFIG_TEMPLATE % dict(
        default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
    )
    return output


def configure():
    configure_app(
        project='fabric-bolt',
        default_config_path='~/.fabric-bolt/settings.py',
        default_settings='fabric_bolt.core.settings.base',
        settings_initializer=generate_settings,
        settings_envvar='FABRIC_BOLT_CONF',
    )


def main(progname=sys.argv[0]):
    run_app(
        project='fabric-bolt',
        default_config_path='~/.fabric-bolt/settings.py',
        default_settings='fabric_bolt.core.settings.base',
        settings_initializer=generate_settings,
        settings_envvar='FABRIC_BOLT_CONF',
    )


if __name__ == '__main__':
    main()
zetaops/SpiffWorkflow
doc/conf.py
Python
lgpl-3.0
5,907
0.003894
# -*- coding: utf-8 -*- from __future__ import division # -*- coding: utf-8 -*- # # Sphinx documentation build configuration file, created by # sphinx-quickstart.py on Sat Mar 8 21:47:50 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import sys, os, re # If your extensions are in another directory, add it here. sys.path.append(os.path.dirname(os.path.dirname(__file__))) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.addons.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'SpiffWorkflow' copyright = '2012 ' + ', '.join(open('../AUTHORS').readlines()) # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. import SpiffWorkflow version = SpiffWorkflow.__version__ # The full version, including alpha/beta/rc tags. release = version # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'friendly' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'sphinxdoc.css' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['figures'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Content template for the index page. html_index = 'index.html' # Custom sidebar templates, maps page names to templates. html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # templates. html_additional_pages = {'index': 'index.html'} # If true, the reST sources are included in the HTML build as _sources/<name>. #html_copy_source = True html_use_opensearch = 'http://sphinx.pocoo.org' # Output file base name for HTML help builder. 
htmlhelp_basename = 'Sphinxdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file
, target name, title, author, document class [howto/manual]). latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation', 'Georg Brandl', 'manual', 1)] latex_logo = '_static/sphinx.png' #latex_use_parts = True # Additional stuff for the LaTeX preamble. latex_elements = { 'fontpkg': '\\usepackage{palatino}' }
# Documents to append as an appendix to all manuals. #latex_appendices = [] # Extension interface # ------------------- from sphinx import addnodes dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$') def parse_directive(env, sig, signode): if not sig.startswith('.'): dec_sig = '.. %s::' % sig signode += addnodes.desc_name(dec_sig, dec_sig) return sig m = dir_sig_re.match(sig) if not m: signode += addnodes.desc_name(sig, sig) return sig name, args = m.groups() dec_name = '.. %s::' % name signode += addnodes.desc_name(dec_name, dec_name) signode += addnodes.desc_addname(args, args) return name def parse_role(env, sig, signode): signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig) return sig event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)') def parse_event(env, sig, signode): m = event_sig_re.match(sig) if not m: signode += addnodes.desc_name(sig, sig) return sig name, args = m.groups() signode += addnodes.desc_name(name, name) plist = addnodes.desc_parameterlist() for arg in args.split(','): arg = arg.strip() plist += addnodes.desc_parameter(arg, arg) signode += plist return name def setup(app): from sphinx.ext.autodoc import cut_lines app.connect('autodoc-process-docstring', cut_lines(4, what=['module'])) app.add_description_unit('directive', 'dir', 'pair: %s; directive', parse_directive) app.add_description_unit('role', 'role', 'pair: %s; role', parse_role) app.add_description_unit('confval', 'confval', 'pair: %s; configuration value') app.add_description_unit('event', 'event', 'pair: %s; event', parse_event)
trgomes/estrutura-de-dados
Exercicios/5-balanceamento.py
Python
mit
3,019
0.002659
# -*- coding: utf-8 -*-
import unittest


class PilhaVaziaErro(Exception):
    pass


class Pilha():
    def __init__(self):
        self.lista = []

    def topo(self):
        if self.lista:
            return self.lista[-1]
        raise PilhaVaziaErro()

    def vazia(self):
        return not bool(self.lista)

    def empilhar(self, valor):
        self.lista.append(valor)

    def desempilhar(self):
        try:
            return self.lista.pop()
        except IndexError:
            raise PilhaVaziaErro


def esta_balanceada(expressao):
    """
    Checks whether an expression has balanced parentheses, brackets and braces.

    The student must state the time and space complexity of the function.
    Only the stack built in the previous class may be used as the data structure.

    :param expressao: string with the expression to be checked
    :return: True if the expression is balanced, False otherwise

    Time complexity: O(n)
    Memory: O(n)
    """
    if expressao:
        pilha = Pilha()
        if expressao[0] in '}])':
            return False
        for i in expressao:
            if i in '{[(':
                pilha.empilhar(i)
            elif i in '}])':
                if i == '}' and pilha.desempilhar() != '{':
                    return False
                elif i == ']' and pilha.desempilhar() != '[':
                    return False
                elif i == ')' and pilha.desempilhar() != '(':
                    return False
        if pilha.vazia():
            return True
        return False
    else:
        return True


class BalancearTestes(unittest.TestCase):
    def test_expressao_vazia(self):
        self.assertTrue(esta_balanceada(''))

    def test_parenteses(self):
        self.assertTrue(esta_balanceada('()'))

    def test_chaves(self):
        self.assertTrue(esta_balanceada('{}'))

    def test_colchetes(self):
        self.assertTrue(esta_balanceada('[]'))

    def test_todos_caracteres(self):
        self.assertTrue(esta_balanceada('({[]})'))
        self.assertTrue(esta_balanceada('[({})]'))
        self.assertTrue(esta_balanceada('{[()]}'))

    def test_chave_nao_fechada(self):
        self.assertFalse(esta_balanceada('{'))

    def test_colchete_nao_fechado(self):
        self.assertFalse(esta_balanceada('['))

    def test_parentese_nao_fechado(self):
        self.assertFalse(esta_balanceada('('))

    def test_chave_nao_aberta(self):
        self.assertFalse(esta_balanceada('}{'))

    def test_colchete_nao_aberto(self):
        self.assertFalse(esta_balanceada(']['))

    def test_parentese_nao_aberto(self):
        self.assertFalse(esta_balanceada(')('))

    def test_falta_de_caracter_de_fechamento(self):
        self.assertFalse(esta_balanceada('({[]}'))

    def test_falta_de_caracter_de_abertura(self):
        self.assertFalse(esta_balanceada('({]})'))

    def test_expressao_matematica_valida(self):
        self.assertTrue(esta_balanceada('({[1+3]*5}/7)+9'))
njoyce/sockjs-gevent
setup.py
Python
mit
2,151
0.00186
import os
import sys

from setuptools import setup, find_packages

version = '0.3.3'


def get_package_manifest(filename):
    packages = []

    with open(filename) as package_file:
        for line in package_file.readlines():
            line = line.strip()

            if not line:
                continue

            if line.startswith('#'):
                # comment
                continue

            if line.startswith('-e '):
                # not a valid package
                continue

            packages.append(line)

    return packages


def get_install_requires():
    """
    :returns: A list of packages required for installation.
    """
    return get_package_manifest('requirements.txt')


def get_tests_requires():
    """
    :returns: A list of packages required for running the tests.
    """
    packages = get_package_manifest('requirements_dev.txt')

    try:
        from unittest import mock
    except ImportError:
        packages.append('mock')

    if sys.version_info[:2] < (2, 7):
        packages.append('unittest2')

    return packages


def read(f):
    with open(os.path.join(os.path.dirname(__file__), f)) as f:
        return f.read().strip()


setup(
    name='sockjs-gevent',
    version=version,
    description=('gevent base sockjs server'),
    long_description='\n\n'.join((read('README.md'), read('CHANGES.txt'))),
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Internet :: WWW/HTTP",
        'Topic :: Internet :: WWW/HTTP :: WSGI'
    ],
    author='Nick Joyce',
    author_email='[email protected]',
    url='https://github.com/njoyce/sockjs-gevent',
    license='MIT',
    install_requires=get_install_requires(),
    tests_require=get_tests_requires(),
    setup_requires=['nose>=1.0'],
    test_suite='nose.collector',
    include_package_data=True,
    packages=find_packages(exclude=["examples", "tests"]),
    zip_safe=False,
)
vberthiaume/digitalFilters
ch2/freqplot.py
Python
gpl-3.0
469
0.01919
################################ FIG J.2 P.683 ################################
import matplotlib.pyplot as plt


def freqplot(fdata, ydata, symbol='', ttl='', xlab='Frequency (Hz)', ylab=''):
    """FREQPLOT - Plot a function of frequency. See myplot for more features."""
    # The commented-out line below is the MATLAB original: it defaults fdata
    # to 0..length(ydata)-1 when the function is called with a single argument.
    # if nargin<2, fdata=0:length(ydata)-1; end
    plt.plot(fdata, ydata, symbol)
    plt.grid()
    plt.title(ttl)
    plt.ylabel(ylab)
    plt.xlabel(xlab)
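A short usage sketch for the helper above; the frequency grid and magnitude values are made up purely for illustration.

    # Hypothetical usage of freqplot: plot a toy magnitude curve over frequency.
    import numpy as np

    f = np.linspace(0, 22050, 512)           # frequency axis in Hz
    mag = 1.0 / (1.0 + (f / 1000.0) ** 2)    # made-up low-pass-like magnitude

    freqplot(f, mag, symbol='-', ttl='Toy magnitude response', ylab='Magnitude')
    plt.show()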
baida21/py-flask-signup
tests/application-tests.py
Python
apache-2.0
1,642
0.004263
# Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import application
import unittest
from application import application
from flask import Flask, current_app, request, Response

""" Main test cases for our application """


class AppTestCase(unittest.TestCase):
    # application = Flask(__name__)

    def setUp(self):
        application.testing = True
        with application.app_context():
            self.client = current_app.test_client()

    def test_load_config(self):
        """ Test that we can load our config properly """
        self.assertTrue(1)

    def test_get_test(self):
        """ Test hitting /test and that we get a correct HTTP response """
        self.assertTrue(1)

    def test_get_form(self):
        """ Test that we can get a signup form """
        self.assertTrue(1)

    def test_get_user(self):
        """ Test that we can get a user context """
        self.assertTrue(1)

    def test_login(self):
        """ Test that we can authenticate as a user """
        self.assertTrue(1)


if __name__ == '__main__':
    unittest.main()
hepochen/hoedown_misaka
docs/conf.py
Python
mit
9,639
0.005602
# -*- coding: utf-8 -*- # # Misaka documentation build configuration file, created by # sphinx-quickstart on Sun Jul 12 11:37:42 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Misaka' copyright = u'2011-2017, Frank Smit' author = u'Frank Smit' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.1.0' # The full version, including alpha/beta/rc tags. release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [alabaster.get_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style
sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this direct
ory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = { # '**': [ # 'about.html', # # 'navigation.html', # # 'relations.html', # # 'searchbox.html', # # 'donate.html', # ] # } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Misakadoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Misaka.tex', u'Misaka Documentation', u'Frank Smit', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs =
wingtk/gvsbuild
gvsbuild/projects/graphene.py
Python
gpl-2.0
1,663
0.001203
# Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

from gvsbuild.utils.base_builders import Meson
from gvsbuild.utils.base_expanders import Tarball
from gvsbuild.utils.base_project import project_add


@project_add
class Graphene(Tarball, Meson):
    def __init__(self):
        Meson.__init__(
            self,
            "graphene",
            archive_url="https://github.com/ebassi/graphene/archive/refs/tags/1.10.6.tar.gz",
            hash="7eba972751d404316a9b59a7c1e0782de263c3cf9dd5ebf1503ba9b8354cc948",
            dependencies=["ninja", "meson", "pkg-config", "glib"],
        )
        if self.opts.enable_gi:
            self.add_dependency("gobject-introspection")
            enable_gi = "enabled"
        else:
            enable_gi = "disabled"
        self.add_param("-Dintrospection={}".format(enable_gi))

    def build(self):
        Meson.build(self, make_tests=True)
        self.install(r".\LICENSE share\doc\graphene")
Shadybloom/synch-profiler
wordfreq-morph.py
Python
mit
7,594
0.006508
#!/usr/bin/env python # -*- coding: utf-8 -*- # Скрипт извлекает слова из текстового файла и сортирует их по частоте. # С помощью модуля pymorphy2 можно привести слова к начальной форме (единственное число, именительный падеж). # Нужен pymorphy2 и русскоязычный словарь для него! # pip install --user pymorphy2 # Примеры: # ./wordfreq-morph.py ./text-file.txt | less # xclip -o | ./wordfreq-morph.py -m # Проверялся на интерпретаторе: # Python 3.6.1 on linux import sys import sqlite3 import os import re import argparse # Сортировка вывода словарей: from collections import OrderedDict #------------------------------------------------------------------------------ # Опции: # Проверочный морфологический словарь (в каталоге скрипта): NORMAL_DICT_PATH = 'dict.opencorpora-sing-nom.txt' NORMAL_DICT_DIR = 'word-length-dicts' database_name = 'opencorpora-sing-nom.sqlite' #------------------------------------------------------------------------- # Аргументы командной строки: def create_parser(): """Список доступных параметров скрипта.""" parser = argparse.ArgumentParser() parser.add_argument('file', nargs='*', help='Русскоязычный текстовый файл в UTF-8' ) parser.add_argument('-m', '--morph', action='store_true', default='False', help='Преобразование слов в начальную форму (нужен pymorphy2)' ) return parser #------------------------------------------------------------------------- # Функции:
def metadict_path (metadict_dir): """Возвращает абсолютный путь к каталогу словарей.""" # Получаем абсолютный путь к каталогу скрипта: script_path = os.path.dirname(os.path.abspath(__file__)) # Добавляем к пути каталог словарей: metadict_path = script_path + '/' + metadict_dir return metadict_path def find_files (directory): """Во
звращает список путей ко всем файлам каталога, включая подкаталоги.""" path_f = [] for d, dirs, files in os.walk(directory): for f in files: # Формирование адреса: path = os.path.join(d,f) # Добавление адреса в список: path_f.append(path) return path_f def lowercase (text): """Создаёт из текста список слов в нижнем регистре""" # Переводим текст в нижний регистр: text = str(text.lower()) # Регексп вытаскивает из текста слова: words = re.findall(r"(\w+)", text, re.UNICODE) # Восстанавливаются ссылки: urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text) words = words + urls return words def wordfreq_old (words): """Создаёт словарь с частотой слов""" stats = {} # Слово -- ключ словаря, значение, это его частота: for word in words: stats[word] = stats.get(word, 0) + 1 return stats def word_test_slow (word): """Светяет слово со словарём, выбирая словарь по длине слова.""" # Определяем длину слова: search_string = '-' + str(len(word)) + '.txt' dicts_list = find_files(metadict_path(NORMAL_DICT_DIR)) test = False # Подключаем словарь для проверки: for dict in dicts_list: if search_string in dict: normal_dict_file = open(dict, "r") normal_dict = normal_dict_file.read() normal_dict_file.close() if word in normal_dict: return True else: return False def word_test_sql (word,cursor): """Проверяет, есть ли слово в базе данных""" # Номер таблицы, это длина слова: word_lenght = len(word) # А вот не нужно хардкодить (число таблиц в базе данных может измениться) if word_lenght > 32: word_lenght = 32 table_name = 'opencorpora' + str(word_lenght) #database = sqlite3.connect(metadict_path(database_name)) #cursor = database.cursor() cursor.execute("SELECT words FROM "+table_name+" WHERE words=?",(word,)) result = cursor.fetchall() #database.close() if result: return True else: return False def wordfreq_morph (words): """Создаёт словарь с частотой слов (в начальной форме)""" # Морфологический анализатор: import pymorphy2 stats = {} n_stats = {} for word in words: stats[word] = stats.get(word, 0) + 1 morph = pymorphy2.MorphAnalyzer() for item in stats: # Слово приводится к начальной форме: n_word = morph.parse(item)[0].normal_form # Неологизмы оставляем без изменений: if word_test_sql(n_word,cursor) is not True: n_word = item # Создаётся новый ключ, или прибавляется значение к существующему: if n_word not in n_stats: n_stats[n_word] = stats[item] else: n_stats[n_word] = n_stats[n_word] + stats[item] return n_stats def dict_sort (stats): """Сортировка словаря по частоте и алфавиту""" stats_sort = OrderedDict(sorted(stats.items(), key=lambda x: x[0], reverse=False)) stats_list = OrderedDict(sorted(stats_sort.items(), key=lambda x: x[1], reverse=True)) return stats_list #------------------------------------------------------------------------- # Тело программы: # Создаётся список аргументов скрипта: parser = create_parser() namespace = parser.parse_args() # Проверяем, существует ли указанный файл: file_patch = ' '.join(namespace.file) if namespace.file is not None and os.path.exists(file_patch): file = open(file_patch, "r") text = file.read() file.close() # Если нет, читаем стандартный ввод: else: text = sys.stdin.read() # Извлекаем из текста слова: words = lowercase(text) # Подключение к базе данных: database = sqlite3.connect(metadict_path(database_name)) cursor = database.cursor() # Если указано преобразование слов: if namespace.morph is True: wordfreq = wordfreq_morph(words) else: wordfreq = wordfreq_old(words) # Отключаемся от базы данных: database.close() # 
Вывод словаря: wordfreq_sort=dict_sort(wordfreq) for word, count in wordfreq_sort.items(): print (count, word)
pitbulk/python3-saml
tests/src/OneLogin/saml2_tests/utils_test.py
Python
bsd-3-clause
40,950
0.004274
# -*- coding: utf-8 -*- # Copyright (c) 2014, OneLogin, Inc. # All rights reserved. from base64 import b64decode import json from lxml import etree from os.path import dirname, join, exists import unittest from xml.dom.minidom import parseString from onelogin.saml2 import compat from onelogin.saml2.constants import OneLogin_Saml2_Constants from onelogin.saml2.settings import OneLogin_Saml2_Settings from onelogin.saml2.utils import OneLogin_Saml2_Utils class OneLogin_Saml2_Utils_Test(unittest.TestCase): data_path = join(dirname(__file__), '..', '..', '..', 'data') def loadSettingsJSON(self, filename=None): if filename: filename = join(dirname(__file__), '..', '..', '..', 'settings', filename) else: filename = join(dirname(__file__), '..', '..', '..', 'settings', 'settings1.json') if exists(filename): stream = open(filename, 'r') settings = json.load(stream) stream.close() return settings else: raise Exception('Settings json file does not exist') def file_contents(self, filename): f = open(filename, 'r') content = f.read() f.close() return content def testFormatCert(self): """ Tests the format_cert method of the OneLogin_Saml2_Utils """ settings_info = self.loadSettingsJSON() cert = settings_info['idp']['x509cert'] self.assertNotIn('-----BEGIN CERTIFICATE-----', cert) self.assertNotIn('-----END CERTIFICATE-----', cert) self.assertEqual(len(cert), 860) formated_cert1 = OneLogin_Saml2_Utils.format_cert(cert) self.assertIn('-----BEGIN CERTIFICATE-----', formated_cert1) self.assertIn('-----END CERTIFICATE-----', formated_cert1) formated_cert2 = OneLogin_Saml2_Utils.format_cert(cert, True) self.assertEqual(formated_cert1, formated_cert2) formated_cert3 = OneLogin_Saml2_Utils.format_cert(cert, False) self.assertNotIn('-----BEGIN CERTIFICATE-----', formated_cert3) self.assertNotIn('-----END CERTIFICATE-----', formated_cert3) self.assertEqual(len(formated_cert3), 860) def testFormatPrivateKey(self): """ Tests the format_private_key method of the OneLogin_Saml2_Utils """ key = "-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDivbhR7P516x/S3BqKxupQe0LONoliupiBOesCO3SHbDrl3+q9\nIbfnfmE04rNuMcPsIxB161TdDpIesLCn7c8aPHISKOtPlAeTZSnb8QAu7aRjZq3+\nPbrP5uW3TcfCGPtKTytHOge/OlJbo078dVhXQ14d1EDwXJW1rRXuUt4C8QIDAQAB\nAoGAD4/Z4LWVWV6D1qMIp1Gzr0ZmdWTE1SPdZ7Ej8glGnCzPdguCPuzbhGXmIg0V\nJ5D+02wsqws1zd48JSMXXM8zkYZVwQYIPUsNn5FetQpwxDIMPmhHg+QNBgwOnk8J\nK2sIjjLPL7qY7Itv7LT7Gvm5qSOkZ33RCgXcgz+okEIQMYkCQQDzbTOyDL0c5WQV\n6A2k06T/azdhUdGXF9C0+WkWSfNaovmTgRXh1G+jMlr82Snz4p4/STt7P/XtyWzF\n3pkVgZr3AkEA7nPjXwHlttNEMo6AtxHd47nizK2NUN803ElIUT8P9KSCoERmSXq6\n6PDekGNic4ldpsSvOeYCk8MAYoDBy9kvVwJBAMLgX4xg6lzhv7hR5+pWjTb1rIY6\nrCHbrPfU264+UZXz9v2BT/VUznLF81WMvStD9xAPHpFS6R0OLghSZhdzhI0CQQDL\n8Duvfxzrn4b9QlmduV8wLERoT6rEVxKLsPVz316TGrxJvBZLk/cV0SRZE1cZf4uk\nXSWMfEcJ/0Zt+LdG1CqjAkEAqwLSglJ9Dy3HpgMz4vAAyZWzAxvyA1zW0no9GOLc\nPQnYaNUN/Fy2SYtETXTb0CQ9X1rt8ffkFP7ya+5TC83aMg==\n-----END RSA PRIVATE KEY-----\n" formated_key = OneLogin_Saml2_Utils.format_private_key(key, True) self.assertIn('-----BEGIN RSA PRIVATE KEY-----', formated_key) self.assertIn('-----END RSA PRIVATE KEY-----', formated_key) self.assertEqual(len(formated_key), 891) formated_key = OneLogin_Saml2_Utils.format_private_key(key, False) self.assertNotIn('-----BEGIN RSA PRIVATE KEY-----', formated_key) self.assertNotIn('-----END RSA PRIVATE KEY-----', formated_key) self.assertEqual(len(formated_key), 816) key_2 = "-----BEGIN PRIVATE 
KEY-----\nMIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAM62buSW9Zgh7CmZ\nouJekK0ac9sgEZkspemjv7SyE6Hbdz+KmUr3C7MI6JuPfVyJbxvMDf3FbgBBK7r5\nyfGgehXwplLMZj8glvV3NkdLMLPWmaw9U5sOzRoym46pVvsEo1PUL2qDK5Wrsm1g\nuY1KIDSHL59NQ7PzDKgm1dxioeXFAgMBAAECgYA/fvRzTReloo3rfWD2Tfv84EpE\nPgaJ2ZghO4Zwl97F8icgIo/R4i760Lq6xgnI+gJiNHz7vcB7XYl0RrRMf3HgbA7z\npJxREmOVltESDHy6lH0TmCdv9xMmHltB+pbGOhqBvuGgFbEOR73lDDV0ln2rEITJ\nA2zjYF+hWe8b0JFeQQJBAOsIIIlHAMngjhCQDD6kla/vce972gCFU7ZeFw16ZMmb\n8W4rGRfQoQWYxSLAFIFsYewSBTccanyYbBNe3njki3ECQQDhJ4cgV6VpTwez4dkp\nU/xCHKoReedAEJhXucTNGpiIqu+TDgIz9aRbrgnUKkS1s06UJhcDRTl/+pCSRRt/\nCA2VAkBkPw4pn1hNwvK1S8t9OJQD+5xcKjZcvIFtKoqonAi7GUGL3OQSDVFw4q1K\n2iSk40aM+06wJ/WfeR+3z2ISrGBxAkAJ20YiF1QpcQlASbHNCl0vs7uKOlDyUAer\nR3mjFPf6e6kzQdi815MTZGIPxK3vWmMlPymgvgYPYTO1A4t5myulAkEA1QioAWcJ\noO26qhUlFRBCR8BMJoVPImV7ndVHE7usHdJvP7V2P9RyuRcMCTVul8RRmyoh/+yG\n4ghMaHo/v0YY5Q==\n-----END PRIVATE KEY-----\n" formated_key_2 = OneLogin_Saml2_Utils.format_private_key(key_2, True) self.assertIn('-----BEGIN PRIVATE KEY-----', formated_key_2) self.assertIn('-----END PRIVATE KEY-----', formated_key_2) self.assertEqual(len(formated_key_2), 916) formated_key_2 = OneLogin_Saml2_Utils.format_private_key(key_2, False) self.assertNotIn('-----BEGIN PRIVATE KEY-----', formated_key_2) self.assertNotIn('-----END PRIVATE KEY-----', formated_key_2) self.assertEqual(len(formated_key_2), 848) key_3 = 'MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAM62buSW9Zgh7CmZouJekK0ac9sgEZkspemjv7SyE6Hbdz+KmUr3C7MI6JuPfVyJbxvMDf3FbgBBK7r5yfGgehXwplLMZj8glvV3NkdLMLPWmaw9U5sOzRoym46pVvsEo1PUL2qDK5Wrsm1guY1KIDSHL59NQ7PzDKgm1dxioeXFAgMBAAECgYA/fvRzTReloo3rfWD2Tfv84EpEPgaJ2ZghO4Zwl97F8icgIo/R4i760Lq6xgnI+gJiNHz7vcB7XYl0RrRMf3HgbA7zpJxREmOVltESDHy6lH0TmCdv9xMmHltB+pbGOhqBvuGgFbEOR73lDDV0ln2rEITJA2zjYF+hWe8b0JFeQQJBAOsIIIlHAMngjhCQDD6kla/vce972gCFU7ZeFw16ZMmb8W4rGRfQoQWYxSLAFIFsYewSBTccanyYbBNe3njki3ECQQDhJ4c
gV6VpTwez4dkpU/xCHKoReedAEJhXucTNGpiIqu+TDgIz9aRbrgnUKkS1s06UJhcDRTl/+pCSRRt/CA2VAkBkPw4pn1hNwvK1S8t9OJQD+5xcKjZcvIFtKoqonAi7GUGL3OQSDVFw4q1K2iSk40aM+06wJ/WfeR+3z2ISrGBxAkAJ20YiF1QpcQlASbHNCl0vs7uKOlDyUAerR3mjFPf6e6kzQdi815MTZGIPxK3vWmMlPymgvgYPYTO1A4t5myulAkEA1QioAWcJoO26qhUlFRBCR8BMJoVPImV7ndVHE7usHdJvP7V2P9RyuRcMCTVul8RRmyoh/+yG4ghMaHo/v0YY5Q==' formated_key_3 = OneLo
gin_Saml2_Utils.format_private_key(key_3, True) self.assertIn('-----BEGIN RSA PRIVATE KEY-----', formated_key_3) self.assertIn('-----END RSA PRIVATE KEY-----', formated_key_3) self.assertEqual(len(formated_key_3), 924) formated_key_3 = OneLogin_Saml2_Utils.format_private_key(key_3, False) self.assertNotIn('-----BEGIN PRIVATE KEY-----', formated_key_3) self.assertNotIn('-----END PRIVATE KEY-----', formated_key_3) self.assertNotIn('-----BEGIN RSA PRIVATE KEY-----', formated_key_3) self.assertNotIn('-----END RSA PRIVATE KEY-----', formated_key_3) self.assertEqual(len(formated_key_3), 848) def testRedirect(self): """ Tests the redirect method of the OneLogin_Saml2_Utils """ request_data = { 'http_host': 'example.com' } # Check relative and absolute hostname = OneLogin_Saml2_Utils.get_self_host(request_data) url = 'http://%s/example' % hostname url2 = '/example' target_url = OneLogin_Saml2_Utils.redirect(url, {}, request_data) target_url2 = OneLogin_Saml2_Utils.redirect(url2, {}, request_data) self.assertEqual(target_url, target_url2) # Check that accept http/https and reject other protocols url3 = 'https://%s/example?test=true' % hostname url4 = 'ftp://%s/example' % hostname target_url3 = OneLogin_Saml2_Utils.redirect(url3, {}, request_data) self.assertIn('test=true', target_url3) self.assertRaisesRegexp(Exception, 'Redirect to invalid URL', OneLogin_Saml2_Utils.redirect, url4, {}, request_data) # Review parameter prefix parameters1 = { 'value1': 'a' } target_url5 = OneLogin_Saml2_Utils.redirect(url, parameters1, request_data) self.assertEqual('http://%s/example?value1=a' % hostname, target_url5) target_url6 = OneLogin_Saml2_Utils.redirect(ur
googleads/google-ads-python
google/ads/googleads/v10/enums/types/payment_mode.py
Python
apache-2.0
1,172
0.000853
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.ads.googleads.v10.enums",
    marshal="google.ads.googleads.v10",
    manifest={"PaymentModeEnum",},
)


class PaymentModeEnum(proto.Message):
    r"""Container for enum describing possible payment modes."""

    class PaymentMode(proto.Enum):
        r"""Enum describing possible payment modes."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        CLICKS = 4
        CONVERSION_VALUE = 5
        CONVERSIONS = 6
        GUEST_STAY = 7


__all__ = tuple(sorted(__protobuf__.manifest))
flumotion-mirror/flumotion
flumotion/component/misc/httpserver/serverstats.py
Python
lgpl-2.1
7,784
0
# -*- Mode: Python; test-case-name: -*- # vi:si:et:sw=4:sts=4:ts=4 # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L. # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # # This file may be distributed and/or modified under the terms of # the GNU Lesser General Public License version 2.1 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.LGPL" in the source distribution for more information. # # Headers in this file shall remain intact. import time from twisted.internet import reactor from flumotion.common import log # Minimum size to take in account when calculating mean file read MIN_REQUEST_SIZE = 64 * 1024 + 1 # Statistics update period STATS_UPDATE_PERIOD = 10 class RequestStatistics(object): def __init__(self, serverStats): self._stats = serverStats self.bytesSent = 0L self._stats._onRequestStart(self) def onDataSent(self, size): self.bytesSent += size self._stats._onRequestDataSent(self, size) def onCompleted(self, size): self._stats._onRequestComplete(self, size) class ServerStatistics(object): _updater = None _callId = None def __init__(self): now = time.time() self.startTime = now self.currentRequestCount = 0 self.totalRequestCount = 0 self.requestCountPeak = 0 self.requestCountPeakTime = now self.finishedRequestCount = 0 self.totalBytesSent = 0L # Updated by a call to the update method self.meanRequestCount = 0 self.currentRequestRate = 0 self.requestRatePeak = 0 self.requestRatePeakTime = now self.meanRequestRate = 0.0 self.currentBitrate = 0 self.meanBitrate = 0 self.bitratePeak = 0 self.bitratePeakTime = now self._fileReadRatios = 0.0 self._lastUpdateTime = now self._lastRequestCount = 0 self._lastBytesSent = 0L def startUpdates(self, updater): self._updater = updater self._set("bitrate-peak-time", self.bitratePeakTime) self._set("request-rate-peak-time", self.requestRatePeakTime) self._set("request-count-peak-time", self.requestCountPeakTime) if self._callId is None: self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update) def stopUpdates(self): self._updater = None if self._callId is not None: self._callId.cancel() self._callId = None def getMeanFileReadRatio(self): if self.finishedRequestCount > 0: return self._fileReadRatios / self.finishedRequestCount return 0.0 meanFileReadRatio = property(getMeanFileReadRatio) def _update(self): now = time.time() updateDelta = now - self._lastUpdateTime # Update average concurrent request meanReqCount = self._updateAverage(self._lastUpdateTime, now, self.meanRequestCount, self.currentRequestCount) # Calculate Request rate countDiff = self.totalRequestCount - self._lastRequestCount newReqRate = float(countDiff) / updateDelta # Calculate average request rate meanReqRate = self._updateAverage(self._lastUpdateTime, now, self.currentRequestRate, newReqRate) # Calculate current bitrate bytesDiff = (self.totalBytesSent - self._lastBytesSent) * 8 newBitrate = bytesDiff / updateDelta # calculate average bitrate meanBitrate = self._updateAverage(self._lastUpdateTime, now, self.currentBitrate, newBitrate) # Update Values self.meanRequestCount = meanReqCount self.currentRequestRate = newReqRate self.meanRequestRate = meanReqRate self.currentBitrate = newBitrate self.meanBitrate = meanBitrate # Update the statistics keys with the new values self._set("mean-request-count", meanReqCount) 
self._set("current-request-rate", newReqRate) self._set("mean-request-rate", meanReqRate) self._set("current-bitrate", newBitrate) self._set("mean-bitrate", meanBitrate) # Update request rate peak if newReqRate > self.requestRatePeak: self.requestRatePeak = newReqRate
self.requestRatePeakTime = now # update statistic keys self._set("request-rate-peak", newReqRate) self._set("request-rate-peak-time", now) # Update bitrate peak if newBitrate > self.bitratePeak: self.bitratePeak = newBitrate self
.bitratePeakTime = now # update statistic keys self._set("bitrate-peak", newBitrate) self._set("bitrate-peak-time", now) # Update bytes read statistic key too self._set("total-bytes-sent", self.totalBytesSent) self._lastRequestCount = self.totalRequestCount self._lastBytesSent = self.totalBytesSent self._lastUpdateTime = now # Log the stats self._logStatsLine() self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update) def _set(self, key, value): if self._updater is not None: self._updater.update(key, value) def _onRequestStart(self, stats): # Update counters self.currentRequestCount += 1 self.totalRequestCount += 1 self._set("current-request-count", self.currentRequestCount) self._set("total-request-count", self.totalRequestCount) # Update concurrent request peak if self.currentRequestCount > self.requestCountPeak: now = time.time() self.requestCountPeak = self.currentRequestCount self.requestCountPeakTime = now self._set("request-count-peak", self.currentRequestCount) self._set("request-count-peak-time", now) def _onRequestDataSent(self, stats, size): self.totalBytesSent += size def _onRequestComplete(self, stats, size): self.currentRequestCount -= 1 self.finishedRequestCount += 1 self._set("current-request-count", self.currentRequestCount) if (size > 0) and (stats.bytesSent > MIN_REQUEST_SIZE): self._fileReadRatios += float(stats.bytesSent) / size self._set("mean-file-read-ratio", self.meanFileReadRatio) def _updateAverage(self, lastTime, newTime, lastValue, newValue): lastDelta = lastTime - self.startTime newDelta = newTime - lastTime if lastDelta > 0: delta = lastDelta + newDelta before = (lastValue * lastDelta) / delta after = (newValue * newDelta) / delta return before + after return lastValue def _logStatsLine(self): """ Statistic fields names: TRC: Total Request Count CRC: Current Request Count CRR: Current Request Rate MRR: Mean Request Rate FRR: File Read Ratio MBR: Mean Bitrate CBR: Current Bitrate """ log.debug("stats-http-server", "TRC: %s; CRC: %d; CRR: %.2f; MRR: %.2f; " "FRR: %.4f; MBR: %d; CBR: %d", self.totalRequestCount, self.currentRequestCount, self.currentRequestRate, self.meanRequestRate, self.meanFileReadRatio, self.meanBitrate, self.currentBitrate)
gotlium/django-geoip-redis
geoip/geo.py
Python
gpl-3.0
1,723
0
# -*- coding: utf-8 -*-
__all__ = ["inet_aton", "record_by_ip", "record_by_request", "get_ip",
           "record_by_ip_as_dict", "record_by_request_as_dict"]

import struct
import socket

from geoip.defaults import BACKEND, REDIS_TYPE
from geoip.redis_wrapper import RedisClient
from geoip.models import Range

_RECORDS_KEYS = ('country', 'area', 'city', 'isp', 'provider')


def _from_redis(ip):
    r = RedisClient()
    data = r.zrangebyscore("geoip", ip, 'inf', 0, 1, withscores=True)
    if not data:
        return
    res, score = data[0]
    geo_id, junk, prefix = res.decode().split(":", 2)
    if prefix == "s" and score > ip:
        return
    info = r.get("geoip:%s" % junk)
    if info is not None:
        return info.decode('utf-8', 'ignore').split(':')


def _from_db(ip):
    obj = Range.objects.select_related().filter(
        start_ip__lte=ip, end_ip__gte=ip
    ).order_by('end_ip', '-start_ip')[:1][0]
    if REDIS_TYPE == 'pk':
        return map(lambda k: str(getattr(obj, k).pk), _RECORDS_KEYS)
    return map(lambda k: str(getattr(obj, k)), _RECORDS_KEYS)


def inet_aton(ip):
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def get_ip(request):
    ip = request.META['REMOTE_ADDR']
    if 'HTTP_X_FORWARDED_FOR' in request.META:
        ip = request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]
    return ip


def record_by_ip(ip):
    return (_from_redis if BACKEND == 'redis' else _from_db)(inet_aton(ip))


def record_by_request(request):
    return record_by_ip(get_ip(request))


def record_by_ip_as_dict(ip):
    return dict(zip(_RECORDS_KEYS, record_by_ip(ip)))


def record_by_request_as_dict(request):
    return dict(zip(_RECORDS_KEYS, record_by_ip(get_ip(request))))
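A usage sketch for the lookup helpers above; it assumes a configured Django project with this geoip app and a populated Redis or database backend, and the IP address is illustrative only.

    # Hypothetical usage of the helpers above inside a configured project.
    from geoip.geo import inet_aton, record_by_ip_as_dict

    ip = '203.0.113.7'               # documentation-range address, illustrative
    print(inet_aton(ip))             # the IP packed into a 32-bit integer
    print(record_by_ip_as_dict(ip))  # e.g. {'country': ..., 'area': ..., 'city': ..., 'isp': ..., 'provider': ...}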
rgommers/statsmodels
statsmodels/base/tests/test_generic_methods.py
Python
bsd-3-clause
12,158
0.003208
# -*- coding: utf-8 -*- """Tests that use cross-checks for generic methods Should be easy to check consistency across models Does not cover tsa Initial cases copied from test_shrink_pickle Created on Wed Oct 30 14:01:27 2013 Author: Josef Perktold """ from statsmodels.compat.python import range import numpy as np import statsmodels.api as sm from statsmodels.compat.scipy import NumpyVersion from numpy.testing import assert_, assert_allclose from nose import SkipTest import platform iswin = platform.system() == 'Windows' npversionless15 = NumpyVersion(np.__version__) < '1.5.0' winoldnp = iswin & npversionless15 class CheckGenericMixin(object): def __init__(self): self.predict_kwds = {} @classmethod def setup_class(self): nobs = 500 np.random.seed(987689) x = np.random.randn(nobs, 3) x = sm.add_constant(x) self.exog = x self.xf = 0.25 * np.ones((2, 4)) def test_ttest_tvalues(self): # test that t_test has same results a params, bse, tvalues, ... res = self.results mat = np.eye(len(res.params)) tt = res.t_test(mat) assert_allclose(tt.effect, res.params, rtol=1e-12) # TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10) assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12) assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10) assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10) # test params table frame returned by t_test table_res = np.column_stack((res.params, res.bse, res.tvalues, res.pvalues, res.conf_int())) table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue, tt.conf_int())) table2 = tt.summary_frame().values assert_allclose(table2, table_res, rtol=1e-12) # move this to test_attributes ? assert_(hasattr(res, 'use_t')) tt = res.t_test(mat[0]) tt.summary() # smoke test for #1323 assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10) def test_ftest_pvalues(self): res = self.results use_t = res.use_t k_vars = len(res.params) # check default use_t pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue for k in range(k_vars)] assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25) # sutomatic use_f based on results class use_t pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue for k in range(k_vars)] assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25) # label for pvalues in summary string_use_t = 'P>|z|' if use_t is False else 'P>|t|' summ = str(res.summary()) assert_(string_use_t in summ) # try except for models that don't have summary2 try: summ2 = str(res.summary2()) except AttributeError: summ2 = None if summ2 is not None: assert_(string_use_t in summ2) # TODO The following is not (yet) guaranteed across models #@knownfailureif(True) def test_fitted(self): # ignore wrapper for isinstance check from statsmodels.genmod.generalized_linear_model import GLMResults from statsmodels.discrete.discrete_model import DiscreteResults # FIXME: work around GEE has no wrapper if hasattr(self.results, '_results'): results = self.results._results else: results = self.results if (isinstance(results, GLMResults) or isinstance(results, DiscreteResults)): raise SkipTest res = self.results fitted = res.fittedvalues assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12) assert_allclose(fitted, res.predict(), rtol=1e-12) def test_predict_types(self): res = self.results
# squeeze to make 1d for single regressor test case p_exog = np.squeeze(np.asarray(res.m
odel.exog[:2])) # ignore wrapper for isinstance check from statsmodels.genmod.generalized_linear_model import GLMResults from statsmodels.discrete.discrete_model import DiscreteResults # FIXME: work around GEE has no wrapper if hasattr(self.results, '_results'): results = self.results._results else: results = self.results if (isinstance(results, GLMResults) or isinstance(results, DiscreteResults)): # SMOKE test only TODO res.predict(p_exog) res.predict(p_exog.tolist()) res.predict(p_exog[0].tolist()) else: fitted = res.fittedvalues[:2] assert_allclose(fitted, res.predict(p_exog), rtol=1e-12) # this needs reshape to column-vector: assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()), rtol=1e-12) # only one prediction: assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()), rtol=1e-12) assert_allclose(fitted[:1], res.predict(p_exog[0]), rtol=1e-12) # predict doesn't preserve DataFrame, e.g. dot converts to ndarray # import pandas # predicted = res.predict(pandas.DataFrame(p_exog)) # assert_(isinstance(predicted, pandas.DataFrame)) # assert_allclose(predicted, fitted, rtol=1e-12) ######### subclasses for individual models, unchanged from test_shrink_pickle # TODO: check if setup_class is faster than setup class TestGenericOLS(CheckGenericMixin): def setup(self): #fit for each test, because results will be changed by test x = self.exog np.random.seed(987689) y = x.sum(1) + np.random.randn(x.shape[0]) self.results = sm.OLS(y, self.exog).fit() class TestGenericOLSOneExog(CheckGenericMixin): # check with single regressor (no constant) def setup(self): #fit for each test, because results will be changed by test x = self.exog[:, 1] np.random.seed(987689) y = x + np.random.randn(x.shape[0]) self.results = sm.OLS(y, x).fit() class TestGenericWLS(CheckGenericMixin): def setup(self): #fit for each test, because results will be changed by test x = self.exog np.random.seed(987689) y = x.sum(1) + np.random.randn(x.shape[0]) self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit() class TestGenericPoisson(CheckGenericMixin): def setup(self): #fit for each test, because results will be changed by test x = self.exog np.random.seed(987689) y_count = np.random.poisson(np.exp(x.sum(1) - x.mean())) model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default # use start_params to converge faster start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112]) self.results = model.fit(start_params=start_params, method='bfgs', disp=0) #TODO: temporary, fixed in master self.predict_kwds = dict(exposure=1, offset=0) class TestGenericNegativeBinomial(CheckGenericMixin): def setup(self): #fit for each test, because results will be changed by test np.random.seed(987689) data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) mod = sm.NegativeBinomial(data.endog, data.exog) start_params = np.array([-0.0565406 , -0.21213599, 0.08783076, -0.02991835, 0.22901974, 0.0621026, 0.06799283, 0.08406688, 0.18530969, 1.36645452]) self.results = mod.fit(start_params=start_params, disp=0) class TestGenericLogit(CheckGenericMixin): def setup(
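The cross-check this test module encodes, that a joint t_test against the identity matrix reproduces the per-parameter params, bse and tvalues, also holds outside the test harness. A minimal sketch, assuming only numpy and statsmodels are installed; the data sizes are illustrative:

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
x = sm.add_constant(np.random.randn(200, 3))
y = x.sum(1) + np.random.randn(200)
res = sm.OLS(y, x).fit()

# one contrast per parameter: t_test on the identity reproduces the summary statistics
tt = res.t_test(np.eye(len(res.params)))
assert np.allclose(tt.effect, res.params)
assert np.allclose(np.squeeze(tt.sd), res.bse)
assert np.allclose(np.squeeze(tt.tvalue), res.tvalues)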
leighpauls/k2cro4
third_party/webpagereplay/replay.py
Python
bsd-3-clause
20,951
0.011503
#!/usr/bin/env python # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Replays web pages under simulated network conditions. Must be run as administrator (sudo). To record web pages: 1. Start the program in record mode. $ sudo ./replay.py --record archive.wpr 2. Load the web pages you want to record in a web browser. It is important to clear browser caches before this so that all subresources are requested from the network. 3. Kill the process to stop recording. To replay web pages: 1. Start the program in replay mode with a previously recorded archive. $ sudo ./replay.py archive.wpr 2. Load recorded pages in a web browser. A 404 will be served for any pages or resources not in the recorded archive. Network simulation examples: # 128KByte/s uplink bandwidth, 4Mbps/s downlink bandwidth with 100ms RTT time $ sudo ./replay.py --up 128KByte/s --down 4Mbit/s --delay_ms=100 archive.wpr # 1% packet loss rate $ sudo ./replay.py --packet_loss_rate=0.01 archive.wpr """ import logging import optparse import os import sys import traceback import cachemissarchive import customhandlers import dnsproxy import httparchive import httpclient import httpproxy import platformsettings import replayspdyserver import servermanager import trafficshaper if sys.version < '2.6': print 'Need Python 2.6 or greater.' sys.exit(1) def configure_logging(log_level_name, log_file_name=None): """Configure logging level and format. Args: log_level_name: 'debug', 'info', 'warning', 'error', or 'critical'. log_file_name: a file name """ if logging.root.handlers: logging.critical('A logging method (e.g. 
"logging.warn(...)")' ' was called before logging was configured.') log_level = getattr(logging, log_level_name.upper()) log_format = '%(asctime)s %(levelname)s %(message)s' logging.basicConfig(level=log_level, format=log_format) logger = logging.getLogger() if log_file_name: fh = logging.FileHandler(log_file_name) fh.setLevel(log_level) fh.setFormatter(logging.Formatter(log_format)) logger.addHandler(fh) system_handler = platformsettings.get_system_logging_handler() if system_handler: logger.addHandler(system_handler) def AddDnsForward(server_manager, host): """Forward DNS traffic.""" server_manager.Append(platformsettings.set_temporary_primary_nameserver, host) def AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive): dns_filters = [] if options.dns_private_passthrough: private_filter = dnsproxy.PrivateIpFilter(real_dns_lookup, http_archive) dns_filters.append(private_filter) server_manager.AppendRecordCallback(private_filter.InitializeArchiveHosts) server_manager.AppendReplayCallback(private_filter.InitializeArchiveHosts) if options.shaping_dns: delay_filter = dnsproxy.DelayFilter(options.record, **options.shaping_dns) dns_filters.append(delay_filter) server_manager.AppendRecordCallback(delay_filter.SetRecordMode) server_manager.AppendReplayCallback(delay_filter.SetReplayMode) server_manager.Append(dnsproxy.DnsProxyServer, host, dns_lookup=dnsproxy.ReplayDnsLookup(host, dns_filters)) def AddWebProxy(server_manager, options, host, real_dns_lookup, http_archive, cache_misses): inject_script = httpclient.GetInjectScript(options.inject_scripts) custom_handlers = customhandlers.CustomHandlers(options.screenshot_dir) if options.spdy: assert not options.record, 'spdy cannot be used with --record.' archive_fetch = httpclient.ReplayHttpArchiveFetch( http_archive, inject_script, options.diff_unknown_requests, cache_misses=cache_misses, use_closest_match=options.use_closest_match) server_manager.Append( replayspdyserver.ReplaySpdyServer, archive_fetch, custom_handlers, host=host, port=options.port, certfile=options.certfile) else: custom_handlers.add_server_manager_handler(server_manager) archive_fetch = httpclient.ControllableHttpArchiveFetch( http_archive, real_dns_lookup, inject_script, options.diff_unknown_requests, options.record, cache_misses=cache_misses, use_closest_match=options.use_closest_match) server_manager.AppendRecordCallback(archive_fetch.SetRecordMode) server_manager.AppendReplayCallback(archive_fetch.SetReplayMode) server_manager.Append( httpproxy.HttpProxyServer, archive_fetch, custom_handlers, host=host, port=options.port, **options.shaping_http) if options.ssl: server_manager.Append( httpproxy.HttpsProxyServer, archive_fetch, custom_handlers, options.certfile, host=host, port=options.ssl_port, **options.shaping_http) def AddTrafficShaper(server_manager, options, host): if options.shaping_dummynet: ssl_port = options.ssl_shaping_port if options.ssl else None kwargs = dict( host=host, port=options.shaping_port, ssl_port=ssl_port, use_loopback=not options.server_mode, **options.shaping_dummynet) if not options.dns_forwarding: kwargs['dns_port'] = None server_manager.Append(trafficshaper.TrafficShaper, **kwargs) class OptionsWrapper(object): """Add checks, updates, and methods to option values. Example: options, args = option_parser.parse_args() options = OptionsWrapper(options, option_parser) # run checks and updates if options.record and options.HasTrafficShaping(): [...] 
""" _TRAFFICSHAPING_OPTIONS = set( ['down', 'up', 'delay_ms', 'packet_loss_rate', 'init_cwnd', 'net']) _CONFLICTING_OPTIONS = ( ('record', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net', 'spdy', 'use_server_delay')), ('append', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net', 'spdy', 'use_server_delay')), # same as --record ('net', ('down', 'up', 'delay_ms')), ('server', ('server_mode',)), ) # The --net values come from http://www.webpagetest.org/. # https://sites.google.com/a/webpagetest.org/docs/other-resources/2011-f
cc-broadband-data _NET_CONFIGS = ( # key --down --up --delay_ms ('dsl', ('1536Kbit/s', '384Kbit/s', '50')), ('cable', ( '5Mbit/s', '1Mbit/s', '28')), ('fios', ( '20Mbit/s', '5Mbit/s', '4')), ) NET_CHOICES = [key for key, values in _NET_CONFIGS] def __init__(self, options, parser): self._options = options self._par
ser = parser self._nondefaults = set([ name for name, value in parser.defaults.items() if getattr(options, name) != value]) self._CheckConflicts() self._MassageValues() def _CheckConflicts(self): """Give an error if mutually exclusive options are used.""" for option, bad_options in self._CONFLICTING_OPTIONS: if option in self._nondefaults: for bad_option in bad_options: if bad_option in self._nondefaults: self._parser.error('Option --%s cannot be used with --%s.' % (bad_option, option)) def _ShapingKeywordArgs(self, shaping_key): """Return the shaping keyword args for |shaping_key|. Args: shaping_key: one of 'dummynet', 'dns', 'http'. Returns: {} # if shaping_key does not apply, or options have default values. {k: v, ...} """ kwargs = {} def AddItemIfSet(d, kw_key, opt_key=None): opt_key = opt_key or kw_key if opt_key in self._nondefaults: d[kw_key] = getattr(self, opt_ke
KyleTen2/ReFiSys
FlSys.py
Python
mit
13,483
0.011941
from RemoteFlSys import * class gfeReader:#GetFilesExts Reader def __init__(self, Reader, Params, AddInfo, OneTime = True): self.Params = Params self.Reader = Reader self.AddInfo = AddInfo self.Once = OneTime class fsDirInf: def __init__(self, SuperDir, Name, DictFlInfs, DictDirs, CaseSens = True): self.SuperDir = SuperDir self.Name = Name self.FlInfs = DictFlInfs self.Dirs = DictDirs self.MatchCase = CaseSens self.GfeCached = False if not CaseSens: Name = Name.lower() if self.SuperDir != None: self.SuperDir.AddDir(Name, self) def AddDir(self, Name, AddMe):#Done if not self.MatchCase: Name = Name.lower() self.Dirs[Name] = AddMe def GetFlInf(self, fName):#Done if not self.MatchCase: fName = fName.lower() Pos = fName.find("/", 1) if Pos > 0: return self.Dirs[fName[1:Pos]].GetFlInf(fName[Pos:]) else: return self.FlInfs[fName[1:]][1] def SetFlInf(self, fName, Data, MkDirs = True):#Done CmpName = fName if not self.MatchCase: CmpName = fName.lower() Pos = fName.find("/", 1) if Pos > 0: if MkDirs and not self.Dirs.has_key(CmpName[1:Pos]): self.Dirs[CmpName[1:Pos]] = fsDirInf(self, fName[1:Pos], dict(), dict(), self.MatchCase) self.Dirs[CmpName[1:Pos]].SetFlInf(fName[Pos:], Data, MkDirs) else: self.FlInfs[CmpName[1:]] = fName[1:], Data def GetDir(self, Path):#Done if not self.MatchCase: Path = Path.lower() Pos = Path.find("/", 1) if Pos > 0: return self.Dirs[Path[1:Pos]].GetDir(Path[Pos:]) else: return self.Dirs[Path[1:]] def MkInfCached(self): self.GfeCached = True for Dir in self.Dirs.values(): Dir.MkInfCached() def SetFlInfLst(self, LstFlDat, InfCached = False, MkDirs = True):#Done CurDir = self CurDirPath = "" for fName, Data in LstFlDat: if fName[0] != '/': fName = "/" + fName CmpName = fName if not self.MatchCase: CmpName = fName.lower() Pos = CmpName.find(CurDirPath) while Pos < 0: Pos = CurDirPath.find("/") CurDirPath = CurDirPath[0:Pos] Pos = CmpName.find(CurDirPath) CurDirPath = CmpName[0:Pos + len(CurDirPath)] if len(CurDirPath) != 0: fName = fName[Pos + len(CurDirPath):] CurDir = self.GetDir(CurDirPath) else: CurDir = self CurDir.SetFlInf(fName, Data, MkDirs) if InfCached: self.MkInfCached() def GfeInfCache(self, LstExts, BegDots, Prepend = ""): Rtn = list() for fName in self.FlInfs: FlInf = self.FlInfs[fName] if fName[fName.rfind('.') + 1:] in LstExts: Rtn.append((Prepend + FlInf[0], FlInf[1])) for Dir in self.Dirs.values(): Rtn.extend(Dir.GfeInfCache(LstExts, BegDots, Prepend + Dir.Name + "/")) return Rtn def GfeCheckCache(self, Func, Cmd, Path, LstExts, BegDots = False): TheDir = None Rtn = None try: TheDir = self.GetDir(Path) Rtn = TheDir.GfeInfCache(LstExts, BegDots) except: Rtn = Func(Cmd, Path, LstExts, BegDots) TmpRtn = [0] * len(Rtn) if len(Rtn) > 0 and Rtn[0][0][0] != '/': Path += "/" for c in xrange(len(Rtn)): TmpRtn[c] = (Path + Rtn[c][0], Rtn[c][1]) self.SetFlInfLst(TmpRtn, True) return Rtn DRV_TYPE_REAL = 0 DRV_TYPE_RFS = 1 class FileDrv:#InfoGetters: Reader takes filename AllDrv = dict() CurDir = os.getcwd() def __init__(self, DrvName, PrependName): FileDrv.AllDrv[DrvName] = self self.Name = PrependName self.InfoGetters = dict() self.InfCache = dict() self.SnglReaders = dict() self.Type = DRV_TYPE_REAL def Open(self, fName, Mode): return open(self.Name + fName, Mode) def NativeOpen(self, fName, Mode): return File(self.Name + fName, Mode) def ListDir(self, Path): return os.listdir(self.Name + Path) def IsDir(self, Path): return os.path.isdir(self.Name + Path) def Exists(self, Path): return os.path.exists(self.Name + Path) def IsFile(self, Path): return 
os.path.isfile(self.Name + Path) def GetFilesExts(self, Path, LstExts, Invert = False, RtnBegDots = False): for c in xrange(len(LstExts)): LstExts[c] = LstExts[c].lower() CurPath = self.Name + Path + "/" Next = [""] Cur = [] Rtn = list() while len(Next) > 0: Cur = Next Next = list() for TestPath in Cur: LstPaths = list() try: LstPaths = os.listdir(CurPath + TestPath) except WindowsError: continue for PathName in LstPaths: Add = TestPath + PathName if os.path.isdir(CurPath + Add): Next.append(Add + "/") elif not RtnBegDots and PathName[0] == '.': continue else: Pos = PathName.rfind('.') if Pos < 0 and (Invert ^ ("" in LstExts)): Rtn.append(Add) elif Pos >= 0 and (Invert ^ (PathName[Pos + 1:].lower() in LstExts)): Rtn.append(Add) return Rtn def UseGfeReader(self, Cmd, Path, LstExts = None, BegDots = False): CurReader = self.InfoGetters[Cmd] if LstExts == None: LstExts = CurReader.Params[0] LstFls = self.GetFilesExts(Path, list(LstExts), CurReader.Params[1], BegDots) Rtn = [None] * len(LstFls) Prepend = self.Name + Path + "/" for c in xrange(len(LstFls)): Rtn[c] = LstFls[c], CurReader.Reader(Prepend + LstFls[c]) return Rtn def GetInfSingle(self, Cmd, fName): Rtn = self.SnglReader[Cmd](fName) self.InfCache[Cmd].SetFlInf(fName, Rtn) return Rtn if os.name == "nt": for c in xrange(26): CurDrv = chr(ord('A') + c) if os.path.isdir(CurDrv + ":"): FileDrv(CurDrv, CurDrv + ":") elif os.name == "posix": FileDrv("C", "") def EvalPath(Path): Pos = Path.find(":") if Pos == -1: Path = CurDir + "/" + Path Pos = Path.find(":") return Path[0:Pos], Path[Pos + 1:] def OpenFile(fName, Mode): DrvName, fName = EvalPath(fName) return FileDrv.AllDrv[DrvName].Open(fName, Mode) def NativeOpenFile(fName, Mode): DrvName, fName = EvalPath(fName) return FileDrv.AllDrv[DrvName].NativeOpen(fName, Mode) def ListDir(Path): if Path == "": return [OutPath + ":" for OutPath in FileDrv.AllDrv.keys()] DrvName, Path = EvalPath(Path) return FileDrv.AllDrv[DrvName].ListDir(Path) def Exists(Path): DrvName, Path = EvalPath(Path) if not FileDrv.AllDrv.has_key(DrvName): return False return FileDrv.AllDrv[DrvName].Exists(Path) def IsDir(Path): DrvName, Path = EvalPath(Path) if not FileDrv.AllDrv.has_key(DrvName): return False return FileDrv.AllDrv[DrvName].IsDir(Path) def IsFile(Path): DrvName, Path = EvalPath(Path) if not FileDrv.A
llDrv.has_key(DrvName): return False return FileDrv.AllDrv
[DrvName].IsFile(Path) def GetFilesExts(Path, LstExt, Invert = False, RtnBegDots = False): DrvName, Path = EvalPath(Path) return FileDrv.AllDrv[DrvName].GetFilesExts(Path, LstExt, Invert, RtnBegDots) def GetInfGfe(Cmd, Path, LstExts = None, BegDots = False):#Warning BegDots functionality is in question print Path DrvName, Path = EvalPath(Path) Drv = F
snowflakedb/spark-snowflake
legacy/dev/merge_pr.py
Python
apache-2.0
18,840
0.004299
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Utility for creating well-formed pull request merges and pushing them to Apache. # usage: ./apache-pr-merge.py (see config env vars below) # # This utility assumes you already have local a Spark git folder and that you # have added remotes corresponding to both (i) the github apache Spark # mirror and (ii) the apache git repo. import json import os import re import subprocess import sys import urllib2 try: import jira.client JIRA_IMPORTED = True except ImportError: JIRA_IMPORTED = False # Location of your Spark git development area SPARK_HOME = os.environ.get("SPARK_REDSHIFT_HOME", os.getcwd()) # Remote name which points to the Gihub site PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin") # Remote name which points to Apache git PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin") # ASF JIRA username JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "") # ASF JIRA password JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "") # OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests # will be unauthenticated. You should only need to configure this if you find yourself regularly # exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at # https://github.com/settings/tokens. This script only requires the "public_repo" scope. GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY") GITHUB_BASE = "https://github.com/databricks/spark-redshift/pull" GITHUB_API_BASE = "https://api.github.com/repos/databricks/spark-redshift" JIRA_BASE = "https://issues.apache.org/jira/browse" JIRA_API_BASE = "https://issues.apache.org/jira" # Prefix added to temporary branches BRANCH_PREFIX = "PR_TOOL" def get_json(url): try: request = urllib2.Request(url) if GITHUB_OAUTH_KEY: request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY) return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e: if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0': print "Exceeded the GitHub API rate limit; see the instructions in " + \ "dev/merge_spark_pr.py to configur
e an OAuth token for making authenticated " + \ "GitHub requests." else: print "Unable to fetch URL, exiting: %s" % url sys.exit(-1) def fail(msg): print msg clean_up() sys.exit(-1) def run_cmd(cmd): print cmd if isinstance(cmd, list): return subprocess.check_output(cmd) else: return subprocess.check_output(cmd.split(" ")) def continue_maybe(prompt): result = raw_input("\n%s (y/n): " % prompt) if result.lower() != "y": fail("Okay, exiting") def clean_up(): print "Restoring head pointer to %s" % original_head run_cmd("git checkout %s" % original_head) branches = run_cmd("git branch").replace(" ", "").split("\n") for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches): print "Deleting local branch %s" % branch run_cmd("git branch -D %s" % branch) # merge the requested PR and return the merge hash def merge_pr(pr_num, target_ref, title, body, pr_repo_desc): pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num) target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper()) run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name)) run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name)) run_cmd("git checkout %s" % target_branch_name) had_conflicts = False try: run_cmd(['git', 'merge', pr_branch_name, '--squash']) except Exception as e: msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e continue_maybe(msg) msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?" continue_maybe(msg) had_conflicts = True commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name, '--pretty=format:%an <%ae>']).split("\n") distinct_authors = sorted(set(commit_authors), key=lambda x: commit_authors.count(x), reverse=True) primary_author = raw_input( "Enter primary author in the format of \"name <email>\" [%s]: " % distinct_authors[0]) if primary_author == "": primary_author = distinct_authors[0] commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name, '--pretty=format:%h [%an] %s']).split("\n\n") merge_message_flags = [] merge_message_flags += ["-m", title] if body is not None: # We remove @ symbols from the body to avoid triggering e-mails # to people every time someone creates a public fork of Spark. merge_message_flags += ["-m", body.replace("@", "")] authors = "\n".join(["Author: %s" % a for a in distinct_authors]) merge_message_flags += ["-m", authors] if had_conflicts: committer_name = run_cmd("git config --get user.name").strip() committer_email = run_cmd("git config --get user.email").strip() message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % ( committer_name, committer_email) merge_message_flags += ["-m", message] # The string "Closes #%s" string is required for GitHub to correctly close the PR merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)] run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags) continue_maybe("Merge complete (local ref %s). Push to %s?" % ( target_branch_name, PUSH_REMOTE_NAME)) try: run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref)) except Exception as e: clean_up() fail("Exception while pushing: %s" % e) merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8] clean_up() print("Pull request #%s merged!" 
% pr_num) print("Merge hash: %s" % merge_hash) return merge_hash def cherry_pick(pr_num, merge_hash, default_branch): pick_ref = raw_input("Enter a branch name [%s]: " % default_branch) if pick_ref == "": pick_ref = default_branch pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper()) run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name)) run_cmd("git checkout %s" % pick_branch_name) try: run_cmd("git cherry-pick -sx %s" % merge_hash) except Exception as e: msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e continue_maybe(msg) msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?" continue_maybe(msg) continue_maybe("Pick complete (local ref %s). Push to %s?" % ( pick_branch_name, PUSH_REMOTE_NAME)) try: run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref)) except Exception as e: clean_up() fail("Exception while pushing: %s" % e) pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8] clean_up() print("Pull request #%s picked into %s!" % (pr_num, pick_ref)) print("Pick hash: %s" % pi
Codewars/codewars-runner
frameworks/python/codewars.py
Python
mit
2,284
0.004816
import unittest import traceback from time import perf_counter class CodewarsTestRunner(object): def __init__(self): pass def run(self, test): r = CodewarsTestResult() s = perf_counter() print("\n<DESCRIBE::>Tests") try: test(r) finally: pass print("\n<COMPLETEDIN::>{:.4f}".format(1000*(perf_counter() - s))) return r __unittest
= True class CodewarsTestResul
t(unittest.TestResult): def __init__(self): super().__init__() self.start = 0.0 def startTest(self, test): print("\n<IT::>" + test._testMethodName) super().startTest(test) self.start = perf_counter() def stopTest(self, test): print("\n<COMPLETEDIN::>{:.4f}".format(1000*(perf_counter() - self.start))) super().stopTest(test) def addSuccess(self, test): print("\n<PASSED::>Test Passed") super().addSuccess(test) def addError(self, test, err): print("\n<ERROR::>Unhandled Exception") print("\n<LOG:ESC:Error>" + esc(''.join(traceback.format_exception_only(err[0], err[1])))) print("\n<LOG:ESC:Traceback>" + esc(self._exc_info_to_string(err, test))) super().addError(test, err) def addFailure(self, test, err): print("\n<FAILED::>Test Failed") print("\n<LOG:ESC:Failure>" + esc(''.join(traceback.format_exception_only(err[0], err[1])))) super().addFailure(test, err) # from unittest/result.py def _exc_info_to_string(self, err, test): exctype, value, tb = err # Skip test runner traceback levels while tb and self._is_relevant_tb_level(tb): tb = tb.tb_next if exctype is test.failureException: length = self._count_relevant_tb_levels(tb) # Skip assert*() traceback levels else: length = None return ''.join(traceback.format_tb(tb, limit=length)) def _is_relevant_tb_level(self, tb): return '__unittest' in tb.tb_frame.f_globals def _count_relevant_tb_levels(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length def esc(s): return s.replace("\n", "<:LF:>")
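For context, the runner above stands in for unittest's default runner and prints the Codewars reporting markers. A minimal usage sketch, assuming the module is importable as codewars (the actual import path on the Codewars image may differ):

import unittest
from codewars import CodewarsTestRunner

class TestArithmetic(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(2 + 2, 4)

# run() feeds a CodewarsTestResult into the suite and emits <IT::>/<PASSED::> markers
suite = unittest.TestLoader().loadTestsFromTestCase(TestArithmetic)
CodewarsTestRunner().run(suite)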
todesschaf/pgl
pgl.py
Python
gpl-2.0
4,786
0.002925
# Copyright (c) 2011 Nick Hurley <hurley at todesschaf dot org> # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """Helpers for git extensions written in python """ import inspect import os import subprocess import sys import traceback config = {} def __extract_name_email(info, type_): """Extract a name and email from a string in the form: User Name <[email protected]> tstamp offset Stick that into our config dict for either git committer or git author. """ val = ' '.join(info.split(' ')[:-2]) angle = val.find('<') if angle > -1: config['GIT_%s_NAME' % type_] = val[:angle - 1] config['GIT_%s_EMAIL' % type_] = val[angle + 1:-1] else: config['GIT_%s_NAME' % type_] = val def __create_config(): """Create our configuration dict from git and the env variables we're given. """ devnull = file('/dev/null', 'w') # Stick all our git variables in our dict, just in case anyone needs them gitvar = subprocess.Popen(['git', 'var', '-l'], stdout=subprocess.PIPE, stderr=devnull) for line in gitvar.stdout: k, v = line.split('=', 1) if k == 'GIT_COMMITTER_IDENT': __extract_name_email(v, 'COMMITTER') elif k == 'GIT_AUTHOR_IDENT': __extract_name_email(v, 'AUTHOR') elif v == 'true': v = True elif v == 'false': v = False else: try: v = int(v) except: pass config[k] = v gitvar.wait() # Find out where git's sub
-exes live gitexec = subprocess.Popen(['git', '--exec-path'], stdout=subprocess.PIPE, stderr=devnull) config['GIT_LIBEXEC'] = gitexec.stdout.readlines()[0].strip() gitexec.wait()
# Figure out the git dir in our repo, if applicable gitdir = subprocess.Popen(['git', 'rev-parse', '--git-dir'], stdout=subprocess.PIPE, stderr=devnull) lines = gitdir.stdout.readlines() if gitdir.wait() == 0: config['GIT_DIR'] = lines[0].strip() # Figure out the top level of our repo, if applicable gittoplevel = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE, stderr=devnull) lines = gittoplevel.stdout.readlines() if gittoplevel.wait() == 0: config['GIT_TOPLEVEL'] = lines[0].strip() # We may have been called by a wrapper that passes us some info through the # environment. Use it if it's there for k, v in os.environ.iteritems(): if k.startswith('PY_GIT_'): config[k[3:]] = v elif k == 'PGL_OK': config['PGL_OK'] = True # Make sure our git dir and toplevel are fully-qualified if 'GIT_DIR' in config and not os.path.isabs(config['GIT_DIR']): git_dir = os.path.join(config['GIT_TOPLEVEL'], config['GIT_DIR']) config['GIT_DIR'] = os.path.abspath(git_dir) def warn(msg): """Print a warning """ sys.stderr.write('%s\n' % (msg,)) def die(msg): """Print an error message and exit the program """ sys.stderr.write('%s\n' % (msg,)) sys.exit(1) def do_checks(): """Check to ensure we've got everything we expect """ try: import argparse except: die('Your python must support the argparse module') def main(_main): """Mark a function as the main function for our git subprogram. Based very heavily on automain by Gerald Kaszuba, but with modifications to make it work better for our purposes. """ parent = inspect.stack()[1][0] name = parent.f_locals.get('__name__', None) if name == '__main__': __create_config() if 'PGL_OK' not in config: do_checks() rval = 1 try: rval = _main() except Exception, e: sys.stdout.write('%s\n' % str(e)) f = file('pygit.tb', 'w') traceback.print_tb(sys.exc_info()[2], None, f) f.close() sys.exit(rval) return _main if __name__ == '__main__': """If we get run as a script, check to make sure it's all ok and exit with an appropriate error code """ do_checks() sys.exit(0)
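A minimal sketch of the kind of extension this helper targets, assuming the module above is importable as pgl and the script is installed on PATH as a git subcommand; the command name git-hello is illustrative (Python 2, like pgl itself):

#!/usr/bin/env python
# git-hello: toy git subcommand built on the pgl helpers
import pgl

@pgl.main
def main():
    # pgl populates config from `git var` / `git rev-parse` before calling main()
    name = pgl.config.get('GIT_AUTHOR_NAME', 'unknown author')
    top = pgl.config.get('GIT_TOPLEVEL')
    if top is None:
        pgl.die('not inside a git repository')
    print 'Hello %s, repository at %s' % (name, top)
    return 0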
JoshAshby/Fla.gr
app/models/couch/user/userModel.py
Python
mit
3,412
0.001758
#!/usr/bin/env python """ fla.gr user model Given a userID or a username or a email, return the users couchc.database ORM http://xkcd.com/353/ Josh Ashby 2013 http://joshashby.com [email protected] """ from couchdb.mapping import Document, TextField, DateTimeField, \ BooleanField, IntegerField import bcrypt from datetime import datetime import config.config as c import utils.markdownUtils as mdu from models.modelExceptions.userModelExceptions import \ multipleUsersError, passwordError, userError from models.couch.baseCouchModel import baseCouchModel class userORM(Document, baseCouchModel): """ Base ORM for users in fla.gr, this one currently uses couchc.database to store the data. TODO: Flesh this doc out a lot more """ _name = "users" username = TextField() email = TextField() about = TextField(default="") disable = BooleanField(default=False) emailVisibility = BooleanField(default=False) level = IntegerField(default=1) password = TextField() joined = DateTimeField(default=datetime.now) docType = TextField(default="user") formatedAbout = "" _view = 'typeViews/user' @classmeth
od def new(cls, username, password): """ Make a new user, checking for username conflicts. If no conflicts are found the password is encryp
ted with bcrypt and the resulting `userORM` returned. :param username: The username that should be used for the new user :param password: The plain text password that should be used for the password. :return: `userORM` if the username is available, """ if password == "": raise passwordError("Password cannot be null") elif not cls.find(username): passwd = bcrypt.hashpw(password, bcrypt.gensalt()) user = cls(username=username, password=passwd) return user else: raise userError("That username is taken, please choose again.", username) def setPassword(self, password): """ Sets the users password to `password` :param password: plain text password to hash """ self.password = bcrypt.hashpw(password, bcrypt.gensalt()) self.store(c.database.couchServer) @staticmethod def _search(items, value): """ Searches the list `items` for the given value :param items: A list of ORM objects to search :param value: The value to search for, in this case value can be a username or an email, or an id """ foundUser = [] for user in items: if user.email == value \ or user.username == value \ or user.id == value: foundUser.append(user) if not foundUser: return None if len(foundUser)>1: raise multipleUsersError("Multiple Users", value) else: user = foundUser[0] user.formatedAbout = mdu.markClean(user.about) return user @property def hasAdmin(self): return self.level > 50 def format(self): """ Formats markdown and dates into the right stuff """ self.formatedAbout = mdu.markClean(self.about) self.formatedJoined = datetime.strftime(self.joined, "%a %b %d, %Y @ %H:%I%p")
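A usage sketch for the model above. The import paths and the return value of baseCouchModel.find are assumptions here (find is called but not defined in this file), and a reachable CouchDB behind c.database.couchServer is required:

import config.config as c
from models.couch.user.userModel import userORM

user = userORM.new('alice', 's3cret')      # raises userError if the name is taken
user.store(c.database.couchServer)         # persist, as setPassword() does internally

fetched = userORM.find('alice')            # lookup by username, email or id
is_admin = fetched.hasAdmin                # False: default level is 1, admin needs > 50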
rsk-mind/rsk-mind-framework
test/setting.py
Python
mit
927
0.010787
from rsk_mind.datasource import * from rsk_mind.classifier import * from transformer import CustomTransformer PR
OJECT_NAME = 'test' DATASOURCE= { 'IN' : { 'class' : CSVDataSource, 'params' : ('in.csv', ) }, 'OUT' : { 'class' : CSVDataSource, 'params' : ('out.csv', ) }
} ANALYSIS = { 'persist': True, 'out': 'info.json' } TRANSFORMER = CustomTransformer TRAINING = { 'algorithms' : [ { 'classifier': XGBoostClassifier, 'parameters' : { 'bst:max_depth': 7, 'bst:eta': 0.3, 'bst:subsample': 0.5, 'silent': 0, 'objective': 'binary:logistic', 'nthread': 4, 'eval_metric': 'auc' }, 'dataset': DATASOURCE['IN'] } ], 'ensemble': 'max', 'dataset': DATASOURCE['IN'] } ENGINE = { }
jonyroda97/redbot-amigosprovaveis
lib/matplotlib/tests/test_transforms.py
Python
gpl-3.0
24,823
0.000081
from __future__ import (absolute_import, division, print_function, unicode_literals) from six.moves import zip import unittest import numpy as np from numpy.testing import (assert_allclose, assert_almost_equal, assert_array_equal, assert_array_almost_equal) import pytest import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.transforms as mtransforms from matplotlib.path import Path from matplotlib.scale import LogScale from matplotlib.testing.decorators import image_comparison def test_non_affine_caching(): class AssertingNonAffineTransform(mtransforms.Transform): """ This transform raises an assertion error when called when it shouldn't be and self.raise_on_transform is True. """ input_dims = output_dims = 2 is_affine = False def __init__(self, *args, **kwargs): mtransforms.Transform.__init__(self, *args, **kwargs) self.raise_on_transform = False self.underlying_transform = mtransforms.Affine2D().scale(10, 10) def transform_path_non_affine(self, path): assert not self.raise_on_transform, \ 'Invalidated affine part of transform unnecessarily.' return self.underlying_transform.transform_path(path) transform_path = transform_path_non_affine def transform_non_affine(self, path): assert not self.raise_on_transform, \ 'Invalidated affine part of transform unnecessarily.' return self.underlying_transform.transform(path) transform = transform_non_affine my_trans = AssertingNonAffineTransform() ax = plt.axes() plt.plot(np.arange(10), transform=my_trans + ax.transData) plt.draw() # enable the transform to raise an exception if it's non-affine transform # method is triggered again. my_trans.raise_on_transform = True ax.transAxes.invalidate() plt.draw() def test_external_transform_api(): class ScaledBy(object): def __init__(self, scale_factor): self._scale_factor = scale_factor def _as_mpl_transform(self, axes): return (mtransforms.Affine2D().scale(self._scale_factor) + axes.transData) ax = plt.axes() line, = plt.plot(np.arange(10), transform=ScaledBy(10)) ax.set_xlim(0, 100) ax.set_ylim(0, 100) # assert that the top transform of the line is the scale transform. assert_allclose(line.get_transform()._a.get_matrix(), mtransforms.Affine2D().scale(10).get_matrix()) @image_comparison(baseline_images=['pre_transform_data'], tol=0.08) def test_pre_transform_plotting(): # a catch-all for as many as possible plot layouts which handle # pre-transforming the data NOTE: The axis range is important in this # plot. It should be x10 what the data suggests it should be ax = plt.axes() times10 = mtransforms.Affine2D().scale(10) ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData) ax.pcolormesh(np.linspace(0, 4, 7), np.linspace(5.5, 8, 9), np.arange(48).reshape(8, 6), transform=times10 + ax.trans
Data) ax.scatter(np.linspace(0, 10), np.linspace(10, 0), transform=times10 + ax.transData) x = np.linspace(8, 10, 20) y = np.linspace(1, 5, 20) u = 2*np.sin(x) + np.cos(y[:, np.newaxis]) v = np.sin(x) - np.cos(y[:, np.newaxis])
df = 25. / 30. # Compatibility factor for old test image ax.streamplot(x, y, u, v, transform=times10 + ax.transData, density=(df, df), linewidth=u**2 + v**2) # reduce the vector data down a bit for barb and quiver plotting x, y = x[::3], y[::3] u, v = u[::3, ::3], v[::3, ::3] ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData) ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData) def test_contour_pre_transform_limits(): ax = plt.axes() xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20)) ax.contourf(xs, ys, np.log(xs * ys), transform=mtransforms.Affine2D().scale(0.1) + ax.transData) expected = np.array([[1.5, 1.24], [2., 1.25]]) assert_almost_equal(expected, ax.dataLim.get_points()) def test_pcolor_pre_transform_limits(): # Based on test_contour_pre_transform_limits() ax = plt.axes() xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20)) ax.pcolor(xs, ys, np.log(xs * ys), transform=mtransforms.Affine2D().scale(0.1) + ax.transData) expected = np.array([[1.5, 1.24], [2., 1.25]]) assert_almost_equal(expected, ax.dataLim.get_points()) def test_pcolormesh_pre_transform_limits(): # Based on test_contour_pre_transform_limits() ax = plt.axes() xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20)) ax.pcolormesh(xs, ys, np.log(xs * ys), transform=mtransforms.Affine2D().scale(0.1) + ax.transData) expected = np.array([[1.5, 1.24], [2., 1.25]]) assert_almost_equal(expected, ax.dataLim.get_points()) def test_Affine2D_from_values(): points = np.array([[0, 0], [10, 20], [-1, 0], ]) t = mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, 0) actual = t.transform(points) expected = np.array([[0, 0], [10, 0], [-1, 0]]) assert_almost_equal(actual, expected) t = mtransforms.Affine2D.from_values(0, 2, 0, 0, 0, 0) actual = t.transform(points) expected = np.array([[0, 0], [0, 20], [0, -2]]) assert_almost_equal(actual, expected) t = mtransforms.Affine2D.from_values(0, 0, 3, 0, 0, 0) actual = t.transform(points) expected = np.array([[0, 0], [60, 0], [0, 0]]) assert_almost_equal(actual, expected) t = mtransforms.Affine2D.from_values(0, 0, 0, 4, 0, 0) actual = t.transform(points) expected = np.array([[0, 0], [0, 80], [0, 0]]) assert_almost_equal(actual, expected) t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 5, 0) actual = t.transform(points) expected = np.array([[5, 0], [5, 0], [5, 0]]) assert_almost_equal(actual, expected) t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 0, 6) actual = t.transform(points) expected = np.array([[0, 6], [0, 6], [0, 6]]) assert_almost_equal(actual, expected) def test_clipping_of_log(): # issue 804 M, L, C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY points = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)] codes = [M, L, L, L, C] path = Path(points, codes) # something like this happens in plotting logarithmic histograms trans = mtransforms.BlendedGenericTransform(mtransforms.Affine2D(), LogScale.Log10Transform('clip')) tpath = trans.transform_path_non_affine(path) result = tpath.iter_segments(trans.get_affine(), clip=(0, 0, 100, 100), simplify=False) tpoints, tcodes = list(zip(*result)) assert_allclose(tcodes, [M, L, L, L, C]) class NonAffineForTest(mtransforms.Transform): """ A class which looks like a non affine transform, but does whatever the given transform does (even if it is affine). This is very useful for testing NonAffine behaviour with a simple Affine transform. 
""" is_affine = False output_dims = 2 input_dims = 2 def __init__(self, real_trans, *args, **kwargs): self.real_trans = real_trans mtransforms.Transform.__init__(self, *args, **kwargs) def transform_non_affine(self, values): return self.real_trans.transform(values) def transform_path_non_affine(self, path): return self.real_trans.transform_path(path) class BasicTransformTests(unittest.TestCase): def setUp(self): self.ta1 = mtransforms.Affine2D(shorthand_name='ta1').rotate(np.pi / 2) self.ta2 =
noikiy/mitmproxy
examples/dns_spoofing.py
Python
mit
1,652
0.004843
""" This inline scripts makes it possible to use mitmproxy in scenarios where IP spoofing has been used to redirect connections to mitmproxy. The way this works is that we rely on either the
TLS Server Name Indication (SNI) or the Host header of the HTTP request. Of course, this is not foolproof - if an HTTPS connection comes without SNI, we don't know the actual target and cannot construct a certificate that looks valid. Similarly, if there's no Host header or a spoofed Host header, we're out of luck as well. Using transparent mode is the better option most of the time. Usage: mitmp
roxy -p 80 -R http://example.com/ // Used as the target location if no Host header is present mitmproxy -p 443 -R https://example.com/ // Used as the target location if neither SNI nor host header are present. mitmproxy will always connect to the default location first, so it must be reachable. As a workaround, you can spawn an arbitrary HTTP server and use that for both endpoints, e.g. mitmproxy -p 80 -R http://localhost:8000 mitmproxy -p 443 -R https2http://localhost:8000 """ def request(context, flow): if flow.client_conn.ssl_established: # TLS SNI or Host header flow.request.host = flow.client_conn.connection.get_servername( ) or flow.request.pretty_host(hostheader=True) # If you use a https2http location as default destination, these # attributes need to be corrected as well: flow.request.port = 443 flow.request.scheme = "https" else: # Host header flow.request.host = flow.request.pretty_host(hostheader=True)
dimagi/commcare-hq
corehq/apps/linked_domain/views.py
Python
bsd-3-clause
22,021
0.002089
from datetime import datetime from django.contrib import messages from django.http import Http404, HttpResponseRedirect, JsonResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.translation import ugettext, ugettext_lazy from django.views import View from couchdbkit import ResourceNotFound from djng.views.mixins import JSONResponseMixin, allow_remote_invocation from memoized import memoized from corehq.apps.accounting.models import BillingAccount from corehq.apps.accounting.utils import domain_has_privilege from corehq.apps.analytics.tasks import track_workflow from corehq.apps.app_manager.dbaccessors import ( get_app, get_brief_app_docs_in_domain, get_brief_apps_in_domain, get_build_doc_by_version, get_latest_released_app, get_latest_released_app_versions_by_app_id, ) from corehq.apps.app_manager.decorators import require_can_edit_apps from corehq.apps.app_manager.util import is_linked_app from corehq.apps.case_search.models import CaseSearchConfig from corehq.apps.domain.dbaccessors import domain_exists from corehq.apps.domain.decorators import ( domain_admin_required, login_or_api_key, ) from corehq.apps.domain.exceptions import DomainDoesNotExist from corehq.apps.domain.views.base import DomainViewMixin from corehq.apps.domain.views.settings import BaseAdminProjectSettingsView from corehq.apps.fixtures.dbaccessors import get_fixture_data_type_by_tag from corehq.apps.hqwebapp.decorators import use_multiselect from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id from corehq.apps.hqwebapp.templatetags.hq_shared_tags import pretty_doc_info from corehq.apps.linked_domain.const import ( LINKED_MODELS_MAP, MODEL_APP, MODEL_FIXTURE, MODEL_KEYWORD, MODEL_REPORT, SUPERUSER_DATA_MODELS, ) from corehq.apps.linked_domain.dbaccessors import ( get_active_domain_link, get_available_domains_to_link, get_available_upstream_domains, get_linked_domains, get_upstream_domain_link, ) from corehq.apps.linked_domain.decorators import ( require_access_to_linked_domains, require_linked_domain, ) from corehq.apps.linked_domain.exceptions import ( DomainLinkAlreadyExists, DomainLinkError, DomainLinkNotAllowed, UnsupportedActionError, ) from corehq.apps.linked_domain.local_accessors import ( get_auto_update_rules, get_custom_data_models, get_data_dictionary, get_dialer_settings, get_enabled_toggles_and_previews, get_fixture, get_hmac_callout_settings, get_otp_settings, get_tableau_server_and_visualizations, get_user_roles, ) from corehq.apps.linked_domain.models import ( DomainLink, DomainLinkHistory, wrap_detail, )
from corehq.apps.linked_domain.remote_accessors import get_remote_linkable_ucr from corehq.apps.linked_domain.tasks import ( pull_missing_multime
dia_for_app_and_notify_task, push_models, ) from corehq.apps.linked_domain.ucr import create_linked_ucr from corehq.apps.linked_domain.updates import update_model_type from corehq.apps.linked_domain.util import ( convert_app_for_remote_linking, pull_missing_multimedia_for_app, server_to_user_time, user_has_admin_access_in_all_domains, ) from corehq.apps.linked_domain.view_helpers import ( build_domain_link_view_model, build_pullable_view_models_from_data_models, build_view_models_from_data_models, get_upstream_and_downstream_apps, get_upstream_and_downstream_fixtures, get_upstream_and_downstream_keywords, get_upstream_and_downstream_reports, ) from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader from corehq.apps.reports.dispatcher import ReleaseManagementReportDispatcher from corehq.apps.reports.generic import GenericTabularReport from corehq.apps.sms.models import Keyword from corehq.apps.userreports.dbaccessors import get_report_configs_for_domain from corehq.apps.userreports.models import ( DataSourceConfiguration, ReportConfiguration, ) from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions, WebUser from corehq.privileges import RELEASE_MANAGEMENT from corehq.util.timezones.utils import get_timezone_for_request @login_or_api_key @require_linked_domain def tableau_server_and_visualizations(request, domain): return JsonResponse(get_tableau_server_and_visualizations(domain)) @login_or_api_key @require_linked_domain def toggles_and_previews(request, domain): return JsonResponse(get_enabled_toggles_and_previews(domain)) @login_or_api_key @require_linked_domain def auto_update_rules(request, domain): return JsonResponse(get_auto_update_rules(domain)) @login_or_api_key @require_linked_domain def custom_data_models(request, domain): limit_types = request.GET.getlist('type') return JsonResponse(get_custom_data_models(domain, limit_types)) @login_or_api_key @require_linked_domain def fixture(request, domain, tag): return JsonResponse(get_fixture(domain, tag)) @login_or_api_key @require_linked_domain def user_roles(request, domain): return JsonResponse({'user_roles': get_user_roles(domain)}) @login_or_api_key @require_linked_domain def brief_apps(request, domain): return JsonResponse({'brief_apps': get_brief_app_docs_in_domain(domain, include_remote=False)}) @login_or_api_key @require_linked_domain def app_by_version(request, domain, app_id, version): return JsonResponse({'app': get_build_doc_by_version(domain, app_id, version)}) @login_or_api_key @require_linked_domain def released_app_versions(request, domain): return JsonResponse({'versions': get_latest_released_app_versions_by_app_id(domain)}) @login_or_api_key @require_linked_domain def case_search_config(request, domain): try: config = CaseSearchConfig.objects.get(domain=domain).to_json() except CaseSearchConfig.DoesNotExist: config = None return JsonResponse({'config': config}) @login_or_api_key @require_linked_domain @require_permission(Permissions.view_reports) def linkable_ucr(request, domain): """Returns a list of reports to be used by the downstream domain on a remote server to create linked reports by calling the `ucr_config` view below """ reports = get_report_configs_for_domain(domain) return JsonResponse({ "reports": [ {"id": report._id, "title": report.title} for report in reports] }) @login_or_api_key @require_linked_domain def ucr_config(request, domain, config_id): report_config = ReportConfiguration.get(config_id) if report_config.domain != domain: 
return Http404 datasource_id = report_config.config_id datasource_config = DataSourceConfiguration.get(datasource_id) return JsonResponse({ "report": report_config.to_json(), "datasource": datasource_config.to_json(), }) @login_or_api_key @require_linked_domain def get_latest_released_app_source(request, domain, app_id): master_app = get_app(None, app_id) if master_app.domain != domain: raise Http404 latest_master_build = get_latest_released_app(domain, app_id) if not latest_master_build: raise Http404 return JsonResponse(convert_app_for_remote_linking(latest_master_build)) @login_or_api_key @require_linked_domain def data_dictionary(request, domain): return JsonResponse(get_data_dictionary(domain)) @login_or_api_key @require_linked_domain def dialer_settings(request, domain): return JsonResponse(get_dialer_settings(domain)) @login_or_api_key @require_linked_domain def otp_settings(request, domain): return JsonResponse(get_otp_settings(domain)) @login_or_api_key @require_linked_domain def hmac_callout_settings(request, domain): return JsonResponse(get_hmac_callout_settings(domain)) @require_can_edit_apps def pull_missing_multimedia(request, domain, app_id): async_update = request.POST.get('notify') == 'on' force = request.POST.get('force') == 'on' if async_update: pull_missing_multimedia_for_app_and_notify_task.delay(domain, app_id, request.user.email, force)
village-people/flying-pig
malmopy/visualization/visualizer.py
Python
mit
4,199
0.00381
# Copyright (c) 2017 Microsoft Corporation. # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================================================================== from __future__ import absolute_import from os import path class Visualizable(object): def __init__(self, visualizer=None): if visualizer is not None: assert isinstance(visualizer, BaseVisualizer), "visualizer should derive from BaseVisualizer" self._visualizer = visualizer def visualize(self, index, tag, value, **kwargs): if self._visualizer is not None: self._visualizer << (index, tag, value, kwargs) @property def can_visualize(self): return self._visualizer is not None class BaseV
isualizer(object):
""" Provide a unified interface for observing the training progress """ def add_entry(self, index, key, result, **kwargs): raise NotImplementedError() def __lshift__(self, other): if isinstance(other, tuple): if len(other) >= 3: self.add_entry(other[0], str(other[1]), other[2]) else: raise ValueError("Provided tuple should be of the form (key, value)") else: raise ValueError("Trying to use stream operator without a tuple (key, value)") class EmptyVisualizer(BaseVisualizer): """ A boilerplate visualizer that does nothing """ def add_entry(self, index, key, result, **kwargs): pass class ConsoleVisualizer(BaseVisualizer): """ Print visualization to stdout as: key -> value """ CONSOLE_DEFAULT_FORMAT = "[%s] %d : %s -> %.3f" def __init__(self, format=None, prefix=None): self._format = format or ConsoleVisualizer.CONSOLE_DEFAULT_FORMAT self._prefix = prefix or '-' def add_entry(self, index, key, result, **kwargs): print(self._format % (self._prefix, index, key, result)) class CsvVisualizer(BaseVisualizer): """ Write data to file. The following formats are supported: CSV, JSON, Excel. """ def __init__(self, output_file, override=False): if path.exists(output_file) and not override: raise Exception('%s already exists and override is False' % output_file) super(CsvVisualizer, self).__init__() self._file = output_file self._data = {} def add_entry(self, index, key, result, **kwargs): if key in self._data[index]: print('Warning: Found previous value for %s in visualizer' % key) self._data[index].update({key: result}) def close(self, format='csv'): import pandas as pd if format == 'csv': pd.DataFrame.from_dict(self._data, orient='index').to_csv(self._file) elif format == 'json': pd.DataFrame.from_dict(self._data, orient='index').to_json(self._file) else: writer = pd.ExcelWriter(self._file) pd.DataFrame.from_dict(self._data, orient='index').to_excel(writer) writer.save() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return self
tensorflow/privacy
tensorflow_privacy/privacy/optimizers/dp_optimizer.py
Python
apache-2.0
15,920
0.005339
# Copyright 2020, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Differentially private optimizers for TensorFlow.""" from absl import logging import tensorflow as tf from tensorflow_privacy.privacy.dp_query import gaussian_query def make_optimizer_class(cls): """Given a subclass of `tf.compat.v1.train.Optimizer`, returns a DP-SGD subclass of it. Args: cls: Class from which to derive a DP subclass. Should be a subclass of `tf.compat.v1.train.Optimizer`. Returns: A DP-SGD subclass of `cls`. """ parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__ has_compute_gradients = hasattr(cls, 'compute_gradients') if has_compute_gradients: child_code = cls.compute_gradients.__code__ GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name if has_compute_gradients and child_code is not parent_code: logging.warning( 'WARNING: Calling make_optimizer_class() on class %s that overrides ' 'method compute_gradients(). Check to ensure that ' 'make_optimizer_class() does not interfere with overridden version.', cls.__name__) class DPOptimizerClass(cls): # pylint: disable=empty-docstring __doc__ = ("""Differentially private subclass of `{base_class}`. You can use this as a differentially private replacement for `{base_class}`. Note that you must ensure that any loss processed by this optimizer comes in vector form. This is the fully general form of the optimizer that allows you to define your own privacy mechanism. If you are planning to use the standard Gaussian mechanism, it is simpler to use the more specific `{gaussian_class}` class instead. When instantiating this optimizer, you need to supply several DP-related arguments followed by the standard arguments for `{short_base_class}`. Examples: ```python # Create GaussianSumQuery. dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5) # Create optimizer. opt = {dp_class}(dp_sum_query, 1, False, <standard arguments>) ``` When using the optimizer, be sure to pass in the loss as a rank-one tensor with one entry for each example. ```python # Compute loss as a tensor. Do not call tf.reduce_mean as you # would with a standard optimizer. loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits) train_op = opt.minimize(loss, global_step=global_step) ``` """).format( base_class='tf.compat.v1.train.' + cls.__name__, gaussian_class='DP' + cls.__name__.replace('Optimizer', 'GaussianOptimizer'), short_base_class=cls.__name__, dp_class='DP' + cls.__name__) def __init__( self, dp_sum_query, num_microbatches=None, unroll_microbatches=False, while_loop_parallel_iterations=10, *args, # pylint: disable=keyword-arg-before-vararg, g-doc-args **kwargs): """Initializes the DPOptimizerClass. Args: dp_sum_query: `DPQuery` object, specifying differential privacy mechanism to use. num_microbatches: Number of microbatches into which each minibatch is split. If `None`, will default to the size of the minibatch, and per-example gradients will be computed. 
unroll_microbatches: If true, processes microbatches within a Python loop instead of a `tf.while_loop`. Can be used if using a `tf.while_loop` raises an exception. while_loop_parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. Applicable only when unroll_microbatches is set to False. It gives users some control over memory consumption. *args: These will be passed on to the base class `__init__` method. **kwargs: These will be passed on to the base class `__init__` method. """ super().__init__(*args, **kwargs) self._dp_sum_query = dp_sum_query self._num_microbatches = num_microbatches self._global_state = None # TODO(b/122613513): Set unroll_microbatches=True to avoid this bug. # Beware: When num_microbatches is large (>100), enabling this parameter # may cause an OOM error. self._unroll_microbatches = unroll_microbatches self._while_loop_parallel_iterations = while_loop_parallel_iterations self._was_compute_gradients_called = False def compute_gradients(self, loss, var_list, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None, gradient_tape=None): """DP-SGD version of base class method.""" self._was_compute_gradients_called = True if self._global_state is None: self._global_state = self._dp_sum_query.initial_global_state() if callable(loss): # TF is running in Eager mode, check we received a vanilla tape. if not gradient_tape: raise ValueError('When in Eager mode, a tape needs to be passed.') vector_loss = loss() if self._num_microbatches is None: self._num_microbatches = tf.shape(input=vector_loss)[0] sample_state = self._dp_sum_query.initial_sample_state(var_list) microbatches_losses = tf.reshape(vector_loss, [self._num_microbatches, -1]) sample_params = ( self._dp_sum_query.derive_sample_params(self._global_state)) def process_microbatch(i, sample_state): """Process one microbatch (record) with privacy helper.""" microbatch_loss = tf.reduce_mean( input_tensor=tf.gather(microbatches_losses, [i])) with gradient_tape.stop_recording(): grads = gradient_tape.gradient(microbatch_loss, var_list) sample_state = self._dp_sum_query.accumulate_record( sample_params, sample_state, grads) return sample_state for idx in range(self._num_microbatches): sample_state = process_microbatch(idx, sample_state) grad_sums, self._global_state, _ = ( self._dp_sum_query.get_noised_result(sample_state, self._global_state)) def normalize(v): return v / tf.cast(self._num_microbatches, tf.float32) final_grads = tf.nest.map_structure(normalize, grad_sums) grads_and_vars = list(zip(final_grads, var_list)) return grads_and_vars else: # Note: it would be closer to the correct i.i.d. sampling of records if # we sampled each microbatch from the appropriate binomial distribution, # although that still wouldn't be quite correct because it would be # sampling from the dataset withou
t replacement. if self._num_microbatches is None: self._num_microbatches = tf.shape(input=loss)[0] microbatches_losses = tf.reshape(loss, [self._num_microbatches, -1])
sample_params = ( self._dp_sum_query.derive_sample_params(self._global_state)) def process_microbatch(i, sample_state): """Process one microbatch (record) with privacy helper.""" self_super = super(DPOptimizerClass, self) mean_loss = tf.reduce_mean(
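For orientation, a hedged sketch of how the `make_optimizer_class` factory above is typically used, following its own docstring: wrap a standard `tf.compat.v1.train` optimizer, then instantiate the resulting DP class with a `GaussianSumQuery`. The choice of `GradientDescentOptimizer` and the learning rate are illustrative assumptions, not taken from this file.

```python
import tensorflow as tf
from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.optimizers.dp_optimizer import make_optimizer_class

# Sketch only: the wrapped base optimizer and hyperparameters are assumptions.
DPGradientDescentOptimizer = make_optimizer_class(
    tf.compat.v1.train.GradientDescentOptimizer)

dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5)
opt = DPGradientDescentOptimizer(
    dp_sum_query,
    num_microbatches=1,
    unroll_microbatches=False,
    learning_rate=0.1)  # passed through to the base optimizer via **kwargs
```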
Castronova/EMIT
gui/views/WofSitesView.py
Python
gpl-2.0
3,111
0.002893
import wx

from gui.controller.CustomListCtrl import CustomListCtrl
from gui.controller.PlotCtrl import PlotCtrl


class WofSitesView(wx.Frame):
    def __init__(self, parent, title, table_columns):
        wx.Frame.__init__(self, parent=parent, id=-1, title=title, pos=wx.DefaultPosition,
                          size=(680, 700),
                          style=wx.FRAME_FLOAT_ON_PARENT | wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MAXIMIZE_BOX)

        self.start_date = wx.DateTime_Now() - 7 * wx.DateSpan_Day()
        self.end_date = wx.DateTime_Now()
        self.parent = parent
        self._data = None

        panel = wx.Panel(self)
        top_panel = wx.Panel(panel)
        middle_panel = wx.Panel(panel, size=(-1, 30))
        lower_panel = wx.Panel(panel)

        hboxTopPanel = wx.BoxSizer(wx.HORIZONTAL)
        self.plot = PlotCtrl(top_panel)
        hboxTopPanel.Add(self.plot.canvas, 1, wx.EXPAND | wx.ALL, 2)
        top_panel.SetSizer(hboxTopPanel)

        hboxMidPanel = wx.BoxSizer(wx.HORIZONTAL)
        self.startDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="Start")
        self.startDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.start_date)
        self.endDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="End")
        self.endDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.end_date)
        self.exportBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Export")
        self.addToCanvasBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Add to Canvas")
        self.PlotBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Preview")
        self.line_style_combo = wx.ComboBox(middle_panel, value="Line style")
        self.line_style_options = ["Line", "Scatter"]
        self.line_style_combo.AppendItems(self.line_style_options)

        hboxMidPanel.Add(self.startDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
        hboxMidPanel.Add(self.startDatePicker, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.endDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
        hboxMidPanel.Add(self.endDatePicker, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.PlotBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.exportBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.addToCanvasBtn, 1, wx.EXPAND | wx.ALL, 2)
        hboxMidPanel.Add(self.line_style_combo, 1, wx.EXPAND | wx.ALL, 2)
        middle_panel.SetSizer(hboxMidPanel)

        hboxLowPanel = wx.BoxSizer(wx.HORIZONTAL)
        # Build time series table
        self.variableList = CustomListCtrl(lower_panel)
        self.variableList.set_columns(table_columns)
        hboxLowPanel.Add(self.variableList, 1, wx.EXPAND | wx.ALL, 2)
        lower_panel.SetSizer(hboxLowPanel)

        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(top_panel, 1, wx.EXPAND | wx.ALL, 2)
        vbox.Add(middle_panel, 0, wx.EXPAND | wx.ALL, 2)
        vbox.Add(lower_panel, 1, wx.EXPAND | wx.ALL, 2)
        panel.SetSizer(vbox)

        self.status_bar = self.CreateStatusBar()
        self.status_bar.SetStatusText("Ready")

        self.Show()
ATSTI/administra
open_corretora/brokerage/wizard/end_contract.py
Python
gpl-2.0
1,064
0.005639
#-*- coding: utf-8 -*-

from openerp.osv import fields, osv


class finance_contract_rachat(osv.osv_memory):
    _name = "finance.contract.rachat"
    _columns = {
        'date': fields.date('Date de rachat'),
        'date_dem': fields.date('Date de la demande'),
        'motif': fields.text('Motif'),
        'memo': fields.text('Memo'),
        'act_rachat': fields.boolean('Rachat'),
        'act_res': fields.boolean('Resiliation'),
        'contract_id': fields.many2one('finance.contract', 'Contrat')
    }

    def set_rachat(self, cr, uid, ids, context=None):
        obj = self.browse(cr, uid, ids[0], context=context)
        vals = {
            'res_reason': obj.motif and obj.motif or False,
            'res_memo': obj.memo and obj.memo or False,
            'res_date': obj.date and obj.date or False,
            'res_dated': obj.date_dem and obj.date_dem or False,
            'res': obj.act_res,
            'rachat': obj.act_rachat
        }
        return self.pool.get('finance.contract').save_rachat(cr, uid, obj.contract_id.id, vals)
simonemurzilli/geonode
geonode/contrib/geosites/site_template/local_settings_template.py
Python
gpl-3.0
597
0
# flake8: noqa
# -*- coding: utf-8 -*-

###############################################
# Geosite local settings
###############################################

import os

# Outside URL
SITEURL = 'http://$DOMAIN'

OGC_SERVER['default']['LOCATION'] = os.path.join(GEOSERVER_URL, 'geoserver/')
OGC_SERVER['default']['PUBLIC_LOCATION'] = os.path.join(SITEURL, 'geoserver/')

# databases unique to site if not defined in site settings
"""
SITE_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(PROJECT_ROOT, '../development.db'),
    },
}
"""
wathsalav/xos
xos/core/views/projects.py
Python
apache-2.0
382
0.007853
from core.serializers import ProjectSerializer
from rest_framework import generics
from core.models import Project


class ProjectList(generics.ListCreateAPIView):
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer


class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
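A hedged sketch of how the two generic views above are typically wired into a URLconf; the file name, URL patterns, and the `core.views.projects` module path (derived from the path in the record header) are assumptions, not taken from this repository.

```python
# Sketch only: patterns and module path are assumptions.
from django.conf.urls import url

from core.views.projects import ProjectList, ProjectDetail

urlpatterns = [
    url(r'^projects/$', ProjectList.as_view()),
    url(r'^projects/(?P<pk>[0-9]+)/$', ProjectDetail.as_view()),
]
```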
arielrossanigo/fades
fades/helpers.py
Python
gpl-3.0
7,161
0.001397
# Copyright 2014-2015 Facundo Batista, Nicolás Demarchi # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3, as published # by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranties of # MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. # # For further info, check https://github.com/PyAr/fades """A collection of utilities for fades.""" import os import sys import json import logging import subprocess from urllib import request from urllib.error import HTTPError import pkg_resources logger = logging.getLogger(__name__) SHOW_VERSION_CMD = """ import sys, json d = dict(path=sys.executable) d.update(zip('major minor micro releaselevel serial'.split(), sys.version_info)) print(json.dumps(d)) """ BASE_PYPI_URL = 'https://pypi.python.org/pypi/{name}/json' STDOUT_LOG_PREFIX = ":: " class ExecutionError(Exception): """Execution of subprocess ended not in 0.""" def __init__(self, retcode, cmd, collected_stdout): """Init.""" self._retcode = retcode self._cmd = cmd self._collected_stdout = collected_stdout super().__init__() def dump_to_log(self, logger): """Send the cmd info and collected stdout to logger.""" logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd) for line in self._collected_stdout: logger.error(STDOUT_LOG_PREFIX + line) def logged_exec(cmd): """Execute a command, redirecting the out
put to the log.""" logger = logging.getLogger('fades.exec')
logger.debug("Executing external command: %r", cmd) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout = [] for line in p.stdout: line = line[:-1].decode("utf8") stdout.append(line) logger.debug(STDOUT_LOG_PREFIX + line) retcode = p.wait() if retcode: raise ExecutionError(retcode, cmd, stdout) return stdout def get_basedir(): """Get the base fades directory, from xdg or kinda hardcoded.""" try: from xdg import BaseDirectory # NOQA return os.path.join(BaseDirectory.xdg_data_home, 'fades') except ImportError: logger.debug("Package xdg not installed; using ~/.fades folder") from os.path import expanduser return expanduser("~/.fades") def get_confdir(): """Get the config fades directory, from xdg or kinda hardcoded.""" try: from xdg import BaseDirectory # NOQA return os.path.join(BaseDirectory.xdg_config_home, 'fades') except ImportError: logger.debug("Package xdg not installed; using ~/.fades folder") from os.path import expanduser return expanduser("~/.fades") def _get_interpreter_info(interpreter=None): """Return the interpreter's full path using pythonX.Y format.""" if interpreter is None: # If interpreter is None by default returns the current interpreter data. major, minor = sys.version_info[:2] executable = sys.executable else: args = [interpreter, '-c', SHOW_VERSION_CMD] try: requested_interpreter_info = logged_exec(args) except Exception as error: logger.error("Error getting requested interpreter version: %s", error) exit() requested_interpreter_info = json.loads(requested_interpreter_info[0]) executable = requested_interpreter_info['path'] major = requested_interpreter_info['major'] minor = requested_interpreter_info['minor'] if executable[-1].isdigit(): executable = executable.split(".")[0][:-1] interpreter = "{}{}.{}".format(executable, major, minor) return interpreter def get_interpreter_version(requested_interpreter): """Return a 'sanitized' interpreter and indicates if it is the current one.""" logger.debug('Getting interpreter version for: %s', requested_interpreter) current_interpreter = _get_interpreter_info() logger.debug('Current interpreter is %s', current_interpreter) if requested_interpreter is None: return(current_interpreter, True) else: requested_interpreter = _get_interpreter_info(requested_interpreter) is_current = requested_interpreter == current_interpreter logger.debug('Interpreter=%s. It is the same as fades?=%s', requested_interpreter, is_current) return (requested_interpreter, is_current) def get_latest_version_number(project_name): """Return latest version of a package.""" try: raw = request.urlopen(BASE_PYPI_URL.format(name=project_name)).read() except HTTPError as error: logger.warning("Network error. Error: %s", error) raise error try: data = json.loads(raw.decode("utf8")) latest_version = data["info"]["version"] return latest_version except (KeyError, ValueError) as error: # malformed json or empty string logger.error("Could not get the version of the package. Error: %s", error) raise error def check_pypi_updates(dependencies): """Return a list of dependencies to upgrade.""" dependencies_up_to_date = [] for dependency in dependencies.get('pypi', []): # get latest version from PyPI api try: latest_version = get_latest_version_number(dependency.project_name) except Exception as error: logger.warning("--check-updates command will be aborted. 
Error: %s", error) return dependencies # get required version required_version = None if dependency.specs: _, required_version = dependency.specs[0] if required_version: dependencies_up_to_date.append(dependency) if latest_version > required_version: logger.info("There is a new version of %s: %s", dependency.project_name, latest_version) elif latest_version < required_version: logger.warning("The requested version for %s is greater " "than latest found in PyPI: %s", dependency.project_name, latest_version) else: logger.info("The requested version for %s is the latest one in PyPI: %s", dependency.project_name, latest_version) else: project_name_plus = "{}=={}".format(dependency.project_name, latest_version) dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus)) logger.info("There is a new version of %s: %s and will use it.", dependency.project_name, latest_version) dependencies["pypi"] = dependencies_up_to_date return dependencies
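A hedged usage sketch for the `logged_exec` / `ExecutionError` helpers shown above; the command being executed is an arbitrary example, not something fades itself runs.

```python
# Sketch only: the command is illustrative.
import logging

from fades.helpers import ExecutionError, logged_exec

logger = logging.getLogger(__name__)

try:
    lines = logged_exec(['python3', '--version'])
except ExecutionError as error:
    error.dump_to_log(logger)   # re-logs the failed command and its captured stdout
else:
    for line in lines:
        print(line)
```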
appuio/ansible-role-openshift-zabbix-monitoring
vendor/openshift-tools/ansible/inventory/aws/ec2.py
Python
apache-2.0
55,406
0.002978
#!/usr/bin/env python2 ''' EC2 external inventory script ================================= Generates inventory that Ansible can understand by making API request to AWS EC2 using the Boto library. NOTE: This script assumes Ansible is being executed where the environment variables needed for Boto have already been set: export AWS_ACCESS_KEY_ID='AK123' export AWS_SECRET_ACCESS_KEY='abc123' This script also assumes there is an ec2.ini file alongside it. To specify a different path to ec2.ini, define the EC2_INI_PATH environment variable: export EC2_INI_PATH=/path/to/my_ec2.ini If you're using eucalyptus you need to set the above variables and you need to define: export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus If you're using boto profiles (requires boto>=2.24.0) you can choose a profile using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using the AWS_PROFILE variable: AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html When run against a specific host, this script returns the following variables: - ec2_ami_launch_index - ec2_architecture - ec2_association - ec2_attachTime - ec2_attachment - ec2_attachmentId - ec2_client_token - ec2_deleteOnTermination - ec2_description - ec2_deviceIndex - ec2_dn
s_name - ec2_eventsSe
t - ec2_group_name - ec2_hypervisor - ec2_id - ec2_image_id - ec2_instanceState - ec2_instance_type - ec2_ipOwnerId - ec2_ip_address - ec2_item - ec2_kernel - ec2_key_name - ec2_launch_time - ec2_monitored - ec2_monitoring - ec2_networkInterfaceId - ec2_ownerId - ec2_persistent - ec2_placement - ec2_platform - ec2_previous_state - ec2_private_dns_name - ec2_private_ip_address - ec2_publicIp - ec2_public_dns_name - ec2_ramdisk - ec2_reason - ec2_region - ec2_requester_id - ec2_root_device_name - ec2_root_device_type - ec2_security_group_ids - ec2_security_group_names - ec2_shutdown_state - ec2_sourceDestCheck - ec2_spot_instance_request_id - ec2_state - ec2_state_code - ec2_state_reason - ec2_status - ec2_subnet_id - ec2_tenancy - ec2_virtualization_type - ec2_vpc_id These variables are pulled out of a boto.ec2.instance object. There is a lack of consistency with variable spellings (camelCase and underscores) since this just loops through all variables the object exposes. It is preferred to use the ones with underscores when multiple exist. In addition, if an instance has AWS Tags associated with it, each tag is a new variable named: - ec2_tag_[Key] = [Value] Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. ''' # (c) 2012, Peter Sankauskas # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
###################################################################### import sys import os import argparse import re from time import time import boto from boto import ec2 from boto import rds from boto import elasticache from boto import route53 import six from six.moves import configparser from collections import defaultdict try: import json except ImportError: import simplejson as json class Ec2Inventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} # Boto profile to use (if any) self.boto_profile = None # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Make sure that profile_name is not passed at all if not set # as pre 2.24 boto will fall over otherwise if self.boto_profile: if not hasattr(boto.ec2.EC2Connection, 'profile_name'): self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) config.read(ec2_ini_path) # is eucalyptus? 
self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') if config.has_option('ec2', 'destination_format') and \ config.has_option('ec2', 'destination_format_tags'): self.destination_format = config.get('ec2', 'destination_format') self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') else: self.destination_format = None self.destination_format_tags = None # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? self.rds
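For context, a hedged sketch of inspecting the dynamic inventory script above from Python; it relies only on the `--list` flag and the `EC2_INI_PATH` variable documented in the script's docstring, and the ini path shown is a placeholder.

```python
# Sketch only: the ini path is a placeholder; valid AWS credentials are assumed.
import json
import os
import subprocess

os.environ.setdefault('EC2_INI_PATH', '/path/to/my_ec2.ini')
raw = subprocess.check_output(['./ec2.py', '--list'])
inventory = json.loads(raw.decode('utf-8'))
print(sorted(inventory.keys()))   # groups such as regions, zones, tags, security groups
```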
Vvucinic/Wander
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/gdal/raster/source.py
Python
artistic-2.0
13,274
0.000904
import json import os from ctypes import addressof, byref, c_double, c_void_p from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.driver import Driver from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.prototypes import raster as capi from django.contrib.gis.gdal.raster.band import BandList from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS from django.contrib.gis.gdal.srs import SpatialReference, SRSException from django.contrib.gis.geometry.regex import json_regex from django.utils import six from django.utils.encoding import ( force_bytes, force_text, python_2_unicode_compatible, ) from django.utils.functional import cached_property class TransformPoint(list): indices = { 'origin': (0, 3), 'scale': (1, 5), 'skew': (2, 4), } def __init__(self, raster, prop): x = raster.geotransform[self.indices[prop][0]] y = raster.geotransform[self.indices[prop][1]] list.__init__(self, [x, y]) self._raster = raster self._prop = prop @property def x(self): return self[0] @x.setter def x(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][0]] = value self._raster.geotransform = gtf @property def y(self): return self[1] @y.setter def y(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][1]] = value self._raster.geotransform = gtf @python_2_unicode_compatible class GDALRaster(GDALBase): """ Wraps a raster GDAL Data Source object. """ def __init__(self, ds_input, write=False): self._write = 1 if write else 0 Driver.ensure_registered() # Preprocess json inputs. This converts json strings to dictionaries, # which are parsed below the same way as direct dictionary inputs. if isinstance(ds_input, six.string_types) and json_regex.match(ds_input): ds_input = json.loads(ds_input) # If input is a valid file path, try setting file as source. if isinstance(ds_input, six.string_types): if not os.path.exists(ds_input): raise GDALException('Unable to read raster source input "{}"'.format(ds_input)) try: # GDALOpen will auto-detect the data source type. 
self._ptr = capi.open_ds(force_bytes(ds_input), self._write) except GDALException as err: raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err)) elif isinstance(ds_input, dict): # A new raster needs to be created in write mode self._write = 1 # Create driver (in memory by default) driver = Driver(ds_input.get('driver', 'MEM')) # For out of memory drivers, check filename argument if driver.name != 'MEM' and 'name' not in ds_input: raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name)) # Check if width and height where specified if 'width' not in ds_input or 'height' not in ds_input: raise GDALException('Specify width and height attributes for JSON or dict input.') # Check if srid was specified if 'srid' not in ds_input: raise GDALException('Specify srid for JSON or dict input.') # Create GDAL Raster self._ptr = capi.create_ds( driver._ptr, force_bytes(ds_input.get('name', '')), ds_input['width'], ds_input['height'], ds_input.get('nr_of_bands', len(ds_input.get('bands', []))), ds_input.get('datatype', 6), None ) # Set band data if provided for i, band_input in enumerate(ds_input.get('bands', [])): band = self.bands[i] band.data(band_input['data']) if 'nodata_value' in band_input: band.nodata_value = band_input['nodata_value'] # Set SRID self.srs = ds_input.get('srid') # Set additional properties if provided if 'origin' in ds_input: self.origin.x, self.origin.y = ds_input['origin'] if 'scale' in ds_input: self.scale.x, self.scale.y = ds_input['scale'] if 'skew' in ds_input: self.skew.x, self.skew.y = ds_input['skew'] elif isinstance(ds_input, c_void_p): # Instantiate the object using an existing pointer to a gdal raster. self._ptr = ds_input else: raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input))) def __del__(self): if self._ptr and capi: capi.close_ds(self._ptr) def __str__(self): return self.name def __repr__(self): """ Short-hand representation because WKB may be very large. """ return '<Raster object at %s>' % hex(addressof(self._ptr)) def _flush(self): """ Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, no
data_values and pixel values. This function will be called automaticall
y wherever it is needed. """ # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException('Raster needs to be opened in write mode to change values.') capi.flush_ds(self._ptr) @property def name(self): """ Returns the name of this raster. Corresponds to filename for file-based rasters. """ return force_text(capi.get_ds_description(self._ptr)) @cached_property def driver(self): """ Returns the GDAL Driver used for this raster. """ ds_driver = capi.get_ds_driver(self._ptr) return Driver(ds_driver) @property def width(self): """ Width (X axis) in pixels. """ return capi.get_ds_xsize(self._ptr) @property def height(self): """ Height (Y axis) in pixels. """ return capi.get_ds_ysize(self._ptr) @property def srs(self): """ Returns the SpatialReference used in this GDALRaster. """ try: wkt = capi.get_ds_projection_ref(self._ptr) if not wkt: return None return SpatialReference(wkt, srs_type='wkt') except SRSException: return None @srs.setter def srs(self, value): """ Sets the spatial reference used in this GDALRaster. The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor. """ if isinstance(value, SpatialReference): srs = value elif isinstance(value, six.integer_types + six.string_types): srs = SpatialReference(value) else: raise ValueError('Could not create a SpatialReference from input.') capi.set_ds_projection_ref(self._ptr, srs.wkt.encode()) self._flush() @property def geotransform(self): """ Returns the geotransform of the data source. Returns the default geotransform if it does not exist or has not been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0]. """ # Create empty ctypes double array for data gtf = (c_double * 6)() capi.get_ds_geotransform(self._ptr, byref(gtf)) return list(gtf) @geotransform.setter def geotransform(self, values): "Sets the geotransform for the data source." if sum([isinstance(x, (int, float)) for x in values]) != 6: raise ValueError('Geotransform must consist of 6 numeric values.') # Create ctypes double array with input and write data values = (c
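The constructor above accepts a dict describing an in-memory raster (driver, width, height, srid, bands, origin/scale/skew). A hedged sketch of that input form, with illustrative values; it assumes the GDAL library is installed and Django's GIS contrib app is importable.

```python
# Sketch only: all values are illustrative.
from django.contrib.gis.gdal import GDALRaster

rst = GDALRaster({
    'srid': 4326,
    'width': 4,
    'height': 4,
    'origin': [0, 0],
    'scale': [1, -1],
    'bands': [{'data': list(range(16)), 'nodata_value': 255}],
})
print(rst.width, rst.height, rst.srs.srid)
```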
Yelp/yelp_clog
clog/scribe_net.py
Python
apache-2.0
8,613
0.001974
# Copyright 2015 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import boto import boto.s3 import boto.s3.connection import datetime import logging import re import sys import zlib import six # THIS MUST END IN A / S3PREFIX = "logs/" S3_KEY_RE = re.compile(r'.*/(?P<stream_name>[\w-]+)/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/.+(?P<gz>\.gz)?$') #----------------------- SCRIBE LOG CHUNK OBJECTS -----------------------# class BadKeyError(Exception): def __init__(self, key, keytype=""): self.key = key self.keytype = keytype def __repr__(self): return "<BadKeyError %s:%s>" % (self.keytype, self.key) def __str__(self): return "BadKeyError: %s key %s did not match the expect
ed format" % (self.keytype, self.key) class ScribeFile(object): """Base class for Scribe file objects. These represent a single log chunk, and can be read or listed. Scribe File objects are equal if the combination of their date, stream name, and aggregator are the same. This allows you to, for
example, create a set of files from both s3 and a local cache without reading the same chunk twice. Important methods: read: adds a file's contents to the stream ostream, transparently handling gzip'd data Properties: sort_key: A key to sort or compare with size: The length of the record in bytes """ def __init__(self, stream_name, year, month, day): self.stream_name = stream_name self.year = year self.month = month self.day = day self.date = datetime.date(self.year, self.month, self.day) @property def size(self): raise NotImplementedError def read(self, ostream=sys.stdout): raise NotImplementedError def read_orig(self, ostream=sys.stdout): raise NotImplementedError class ScribeS3File(ScribeFile): """Represent scribe log chunks on S3""" def __init__(self, key): self.key = key keymd = S3_KEY_RE.match(key.name) if not keymd: raise BadKeyError(key, "S3") super(ScribeS3File, self).__init__( keymd.group('stream_name'), int(keymd.group('year')), int(keymd.group('month')), int(keymd.group('day')), ) def read(self, ostream=sys.stdout): """Read self into the ostream""" decompressor = zlib.decompressobj(31) # Python 2 works with string, python 3 with bytes remainder = "" if six.PY2 else b"" if self.key.name.endswith(".gz"): for data in self.key: remainder += data try: ostream.write(decompressor.decompress(remainder)) remainder = decompressor.unconsumed_tail except zlib.error: # maybe we didn't have enough data in this chunk to # decompress any. if so, build up a string to decompress pass else: for data in self.key: ostream.write(data) if len(remainder) > 0: logging.error("Encountered %d extra bits in zlib output", len(remainder)) def read_orig(self, ostream=sys.stdout): """Read the original of self (compressed if applicable) to ostream""" self.key.get_contents_to_file(ostream) @property def size(self): return self.key.size #----------------------- SCRIBE CONNECTION MANAGERS -----------------------# class ScribeS3(object): """This class represents an S3 connection and abstracts scribe interactions""" LOGS_BASE_PATH = "{prefix}{stream}/{year:=04d}/{month:=02d}/{day:=02d}" LOG_FILE_PATH = LOGS_BASE_PATH + "/{aggregator}-{part:=05d}.gz" COMPLETE_FILE_PATH = LOGS_BASE_PATH + "/COMPLETE" def __init__( self, s3_host, aws_access_key_id, aws_secret_access_key, s3_bucket, s3_key_prefix=None, ): self.s3_key_prefix = s3_key_prefix if self.s3_key_prefix and self.s3_key_prefix[-1] != '/': self.s3_key_prefix += '/' self.s3_connection = boto.s3.connection.S3Connection( host=s3_host, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, ) self.s3_bucket = self.s3_connection.get_bucket(s3_bucket) logging.debug('connected to s3 with %s', self.s3_connection) @property def streams(self): ret = set() for prefix in self.s3_bucket.list(prefix=self.s3_key_prefix, delimiter="/"): prefix = prefix.name.replace(self.s3_key_prefix or S3PREFIX, "", 1).rstrip('/') ret.add(prefix) return ret def complete_for(self, stream_name, date): """Are the S3 uploads for the given stream_name on the given date marked as complete?""" complete_key_name = self.COMPLETE_FILE_PATH.format( prefix=self.s3_key_prefix, stream=stream_name, year=date.year, month=date.month, day=date.day, ) key = self.s3_bucket.get_key(complete_key_name) return bool(key) def get_logs(self, stream_name, date): prefix = self.LOGS_BASE_PATH.format( prefix=self.s3_key_prefix, stream=stream_name, year=date.year, month=date.month, day=date.day, ) ret = set() for s3_name in self.s3_bucket.list(prefix=prefix): if s3_name.name.endswith("COMPLETE"): 
continue if s3_name.name.endswith("_SUCCESS"): continue if s3_name.name.endswith(".bad"): continue ret.add(ScribeS3File(s3_name)) return ret def get_log(self, stream_name, date, aggregator, part): """Get a specific log .. warning:: This function is deprecated and should not be used. """ key_name = self.LOG_FILE_PATH.format( prefix=self.s3_key_prefix, stream=stream_name, year=date.year, month=date.month, day=date.day, aggregator=aggregator, part=part, ) key = self.s3_bucket.get_key(key_name) if key: return ScribeS3File(key) return None #----------------------- COMMAND OBJECTS -----------------------# class ScribeReader(object): """ ScribeReader provides an interface for interacting with individual log elements (ScribeFile objects) in Scribe """ def __init__(self, stream_name, s3_connections=None, fs_connection=None, ostream=sys.stdout, not_in_s3=False): """Initialize the ScribeReader Args: stream_name: The stream to read from s3_connections: Optionally, an iterable of ScribeS3 objects fs_connection: Optionally, a ScribeFS object not_in_s3: Remove only keys unique to the fs_connection Will read from s3_connection and/or fs_connection, depending on which are provided """ self.stream_name = stream_name self.s3_connections = s3_connections self.fs_connection = fs_connection self.ostream = ostream self.not_in_s3 = not_in_s3 def logs_for_date(self, date): """Write to the initial ostream for the given date""" keys = set() if self.fs_connection: keys |= self.fs_connection.get_logs(self.stream_name, date) if self.s3_connections: for connection in self.s3_connections: if connection is None:
hellowebbooks/hellowebbooks-website
blog/urls.py
Python
mit
237
0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017-06-27 michael_yin
#
"""
"""

from django.conf import settings
from django.conf.urls import include, url
from django.core.urlresolvers import reverse
jdstemmler/jdstemmler.github.io
pelicanconf.py
Python
mit
2,048
0.001953
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

import os
import datetime

AUTHOR = u'Jayson Stemmler'
SITENAME = u'Jayson Stemmler'
SITEURL = ''

SITENAME = "Jayson Stemmler's Blog"
SITETITLE = 'Jayson Stemmler'
SITESUBTITLE = 'Research / Data Scientist'
SITEDESCRIPTION = ''
# SITELOGO = SITEURL + '/images/profile.png'
# FAVICON = SITEURL + '/images/favicon.ico'

COPYRIGHT_NAME = "Jayson Stemmler"
COPYRIGHT_YEAR = datetime.datetime.today().strftime('%Y')

# THEME_DIR = os.path.join(os.getenv("HOME"), 'Documents/Blogging/pelican-themes')
# THEME = os.path.join(THEME_DIR, 'Flex')
THEME = 'themes/Flex'

USE_FOLDER_AS_CATEGORY = True

PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']

TIMEZONE = 'America/Los_Angeles'

DEFAULT_LANG = u'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

STATIC_PATHS = ['images', 'extra/CNAME']
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}

ARTICLE_URL = 'posts/{date:%Y}/{date:%b}/{slug}'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/{slug}.html'

PAGE_URL = 'pages/{slug}'
PAGE_SAVE_AS = 'pages/{slug}.html'

YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/index.html'

## THEME OPTIONS
DISQUS_SITENAME = "jdstemmlerblog"
GOOGLE_ANALYTICS = "UA-99010895-1"

MAIN_MENU = True

SITELOGO = 'https://storage.googleapis.com/jdstemmler-blog-images/profile.png'

LINKS = (('Resume', 'https://represent.io/jdstemmler'),)

SOCIAL = (('linkedin', 'https://linkedin.com/in/jdstemmler/en'),
          ('github', 'https://github.com/jdstemmler'))

MENUITEMS = (('Archives', '/archives.html'),
             ('Categories', '/categories.html'),
             ('Tags', '/tags.html'),)

BROWSER_COLOR = '#333333'
ROBOTS = 'index, follow'
pylayers/pylayers
pylayers/location/geometric/util/boxn.py
Python
mit
27,884
0.029373
# -*- coding:Utf-8 -*- ##################################################################### #This file is part of RGPA. #Foobar is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. #Foobar is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with Foobar. If not, see <http://www.gnu.org/licenses/>. #------------------------------------------------------------------- #authors : #Nicolas AMIOT : [email protected] #Bernard UGUEN : [email protected] #Mohamed LAARAIEDH : [email protected] ##################################################################### """ .. curentmodule:: boxn .. autosummary: """ from pylayers.util.project import * import numpy as np import os import pdb import copy import time try: from tvtk.api import tvtk from mayavi import mlab except: print('Layout:Mayavi is not installed') #GeomNetType = np.dtype([('Id',np.uint64), # ('time',np.uint64), # ('p',float,(3)), # ('v',float,(3)), # ('a',float,(3))]) class LBoxN(PyLayers): """ class LBoxN List of BoxN Atributes --------- box : np.array array containing BoxN object. default void vol : list volume of each boxes from self.box. default void bd : np.arrays 2*len(box) x ndim contains all the boundaries of boxes from self.box. default void ndim : int dimension of the nbox parmsh : dictionnary keys ['display'] =True ['interactive'] =False LB : LBoxN Create a BoxN object from another one. default : None, the LBoxN obebject created is void. Methods ------- mesure(self): measure intervals of box append(self,b): append a box 'b' to a Lbox append_l(self,lb): append a lbox 'lb' to a lbox info(self): display LBoxN information bd2coord(self,Mapping = False): convert boundaries of Lbox to their vertexes coordintates octant(self): quadtree on Lboxes volume(self): estimate volume of LBoxes intersect(self,lb): EXPERIMENTAL find the intersection of LBoxN show3(self,col='b',Id=0): required file generation for geomview display """ #__slots__=('box','vol','bd','ndim','ctr','grav','parmsh') def __init__(self,Lb=None,ndim=3): self.ctr= [] if Lb==None: self.box = np.array([]) self.vol = [] self.bd=[] self.ndim = ndim # !!! TO BE DONE # self.bnum = [] else : self.box = np.array([]) self.vol = [] self.bd=[] for b in Lb: self.append(b) self.ndim=b.ndim self.mesure() self.parmsh={} self.parmsh['display']=True self.parmsh['interactive']=False def mesure(self): """ LMeasure BoxN Obtain measure of : - size of each interval from each dimension for each boxes - center of each interval from each dimension for each boxes - Volume of the BoxN for each boxes (NOT WORKING) """ if len(self.bd ) != 0: lbd=len(self.bd)/2 self.ctr=np.zeros((lbd,self.ndim)) for i in xrange(lbd):self.ctr[i,:]=(self.bd[2*i]+self.bd[(2*i)+1])/2. 
#########################FONTIONNE MAIS TROP LOURD/TENT quand bcp de box # C =np.array(([[1/2.,1/2.]])) # #M =np.array(([[-1.,1.]])) # I = np.identity(len(self.bd)/2) # CTR = np.kron(I,C) # #MES = np.kron(I,M) # #self.mes = np.dot(MES,self.bd)#self.bd[1,:]-self.bd[0,:] # self.ctr = np.dot(CTR,self.bd)#(self.bd[1,:]+self.bd[0,:])/2.0 #self.vol = np.prod(self.mes,axis=1) else : self.ctr = [] def append(self,b): """append : Append a box to LboxN Parameters ---------- b : BoxN box to added Returns ------- Nothing but update self.box status """ self.box=np.append(self.box,b) try: self.bd=np.vstack((self.bd,b.bd[0])) self.bd=np.vstack((self.bd,b.bd[1])) except: self.bd=b.bd[0] self.bd=np.vstack((self.bd,b.bd[1])) V1 = sum(self.vol) V2 = b.vol # uptate center of gravity try: self.grav = (V1*self.grav+V2*b.ctr)/(V1+V2) except: self.grav = b.ctr self.vol.append(b.vol) self.ctr.append(b.ctr) def append_l(self,lb): """Append LBoxN to LBoxN Parameters ---------- lb : LBoxN lbox to be added Returns ------- Nothing but update self.box status """ # for i in xrange(len(lb.box)):self.append(lb.box[i]) self.box=np.append(self.box,lb.box) try : self.bd=np.vstack((self.bd,lb.bd)) self.ctr=np.vstack((self.ctr,lb.ctr)) self.vol=self.vol+lb.vol except: self.bd=lb.bd self.ctr=lb.ctr self.vol=lb.vol # try: # self.bd=np.vstack((self.bd,lb.bd[i][0])) # self.bd=np.vstack((self.bd,lb.bd[i][1])) # except: # self.bd=lb.bd[i][0] # self.bd=lb.bd[i][1] # self.box = self.box + lb.box # V1 = sum(self.vol) # V2 = lb.box[0].vol*len(lb.box) # # uptate center of gravity # try: # self.grav = (V1*self.grav+V2*lb.grav)/(V1+V2) # except: # self.grav = lb.grav # self.vol = self.vol + lb.vol def info(self): """ display LBoxN information """ Vtot = 0 for k in range(len(self.box)): #print "Box : ",k," Volume :",self.vol[k] #print "ndim :",self.ndim print("------------") self.box[k].info() Vtot = Vtot+self.box[k].vol print("Volume : ",Vtot) def bd2coord(self,Mapping = False): """Boundary to coordinates Convert boundaries of Lbox to their vertexes coordintates in : [xmin ymin zmin] [xmax ymax zmax] out : [xmin ymin zmin] [xmin ymax zmin] [xmax ymin zmin] [xmax ymax zmin] [xmin ymin zmax] [xmin ymax zmax] [xmax ymin zmax] [xmax ymax zmax] Parameters ---------- Mapping : Boolean return a mapping of the vertex in regards of Lbox. Default = False Returns ------- P : array 2^ndim x ndim coordinates of box vertex """ lbd=len(self.bd) dimm1 = pow(2,self.ndim-1) dim = pow(2,self.ndim) P=np.zeros((lbd*dimm1,self.ndim)) #P=(len self.bd/2)*4 # organisation de P if self.ndim == 3: R=np.repeat(range(0,dimm1*lbd,dimm1),2) R[range(1,len(R),2)]=R[range(1,len(R),2)]+1 if self.ndim == 2: R=np.repeat(range(0,dimm1*lbd,dim),2)
R[range(1,len(R),2)]=R[range(1,len(R),2)]+1 if self.ndim == 3: RZ=np.repeat(range(0,dimm1*lbd,dim),dimm1)+(lbd/2)*range(0,dimm1,1) # aller chercher dans self.bd R2a=np.repeat(self.bd[range(0,lbd,2),
:],dimm1,axis=0) R2b=np.repeat(self.bd[range(1,lbd,2),:],dimm1,axis=0) # # X # P[R,0]=R2a[:,0]#np.repeat(L.bd[range(0,lbd,2),0],4) # P[R+2,0]=R2b[:,0]#np.repeat(L.bd[range(1,lbd,2),0],4) # # Y # P[np.sort(np.mod(R+3,4*lbd)
matagus/django-jamendo
apps/jamendo/context_processors.py
Python
bsd-3-clause
558
0.005376
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from datetime import date

from django.conf import settings


def settings_context(request):
    """
    Makes available a template var for some interesting var in settings.py
    """
    try:
        ITEMS_PER_PAGE = settings.ITEMS_PER_PAGE
    except AttributeError:
        print "oooo"
        ITEMS_PER_PAGE = 20

    try:
        TAGS_PER_PAGE = settings.TAGS_PER_PAGE
    except AttributeError:
        TAGS_PER_PAGE = 200

    return {"ITEMS_PER_PAGE": ITEMS_PER_PAGE,
            "TAGS_PER_PAGE": TAGS_PER_PAGE}
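A hedged sketch of how a context processor like the one above is typically enabled; it assumes the app is importable as `jamendo` and the old-style `TEMPLATE_CONTEXT_PROCESSORS` setting matching the Python 2-era Django this file targets.

```python
# Sketch only: dotted paths and settings layout are assumptions.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'jamendo.context_processors.settings_context',
)
```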
tom-henderson/bookmarks
bookmarks/bookmarks/migrations/0005_rename_app.py
Python
mit
431
0.006961
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('bookmarks', '0004_auto_20160901_2322'),
    ]

    operations = [
        migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
        migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
        migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
    ]
drandreaskrueger/cloudminingstatus
cloudminingstatus.py
Python
agpl-3.0
5,999
0.015503
''' cloudminingstatus.py @summary: Show selected API data from cloudhasher and miningpool. @author: Andreas Krueger @since: 12 Feb 2017 @contact: https://github.com/drandreaskrueger @copyright: @author @since @license @license: Donationware, see README.md. Plus see LICENSE. @version: v0.1.0 @status: It is working well. @todo: Make it into webservice? ''' from __future__ import print_function import time import sys import pprint import requests # pip install requests SLEEP_SECONDS= 5*60 SHOW_COMPOSITE_RESULTS = True try: from credentials_ME import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY except: from credentials import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY POOL_API_URL="http://soil.miners-zone.net/apisoil/accounts/%s" HASHER_ORDERS_API_URL="https://www.nicehash.com/api?method=orders.get&my&algo=20&location=0&id=%s&key=%s" HASHER_BALANCE_API_URL="https://www.nicehash.com/api?method=balance&id=%s&key=%s" # unused def humanTime(epoch): return time.strftime("GMT %H:%M:%S %a %d %b %Y", time.gmtime(epoch)) POOL_JSON=[('currentHashrate', (lambda x: "%6.2f MHash/s 30m average" % (x/1000000.0))), ('hashrate' , (lambda x: "%6.2f MHash/s 3h average" % (x/1000000.0))), ('paymentsTotal' , (lambda x:x)), ('stats' , (lambda x: "%10.4f SOIL paid" % (float(x['paid'])/1000000000))), ('stats' , (lambda x: "%10.4f SOIL balance" % (float(x['balance'])/1000000000))), ('24hreward',(lambda x: "%10.4f SOIL" % (float(x)/1000000000))), ('stats' , (lambda x: "%d blocksFound" % (x['blocksFound']))), ('stats' , (lambda x: "%s lastShare" % (humanTime(x['lastShare'])))), ('workers' , (lambda x: "%s last beat" % (humanTime(x['0']['lastBeat'])))), ('workers' , (lambda x: "%s Online" % (not bool(x['0']['offline'])))), ('workersTotal', (lambda x:x)), ] HASHER_JSON_PATH=('result', 'orders', 0) HASHER_JSON=[ ('alive', (lambda x: x)), ('workers', (lambda x: x)),
('id', (lambda x: x)), ('pool_host', (lambda x: x)), ('pool_user', (lambda x: x)), ('limit_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))), ('accepted_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))), ('btc_paid', (lambda x: x)), ('btc_avail', (lambda x: x)), ('pr
ice', (lambda x: "%s BTC/GH/Day" % x)), ('end', (lambda x: "%4.2f days order lifetime" % (x/1000.0/60/60/24))), ] def getJsonData(url): """ get url, check for status_code==200, return as json """ try: r=requests.get(url) except Exception as e: print ("no connection: ", e) return False if r.status_code != 200: print ("not answered OK==200, but ", r.status_code) return False try: j=r.json() except Exception as e: print ("no json, text:") print (r.text) # raise e return False return j def showPoolData(url): """ gets all json data from pool, but shows only what is in POOL_JSON """ print ("Pool:") j=getJsonData(url) if not j: return False # pprint.pprint (j) for Jkey, Jfn in POOL_JSON: print (Jfn(j[Jkey]), "(%s)" % Jkey) return j def showHasherData(url): """ gets all json data from cloudhasher, but shows only what is in HASHER_JSON """ print ("CloudHasher:") j=getJsonData(url) if not j: return False # pprint.pprint (j) # climb down into the one branch with all the interesting data: j=j [HASHER_JSON_PATH[0]] [HASHER_JSON_PATH[1]] [HASHER_JSON_PATH[2]] # pprint.pprint (j) for Jkey, Jfn in HASHER_JSON: print (Jfn(j[Jkey]), "(%s)" % Jkey) estimate = (float(j['btc_avail']) / ( float(j['price'])*float(j['accepted_speed'])) ) print ("%.2f days" % estimate, end='') print ("(remaining btc / order price / hashrate)") return j def showCompositeResults(pooldata, hasherdata): """ Estimates a coin prices by money spent versus money mined. N.B.: In this form probably only be roughly correct during first buy order? We'll see. """ coinsMined = float(pooldata['stats']['paid']) coinsMined += float(pooldata['stats']['balance']) coinsMined /= 1000000000 hashingCostsBtc = float(hasherdata['btc_paid']) satoshiPrice = hashingCostsBtc / coinsMined * 100000000 print ("%.1f Satoshi/SOIL (mining price approx)" % satoshiPrice) return satoshiPrice def loop(sleepseconds): """ Shows both, then sleeps, the repeats. """ while True: print () pooldata=showPoolData(url=POOL_API_URL%POOL_API_USERNAME) print () hasherdata=showHasherData(url=HASHER_ORDERS_API_URL%(HASHER_API_ID, HASHER_API_KEY)) print () if SHOW_COMPOSITE_RESULTS and pooldata and hasherdata: showCompositeResults(pooldata, hasherdata) print () print (humanTime(time.time()), end='') print ("... sleep %s seconds ..." % sleepseconds) time.sleep(sleepseconds) def checkCredentials(): """ See credentials.py """ yourCredentials=(POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY) if "" in yourCredentials: print ("You must fill in credentials.py first.") print (yourCredentials) return False else: return True if __name__ == '__main__': if not checkCredentials(): sys.exit() try: loop(sleepseconds=SLEEP_SECONDS) except KeyboardInterrupt: print ("Bye.") sys.exit()
haamoon/tensorpack
examples/DeepQNetwork/common.py
Python
apache-2.0
3,829
0.000261
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: common.py # Author: Yuxin Wu <[email protected]> import random import time import threading import multiprocessing import numpy as np from tqdm import tqdm from six.moves import queue from tensorpack import * from tensorpack.utils.concurrency import * from tensorpack.utils.stats import * def play_one_episode(player, func, verbose=False): def f(s): spc = player.get_action_space() act = func([[s]])[0][0].argmax() if random.random() < 0.001: act = spc.sample() if verbose: print(act) return act return np.mean(player.play_one_episode(f)) def play_model(cfg, player): predfunc = OfflinePredictor(cfg) while True: score = play_one_episode(player, predfunc) print("Total:", score) def eval_with_funcs(predictors, nr_eval, get_player_fn): class Worker(StoppableThread, ShareSessionThread): def __init__(self, func, queue): super(Worker, self).__init__() self._func = func self.q = queue def func(self, *args, **kwargs): if self.stopped(): raise RuntimeError("stopped!") return self._func(*args, **kwargs) def run(self): with self.default_sess(): player = get_player_fn(train=False) while not self.stopped(): try: score = play_one_episode(player, self.func) # print("Score, ", score) except RuntimeError: return self.queue_put_stoppable(self.q, score) q = queue.Queue() threads = [Worker(f, q) for f in predictors] for k in threads: k.start() time.sleep(0.1) # avoid simulator bugs stat = StatCounter() try: for _
in tqdm(range(nr_eval), **get_tqdm_kwargs()): r = q.get() stat.feed(r) logger.info("Waiting for all the workers to finish the last run...") for k in threads: k.stop() for k in threads:
k.join() while q.qsize(): r = q.get() stat.feed(r) except: logger.exception("Eval") finally: if stat.count > 0: return (stat.average, stat.max) return (0, 0) def eval_model_multithread(cfg, nr_eval, get_player_fn): func = OfflinePredictor(cfg) NR_PROC = min(multiprocessing.cpu_count() // 2, 8) mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn) logger.info("Average Score: {}; Max Score: {}".format(mean, max)) class Evaluator(Triggerable): def __init__(self, nr_eval, input_names, output_names, get_player_fn): self.eval_episode = nr_eval self.input_names = input_names self.output_names = output_names self.get_player_fn = get_player_fn def _setup_graph(self): NR_PROC = min(multiprocessing.cpu_count() // 2, 20) self.pred_funcs = [self.trainer.get_predictor( self.input_names, self.output_names)] * NR_PROC def _trigger(self): t = time.time() mean, max = eval_with_funcs( self.pred_funcs, self.eval_episode, self.get_player_fn) t = time.time() - t if t > 10 * 60: # eval takes too long self.eval_episode = int(self.eval_episode * 0.94) self.trainer.monitors.put_scalar('mean_score', mean) self.trainer.monitors.put_scalar('max_score', max) def play_n_episodes(player, predfunc, nr): logger.info("Start evaluation: ") for k in range(nr): if k != 0: player.restart_episode() score = play_one_episode(player, predfunc) print("{}/{}, score={}".format(k, nr, score))
dongweiming/web_develop
chapter9/section4/kombu_consumer.py
Python
gpl-3.0
761
0.001314
# coding=utf-8
from kombu import Connection, Exchange, Queue, Consumer
from kombu.async import Hub

web_exchange = Exchange('web_develop', 'direct', durable=True)
standard_queue = Queue('standard', exchange=web_exchange,
                       routing_key='web.develop')
URI = 'librabbitmq://dongwm:123456@localhost:5672/web_develop'

hub = Hub()


def on_message(body, message):
    print("Body:'%s', Headers:'%s', Payload:'%s'" % (
        body, message.content_encoding, message.payload))
    message.ack()


with Connection(URI) as connection:
    connection.register_with_event_loop(hub)
    with Consumer(connection, standard_queue, callbacks=[on_message]):
        try:
            hub.run_forever()
        except KeyboardInterrupt:
            exit(1)
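A hedged sketch of the publisher counterpart to the consumer above; it is not part of this file, and simply reuses the same exchange, queue, and broker URI so the message lands on the queue the consumer binds.

```python
# Sketch only: the message body is illustrative.
from kombu import Connection, Exchange, Queue

web_exchange = Exchange('web_develop', 'direct', durable=True)
standard_queue = Queue('standard', exchange=web_exchange, routing_key='web.develop')
URI = 'librabbitmq://dongwm:123456@localhost:5672/web_develop'

with Connection(URI) as connection:
    producer = connection.Producer(serializer='json')
    producer.publish({'msg': 'hello'},
                     exchange=web_exchange,
                     routing_key='web.develop',
                     declare=[standard_queue])  # make sure the queue exists
```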
praveen-pal/edx-platform
i18n/transifex.py
Python
agpl-3.0
2,066
0.005808
#!/usr/bin/env python

import os, sys
from polib import pofile

from config import CONFIGURATION
from extract import SOURCE_WARN
from execute import execute

TRANSIFEX_HEADER = 'Translations in this file have been downloaded from %s'
TRANSIFEX_URL = 'https://www.transifex.com/projects/p/edx-studio/'


def push():
    execute('tx push -s')


def pull():
    for locale in CONFIGURATION.locales:
        if locale != CONFIGURATION.source_locale:
            #execute('tx pull -l %s' % locale)
            execute('tx pull --all')
    clean_translated_locales()


def clean_translated_locales():
    """
    Strips out the warning from all translated po files
    about being an English source file.
    """
    for locale in CONFIGURATION.locales:
        if locale != CONFIGURATION.source_locale:
            clean_locale(locale)


def clean_locale(locale):
    """
    Strips out the warning from all of a locale's translated po files
    about being an English source file.
    Iterates over machine-generated files.
    """
    dirname = CONFIGURATION.get_messages_dir(locale)
    for filename in ('django-partial.po', 'djangojs.po', 'mako.po'):
        clean_file(dirname.joinpath(filename))


def clean_file(file):
    """
    Strips out the warning from a translated po file about being an English source file.
    Replaces warning with a note about coming from Transifex.
    """
    po = pofile(file)
    if po.header.find(SOURCE_WARN) != -1:
        new_header = get_new_header(po)
        new = po.header.replace(SOURCE_WARN, new_header)
        po.header = new
        po.save()


def get_new_header(po):
    team = po.metadata.get('Language-Team', None)
    if not team:
        return TRANSIFEX_HEADER % TRANSIFEX_URL
    else:
        return TRANSIFEX_HEADER % team


if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise Exception("missing argument: push or pull")
    arg = sys.argv[1]
    if arg == 'push':
        push()
    elif arg == 'pull':
        pull()
    else:
        raise Exception("unknown argument: (%s)" % arg)
edx/edx-organizations
organizations/migrations/0002_auto_20170117_1434.py
Python
agpl-3.0
490
0.002041
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('organizations', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='logo',
            field=models.ImageField(help_text='Please add only .PNG files for logo images. This logo will be used on certificates.', max_length=255, null=True, upload_to='organization_logos', blank=True),
        ),
    ]
mirrorcoder/paramiko
tests/test_util.py
Python
lgpl-2.1
19,156
0.000104
# Copyright (C) 2003-2009 Robey Pointer <[email protected]> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Some unit tests for utility functions. """ from binascii import hexlify import errno import os from hashlib import sha1 import unittest import paramiko.util from paramiko.util import lookup_ssh_host_config as host_config, safe_string from paramiko.py3compat import StringIO, byte_ord, b # Note some lines in this configuration have trailing spaces on purpose test_config_file = """\ Host * User robey IdentityFile =~/.ssh/id_rsa # comment Host *.example.com \tUser bjork Port=3333 Host * """ dont_strip_whitespace_please = "\t \t Crazy something dumb " test_config_file += dont_strip_whitespace_please test_config_file += """ Host spoo.example.com Crazy something else """ test_hosts_file = """\ secure.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\ 9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\ D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc= happy.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\ BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\ 5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M= """ # for test 1: from paramiko import * class UtilTest(unittest.TestCase): def test_import(self): """ verify that all the classes can be imported from paramiko. 
""" symbols = list(globals().keys()) self.assertTrue("Transport" in symbols) self.assertTrue("SSHClient" in symbols) self.assertTrue("MissingHostKeyPolicy" in symbols) self.assertTrue("AutoAddPolicy" in symbols) self.assertTrue("RejectPolicy" in symbols) self.assertTrue("WarningPolicy" in symbols) self.assertTrue("SecurityOptions" in symbols) self.assertTrue("SubsystemHandler" in symbols) self.assertTrue("Channel" in symbols) self.assertTrue("RSAKey" in symbols) self.assertTrue("DSSKey" in symbols) self.assertTrue("Message" in symbols) self.assertTrue("SSHException" in symbols) self.assertTrue("AuthenticationException" in symbols) self.assertTrue("PasswordRequiredException" in symbols) self.assertTrue("BadAuthenticationType" in symbols) self.assertTrue("ChannelException" in symbols) self.assertTrue("SFTP" in symbols) self.assertTrue("SFTPFile" in symbols) self.assertTrue("SFTPHandle" in symbols) self.assertTrue("SFTPClient" in symbols) self.assertTrue("SFTPServer" in symbols) self.assertTrue("SFTPError" in symbols) self.assertTrue("SFTPAttributes" in symbols) self.assertTrue("SFTPServerInterface" in symbols) self.assertTrue("ServerInterface" in symbols) self.assertTrue("BufferedFile" in symbols) self.assertTrue("Agent" in symbols) self.assertTrue("AgentKey" in symbols) self.assertTrue("HostKeys" in symbols) self.assertTrue("SSHConfig" in symbols) self.assertTrue("util" in symbols) def test_parse_config(self): global test_config_file f = StringIO(test_config_file) config = paramiko.util.parse_ssh_config(f) self.assertEqual( config._config, [ {"host": ["*"], "config": {}}, { "host": ["*"], "config": { "identityfile": ["~/.ssh/id_rsa"], "user": "robey", }, }, { "host": ["*.example.com"], "config": {"user": "bjork", "port": "3333"}, }, {"host": ["*"], "config": {"crazy": "something dumb"}}, { "host": ["spoo.example.com"], "config": {"crazy": "something else"}, }, ], ) def test_host_config(self): global test_config_file f = StringIO(test_config_file) config = paramiko.util.parse_ssh_config(f) for host, values in { "irc.danger.com": { "crazy": "something dumb", "hostname": "irc.danger.com", "user": "robey", }, "irc.example.com": { "crazy": "something dumb", "hostname": "irc.example.com", "user": "robey", "port": "3333", }, "spoo.example.com": { "crazy": "something dumb", "hostname": "spoo.example.com", "user": "robey", "port": "3333", }, }.items(): values = dict( values, hostname=host, identityfile=[os.path.expanduser("~/.ssh/id_rsa")], ) self.assertEqual( paramiko.util.lookup_ssh_host_config(host, config), values ) def test_generate_key_bytes(self): x = paramiko.util.generate_key_bytes( sha1, b"ABCDEFGH", "This is my secret passphrase.", 64 ) hex = "".join(["%02x" % byte_ord(c) for c in x]) self.assertEqual( hex, "9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b", ) def test_host_keys(self): with open("hostfile.temp", "w") as f: f.write(test_hosts_file) try: hostdict = paramiko.util.load_host_keys("hostfile.temp") self.assertEqual(2, len(hostdict)) self.assertEqual(1, len(list(hostdict.values())[0])) self.assertEqual(1, len(list(hostdict.values())[1])) fp = hexlify( hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint() ).upper() self.assertEqual(b"E6684DB30E109B67B70FF1DC5C7F1363", fp) finally: os.unlink("hostfile.temp") def test_host_config_expose_issue_33(self): test_config_file = """ Host www13.* Port 22 Host *.example.com Port 2222 Host * Port 3333 """ f = StringIO(test_config_file) config = 
paramiko.util.parse_ssh_config(f) host = "www13.example.com" self.assertEqual( paramiko.util.lookup_ssh_host_config(host, config), {"hostname": host, "port": "22"}, ) def test_eintr_retry(self): self.assertEqual("foo", paramiko.util.retry_on_signal(lambda: "foo")) # Variables that are set by raises_intr intr_errors_remaining = [3] call_count = [0] def raises_intr(): call_count[0] += 1 if intr_errors_remaining[0] > 0: intr_errors_remaining[0] -= 1 raise IOError(errno.EINTR, "file", "interrupted system call") self.assertTrue(paramiko.util.retry_on_signal(raises_intr) is None) se
lf.assertEqual(0, intr_errors_remaining[0]) self.asse
rtEqual(4, call_count[0]) def raises_ioerror_not_eintr(): raise IOError(errno.ENOENT, "file", "file not found") self.assertRaises( IOError, lambda: paramiko.util.retry_on_signal(raises_ioerror_not_eintr), ) def
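A hedged standalone sketch of the config-lookup API the tests above exercise; it is not part of the test file, and the host and port values are illustrative only.

from paramiko.py3compat import StringIO
import paramiko.util

config = paramiko.util.parse_ssh_config(StringIO("Host *.example.com\n    Port 2222\n"))
# per test_host_config_expose_issue_33 above, lookup merges the matching stanza with the hostname
print(paramiko.util.lookup_ssh_host_config("www.example.com", config))
# expected: {'hostname': 'www.example.com', 'port': '2222'}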
nikkisquared/servo
components/script/dom/bindings/codegen/Configuration.py
Python
mpl-2.0
15,248
0.000787
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from WebIDL import IDLExternalInterface, IDLInterface, WebIDLError class Configuration: """ Represents global configuration state based on IDL parse data and the configuration file. """ def __init__(self, filename, parseData): # Read the configuration file. glbl = {} execfile(filename, glbl) config = glbl['DOMInterfaces'] # Build descriptors for all the interfaces we have in the parse data. # This allows callers to specify a subset of interfaces by filtering # |parseData|. self.descriptors = [] self.interfaces = {} self.maxProtoChainLength = 0 for thing in parseData: # Servo does not support external interfaces. if isinstance(thing, IDLExternalInterface): raise WebIDLError("Servo does not support external interfaces.", [thing.location]) # Some toplevel things are sadly types, and those have an # isInterface that doesn't mean the same thing as IDLObject's # isInterface()... if not isinstance(thing, IDLInterface): continue iface = thing self.interfaces[iface.identifier.name] = iface if iface.identifier.name not in config: # Completely skip consequential interfaces with no descriptor # if they have no interface object because chances are we # don't need to do anything interesting with them. if iface.isConsequential() and not iface.hasInterfaceObject(): continue entry = {} else: entry = config[iface.identifier.name] if not isinstance(entry, list): assert isinstance(entry, dict) entry = [entry] self.descriptors.extend( [Descriptor(self, iface, x) for x in entry]) # Mark the descriptors for which only a single nativeType implements # an interface. for descriptor in self.descriptors: intefaceName = descriptor.interface.identifier.name otherDescriptors = [d for d in self.descriptors if d.interface.identifier.name == intefaceName] descriptor.uniqueImplementation = len(otherDescriptors) == 1 self.enums = [e for e in parseData if e.isEnum()] self.dictionaries = [d for d in parseData if d.isDictionary()] self.callbacks = [c for c in parseData if c.isCallback() and not c.isInterface()] # Keep the descriptor list sorted for determinism. self.descriptors.sort(lambda x, y: cmp(x.name, y.name)) def getInterface(self, ifname): return self.interfaces[ifname] def getDescriptors(self, **filters): """Gets the descriptors that match the given filters.""" curr = self.descriptors for key, val in filters.iteritems(): if key == 'webIDLFile': getter = lambda x: x.interface.filename() elif key == 'hasInterfaceObject': getter = lambda x: x.interface.hasInterfaceObject() elif key == 'isCallback': getter = lambda x: x.interface.isCallback() elif key == 'isJSImplemented': getter = lambda x: x.interface.isJSImplemented() else: getter = lambda x: getattr(x, key) curr = filter(lambda x: getter(x) == val, curr) return curr def getEnums(self, webIDLFile): return filter(lambda e: e.filename() == webIDLFile, self.enums) @staticmethod def _filterForFile(items, webIDLFile=""): """Gets the items that match the given filters.""" if not webIDLFile: return items return filter(lambda x: x.filename() == webIDLFile, items) def getDictionaries(self, webIDLFile=""): return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile) def getCallbacks(self, webIDLFile=""): return self._filterForFile(self.callbacks, webIDLFile=webIDLFile) def getDescriptor(self, interfaceName): """ Gets the appropriate descriptor for the given interface name. 
""" iface = self.getInterface(interfaceName) descriptors = self.getDescriptors(interface=iface) # We should have exactly one result. if len(descriptors) != 1: raise NoSuchDescriptorError("For " + interfaceName + " found " + str(len(descriptors)) + " matches") return descriptors[0] def getDescriptorProvider(self): """ Gets a descriptor provider that can provide descriptors as needed. """ return DescriptorProvider(self) class NoSuchDescriptorError(TypeError): def __init__(self, str): TypeError.__init__(self, str) class DescriptorProvider: """ A way of getting descriptors for interface names """ def __init__(self, config): self.config = config def getDescriptor(self, interfaceName): """ Gets the appropriate descriptor for the given interface name given the context of the current descriptor. """ return self.config.getDescriptor(interfaceName) def MemberIsUnforgeable(member, descriptor): return ((member.isAttr() or member.isMethod()) and not member.isStatic() and (member.isUnforgeable() or bool(descriptor.interface.getExtendedAttribute("Unforgeable")))) class Descriptor(DescriptorProvider): """ Represents a single descriptor for an interface. See Bindings.conf. """ def __init__(self, config, interface, desc): DescriptorProvider.__init__(self, config) self.interface = interface
# Read the desc, and fill in the relevant defaults. ifaceName = self.interface.identifier.name # Callback types do not use JS smart pointers, so we should not use the # built-in rooting mechanisms for them. if self.interface.isCallback(): self.needsRooting = False ty = "%sBinding::%s" % (ifaceName, ifaceName) self.returnType = "Rc<%s>" % ty self.argumentType = "???" self.nativeType = ty else: self.needsRooting = True self.returnType = "Root<%s>" % ifaceName self.argumentType = "&%s" % ifaceName self.nativeType = "*const %s" % ifaceName self.concreteType = ifaceName self.register = desc.get('register', True) self.outerObjectHook = desc.get('outerObjectHook', 'None') self.proxy = False self.weakReferenceable = desc.get('weakReferenceable', False) # If we're concrete, we need to crawl our ancestor interfaces and mark # them as having a concrete descendant. self.concrete = (not self.interface.isCallback() and not self.interface.getExtendedAttribute("Abstract")) self.hasUnforgeableMembers = (self.concrete and any(MemberIsUnforgeable(m, self) for m in self.interface.members)) self.operations = { 'IndexedGetter': None, 'IndexedSetter': None, 'IndexedDeleter': None, 'NamedGetter': None, 'NamedSetter': None, 'NamedDeleter': None, 'Stringifier': None, } def addOperation(operation, m): if not self.operations[operation]: self.operations[operation] = m # Since stringifiers go on the prototype, we only need to worry # about our own stringifier, not those of our ancestor interfaces. for m in self.interface.members: if m.isMethod() and m.isStringifier():
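A hedged usage sketch of the Configuration API above; 'Bindings.conf' and parse_data stand in for the real code-generator inputs, and 'Node' is just an example interface name.

config = Configuration('Bindings.conf', parse_data)      # parse_data: WebIDL parse results (assumed)
callbacks = config.getDescriptors(isCallback=True)       # filter descriptors by any attribute
node_descriptor = config.getDescriptor('Node')           # exactly one match, else NoSuchDescriptorError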
fegonda/icon_demo
code/model/unet/ff.py
Python
mit
36,759
0.014935
import os import sys import skimage.transform import skimage.exposure import time import glob import numpy as np import mahotas import random import matplotlib import matplotlib.pyplot as plt import scipy import scipy.ndimage import json from scipy.ndimage.filters import maximum_filter base_path = os.path.dirname(__file__) sys.path.insert(1,os.path.join(base_path, '../../common')) sys.path.insert(2,os.path.join(base_path, '../../database')) from utility import Utility from settings import Paths from project import Project from paths import Paths from db import DB # the idea is to grow the labels to cover the whole membrane # image and label should be [0,1] def adjust_imprecise_boundaries(image, label, number_iterations=5): label = label.copy() label_orig = label.copy() for i in xrange(number_iterations): # grow labels by one pixel label = maximum_filter(label, 2) # only keep pixels that are on dark membrane non_valid_label = np.logical_and(label==1, image>0.7) label[non_valid_label] = 0 # make sure original labels are preserved label = np.logical_or(label==1, label_orig==1) return label def deform_images(image1, image2, image3=None): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed if np.max(image1) < 1.1:
image1 = np.uint8(image1*255) image2 = np.uint8(image2*255) if not image3 is None: image3 = np.uint8(image3*255) displacement_x = np.random.normal(size=image1.shape, scale=10) displacement_y = np.random.normal(size=image1.shape, scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,image1.s
hape[0]), np.arange(0,image1.shape[1]), indexing='ij') displacement_x = coords_x.flatten() + scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() + scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed1 = apply_deformation(image1, coordinates) / 255.0 deformed2 = apply_deformation(image2, coordinates) / 255.0 if not image3 is None: deformed3 = apply_deformation(image3, coordinates) return (deformed1, deformed2, deformed3) return (deformed1, deformed2) def deform_images_list(images): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed displacement_x = np.random.normal(size=images.shape[:2], scale=10) displacement_y = np.random.normal(size=images.shape[:2], scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij') displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed = images.copy() for i in xrange(images.shape[2]): deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates) return deformed def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005 if not doClahe: sortedValues = np.sort( img.ravel()) minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))]) maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))]) normImg = np.float32(img - minVal) * (255 / (maxVal-minVal)) normImg[normImg<0] = 0 normImg[normImg>255] = 255 output = (np.float32(normImg) / 255.0) return output else: output = skimage.exposure.equalize_adapthist(img) return output def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random): start_time = time.time() if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'): pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' else: pathPrefix = '/n/pfister_lab/vkaynig/' img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif' img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_label = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) ) whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float) whole_set_labels = np.zeros(nsamples, dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' 
counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) for img_index in xrange(np.shape(img_files_gray)[0]): img = mahotas.imread(img_files_gray[img_index]) img = normalizeImage(img) grayImages[:,:,img_index] = img label_img = mahotas.imread(img_files_label[img_index]) labelImages[:,:,img_index] = label_img mask_img = mahotas.imread(img_files_backgroundMask[img_index]) maskImages[:,:,img_index] = mask_img for img_index in xrange(np.shape(img_files_gray)[0]): img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] mask_img = maskImages[:,:,img_index] #get rid of invalid image borders border_patch = np.int(np.ceil(patchSize/2.0)) border = np.int(np.ceil(np.sqrt(2*(border_patch**2)))) label_img[:border,:] = 0 #top label_img[-border:,:] = 0 #bottom label_img[:,:border] = 0 #left label_img[:,-border:] = 0 #right mask_img[:border,:] = 0 mask_img[-border:,:] = 0 mask_img[:,:border] = 0 mask_img[:,-border:] = 0 membrane_indices = np.nonzero(label_img) non_membrane_indices = np.nonzero(mask_img) positiveSample = True for i in xrange(nsamples_perImage): if counter >= nsamples: break if positiveSample: randmem = random.choice(xrange(len(membrane_indices[0]))) (row,col) = (membrane_indices[0][randmem], membrane_indices[1][randmem]) label = 1.0 positiveSample = False else: randmem = random.choice(xrange(len(non_membrane_indices[0]))) (row,col) = (non_membrane_indices[0][randmem], non_membrane_indices[1][randmem]) label = 0.0 positiveSample = True imgPatch = img[row-border+1:row+border, col-border+1:col+border] imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360))) imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1] if random.random() < 0.5:
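A hedged sketch of how the preprocessing helpers defined above fit together; the file names are hypothetical, and mahotas/numpy are the libraries the module already imports.

img = normalizeImage(mahotas.imread('images/train/0000.tif'))               # grayscale scaled to [0, 1]
label = np.float32(mahotas.imread('labels/membranes_nonDilate/train/0000.tif') > 0)
label = adjust_imprecise_boundaries(img, label, number_iterations=5)        # grow labels onto dark membrane
img_d, label_d = deform_images(img, label)                                  # random smooth deformation of both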
simonolander/euler
euler-169-sum-of-powers-of-2.py
Python
mit
1,172
0
""" sum(2 * 2**i for i in range(i)) == 2 * (2**i - 1) == n i == log_2(n // 2 + 1) """ from math import ceil, log import time def count_ways(n, current_power=None, memo=None): if memo is None: memo = {} if current_power is None: current_power = ceil(log(n // 2 + 1, 2)) key = (n, current_power) if key in memo: return memo[key] current_term = 2 ** current_power max_available = 2 * (2 ** (current_power + 1) - 1) assert n <= max_available next_max_available = 2 * (2 ** current_power - 1) ans = 0 if n >= 2 * curren
t_term: if n == 2 * current_term: ans += 1 else: ans += count_ways(n - 2 * current_term, current_power - 1, memo) if n >= current_term:
if n == current_term: ans += 1 elif n - current_term <= next_max_available: ans += count_ways(n - current_term, current_power - 1, memo) if n <= next_max_available: ans += count_ways(n, current_power - 1, memo) memo[key] = ans return ans t0 = time.time() print(count_ways(10 ** 25)) t1 = time.time() print('Total time:', (t1 - t0) * 1000, 'ms')
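A quick sanity check against the worked example in the Project Euler 169 statement, which gives f(10) = 5 (the five expansions being 8+2, 8+1+1, 4+4+2, 4+4+1+1 and 4+2+2+1+1):

assert count_ways(10) == 5   # matches the f(10) value quoted in the problem statement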
tobspr/Panda3D-Bam-Exporter
src/ExportLog.py
Python
mit
2,156
0.004174
import sys

import bpy
from bpy.props import StringProperty


class ExportLog(object):
    """ Class which tracks warnings and errors during export """

    WARNING = "Warning"
    ERROR = "Error"

    MESSAGE_SEPERATOR = "\n"
    SEVERITY_DIVIDER = "|#|"

    EXPORTED_MESSAGE_QUEUE = []

    def __init__(self):
        self._message_queue = []

    def info(self, *args):
        """ Adds a new info, this will not be logged but just printed to stdout """
        print("Info:", *args)

    def warning(self, *args):
        """ Adds a new warning to the log """
        self._add_entry(self.WARNING, *args)

    def error(self, *args):
        """ Adds a new error to the log """
        self._add_entry(self.ERROR, *args)

    def _add_entry(self, severity, *args):
        """ Internal method to append a new entry to the message queue """
        content = ' '.join([str(i) for i in args])
        self._message_queue.append((severity, content))
        print(severity + ":", content, file=sys.stderr)

    def report(self):
        """ Shows a dialog with all warnings and errors, but only in case there were some """
        if self._message_queue:
            ExportLog.EXPORTED_MESSAGE_QUEUE = self._message_queue
            bpy.ops.pbe_export.status()


class OperatorExportStatus(bpy.types.Operator):
    bl_idname = "pbe_export.status"
    bl_label = "Export Status"

    def execute(self, context):
        wm = context.window_manager
        return wm.invoke_popup(self, width=800, height=400)

    def draw(self, context):
        self.layout.row().label("Export status:")
        self.layout.row()
        for severity, message in ExportLog.EXPORTED_MESSAGE_QUEUE:
            row = self.layout.row()
            message = message.replace("\n", "")
            row.label(message, icon="CANCEL" if severity == ExportLog.ERROR else "ERROR")
        self.layout.row()


def register():
    bpy.utils.register_class(OperatorExportStatus)
    #bpy.utils.register_class(OperatorExportStatusOk)


def unregister():
    bpy.utils.unregister_class(OperatorExportStatus)
    #bpy.utils.unregister_class(OperatorExportStatusOk)
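A hedged sketch of how an exporter might drive ExportLog, assuming register() has already run inside Blender so the status operator exists:

log = ExportLog()
log.info("Exporting scene", "MyScene")            # printed to stdout only, never queued
log.warning("Object", "Cube", "has no material")  # queued as Warning
log.error("Output path is not writable")          # queued as Error
log.report()                                      # queue is non-empty, so the pbe_export.status popup opens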
rdonnelly/ultimate-league-app
src/ultimate/user/views.py
Python
bsd-3-clause
4,712
0.004032
from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.db.models import Q from django.db.transaction import atomic from django.http import HttpResponseRedirect from django.shortcuts import render from django.template import RequestContext from django.utils import timezone from ultimate.leagues.models import Game, League from ultimate.user.models import Player, PlayerRatings from ultimate.forms import EditPlayerForm, EditPlayerRatingsForm, EditProfileForm, SignupForm @login_required def index(request): leagues = League.objects.filter(state__in=['closed', 'open', 'preview']).order_by('league_start_date') leagues = [l for l in leagues if l.is_visible(request.user)] future_games = Game.objects.filter( Q(league__in=leagues) & Q(date__gte=timezone.now().date()) & Q(teams__teammember__user=request.user) ).order_by('date') future_games = [game for game in future_games if game.get_display_teams().exists()] try: next_game = future_games.pop(0) except (IndexError, Game.DoesNotExist) as e: next_game = None try: following_game = future_games.pop(0) except (IndexError, Game.DoesNotExist) as e: following_game = None registrations = [] for league in leagues: for registration in league.get_registrations_for_user(request.user): registrations.append(registration) return render(request, 'user/index.html', { 'current_leagues': leagues, 'following_game': following_game, 'next_game': next_game, 'registrations': registrations }) @atomic def signup(request): form = None if request.method == 'POST': form = SignupForm(request.POST) if form.is_valid(): user = form.save() Player.objects.get_or_create(use
r=user, defaults={'date_of_birth': form.cleaned_data.get('date_of_birth'), 'gender': form.cleaned_data.get('gender')}) messages.success(request, 'Your account was created. You may now log in.') return HttpResponseRedirect(reverse('user')) else: messages.error(request, 'There was an error on the form you submitted.') if not form: form = SignupForm()
return render(request, 'user/signup.html', {'form': form}) @login_required def editprofile(request): try: player = Player.objects.get(user=request.user) except Player.DoesNotExist: player = Player(user=request.user) if request.method == 'POST': form = EditProfileForm(request.POST, instance=request.user) if form.is_valid(): form.save(commit=False) player_form = EditPlayerForm(request.POST, instance=player) if player_form.is_valid(): form.save() player_form.save() messages.success(request, 'Your profile was updated successfully.') return HttpResponseRedirect(reverse('editprofile')) else: messages.error(request, 'There was an error on the form you submitted.') else: player_form = EditPlayerForm(request.POST, instance=player) messages.error(request, 'There was an error on the form you submitted.') else: form = EditProfileForm(instance=request.user) player_form = EditPlayerForm(instance=player) return render(request, 'user/editprofile.html', {'form': form, 'player_form': player_form}) @login_required def editratings(request): try: ratings = PlayerRatings.objects.get(user=request.user, submitted_by=request.user, ratings_type=PlayerRatings.RATING_TYPE_USER) except PlayerRatings.DoesNotExist: ratings = None if request.method == 'POST': form = EditPlayerRatingsForm(request.POST, instance=ratings) if form.is_valid(): instance = form.save(commit=False) instance.ratings_type = PlayerRatings.RATING_TYPE_USER instance.submitted_by = request.user instance.updated = timezone.now() instance.user = request.user instance.save() messages.success(request, 'Your ratings were updated successfully.') return HttpResponseRedirect(reverse('editratings')) else: messages.error(request, 'There was an error on the form you submitted.') else: form = EditPlayerRatingsForm(instance=ratings) return render(request, 'user/editratings.html', { 'form': form } )
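A hypothetical urlconf wiring for the views above; the names 'user', 'editprofile' and 'editratings' are the ones the views reverse(), while the URL patterns themselves are assumptions.

from django.conf.urls import url
from ultimate.user import views

urlpatterns = [
    url(r'^$', views.index, name='user'),                              # dashboard with upcoming games
    url(r'^signup/$', views.signup, name='signup'),
    url(r'^profile/edit/$', views.editprofile, name='editprofile'),
    url(r'^ratings/edit/$', views.editratings, name='editratings'),
]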
svp-dev/slcore
slc/tools/slc/mt/mipsel/regdefs.py
Python
gpl-3.0
87
0.034483
class RegMagic:
    fixed_registers = []


regmagic = RegMagic()

__all__ = ['regmagic']
mabuchilab/QNET
tests/algebra/test_state_algebra.py
Python
mit
18,851
0.000265
import unittest from sympy import sqrt, exp, I, pi, IndexedBase, symbols, factorial from qnet.algebra.core.abstract_algebra import _apply_rules from qnet.algebra.core.scalar_algebra import ( ScalarValue, KroneckerDelta, Zero, One) from qnet.algebra.toolbox.core import temporary_rules from qnet.algebra.core.operator_algebra import ( OperatorSymbol, LocalSigma, IdentityOperator, OperatorPlus) from qnet.algebra.library.spin_algebra import ( Jz, Jplus, Jminus, SpinSpace,SpinBasisKet) from qnet.algebra.library.fock_operators import ( Destroy, Create, Phase, Displace) from qnet.algebra.core.hilbert_space_algebra import LocalSpace from qnet.algebra.core.state_algebra import ( KetSymbol, ZeroKet, KetPlus, ScalarTimesKet, CoherentStateKet, TrivialKet, TensorKet, BasisKet, Bra, OperatorTimesKet, BraKet, KetBra, KetIndexedSum) from qnet.algebra.core.exceptions import UnequalSpaces from qnet.utils.indices import ( IdxSym, FockIndex, IntIndex, StrLabel, FockLabel, SymbolicLabelBase, IndexOverFockSpace, IndexOverRange, SpinIndex) from qnet.algebra.pattern_matching import wc import pytest class TestStateAddition(unittest.TestCase): def testAdditionToZero(self): hs = LocalSpace("hs") a = KetSymbol("a", hs=hs) z = ZeroKet assert a+z == a assert z+a == a assert z+z == z assert z != 0 assert z.is_zero def testAdditionToOperator(self): hs = LocalSpace("hs") a = KetSymbol("a", hs=hs) b = KetSymbol("b", hs=hs) assert a + b == b + a assert a + b == KetPlus(a,b) def testSubtraction(self): hs = LocalSpace("hs") a = KetSymbol("a", hs=hs) b = KetSymbol("b", hs=hs) z = ZeroKet lhs = a - a assert lhs == z lhs = a - b rhs = KetPlus(a, ScalarTimesKet(-1,b)) assert lhs == rhs def testHilbertSpace(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) with pytest.raises(UnequalSpaces): a.__add__(b) def testEquality(self): h1 = LocalSpace("h1") assert (CoherentStateKet(10., hs=h1) + CoherentStateKet(20., hs=h1) == CoherentStateKet(20., hs=h1) + CoherentStateKet(10., hs=h1)) class TestTensorKet(unittest.TestCase): def testIdentity(self): h1 = LocalSpace("h1") a = KetSymbol("a", hs=h1) id = TrivialKet assert a * id == a assert id * a == a def testOrdering(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) assert a * b == TensorKet(a,b) assert a * b == b * a def testHilbertSpace(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) assert a.space == h1 assert (a * b).space == h1*h2 def testEquality(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") assert (CoherentStateKet(1, hs=h1) * CoherentStateKet(2, hs=h2) == CoherentStateKet(2, hs=h2) * CoherentStateKet(1, hs=h1)) class TestScalarTimesKet(unittest.TestCase): def testZeroOne(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) z = ZeroKet assert a+a == 2*a assert a*1 == a assert 1*a == a assert a*5 == ScalarTimesKet(5, a) assert 5*a == a*5 assert 2*a*3 == 6*a assert a*5*b == ScalarTimesKet(5, a*b) assert a*(5*b) == ScalarTimesKet(5, a*b) assert 0 * a == z assert a * 0 == z assert 10 * z == z def testScalarCombination(self): a = KetSymbol("a", hs="h1") assert a+a == 2*a assert 3 * a + 4 * a == 7 * a assert (CoherentStateKet("1", hs=1) + CoherentStateKet("1", hs=1) == 2 * CoherentStateKet("1", hs=1)) def testHilbertSpace(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) assert (5*(a * b)).space == h1*h2 class 
TestOperatorTimesKet(unittest.TestCase): def testZeroOne(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) A = OperatorSymbol("A", hs=h1) Ap = OperatorSymbol("Ap", hs=h1) B = OperatorSymbol("B", hs=h2) assert IdentityOperator*a == a assert A * (Ap * a) == (A * Ap) * a assert (A * B) * (a * b) == (A * a) * (B * b) def testScalarCombination(self): a = KetSymbol("a", hs="h1") assert a+a == 2*a assert 3 * a + 4 * a == 7 * a assert (CoherentStateKet("1", hs=1) + CoherentStateKet("1", hs=1) == 2 * CoherentStateKet("1", hs=1)) def testHilbertSpace(self): h1 = LocalSpace("h1") h2 = LocalSpace("h2") a = KetSymbol("a", hs=h1) b = KetSymbol("b", hs=h2) assert (5*(a * b)).space == h1*h2 class TestLocalOperatorKetRelations(unittest.TestCase): def testCreateDestroy(self): hs1 = LocalSpace(1) assert ( Create(hs=hs1) * BasisKet(2, hs=hs1) == sqrt(3) * BasisKet(3, hs=hs1)) assert ( Destroy(hs=hs1) * BasisKet(2, hs=hs1) == sqrt(2) * BasisKet(1, hs=hs1)) assert ( Destroy(hs=hs1) * BasisKet(0, hs=hs1) == ZeroKet) coh = CoherentStateKet(10., hs=hs1) a = Destroy(hs=hs1) lhs = a * coh rhs = 10 * coh assert lhs == rhs def testSpin(self): j = 3 h = SpinSpace('j', spin=j) assert (Jplus(hs=h) * BasisKet('+2', hs=h) == sqrt(j*(j+1)-2*(2+1)) * BasisKet('+3', hs=h)) assert (Jminus(hs=h) * BasisKet('+2', hs=h) == sqrt(j*(j+1)-2*(2-1)) * BasisKet('+1', hs=h)) assert Jz(hs=h) * BasisKet('+2', hs=h) == 2 * BasisKet('+2', hs=h) tls = SpinSpace('tls', spin='1/2', basis=('-', '+')) assert ( Jplus(hs=tls) * BasisKet('-', hs=tls) == BasisKet('+', hs=tls)) assert ( Jminus(hs=tls) * BasisKet('+', hs=tls) == BasisKet('-', hs=tls)) assert ( Jz(hs=tls) * BasisKet('+', hs=tls) == BasisKet('+', hs=tls) / 2) assert ( Jz(hs=tls) * BasisKet('-', hs=tls) == -BasisKet('-', hs=tls) / 2) def testPhase(self): hs1 = LocalSpace(1) assert (Phase(5, hs=hs1) * BasisKet(3, hs=hs1) == exp(I * 15) * BasisKet(3, hs=hs1)) lhs = Phase(pi, hs=hs1) * CoherentStateKet(3., hs=hs1) rhs = CoherentStateKet(-3., hs=hs1) assert lhs.__class__ == rhs.__class__ assert lhs.space == rhs.space assert abs(lhs.ampl - rhs.ampl) < 1e-14 def testDisplace(self):
hs1 = LocalSpace(1) assert (Displace(5 + 6j, hs=hs1) * CoherentStateKet(3., hs=hs1) == exp(I * ((5+6j)*3).imag) * CoherentStateKet(8 + 6j, hs=hs1)) assert (Displace(5 + 6j, hs=hs1) * BasisKet(0, hs=hs1) == CoherentStateKet(5+6j, hs=hs1)) def testLocalSigma
Pi(self): assert (LocalSigma(0, 1, hs=1) * BasisKet(1, hs=1) == BasisKet(0, hs=1)) assert (LocalSigma(0, 0, hs=1) * BasisKet(1, hs=1) == ZeroKet) def testActLocally(self): hs1 = LocalSpace(1) hs2 = LocalSpace(2) assert ((Create(hs=hs1) * Destroy(hs=hs2)) * (BasisKet(2, hs=hs1) * BasisKet(1, hs=hs2)) == sqrt(3) * BasisKet(3, hs=hs1) * BasisKet(0, hs=hs2)) def testOperatorTensorProduct(self): hs1 = LocalSpace(1) hs2 = LocalSpace(2) assert ((Create(hs=hs1)*Destroy(hs=hs2)) * (BasisKet(0, hs=hs1) * BasisKet(1, hs=hs2)) == BasisKet(1, hs=hs1) * BasisKet(0, hs=hs2)) def testOperatorProduct(self): hs1 = LocalSpace(1) hs2 = Lo
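A hedged standalone sketch of the ladder-operator rules asserted in testCreateDestroy above, using the same imports as the test module:

from sympy import sqrt
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
from qnet.algebra.core.state_algebra import BasisKet
from qnet.algebra.library.fock_operators import Create, Destroy

hs = LocalSpace(1)
assert Create(hs=hs) * BasisKet(2, hs=hs) == sqrt(3) * BasisKet(3, hs=hs)   # raising: sqrt(n+1)|n+1>
assert Destroy(hs=hs) * BasisKet(2, hs=hs) == sqrt(2) * BasisKet(1, hs=hs)  # lowering: sqrt(n)|n-1>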
DarthMaulware/EquationGroupLeaks
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/windows/sentinel/table_print.py
Python
unlicense
3,527
0.004536
from __future__ import print_function from __future__ import division ALIGN_LEFT = '<' ALIGN_CENTER = '_' ALIGN_RIGHT = '>' def pprint(data, header=None, dictorder=None, align=None, output_file=None): if ((dict is type(data[0])) and (dictorder is None)): dictorder = data[0].keys() if ((dict is type(data[0])) and (header is None)): header = data[0].keys() (sdata, align) = makeStrings(data, dictorder, align) (widths, percents) = calcSize(sdata, header) output = '' if header: for i in range(len(header)): output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) if ((widths[i] - len(header[i])) % 2): output += ' ' if percents[i]: output += (' ' * (percents[i] - header[i].count('%'))) output += '|' output += '\n' for i in range(len(widths)): output += ('+-' + ('-' * ((widths[i] + 1) + percents[i]))) output += '+' output += '\n' for j in range(len(sdata)): d = sdata[j] a = align[j] for i in range(len(d)): if (a[i] == ALIGN_RIGHT): output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ') elif (a[i] == ALIGN_CENTER): output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) if ((widths[i] - len(d[i])) % 2):
output += ' ' else: output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1))) if percents[i]: output += (' ' * (percents[i] - d[i].count('%'))) output += '|' output += '\n' if output_file:
with open(output_file, 'wb') as output_handle: output_handle.write(output) else: print(output, end='') def makeStrings(data, dictOrder, align): r = [] a = ([] if (align is None) else None) for i in data: c = [] ac = [] if dictOrder: for k in dictOrder: c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')]) if (a is not None): ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT]) else: for k in i: c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')]) if (a is not None): ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT]) r += [c] if (a is not None): a += [ac] return (r, (a if (a is not None) else align)) def calcSize(data, header): widths = range(len(data[0])) percents = range(len(data[0])) for i in widths: widths[i] = 0 percents[i] = 0 if header: for i in range(len(header)): r = len(header[i]) if (r > widths[i]): widths[i] = r r = header[i].count('%') if (r > percents[i]): percents[i] = r for d in data: for i in range(len(d)): r = len(d[i]) if (r > widths[i]): widths[i] = r r = d[i].count('%') if (r > percents[i]): percents[i] = r return (widths, percents)
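A hedged usage sketch of pprint above (the module targets Python 2, judging by its use of list-assignable range() results in calcSize); the row data is made up.

rows = [
    {'name': 'alpha.bin', 'size': 10},
    {'name': 'beta.bin', 'size': 2048},
]
# dictorder fixes the column order; header supplies the column titles
pprint(rows, header=['Name', 'Size'], dictorder=['name', 'size'])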
hva/warehouse
warehouse/backup/views.py
Python
mit
3,061
0.003495
# coding=utf-8 import os import time from gzip import GzipFile from StringIO import StringIO from django.shortcuts import render_to_response, redirect from django.template import RequestContext from django.http import HttpResponse from django.core.management import call_command from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.contrib.auth.decorators import login_required from django.contrib import messages from .forms import BackupImportForm from .uploadhandler import TemporaryGzipFileUploadHandler breadcrumbs = [ ['warehouse.skill.views.home', 'главная'], ['warehouse.backup.views.home', 'резервное копирование'], ] info = [ { 'view': 'warehouse.backup.views.export_gz', 'title': 'Экспорт', 'text': 'Позволяет сохранить данные из системы в файл для последующего восстановления.', 'cls': 'large-4', }, { 'view': 'warehouse.backup.views.import_gz', 'title': 'Импорт', 'text': 'Позволяет восстановить данные из файла. Внимание! Все существующие записи будут безвозвратно утеряны!', 'cls': 'large-4', }, ] ##### HOME ##### @login_required def home(request): return render_to_response('backup/home.html', {'breadcrumbs': breadcrumbs, 'info': info}, RequestContext(request)) ##### EXPORT ##### @login_required def export_gz(request): filename = 'skill__%s' % time.strftime('%Y%m%d_%H%M%S.gz') response = HttpResponse(mimetype='application/force-download') response['Content-Disposition'] = 'attachment; filename=%s' % filename with GzipFile(fileobj=response, mode='w', filename='skill.json') as gz_stream: call_command('dump
data', 'auth', 'skill', stdout=gz_stream, natural=True, indent=2) return response ##### IMPORT ##### @log
in_required @csrf_exempt def import_gz(request): # changing suffix to '.gz' for temp file names request.upload_handlers = [TemporaryGzipFileUploadHandler()] return _import_gz(request) @csrf_protect def _import_gz(request): if request.method == 'POST': form = BackupImportForm(request.POST, request.FILES) if form.is_valid(): message = _process_file(request.FILES['file']) messages.success(request, message) return redirect('warehouse.backup.views.home') else: form = BackupImportForm() cur = ['warehouse.backup.views.import_gz', 'импорт'] return render_to_response( 'backup/import.html', {'form': form, 'breadcrumbs': breadcrumbs + [cur]}, RequestContext(request) ) def _process_file(f): file_path = f.temporary_file_path() if not f.closed: f.close() stream = StringIO() call_command('loaddata', file_path, stdout=stream) message = stream.getvalue() stream.close() os.unlink(file_path) return message
baylesj/chopBot3000
scripts/print_bot_id.py
Python
mit
604
0
#!/usr/bin/env python
import os

from slackclient import SlackClient

BOT_NAME = 'chopbot3000'

slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))

if __name__ == "__main__":
    api_call = slack_client.api_call("users.list")
    if api_call.get('ok'):
        # retrieve all users so we can find our bot
        users = api_call.get('members')
        for user in users:
            if 'name' in user and user.get('name') == BOT_NAME:
                print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
    else:
        print("could not find bot user with the name " + BOT_NAME)
googleapis/python-spanner
google/cloud/spanner_v1/database.py
Python
apache-2.0
45,568
0.000812
# Copyright 2016 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """User friendly container for Cloud Spanner Database.""" import copy import functools import grpc import logging import re import threading import google.auth.credentials from google.api_core.retry import Retry from google.api_core.retry import if_exception_type from google.cloud.exceptions import NotFound from google.api_core.exceptions import Aborted from google.api_core import gapic_v1 from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest from google.cloud.spanner_admin_database_v1 import Database as DatabasePB from google.cloud.spanner_admin_database_v1 import EncryptionConfig from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1 import TransactionOptions from google.cloud.spanner_v1 import RequestOptions from google.cloud.spanner_v1 import SpannerClient from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1.pool import BurstyPool from google.cloud.spanner_v1.pool import SessionCheckout from google.cloud.spanner_v1.session import Session from google.cloud.spanner_v1.snapshot import _restart_on_unavailable from google.cloud.spanner_v1.snapshot import Snapshot from google.cloud.spanner_v1.streamed import StreamedResultSet from google.cloud.spanner_v1.services.spanner.transports.grpc import ( SpannerGrpcTransport, ) from google.cloud.spanner_v1.table import Table SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data" _DATABASE_NAME_RE = re.compile( r"^projects/(?P<project>[^/]+)/" r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/" r"databases/(?P<database_id>[a-z][a-z0-9_\-]*[a-z0-9])$" ) _DATABASE_METADATA_FILTER = "name:{
0}/operations/" _LIST_TABLES_QUERY = """SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE SPANNER_STATE = 'COMMITTED' """ DEFAULT_RETRY_BACKOFF = Retry(initial=0.02, maximum=32, multiplier=1.3) class Database(object): """Representation of a Cloud Spanner Database. We can use a :class:`Database` to: * :meth:`create` the database * :meth:`reload` the database * :meth:`update` the dat
abase * :meth:`drop` the database :type database_id: str :param database_id: The ID of the database. :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` :param instance: The instance that owns the database. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the CREATE DATABASE statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. If not passed, the database will construct an instance of :class:`~google.cloud.spanner_v1.pool.BurstyPool`. :type logger: :class:`logging.Logger` :param logger: (Optional) a custom logger that is used if `log_commit_stats` is `True` to log commit statistics. If not passed, a logger will be created when needed that will log the commit statistics to stdout. :type encryption_config: :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig` or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig` or :class:`dict` :param encryption_config: (Optional) Encryption configuration for the database. If a dict is provided, it must be of the same form as either of the protobuf messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig` or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig` """ _spanner_api = None def __init__( self, database_id, instance, ddl_statements=(), pool=None, logger=None, encryption_config=None, ): self.database_id = database_id self._instance = instance self._ddl_statements = _check_ddl_statements(ddl_statements) self._local = threading.local() self._state = None self._create_time = None self._restore_info = None self._version_retention_period = None self._earliest_version_time = None self._encryption_info = None self._default_leader = None self.log_commit_stats = False self._logger = logger self._encryption_config = encryption_config if pool is None: pool = BurstyPool() self._pool = pool pool.bind(self) @classmethod def from_pb(cls, database_pb, instance, pool=None): """Creates an instance of this class from a protobuf. :type database_pb: :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` :param database_pb: A instance protobuf object. :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` :param instance: The instance that owns the database. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`Database` :returns: The database parsed from the protobuf response. :raises ValueError: if the instance name does not match the expected format or if the parsed project ID does not match the project ID on the instance's client, or if the parsed instance ID does not match the instance's ID. """ match = _DATABASE_NAME_RE.match(database_pb.name) if match is None: raise ValueError( "Database protobuf name was not in the " "expected format.", database_pb.name, ) if match.group("project") != instance._client.project: raise ValueError( "Project ID on database does not match the " "project ID on the instance's client" ) instance_id = match.group("instance_id") if instance_id != instance.instance_id: raise ValueError( "Instance ID on database does not match the " "Instance ID on the instance" ) database_id = match.group("database_id") return cls(database_id, instance, pool=pool) @property def name(self): """Database name used in requests. .. 
note:: This property will not change if ``database_id`` does not, but the return value is not cached. The database name is of the form ``"projects/../instances/../databases/{database_id}"`` :rtype: str :returns: The database name. """ return self._instance.name + "/databases/" + self.database_id @property def state(self): """State of this database. :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.Database.State` :returns: an enum describing the state of the database
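A hedged sketch of how a Database like the one above is normally obtained through the public client API; the project, instance and database IDs are placeholders.

from google.cloud import spanner

client = spanner.Client(project="my-project")
instance = client.instance("my-instance")
database = instance.database("my-database")   # returns a Database wrapper like the class above
print(database.name)                          # projects/my-project/instances/my-instance/databases/my-database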
kristerhedfors/xnet
xnet/packages/urllib3/contrib/ntlmpool.py
Python
bsd-3-clause
4,740
0
# urllib3/contrib/ntlmpool.py # Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ try: from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection from logging import getLogger from ntlm import ntlm from urllib3 import HTTPSConnectionPool log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = 'https' def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split('\\', 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' % (self.num_connections, self.host, self.authurl)) headers = {} headers['Connection'] = 'Keep-Alive' req_header = 'Authorization' resp_header = 'www-authenticate' conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = ( 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % reshdr) log.debug('Response data: %s [...]' % res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the res
ponse object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(', ') auth_header_value = None for s in auth_header_values: if s[:5] == 'NTLM ': auth_header_value = s[5:] if auth_header_value is None: raise Exception('Unexpected %s response header: %s' % (re
sp_header, reshdr[resp_header])) # Send authentication message ServerChallenge, NegotiateFlags = \ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags) headers[req_header] = 'NTLM %s' % auth_msg log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % dict(res.getheaders())) log.debug('Response data: %s [...]' % res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception('Server rejected request: wrong ' 'username or password') raise Exception('Wrong server response: %s %s' % (res.status, res.reason)) res.fp = None log.debug('Connection established') return conn def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True): if headers is None: headers = {} headers['Connection'] = 'Keep-Alive' return super(NTLMConnectionPool, self).urlopen(method, url, body, headers, retries, redirect, assert_same_host)
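A hedged usage sketch following the constructor docstring above; the host, credentials and URLs are placeholders.

pool = NTLMConnectionPool('DOMAIN\\user', 'secret', authurl='/ntlm-protected/',
                          host='intranet.example.com', port=443)
# the NTLM handshake runs in _new_conn(); subsequent requests reuse the authenticated socket
response = pool.urlopen('GET', '/ntlm-protected/report')
print(response.status)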
IITBinterns13/edx-platform-dev
cms/envs/dev.py
Python
agpl-3.0
6,301
0.002063
""" This config file runs the simplest dev environment""" # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=W0401, W0614 from .common import * from logsettings import get_logger_config DEBUG = True TEMPLATE_DEBUG = DEBUG LOGGING = get_logger_config(ENV_ROOT / "log", logging_env="dev", tracking_filename="tracking.log", dev_env=True, debug=True) modulestore_options = { 'default_class': 'xmodule.raw_module.RawDescriptor', 'host': 'localhost', 'db': 'xmodule', 'collection': 'modulestore', 'fs_root': GITHUB_REPO_ROOT, 'render_template': 'mitxmako.shortcuts.render_to_string', } MODULESTORE = { 'default': { 'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore', 'OPTIONS': modulestore_options }, 'direct': { 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore', 'OPTIONS': modulestore_options } } # cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store # This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc) CONTENTSTORE = { 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore', 'OPTIONS': { 'host': 'localhost', 'db': 'xcontent', }, # allow for additional options that can be keyed on a name, e.g. 'trashcan' 'ADDITIONAL_OPTIONS': { 'trashcan': { 'bucket': 'trash_fs' } } } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ENV_ROOT / "db" / "mitx.db", } } LMS_BASE = "10.129.50.13:8000" MITX_FEATURES['PREVIEW_LMS_BASE'] = "10.129.50.13:8000" REPOS = { 'edx4edx': { 'branch': 'master', 'origin': '[email protected]:MITx/edx4edx.git', }, 'content-mit-6002x': { 'branch': 'master', # 'origin': '[email protected]:MITx/6002x-fall-2012.git', 'origin': '[email protected]:MITx/content-mit-6002x.git', }, '6.00x': { 'branch': 'master', 'origin': '[email protected]:MITx/6.00x.git', }, '7.00x': { 'branch': 'master', 'origin': '[email protected]:MITx/7.00x.git', }, '3.091x': { 'branch': 'master', 'origin': 'git@g
ithub.com:MITx/3.091x.git', }, } CACHES = { # This is the cache used for most things. Askbot will not work without a # functioning cache -- it relies on caching to load its settings in places. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'mitx_loc_mem_cache', 'KEY_FUNCTION': 'util.memcache.safe_key', },
# The general cache is what you get if you use our util.cache. It's used for # things like caching the course.xml file for different A/B test groups. # We set it to be a DummyCache to force reloading of course.xml in dev. # In staging environments, we would grab VERSION from data uploaded by the # push process. 'general': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', }, 'mongo_metadata_inheritance': { 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', 'LOCATION': '/var/tmp/mongo_metadata_inheritance', 'TIMEOUT': 300, 'KEY_FUNCTION': 'util.memcache.safe_key', } } # Make the keyedcache startup warnings go away CACHE_TIMEOUT = 0 # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' ################################ PIPELINE ################################# PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT) ################################# CELERY ###################################### # By default don't use a worker, execute tasks as if they were local functions CELERY_ALWAYS_EAGER = True ################################ DEBUG TOOLBAR ################################# INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) INTERNAL_IPS = ('127.0.0.1',) DEBUG_TOOLBAR_PANELS = ( 'debug_toolbar.panels.version.VersionDebugPanel', 'debug_toolbar.panels.timer.TimerDebugPanel', 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel', 'debug_toolbar.panels.headers.HeaderDebugPanel', 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel', 'debug_toolbar.panels.sql.SQLDebugPanel', 'debug_toolbar.panels.signals.SignalDebugPanel', 'debug_toolbar.panels.logger.LoggingPanel', 'debug_toolbar_mongo.panel.MongoDebugPanel', # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and # Django=1.3.1/1.4 where requests to views get duplicated (your method gets # hit twice). So you can uncomment when you need to diagnose performance # problems, but you shouldn't leave it on. # 'debug_toolbar.panels.profiling.ProfilingDebugPanel', ) DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False } # To see stacktraces for MongoDB queries, set this to True. # Stacktraces slow down page loads drastically (for pages with lots of queries). DEBUG_TOOLBAR_MONGO_STACKTRACES = True # disable NPS survey in dev mode MITX_FEATURES['STUDIO_NPS_SURVEY'] = False # Enable URL that shows information about the status of variuous services MITX_FEATURES['ENABLE_SERVICE_STATUS'] = True ############################# SEGMENT-IO ################################## # If there's an environment variable set, grab it and turn on Segment.io # Note that this is the Studio key. There is a separate key for the LMS. import os SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY') if SEGMENT_IO_KEY: MITX_FEATURES['SEGMENT_IO'] = True ##################################################################### # Lastly, see if the developer has any local overrides. try: from .private import * # pylint: disable=F0401 except ImportError: pass
natetrue/ReplicatorG
skein_engines/skeinforge-31/fabmetheus_utilities/geometry/creation/circle.py
Python
gpl-2.0
2,658
0.017682
""" Polygon path. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is i
mported as a main module. import __init__ from fabmetheus_utilities.geometry.creation import lineation from fabmetheus_utilities.geometry.geometry_utilities import evaluate from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import euclidean import math __author__ = 'Enrique Perez ([email protected])' __credits__ = 'Art of Illusion <http://www.artofillusion.org/>' __dat
e__ = "$Date: 2008/02/05 $" __license__ = 'GPL 3.0' def getGeometryOutput(xmlElement): "Get vector3 vertexes from attribute dictionary." radius = lineation.getRadiusComplex(complex(1.0, 1.0), xmlElement) sides = evaluate.getSidesMinimumThreeBasedOnPrecisionSides(max(radius.real, radius.imag), xmlElement) loop = [] start = evaluate.getEvaluatedFloatZero('start', xmlElement) start = getWrappedFloat(start, 360.0) extent = evaluate.getEvaluatedFloatDefault(360.0 - start, 'extent', xmlElement) end = evaluate.getEvaluatedFloatDefault(start + extent, 'end', xmlElement) end = getWrappedFloat(end, 360.0) revolutions = evaluate.getEvaluatedFloatOne('revolutions', xmlElement) if revolutions > 1: end += 360.0 * (revolutions - 1) angleTotal = math.radians(start) extent = end - start sidesCeiling = int(math.ceil(abs(sides) * extent / 360.0)) sideAngle = math.radians(extent) / sidesCeiling spiral = lineation.Spiral(0.5 * sideAngle / math.pi, xmlElement) for side in xrange(sidesCeiling + (extent != 360.0)): unitPolar = euclidean.getWiddershinsUnitPolar(angleTotal) vertex = spiral.getSpiralPoint(unitPolar, Vector3(unitPolar.real * radius.real, unitPolar.imag * radius.imag)) angleTotal += sideAngle loop.append(vertex) sideLength = sideAngle * lineation.getAverageRadius(radius) lineation.setClosedAttribute(revolutions, xmlElement) return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop, sideAngle, sideLength), xmlElement) def getGeometryOutputByArguments(arguments, xmlElement): "Get vector3 vertexes from attribute dictionary by arguments." evaluate.setAttributeDictionaryByArguments(['radius', 'start', 'end', 'revolutions'], arguments, xmlElement) return getGeometryOutput(xmlElement) def getWrappedFloat(floatValue, modulo): "Get wrapped float." if floatValue >= modulo: return modulo if floatValue >= 0: return floatValue return floatValue % modulo def processXMLElement(xmlElement): "Process the xml element." lineation.processXMLElementByGeometry(getGeometryOutput(xmlElement), xmlElement)
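A quick illustration of getWrappedFloat, the clamp-and-wrap helper above: values at or above the modulo are clamped to it, while negative values wrap around.

assert getWrappedFloat(45.0, 360.0) == 45.0
assert getWrappedFloat(400.0, 360.0) == 360.0   # clamped to the modulo, not wrapped
assert getWrappedFloat(-90.0, 360.0) == 270.0   # negative angles wrap into [0, modulo)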
mjsauvinen/P4UL
pyLib/mapTools.py
Python
mit
23,790
0.042917
import operator import numpy as np import sys ''' Description: Author: Mikko Auvinen [email protected] Finnish Meteorological Institute ''' # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def readAsciiGridHeader( filename, nHeaderEntries, idx=0 ): fl = open( filename , 'r') name = filename.strip('.asc') # Extract the tile name. hdict = {'id':idx,'name': name } # 'ncols': None # 'nrows': None # 'xllcorner': None # 'yllcorner': None # 'cellsize' : None # 'NODATA_value' : None for i in range(nHeaderEntries): try: s = fl.readline().lower().split() print(' Header line {}: {} '.format(i,s)) hdict[s[0]] = float( s[1] ) except: print('Unexpected ascii grid header format. Exiting.') sys.exit(1) hdict = asciiCenterToCorner( hdict ) idx += 1 fl.close() return hdict, idx # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def asciiCenterToCorner( xdict ): # Resolve the issue involving xllcorner vs xllcenter. Both are allowed by ESRI ASCII standard if( 'xllcenter' in xdict.keys() ): xdict['xllcorner'] = xdict['xllcenter'] - xdict['cellsize']/2. if( 'yllcenter' in xdict.keys() ): xdict['yllcorner'] = xdict['yllcenter'] - xdict['cellsize']/2. return xdict # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def readAsciiGrid( filename, nHeaderEntries ): try: rx = np.loadtxt( filename, skiprows=nHeaderEntries ) # Note! skiprows=6. print(' File {} read successfully.'.format(filename)) except: print(' Could not read ascii grid file: {}. Exiting.'.format(filename)) sys.exit(1) return rx # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def asciiTileToNumpyZ(filename, nHeaderEntries, idx=0): Rdict, idx = readAsciiGridHeader( filename, nHeaderEntries, idx ) R = readAsciiGrid( filename, nHeaderEntries ) Rdict['id'] = idx Rdict['ytlcorner'] = Rdict['yllcorner'] + Rdict['cellsize']* Rdict['nrows'] Rdict['xtlcorner'] = Rdict['xllcorner'] # These are ofter used later. Rdict['GlobOrig'] = np.array([ Rdict['ytlcorner'], Rdict['xtlcorner']]) # [N,E] Rdict['dPx'] = np.array([ Rdict['cellsize'], Rdict['cellsize'] ]) Rdict['R'] = R saveTileAsNumpyZ( filename.strip('.asc'), Rdict ) return Rdict # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def readNumpyZGridData( filename, idx=0 ): Rdict = readNumpyZTile(filename, dataOnly=True) Rxdims=np.array(np.shape(Rdict['R'])) Rdict['R'] = [] RxOrig=Rdict['GlobOrig'] dPx=Rdict['dPx'] name = filename.strip('.npz') # Extract the tile name. hdict = {'id':idx,'name': name, 'ncols':Rxdims[1],'nrows':Rxdims[0],\ 'xtlcorner':RxOrig[1],'ytlcorner':RxOrig[0],\ 'cellsize':int(dPx[0]),'nodata_value':None} idx += 1 return hdict, idx # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def resolutionFromDicts( dictList ): d1 = dictList[0] dPxRef = d1['cellsize'] for d in dictList: dPx = d['cellsize'] if( dPx != dPxRef ): print('The tile resolutions do not match. Exiting.') sys.exit(1) return np.array([dPxRef,dPxRef]) # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def arrangeTileGrid( dictList, fileTypes ): coordList = [] ascii = fileTypes[0]; npz = fileTypes[1] XO_TL = np.zeros(2) # Initialize the Top Left Origin. for d in dictList: # The last two indecies are for row / col addresses. if( ascii ): coordList.append( [d['id'], d['xllcorner'], d['yllcorner'], 0, 0] ) else: # .npz coordList.append( [d['id'], d['xtlcorner'], d['ytlcorner'], 0, 0] ) # Sort the list according to y-values coordListSorted = sorted( coordList, key=operator.itemgetter(2) ) #print(' CoordList y-sorted : {} '.format( coordListSorted )) # Determine the Top Left Origin (y-value). 
ltmp = coordListSorted[-1] # Take the last entry. dtmp = dictList[ltmp[0]] # Fetch the desired dict. ltmp[0] == id. if( ascii ): XO_TL[0]= dtmp['yllcorner']+dtmp['nrows']*dtmp['cellsize'] else: XO_TL[0]= dtmp['ytlcorner'] irow = 0; maxVal = 0. for t in reversed(coordListSorted): if( t[2] >= maxVal ): # t[2] := y-cord. pass else: irow+=1 # Change row t[3] = irow; maxVal = t[2] imax = irow+1 # Store the number of rows. # Sort the list according to x-values coordListSorted = sorted( coordList, key=operator.itemgetter(1) ) #print(' x-sorted : {} '.format( coordListSorted )) # Determine the Top Left Origin (x-value). # This is the same for xllcorner and xtlcorner. ltmp = coordListSorted[0] dtmp = dictList[ltmp[0]] if( ascii ): XO_TL[1]= dtmp['xllcorner'] else: XO_TL[1]= dtmp['xtlcorner'] jcol = 0; minVal = 1.e12 for t in coordListSorted: if( t[1] <= minVal ): # t[1] := x-cord. pass else: jcol+=1 # Change column t[4] = jcol; minVal = t[1] jmax = jcol+1 # Store the number of columns ijList = [] for t in coordListSorted: ijList.append( [ t[0], t[3], t[4] ] )#
id, irow, jcol return ijList, XO_TL, imax, jmax # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def minMaxCoords( xdict , fileTypes ): asc = fileTypes[0]; npz = fileTypes[1] if( asc ): xn = xdict['xllcorner'] yx = xdict['yllcorner']
xx = xdict['xllcorner']+xdict['ncols']*xdict['cellsize'] yn = xdict['yllcorner']-xdict['nrows']*xdict['cellsize'] else: xn = xdict['xtlcorner'] yx = xdict['ytlcorner'] xx = xdict['xtlcorner'] yn = xdict['ytlcorner'] return xn, xx, yn, yx # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def compileTileGrid( dictList, ijList, Mrows, Mcols, fileTypes, nHeaderEntries ): M = [] # An empty array to start with. ascii = fileTypes[0]; npz = fileTypes[1] for i in range(Mrows): for j in range(Mcols): for idTile, irow, jcol in ijList: if(irow == i and jcol == j ): d = dictList[idTile] if( ascii ): r = readAsciiGrid( d['name']+'.asc', nHeaderEntries ) elif( npz ): Rdict = readNumpyZTile(d['name']+'.npz') r=Rdict['R'] Rdict = None # Throw the rest away. M.append(r); r = None print(' M.shape = {}'.format(np.shape(M))) T = None for i in range(Mrows): c1 = i*Mcols; c2 = (i+1)*Mcols print('c1={}, c2={}'.format(c1,c2)) if( T is None ): T = np.hstack(M[c1:c2]) else: T = np.vstack( (T,np.hstack(M[c1:c2])) ) print(' np.shape(T) = {}'.format(np.shape(T))) M = None Rdict = {'R' : T} return Rdict # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def saveTileAsNumpyZ( filename, Rdict): ''' The saved npz file doesn't contain Rdict, but separate numpy arrays matching key names. Therefore np.load(filename) is equal to the saved Rdict. ''' try: np.savez_compressed(filename, **Rdict) print(' {} saved successfully!'.format(filename)) except: print(' Error in saving {}.npz in saveTileAsNumpyZ().'.format(filename.strip('.npz'))) # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def initRdict(Rdict, R=None, dPx=None ): if( R is not None ): Rdict['R'] = R if( dPx is not None ): Rdict['dPx'] = dPx if( 'GlobOrig' not in Rdict ): Rdict['GlobOrig'] = np.array( [0.,0.] ) if('gridRot' not in Rdict ): Rdict['gridRot'] = 0.0 if( ('Rdims' not in Rdict) and (R is not None) ): Rdict['Rdims'] = np.array( R.shape ) return Rdict # =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* def checkDictFormat( Rdict ): # Backwards compatibility for variable name change. if(not('gridRot' in Rdict)): Rdict['gridRot'] = 0.0 if ('XOrig' in Rdict and not('GlobOrig' in Rdict)): Rdict['GlobOrig']=Rdict['XOrig'] # For some reason dPx arrays were saved as 'dpx' in the past hardcoded versions of saveTileAsNumpyZ. if ('dpx' in Rdict and not('dPx' in Rdict)): Rdict['dPx']=Rdict['dpx'] # Add bottom left origin only if the transformation is trivial (i.e. no rotation required). # Otherwise the extractDomainFromTile.py script ought to be used. if(not('GlobOrigBL' in Rdict) and Rdict['gridRo
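The header handling above hinges on two small conversions: an ESRI header given as xllcenter/yllcenter is shifted by half a cell to get the lower-left corner (asciiCenterToCorner), and the top-left origin stored as GlobOrig is the lower-left corner plus nrows * cellsize, in [N, E] order. A small sketch with invented header values, assuming the same key names:

import numpy as np

hdr = {'xllcenter': 500010.0, 'yllcenter': 6700010.0,
       'cellsize': 20.0, 'nrows': 100, 'ncols': 80}    # hypothetical ASCII grid header

# center-of-cell origin -> lower-left corner (asciiCenterToCorner)
hdr['xllcorner'] = hdr['xllcenter'] - hdr['cellsize'] / 2.   # 500000.0
hdr['yllcorner'] = hdr['yllcenter'] - hdr['cellsize'] / 2.   # 6700000.0

# lower-left corner -> top-left origin in [N, E] order (as in asciiTileToNumpyZ)
ytl = hdr['yllcorner'] + hdr['cellsize'] * hdr['nrows']      # 6702000.0
glob_orig = np.array([ytl, hdr['xllcorner']])
print(glob_orig)                                             # [6702000.  500000.]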
Nitr4x/whichCDN
plugins/ErrorServerDetection/behaviors.py
Python
mit
907
0.00882
#!/usr/bin/env python from __future__ import print_function import sys import re from utils import CDNEngine from utils import request if sys.version_info >= (3, 0): import subprocess as commands import urllib.parse as urlparse else: import commands import urlparse def detect(hostname): """ Perfor
ms CDN detection thanks to information disclosure from server error. Parameters ---------- hostname : str Hostname to assess """ print('[+] Error server detection\n') hostname = urlparse.urlparse(hostname).netloc regexp = re.compile('\\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\\b') out = commands.getoutput("host " + hostname) addresses = regexp.finditer(out) for addr in addresses: res = request.do('http://' + addr.group()) i
f res is not None and res.status_code == 500: CDNEngine.find(res.text.lower())
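The detection step above shells out to host(1) and scrapes IPv4 addresses out of its output with a dotted-quad regex before probing each address for a 500 response. A tiny sketch of just the extraction part, using a made-up host output line:

import re

ip_regex = re.compile(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b')
host_output = "example.com has address 93.184.216.34"          # hypothetical `host` output
print([m.group() for m in ip_regex.finditer(host_output)])      # ['93.184.216.34']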
QuLogic/burnman
examples/example_user_input_material.py
Python
gpl-2.0
4,958
0.013312
# BurnMan - a lower mantle toolkit # Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S. # Released under GPL v2 or later. """ example_user_input_material --------------------------- Shows user how to input a mineral of his/her choice without usint the library and which physical values need to be input for BurnMan to calculate :math:`V_P, V_\Phi, V_S` and density at depth. *Specifically uses:* * :class:`burnman.mineral.Mineral` *Demonstrates:* * how to create your own minerals """ import os, sys, numpy as np, matplotlib.pyplot as plt #hack to allow scripts to be placed in subdirectories next to burnman: if not os.path.exists('burnman') and os.path.exists('../burnman'): sys.path.insert(1,os.path.abspath('..')) import burnman # A note about units: all the material parameters are expected to be in plain SI units. # This means that the elastic moduli should be in Pascals and NOT Gigapascals, # and the Debye temperature should be in K not C. Additionally, the reference volume # should be in m^3/(mol molecule) and not in unit cell volume and 'n' should be # the number of atoms per molecule. Frequently in the literature the reference volume # is given in Angstrom^3 per unit cell. To convert this to m^3/(mol of molecule) #you should multiply by 10^(-30) * N_a / Z, where N_a is Avogadro's number and Z is the number of # atoms per unit cell. You can look up Z in many places, including www.mindat.org if __name__ == "__main__": ### input variables ### ####################### #INPUT for method """ choose 'slb2' (finite-strain 2nd order shear modulus, stixrude and lithgow-bertelloni, 2005) or 'slb3 (finite-strain 3rd order shear modulus, stixrude and lithgow-bertelloni, 2005) or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus, matas et al. 2007) or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus, matas et al. 2007) or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature (your choice in geotherm will not matter in this case)) or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature (your choice in geotherm will not matter in this case))""" method = 'slb3' #in form name_of_mineral (burnman.mineral <- creates list with parameters) class own_material (burnman.Mineral): def __init__(self): s
elf.params = { 'name': 'myownmineral', 'equation_of_st
ate': method, 'V_0': 10.844e-6, #Molar volume [m^3/(mole molecules)] #at room pressure/temperature 'K_0': 135.19e9, #Reference bulk modulus [Pa] #at room pressure/temperature 'Kprime_0': 6.04, #pressure derivative of bulk modulus 'G_0': 175.0e9, #reference shear modulus #at room pressure/temperature 'Gprime_0': 1.7, #pressure derivative of shear modulus 'molar_mass': .055845, #molar mass in units of [kg/mol] 'n': 1, #number of atoms per formula unit 'Debye_0': 998.85, #Debye temperature for material. #See Stixrude & Lithgow-Bertelloni, 2005 for values 'grueneisen_0': 1.368, #Gruneisen parameter for material. #See Stixrude & Lithgow-Bertelloni, 2005 for values 'q_0': 0.917, #isotropic strain derivative of gruneisen #parameter. Values in Stixrude & Lithgow-Bertelloni, 2005 'eta_s_0': 3.0 #full strain derivative of gruneisen parameter #parameter. Values in Stixrude & Lithgow-Bertelloni, 2005 } burnman.Mineral.__init__(self) rock = own_material() #seismic model for comparison: (see burnman/seismic.py) seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast() number_of_points = 20 #set on how many depth slices the computations should be done depths = np.linspace(700e3,2800e3, number_of_points) #depths = seismic_model.internal_depth_list() seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths) temperature = burnman.geotherm.brown_shankland(seis_p) # The next line is not required here, because the method is set automatically by defining 'equation_of_state' in mineral.params. This shows an alternative way to set the method later, or reset the method to a different one. rock.set_method(method) print "Calculations are done for:" rock.debug_print() mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_G = \ burnman.velocities_from_rock(rock, seis_p, temperature, \ burnman.averaging_schemes.VoigtReussHill()) [vs_err, vphi_err, rho_err]= \ burnman.compare_chifactor([mat_vs,mat_vphi,mat_rho], [seis_vs,seis_vphi,seis_rho]) print vs_err, vphi_err, rho_err
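The comment block above about units is easy to get wrong in practice, so here is the conversion it describes worked through once: a reference volume quoted in Angstrom^3 per unit cell becomes m^3 per mole of formula units by multiplying by 10^-30 * N_A / Z. The cell volume and Z below are made-up illustration values, not data for any particular mineral:

N_A = 6.02214076e23    # Avogadro's number [1/mol]
Z = 4                  # hypothetical formula units per unit cell
V_cell = 162.3         # hypothetical unit-cell volume [Angstrom^3]

V_0 = V_cell * 1e-30 * N_A / Z   # [m^3 per mole of formula units]
print(V_0)                        # ~2.44e-05, same order of magnitude as the 10.844e-6 used above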
acorg/acmacs-whocc
web/chains-202105/py/directories.py
Python
mit
1,044
0.004789
import sys, os, json from pathlib import Path from acmacs_py.mapi_utils import MapiSettings # ====================================================================== class CladeData: sSubtypeToCladePrefix = {"h1pdm": "clades-A(H1N1)2009pdm", "h3": "clades-A(H3N2)", "bvic": "clades-B/Vic", "byam": "clades-B/Yam"} def __init__(self): self.mapi_settings = MapiSettings("clades.mapi") def entry_names_for_subtype(self, subtype): subtype_prefix = self.sSubtypeToCladePrefix[subtype] names = sorted(name for name in self.mapi_settings.names() if name.startswith(subtype_prefix)) return names def chart_draw_modify(self, *args, **kw): self.mapi_settings.chart_draw_modify(*args, **kw) def chart_draw_reset(self, *args, **kw): self.mapi_settings.chart_draw_reset(*args, **kw) # ====================================================================== def load(app): app["clade_data"] = Cla
deData() # ===================================================================
===
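CladeData.entry_names_for_subtype() above is just a prefix filter over the names defined in clades.mapi. A standalone sketch of the same selection, with an invented name list standing in for what MapiSettings.names() would return:

subtype_to_prefix = {"h1pdm": "clades-A(H1N1)2009pdm", "h3": "clades-A(H3N2)",
                     "bvic": "clades-B/Vic", "byam": "clades-B/Yam"}

names = ["clades-A(H3N2)-3C.2a", "clades-B/Vic-V1A", "clades-A(H3N2)-all"]   # hypothetical
prefix = subtype_to_prefix["h3"]
print(sorted(n for n in names if n.startswith(prefix)))
# ['clades-A(H3N2)-3C.2a', 'clades-A(H3N2)-all']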
VikParuchuri/simpsons-scripts
tasks/train.py
Python
apache-2.0
16,847
0.008429
from __future__ import division from itertools import chain from sklearn.feature_extraction.text import CountVectorizer import numpy as np import pandas as pd from fisher import pvalue import re import collections from nltk.stem.porter import PorterStemmer import math from percept.tasks.base import Task from percept.fields.base import Complex, List, Dict, Float from inputs.inputs import SimpsonsFormats from percept.utils.models import RegistryCategories, get_namespace from percept.conf.base import settings import os from percept.tasks.train import Train from sklearn.ensemble import RandomForestClassifier import pickle import random import logging log = logging.getLogger(__name__) MAX_FEATURES = 500 DISTANCE_MIN=1 CHARACTER_DISTANCE_MIN = .2 RESET_SCENE_EVERY = 5 def make_df(datalist, labels, name_prefix=""): df = pd.DataFrame(datalist).T if name_prefix!="": labels = [name_prefix + "_" + l for l in labels] labels = [l.replace(" ", "_").lower() for l in labels] df.columns = labels df.index = range(df.shape[0]) return df def return_one(): return 1 class SpellCorrector(object): """ Taken and slightly adapted from peter norvig's post at http://norvig.com/spell-correct.html """ alphabet = 'abcdefghijklmnopqrstuvwxyz' punctuation = [".", "!", "?", ","] def __init__(self): self.NWORDS = self.train(self.words(file(os.path.join(settings.PROJECT_PATH,'data/big.txt')).read())) self.cache = {} def words(self, text): return re.findall('[a-z]+', text.lower()) def train(self, features): model = collections.defaultdict(return_one) for f in features: model[f] += 1 return model def edits1(self, word): splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [a + b[1:] for a, b in splits if b] transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1] replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b] inserts = [a + c + b for a, b in splits for c in self.alphabet] return set(deletes + transposes + replaces + inserts) def known_edits2(self, word): return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS) def known(self, words): return set(w for w in words if w in self.NWORDS) def correct(self, word): if word in self.cache: return self.cache[word] suffix = "" for p in self.punctuation: if word.endswith(p): suffix = p word = word[:-1] candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word] newword = max(candidates, key=self.NWORDS.get) + suffix self.cache.update({word : newword}) return newword class Vectorizer(object): def __init__(self): self.fit_done = False def fit(self, input_text, input_scores, max_features=100, min_features=3): self.spell_corrector = SpellCorrector() self.stemmer = PorterStemmer() new_text = self.batch_generate_new_text(input_text) input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))] self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english") self.vectorizer1.fit(input_text) self.vocab = self.get_vocab(input_text, input_scores, max_features) self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab) self.fit_done = True self.input_text = input_text def spell_correct_text(self, text): text = text.lower() split = text.split(" ") corrected = [self.spell_corrector.correct(w) for w in split] return corrected def batch_apply(self, all_tokens, applied_func): for key in all_tokens: cor = applied_func(all_tokens[key]) all_tokens[key] = cor return all_tokens def 
batch_generate_new_text(self, text): text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text] text = [re.sub("\s+", " ", t) for t in text] t_tokens = [t.split(" ") for t in text] all_token_list = list(set(chain.from_iterable(t_tokens))) all_token_dict = {} for t in all_token_list: all_token_dict.update({t : t}) all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem) all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem) for i in xrange(0,len(t_tokens)): for j in xrange(0,len(t_tokens[i])): t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j]) new_text = [" ".join(t) for t in t_tokens] return new_text def generate_new_text(self, text): no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower()) no_punctuation = re.sub("\s+", " ", no_punctuation) corrected = self.spell_correct_text(no_punctuation) corrected = [self.stemmer.stem(w) for w in corrected] new = " ".join(corrected) return new def get_vocab(self, input_text, input_scores, max_features): train_mat = self.vectorizer1.transform(input_text) input_score_med = np.median(input_scores) new_scores = [0 if
i<=input_score_med else 1 for i in input_scores] ind_max_features = math.floor(max_features/max(input_scores)) all_vocab = [] all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in x
range(0,train_mat.shape[1])] for s in xrange(0,max(input_scores)): sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s] out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s] pvalues = [] for i in xrange(0,len(all_cols)): lcol = all_cols[i] good_lcol = lcol[sel_inds] bad_lcol = lcol[out_inds] good_lcol_present = len(good_lcol[good_lcol > 0]) good_lcol_missing = len(good_lcol[good_lcol == 0]) bad_lcol_present = len(bad_lcol[bad_lcol > 0]) bad_lcol_missing = len(bad_lcol[bad_lcol == 0]) pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing) pvalues.append(pval.two_tail) col_inds = list(xrange(0,train_mat.shape[1])) p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"]) p_frame = p_frame.sort(['pvalues'], ascending=True) getVar = lambda searchList, ind: [searchList[int(i)] for i in ind] vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2]) all_vocab.append(vocab) return list(set(list(chain.from_iterable(all_vocab)))) def batch_get_features(self, text): if not self.fit_done: raise Exception("Vectorizer has not been created.") new_text = self.batch_generate_new_text(text) text = [text[i] + new_text[i] for i in xrange(0,len(text))] return (self.vectorizer.transform(text).todense()) def get_features(self, text): if not self.fit_done: raise Exception("Vectorizer has not been created.") itext=text if isinstance(text, list): itext = text[0] new_text = self.generate_new_text(itext) if isinstance(text, list): text = [text[0] + new_text] else: text = [text + new_text] return (self.vectorizer.transform(text).todense()) class FeatureExtractor(Task): data = Complex() row_data = List() speaker_code_dict = Dict() speaker_codes = List() vectorizer = Complex() data_format = SimpsonsFormats.dataframe category = RegistryCategories.preprocessors namespace = get_namespace(__module__) help_text = "Cleanup simpsons scripts." args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"
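The SpellCorrector above follows Norvig's classic approach: generate every string one edit away from the word (deletes, transposes, replaces, inserts), keep the candidates seen in the training corpus, and pick the most frequent. The Python 3 sketch below reproduces just the candidate-generation step to show how quickly the search space grows:

def edits1(word, alphabet='abcdefghijklmnopqrstuvwxyz'):
    # all strings exactly one edit away from `word`
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [a + b[1:] for a, b in splits if b]
    transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
    replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
    inserts = [a + c + b for a, b in splits for c in alphabet]
    return set(deletes + transposes + replaces + inserts)

print(len(edits1('cat')))                                  # 182 distinct candidates for a 3-letter word
print('cart' in edits1('cat'), 'act' in edits1('cat'))     # True True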
veekun/spline
spline/i18n/cs/__init__.py
Python
mit
2,365
0.002668
# Encoding: UTF-8 """Czech conjugation """ from spline.i18n.formatter import Formatter, BaseWord, parse_bool class Word(BaseWord): @classmethod def guess_type(cls, word, **props): if word.endswith(u'í'): return SoftAdjective elif word.endswith(u'ý'): return HardAdjective else: return Word class Adjective(Word): def __init__(self, word): self.root = word _interesting_categories = 'gender number case'.split() gender = 'm' case = 1 number = 'sg' def inflect(self, **props): gender = props.get('gender',
self.gender) case = int(props.get('case', self.case))
number = props.get('number', self.number) case_no = (case - 1) + (7 if (number == 'pl') else 0) if gender == 'm': if parse_bool(props.get('animate', True)): return self.root + self.endings_ma[case_no] else: return self.root + self.endings_mi[case_no] elif gender == 'f': return self.root + self.endings_f[case_no] else: return self.root + self.endings_n[case_no] class SoftAdjective(Adjective): def __init__(self, word): if word.endswith(u'í'): self.root = word[:-1] else: self.root = word endings_ma = u'í,ího,ímu,ího,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',') endings_mi = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',') endings_f = u'í,í,í,í,í,í,í,í,ích,ím,í,í,ích,ími'.split(',') endings_n = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',') class HardAdjective(Adjective): def __init__(self, word): if any(word.endswith(x) for x in u'ýáé'): self.root = word[:-1] else: self.root = word endings_ma = u'ý,ého,ému,ého,ý,ém,ým,í,ých,ým,é,í,ých,ými'.split(',') endings_mi = u'ý,ého,ému,ý,ý,ém,ým,é,ých,ým,é,é,ých,ými'.split(',') endings_f = u'á,é,é,ou,á,é,ou,é,ých,ým,é,é,ých,ými'.split(',') endings_n = u'é,ého,ému,é,é,ém,ým,á,ých,ým,á,á,ých,ými'.split(',') formatter = Formatter('cs', Word) class Template(unicode): def format(self, *args, **kwargs): return formatter.format(self, *args, **kwargs)
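The adjective classes above pick an ending by computing case_no = (case - 1) plus an offset of 7 for plural, then indexing a 14-element ending table for the chosen gender. A minimal sketch of that lookup for the hard-adjective feminine table, using 'mladý' (root 'mlad') as an example; the grammatical labels in the comments are my reading, not from the source:

endings_f = 'á,é,é,ou,á,é,ou,é,ých,ým,é,é,ých,ými'.split(',')   # HardAdjective, feminine

def inflect_f(root, case, number='sg'):
    case_no = (case - 1) + (7 if number == 'pl' else 0)
    return root + endings_f[case_no]

print(inflect_f('mlad', 1))         # mladá   (nominative singular)
print(inflect_f('mlad', 2))         # mladé   (genitive singular)
print(inflect_f('mlad', 7, 'pl'))   # mladými (instrumental plural)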
pouyana/teireader
webui/gluon/contrib/memcache/memcache.py
Python
mit
49,300
0.004138
#!/usr/bin/env python """ client module for memcached (memory cache daemon) Overview ======== See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached. Usage summary ============= This should give you a feel for how this module operates:: import memcache mc = memcache.Client(['127.0.0.1:11211'], debug=0) mc.set("some_key", "Some value") value = mc.get("some_key") mc.set("another_key", 3) mc.delete("another_key") mc.set("key", "1") # note that the key used for incr/decr must be a string. mc.incr("key") mc.decr("key") The standard way to use memcache with a database is like this:: key = derive_key(obj) obj = mc.get(key) if not obj: obj = backend_api.get(...) mc.set(key, obj) # we now have obj, and future passes through this code # will use the object from the cache. Detailed Documentation ====================== More detailed documentation is available in the L{Client} class. """ import sys import socket import time import os import re try: import cPickle as pickle except ImportError: import pickle from binascii import crc32 # zlib version is not cross-platform def cmemcache_hash(key): return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1) serverHashFunction = cmemcache_hash def useOldServerHashFunction(): """Use the old python-memcache server hash function.""" global serverHashFunction serverHashFunction = crc32 try: from zlib import compress, decompress _supports_compress = True except ImportError: _supports_compress = False # quickly define a decompress just in case we recv compressed data. def decompress(val): raise _Error("received compressed data but I don't support compression (import error)") try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # Original author: Evan Martin of Danga Interactive __author__ = "Sean Reifschneider <[email protected]>" __version__ = "1.48" __copyright__ = "Copyright (C) 2003 Danga Interactive" # http://en.wikipedia.org/wiki/Python_Software_Foundation_License __license__ = "Python Software Foundation License" SERVER_MAX_KEY_LENGTH = 250 # Storing values larger than 1MB requires recompiling memcached. If you do, # this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N" # after importing this module. SERVER_MAX_VALUE_LENGTH = 1024*1024 class _Error(Exception): pass class _ConnectionDeadError(Exception): pass try: # Only exists in Python 2.4+ from threading import local except ImportError: # TODO: add the pure-python local implementation class local(object): pass _DEAD_RETRY = 30 # number of seconds before retrying a dead server. _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout. class Client(local
): """ Object representing a pool of memcache servers. See L{memcache} for an overview.
In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ _FLAG_PICKLE = 1<<0 _FLAG_INTEGER = 1<<1 _FLAG_LONG = 1<<2 _FLAG_COMPRESSED = 1<<3 _SERVER_RETRIES = 10 # how many times to try finding a free server. # exceptions for Client class MemcachedKeyError(Exception): pass class MemcachedKeyLengthError(MemcachedKeyError): pass class MemcachedKeyCharacterError(MemcachedKeyError): pass class MemcachedKeyNoneError(MemcachedKeyError): pass class MemcachedKeyTypeError(MemcachedKeyError): pass class MemcachedStringEncodingError(Exception): pass def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, pload=None, pid=None, server_max_key_length=SERVER_MAX_KEY_LENGTH, server_max_value_length=SERVER_MAX_VALUE_LENGTH, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas = False): """ Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. @param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. @param dead_retry: number of seconds before retrying a blacklisted server. Default to 30 s. @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds. @param cache_cas: (default False) If true, cas operations will be cached. WARNING: This cache is not expired internally, if you have a long-running process you will need to expire it manually via "client.reset_cas(), or the cache can grow unlimited. @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) Data that is larger than this will not be sent to the server. @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) Data that is larger than this will not be sent to the server. 
""" local.__init__(self) self.debug = debug self.dead_retry = dead_retry self.socket_timeout = socket_timeout self.set_servers(servers) self.stats = {} self.cache_cas = cache_cas self.reset_cas() # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.persistent_load = pload self.persistent_id = pid self.server_max_key_length = server_max_key_length self.server_max_value_length = server_max_value_length # figure out the pickler style file = StringIO() try: pickler = self.pickler(file, protocol = self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def reset_cas(self): """ Reset the cas cache. This is only used if the Client() object was created with "cache_cas=True". If used, this cache does not expire internally, so it can grow unbounded if you do not clear it yourself. """ self.cas_ids = {} def set_servers(self, servers): """ Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry, socket_timeout=self.socket_timeout) for s in
yaii/yai
share/extensions/ink2canvas/canvas.py
Python
gpl-2.0
6,499
0.002462
#!/usr/bin/env python ''' Copyright (C) 2011 Karlisson Bezerra <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ''' import inkex import simplestyle class Canvas: """Canvas API helper class""" def __init__(self, parent, width, height, context = "ctx"): self.obj = context self.code = [] #stores the code self.style = {} self.styleCache = {} #stores the previous style applied self.parent = parent self.width = width self.height = height def write(self, text): self.code.append("\t" + text.replace("ctx", self.obj) + "\n") def output(self): from textwrap import dedent html = """ <!DOCTYPE html> <html> <head> <title>Inkscape Output</title> </head> <body> <canvas id='canvas' width='%d' height='%d'></canvas> <script> var %s = document.getElementById("canvas").getContext("2d"); %s </script> </body> </html> """ return dedent(html) % (self.width, self.height, self.obj, "".join(self.code)) def equalStyle(self, style, key): """Checks if the last style used is the same or there's no style yet""" if key in self.styleCache: return True if key not in style: return True return style[key] == self.styleCache[key] def beginPath(self): self.write("ctx.beginPath();") def createLinearGradient(self, href, x1, y1, x2, y2): data = (href, x1, y1, x2, y2) self.write("var %s = \ ctx.createLinearGradient(%f,%f,%f,%f);" % data)
def createRadialGradient(self, href, cx1, cy1, rx, cx2, cy2, ry): data = (href, cx1, cy1, rx, cx2, cy2, ry) self.write("var %s = ctx.createRadialGradient\ (%f,%f,%f,%f,%f,%f);" % data) def addColorStop(self, href, pos, color): self.write("%s.addColorStop(%f, %s);" % (href, pos, color)) def getColor(self, rgb, a): r, g, b = simplestyle.parseColor(rgb) a = float(a) if a < 1:
return "'rgba(%d, %d, %d, %.1f)'" % (r, g, b, a) else: return "'rgb(%d, %d, %d)'" % (r, g, b) def setGradient(self, href): """ for stop in gstops: style = simplestyle.parseStyle(stop.get("style")) stop_color = style["stop-color"] opacity = style["stop-opacity"] color = self.getColor(stop_color, opacity) pos = float(stop.get("offset")) self.addColorStop(href, pos, color) """ return None #href def setOpacity(self, value): self.write("ctx.globalAlpha = %.1f;" % float(value)) def setFill(self, value): try: alpha = self.style["fill-opacity"] except: alpha = 1 if not value.startswith("url("): fill = self.getColor(value, alpha) self.write("ctx.fillStyle = %s;" % fill) def setStroke(self, value): try: alpha = self.style["stroke-opacity"] except: alpha = 1 self.write("ctx.strokeStyle = %s;" % self.getColor(value, alpha)) def setStrokeWidth(self, value): self.write("ctx.lineWidth = %f;" % self.parent.unittouu(value)) def setStrokeLinecap(self, value): self.write("ctx.lineCap = '%s';" % value) def setStrokeLinejoin(self, value): self.write("ctx.lineJoin = '%s';" % value) def setStrokeMiterlimit(self, value): self.write("ctx.miterLimit = %s;" % value) def setFont(self, value): self.write("ctx.font = \"%s\";" % value) def moveTo(self, x, y): self.write("ctx.moveTo(%f, %f);" % (x, y)) def lineTo(self, x, y): self.write("ctx.lineTo(%f, %f);" % (x, y)) def quadraticCurveTo(self, cpx, cpy, x, y): data = (cpx, cpy, x, y) self.write("ctx.quadraticCurveTo(%f, %f, %f, %f);" % data) def bezierCurveTo(self, x1, y1, x2, y2, x, y): data = (x1, y1, x2, y2, x, y) self.write("ctx.bezierCurveTo(%f, %f, %f, %f, %f, %f);" % data) def rect(self, x, y, w, h, rx = 0, ry = 0): if rx or ry: #rounded rectangle, starts top-left anticlockwise self.moveTo(x, y + ry) self.lineTo(x, y+h-ry) self.quadraticCurveTo(x, y+h, x+rx, y+h) self.lineTo(x+w-rx, y+h) self.quadraticCurveTo(x+w, y+h, x+w, y+h-ry) self.lineTo(x+w, y+ry) self.quadraticCurveTo(x+w, y, x+w-rx, y) self.lineTo(x+rx, y) self.quadraticCurveTo(x, y, x, y+ry) else: self.write("ctx.rect(%f, %f, %f, %f);" % (x, y, w, h)) def arc(self, x, y, r, a1, a2, flag): data = (x, y, r, a1, a2, flag) self.write("ctx.arc(%f, %f, %f, %f, %.8f, %d);" % data) def fillText(self, text, x, y): self.write("ctx.fillText(\"%s\", %f, %f);" % (text, x, y)) def translate(self, cx, cy): self.write("ctx.translate(%f, %f);" % (cx, cy)) def rotate(self, angle): self.write("ctx.rotate(%f);" % angle) def scale(self, rx, ry): self.write("ctx.scale(%f, %f);" % (rx, ry)) def transform(self, m11, m12, m21, m22, dx, dy): data = (m11, m12, m21, m22, dx, dy) self.write("ctx.transform(%f, %f, %f, %f, %f, %f);" % data) def save(self): self.write("ctx.save();") def restore(self): self.write("ctx.restore();") def closePath(self): if "fill" in self.style and self.style["fill"] != "none": self.write("ctx.fill();") if "stroke" in self.style and self.style["stroke"] != "none": self.write("ctx.stroke();") #self.write("%s.closePath();" % self.obj)
MicheleDamian/ConnectopicMapping
setup.py
Python
apache-2.0
1,540
0.001299
from
codecs import open from os import path from setuptools import setup, Extension from Cython.Distutils import build_ext import numpy here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open
(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() # Requirements install_requires=['cython>=0.24.1', 'numpy>=1.6.1', 'scipy>=0.16', 'matplotlib>=1.5.1', 'scikit-learn>=0.17.1', 'nibabel>=2.0.2', 'nilearn>=0.2.4', 'GPy>=1.0.7'] setup( name='connectopic_mapping', version='0.3.0', description='Connectopic mapping', long_description=long_description, author='Michele Damian', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Medical Science Apps.', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='neuroscience connectopic mapping research', packages=['connectopic_mapping'], install_requires=install_requires, cmdclass={'build_ext': build_ext}, ext_modules=[Extension("connectopic_mapping.haak", ["connectopic_mapping/haak.pyx"], include_dirs=[numpy.get_include()])], )
mitsuhiko/django
tests/regressiontests/requests/tests.py
Python
bsd-3-clause
15,031
0.002329
import time from datetime import datetime, timedelta from StringIO import StringIO from django.core.handlers.modpython import ModPythonRequest from django.core.handlers.wsgi import WSGIRequest, LimitedStream from django.http import HttpRequest, HttpResponse, parse_cookie, build_request_repr from django.utils import unittest from django.utils.http import cookie_date class RequestsTests(unittest.TestCase): def test_httprequest(self): request = HttpRequest() self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(request.META.keys(), []) def test_httprequest_repr(self): request = HttpRequest() request.path = u'/somepath/' request.GET = {u'get-key': u'get-value'} request.POST = {u'post-key': u'post-value'} request.COOKIES = {u'post-key': u'post-value'} request.META = {u'post-key': u'post-value'} self.assertEqual(repr(request), u"<HttpRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>") self.assertEqual(build_request_repr(reque
st), repr(request)) self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g'
: u'h'}), u"<HttpRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>") def test_wsgirequest(self): request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': StringIO('')}) self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'])) self.assertEqual(request.META['PATH_INFO'], 'bogus') self.assertEqual(request.META['REQUEST_METHOD'], 'bogus') self.assertEqual(request.META['SCRIPT_NAME'], '') def test_wsgirequest_repr(self): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': StringIO('')}) request.GET = {u'get-key': u'get-value'} request.POST = {u'post-key': u'post-value'} request.COOKIES = {u'post-key': u'post-value'} request.META = {u'post-key': u'post-value'} self.assertEqual(repr(request), u"<WSGIRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>") self.assertEqual(build_request_repr(request), repr(request)) self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}), u"<WSGIRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>") def test_modpythonrequest(self): class FakeModPythonRequest(ModPythonRequest): def __init__(self, *args, **kwargs): super(FakeModPythonRequest, self).__init__(*args, **kwargs) self._get = self._post = self._meta = self._cookies = {} class Dummy: def get_options(self): return {} req = Dummy() req.uri = 'bogus' request = FakeModPythonRequest(req) self.assertEqual(request.path, 'bogus') self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(request.META.keys(), []) def test_modpythonrequest_repr(self): class Dummy: def get_options(self): return {} req = Dummy() req.uri = '/somepath/' request = ModPythonRequest(req) request._get = {u'get-key': u'get-value'} request._post = {u'post-key': u'post-value'} request._cookies = {u'post-key': u'post-value'} request._meta = {u'post-key': u'post-value'} self.assertEqual(repr(request), u"<ModPythonRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>") self.assertEqual(build_request_repr(request), repr(request)) self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}), u"<ModPythonRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>") def test_parse_cookie(self): self.assertEqual(parse_cookie('invalid:key=true'), {}) def test_httprequest_location(self): request = HttpRequest() self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"), 'https://www.example.com/asdf') request.get_host = lambda: 'www.example.com' request.path = '' self.assertEqual(request.build_absolute_uri(location="/path/with:colons"), 'http://www.example.com/path/with:colons') def test_near_expiration(self): "Cookie will expire when an near expiration time is 
provided" response = HttpResponse() # There is a timing weakness in this test; The # expected result for max-age requires that there be # a very slight difference between the evaluated expiration # time, and the time evaluated in set_cookie(). If this # difference doesn't exist, the cookie time will be # 1 second larger. To avoid the problem, put in a quick sleep, # which guarantees that there will be a time difference. expires = datetime.utcnow() + timedelta(seconds=10) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_far_expiration(self): "Cookie will expire when an distant expiration time is provided" response = HttpResponse() response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6)) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT') def test_max_age_expiration(self): "Cookie will expire if max_age is provided" response = HttpResponse() response.set_cookie('max_age', max_age=10) max_age_cookie = response.cookies['max_age'] self.assertEqual(max_age_cookie['max-age'], 10) self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10)) def test_httponly_cookie(self): response = HttpResponse() response.set_cookie('example', httponly=True) example_cookie = response.cookies['example'] # A compat cookie may be in use -- check that it has worked # both as an output string, and using the cookie attributes self.assertTrue('; httponly' in str(example_cookie)) self.assertTrue(example_cookie['httponly']) def test_limited_stream(self): # Read all of a limited stream stream = LimitedStream(StringIO('test'), 2) self.assertEqual(stream.read(), 'te') # Reading again returns nothing. self.assertEqual(stream.read(), '') # Read a number of characters greater than the stream has to offer stream = LimitedStream(StringIO('test'), 2) self.assertEqual(stream.read(5), 'te') # Reading again returns nothing. self.assertEqual(stream.readline(5), '') # Read sequentially from a stream stream = LimitedStream(StringIO('12345678'), 8) self.assertEqual(stream.read(5), '12345')
pombredanne/anitya
anitya/lib/backends/folder.py
Python
gpl-2.0
2,702
0.00037
# -*- coding: utf-8 -*- """ (c) 2014 - Copyright Red Hat Inc Authors: Pierre-Yves Chibon <[email protected]> """ from anitya.lib.backends import ( BaseBackend, get_versions_by_regex_for_text, REGEX) from anitya.lib.exceptions import AnityaPluginException import six DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"' class FolderBackend(BaseBackend): ''' The custom class for project having a special hosting. This backend allows to specify a version_url and a regex that will be used to retrieve the version information. ''' name = 'folder' examples = [
'http://ftp.gnu.org/pub/gnu/gnash/', 'http://subsurface.hohndel.org/downloads/', ] @classmethod def get_version(cls, project):
''' Method called to retrieve the latest version of the projects provided, project that relies on the backend of this plugin. :arg Project project: a :class:`model.Project` object whose backend corresponds to the current plugin. :return: the latest version found upstream :return type: str :raise AnityaPluginException: a :class:`anitya.lib.exceptions.AnityaPluginException` exception when the version cannot be retrieved correctly ''' return cls.get_ordered_versions(project)[-1] @classmethod def get_versions(cls, project): ''' Method called to retrieve all the versions (that can be found) of the projects provided, project that relies on the backend of this plugin. :arg Project project: a :class:`model.Project` object whose backend corresponds to the current plugin. :return: a list of all the possible releases found :return type: list :raise AnityaPluginException: a :class:`anitya.lib.exceptions.AnityaPluginException` exception when the versions cannot be retrieved correctly ''' url = project.version_url try: req = cls.call_url(url, insecure=project.insecure) except Exception as err: raise AnityaPluginException( 'Could not call : "%s" of "%s", with error: %s' % ( url, project.name, str(err))) versions = None if not isinstance(req, six.string_types): req = req.text try: regex = REGEX % {'name': project.name.replace('+', '\+')} versions = get_versions_by_regex_for_text( req, url, regex, project) except AnityaPluginException: versions = get_versions_by_regex_for_text( req, url, DEFAULT_REGEX, project) return versions
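When the project-specific regex fails, the backend above falls back to DEFAULT_REGEX, which simply pulls numeric directory names out of an autoindex-style listing. A sketch of that fallback applied to an invented fragment of such a page:

import re

DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"'
# hypothetical fragment of a directory listing such as http://ftp.gnu.org/pub/gnu/gnash/
listing = '<a href="0.8.9/">0.8.9/</a> <a href="0.8.10/">0.8.10/</a> <a href="doc/">doc/</a>'
print(re.findall(DEFAULT_REGEX, listing))   # ['0.8.9', '0.8.10']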
jekhokie/scriptbox
python--advent-of-code/2021/3/solve.py
Python
mit
1,039
0.014437
#!/usr/bin/env python3 import re from enum import Enum diags = [] with open('input.txt', 'r') as f: diags = f.read().splitlines() #--- challenge 1 gamma = "" for i in range(0, len(diags[0])): zeros = len([x for
x in diags if x[i] == "0"]) ones = len([x for x in diags if x[i] == "1"]) gamma += "0" if zeros > ones else "1" gamma = int(gamma, 2) epsilon = gamma ^ 0b111111111111 print("Solution to challenge 1: {}".format(gamma * epsilon)) #--- challenge 2 class Rating(Enum): OXYGEN = 0 CO2 = 1 def get_val(diags, rating): for i in range(0, len(dia
gs[0])): zeros = len([x for x in diags if x[i] == "0"]) ones = len(diags) - zeros if rating == Rating.OXYGEN: check_val = "0" if zeros > ones else "1" else: check_val = "0" if zeros <= ones else "1" diags = [x for x in diags if x[i] != check_val] if len(diags) == 1: return int(diags[0], 2) oxygen = get_val(diags, Rating.OXYGEN) co2 = get_val(diags, Rating.CO2) print("Solution to challenge 2: {}".format(oxygen * co2))
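Challenge 1 above derives epsilon from gamma with a hard-coded 12-bit XOR mask (0b111111111111), which works because every line of the real input is 12 bits wide. The same trick with the mask derived from the line length, shown on the puzzle's published 5-bit sample where gamma is 22 and epsilon is 9:

gamma_bits = "10110"                  # gamma for the 5-bit sample input
gamma = int(gamma_bits, 2)            # 22
mask = (1 << len(gamma_bits)) - 1     # 0b11111, plays the role of 0b111111111111
epsilon = gamma ^ mask                # 9
print(gamma * epsilon)                # 198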
itkovian/sqlalchemy
test/orm/inheritance/test_magazine.py
Python
mit
9,316
0.008695
from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from sqlalchemy.testing.util import function_named from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Table, Column class BaseObject(object): def __init__(self, *args, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) class Publication(BaseObject): pass class Issue(BaseObject): pass class Location(BaseObject): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name))) def _get_name(self): return self._name def _set_name(self, name): session = create_session() s = session.query(LocationName).filter(LocationName.name==name).first() session.expunge_all() if s is not None: self._name = s return found = False for i in session.new: if isinstance(i, LocationName) and i.name == name: self._name = i found = True break if found == False: self._name = LocationName(name=name) name = property(_get_name, _set_name) class LocationName(BaseObject): def __repr__(self): return "%s()" % (self.__class__.__name__) class PageSize(BaseObject): def __repr__(self): return "%s(%sx%s, %s)" % (self.__class__.__name__, self.width, self.height, self.name) class Magazine(BaseObject): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, repr(self.location), repr(self.size)) class Page(BaseObject): def __repr__(self): ret
urn "%s(%s)" % (self.__class__.__name__, str(self.page_no)) class MagazinePage(Page): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__
name__, str(self.page_no), repr(self.magazine)) class ClassifiedPage(MagazinePage): pass class MagazineTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global publication_table, issue_table, location_table, location_name_table, magazine_table, \ page_table, magazine_page_table, classified_page_table, page_size_table publication_table = Table('publication', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(45), default=''), ) issue_table = Table('issue', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('publication_id', Integer, ForeignKey('publication.id')), Column('issue', Integer), ) location_table = Table('location', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('issue_id', Integer, ForeignKey('issue.id')), Column('ref', CHAR(3), default=''), Column('location_name_id', Integer, ForeignKey('location_name.id')), ) location_name_table = Table('location_name', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(45), default=''), ) magazine_table = Table('magazine', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('location_id', Integer, ForeignKey('location.id')), Column('page_size_id', Integer, ForeignKey('page_size.id')), ) page_table = Table('page', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('page_no', Integer), Column('type', CHAR(1), default='p'), ) magazine_page_table = Table('magazine_page', metadata, Column('page_id', Integer, ForeignKey('page.id'), primary_key=True), Column('magazine_id', Integer, ForeignKey('magazine.id')), Column('orders', Text, default=''), ) classified_page_table = Table('classified_page', metadata, Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True), Column('titles', String(45), default=''), ) page_size_table = Table('page_size', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('width', Integer), Column('height', Integer), Column('name', String(45), default=''), ) def _generate_round_trip_test(use_unions=False, use_joins=False): def test_roundtrip(self): publication_mapper = mapper(Publication, publication_table) issue_mapper = mapper(Issue, issue_table, properties = { 'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")), }) location_name_mapper = mapper(LocationName, location_name_table) location_mapper = mapper(Location, location_table, properties = { 'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")), '_name': relationship(LocationName), }) page_size_mapper = mapper(PageSize, page_size_table) magazine_mapper = mapper(Magazine, magazine_table, properties = { 'location': relationship(Location, backref=backref('magazine', uselist=False)), 'size': relationship(PageSize), }) if use_unions: page_join = polymorphic_union( { 'm': page_table.join(magazine_page_table), 'c': page_table.join(magazine_page_table).join(classified_page_table), 'p': page_table.select(page_table.c.type=='p'), }, None, 'page_join') page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p') elif use_joins: page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table) page_mapper = mapper(Page, page_table, with_polymorphic=('*', 
page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p') else: page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p') if use_unions: magazine_join = polymorphic_union( { 'm': page_table.join(magazine_page_table), 'c': page_table.join(magazine_page_table).join(classified_page_table), }, None, 'page_join') magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no)) }) elif use_joins: magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table) magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no)) }) else: magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no)) }) classified_page_mapper = mapper(ClassifiedPage, classified_page_table, inherits=magazine_page_mapper, polymorphic_identity='c', primary_key=[page_table.c.id]) session = create_session() pub = Publication(name='Test') issue = Issue(issue=46,publication=pub) location = Location(ref='ABC',name='London',i
saulpw/visidata
visidata/editor.py
Python
gpl-3.0
2,699
0.002594
import os import sys import signal import subprocess import tempfile import curses import visidata visidata.vd.tstp_signal = None class SuspendCurses: 'Context manager to leave windowed mode on enter and restore it on exit.' def __enter__(self): curses.endwin() if visidata.vd.tstp_signal: signal.signal(signal.SIGTSTP, visidata.vd.tstp_signal) def __exit__(self, exc_type, exc_val, tb): curses.reset_prog_mode() visidata.vd.scrFull.refresh() curses.doupdate() @visidata.VisiData.api def launchEditor(vd, *args): 'Launch $EDITOR with *args* as arguments.' editor = os.environ.get('EDITOR') or vd.fail('$EDITOR not set') args = editor.split() + list(args) with SuspendCurses(): return subprocess.call(args) @visidata.VisiData.api def launchBrowser(vd, *args): 'Launch $BROWSER with *args* as arguments.' browser = os.environ.get('BROWSER') or vd.fail('(no $BROWSER) for %s' % args[0]) args = [browser] + list(args) subprocess.call(args) @visidata.VisiData.api def launchExternalEditor(vd, v, linenum=0): 'Launch $EDITOR to edit string *v* starting on line *linenum*.' import tempfile with tempfile.NamedTemporaryFile() as temp: with open(temp.name, 'w') as fp: fp.write(v) return launchExternalEditorPath(visidata.Path(temp.name), linenum) def launchExternalEditorPath(path, linenum=0): 'Launch $EDITOR to edit *path* starting on line *linenum*.' if linenum: visidata.vd.launchEditor(path, '+%s' % linenum) else: visidata.vd.launchEditor(path) with open(path, 'r') as fp: try: return fp.read().rstrip('\n') # trim inevitable trailing newlines except Exception as e: visidata.vd.exceptionCaught(e) return '' def suspend(): import signal with SuspendCurses(): os.k
ill(os.getpid(), signal.SIGSTOP) def _breakpoint(*args, **kwargs): import pdb class VisiDataPdb(pdb.Pdb): def precmd(self, line): r = super().precmd(line)
if not r: SuspendCurses.__exit__(None, None, None, None) return r def postcmd(self, stop, line): if stop: SuspendCurses.__enter__(None) return super().postcmd(stop, line) SuspendCurses.__enter__(None) VisiDataPdb(nosigint=True).set_trace() sys.breakpointhook = _breakpoint visidata.BaseSheet.addCommand('^Z', 'suspend', 'suspend()', 'suspend VisiData process') visidata.BaseSheet.addCommand('', 'breakpoint', 'breakpoint()', 'drop into pdb REPL')
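The launchExternalEditor() helper above captures a common pattern: drop out of curses (SuspendCurses), write the value to a temp file, run $EDITOR on it, then read the edited text back. A rough non-curses sketch of that round trip; the helper name and the fallback to vi are my own choices, not part of visidata:

import os
import subprocess
import tempfile

def edit_in_external_editor(text):
    editor = os.environ.get('EDITOR', 'vi')            # hypothetical fallback
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write(text)
        path = tmp.name
    subprocess.call(editor.split() + [path])           # blocks until the editor exits
    with open(path) as fp:
        edited = fp.read().rstrip('\n')                # trim trailing newlines, as above
    os.unlink(path)
    return edited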
plotly/python-api
packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickvals.py
Python
mit
513
0
import _plotly_utils.basevalidators class TickvalsValidator(
_plotly_utils.basevalidators.DataArrayValidator): def __init__( self, plotly_name="tickvals", parent_name="scatterternary.marker.colorbar", **kwargs ): super(TickvalsValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), role=kwargs.pop("role", "data"),
**kwargs )
Ehtaga/account-financial-reporting
account_financial_report_horizontal/report/report_financial.py
Python
agpl-3.0
2,192
0
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2015 Therp BV (<http://therp.nl>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is
distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ###############################################################
############### import copy from openerp import models from openerp.addons.account.report.account_financial_report import\ report_account_common class report_account_common_horizontal(report_account_common): def __init__(self, cr, uid, name, context=None): super(report_account_common_horizontal, self).__init__( cr, uid, name, context=context) self.localcontext.update({ 'get_left_lines': self.get_left_lines, 'get_right_lines': self.get_right_lines, }) def get_lines(self, data, side=None): data = copy.deepcopy(data) if data['form']['used_context'] is None: data['form']['used_context'] = {} data['form']['used_context'].update( account_financial_report_horizontal_side=side) return super(report_account_common_horizontal, self).get_lines( data) def get_left_lines(self, data): return self.get_lines(data, side='left') def get_right_lines(self, data): return self.get_lines(data, side='right') class ReportFinancial(models.AbstractModel): _inherit = 'report.account.report_financial' _wrapped_report_class = report_account_common_horizontal
seerjk/reboot06
09/homework08/flask_web.py
Python
mit
768
0.002604
# coding:utf-8 impo
rt MySQLdb as mysql from flask import Flask, request, render_template import json app = Flask(__name__) con = mysql.connect(user="root", passwd="redhat", db="jiangkun") con.autocommit(True) cur = con.cursor() @app.route('/') def index(): return render_template("index.html") @app.ro
ute('/list') def list(): sql = "select * from user" cur.execute(sql) res_json = json.dumps(cur.fetchall()) print res_json return res_json @app.route('/add') def add(): name = request.args.get('name') passwd = request.args.get('passwd') sql = "insert into user (name, passwd) values (%s, %s)" % (name, passwd) cur.execute(sql) return "ok" if __name__ == "__main__": app.run(host="0.0.0.0", debug=True, port=9092)
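The /add route above builds its INSERT by interpolating the query-string values straight into the SQL text. A hedged sketch of the same route letting the MySQLdb driver bind the values instead; the connection settings are copied from the snippet and everything else is illustrative:

# coding:utf-8
import MySQLdb as mysql
from flask import Flask, request

app = Flask(__name__)
con = mysql.connect(user="root", passwd="redhat", db="jiangkun")
con.autocommit(True)
cur = con.cursor()

@app.route('/add')
def add():
    name = request.args.get('name')
    passwd = request.args.get('passwd')
    # The driver fills the %s placeholders when the values are passed as a tuple.
    cur.execute("insert into user (name, passwd) values (%s, %s)", (name, passwd))
    return "ok"

if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True, port=9092)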
housecanary/hc-api-python
housecanary/utilities.py
Python
mit
2,345
0.002132
"""Utility functions for hc-api-python""" from datetime import datetime def get_readable_time_string(seconds): """Returns human readable string from number of seconds""" seconds = int(seconds) minutes = seconds // 60 seconds = seconds % 60 hours = minutes // 60 minutes = minutes % 60 days = hours // 24 hours = hours % 24 result = "" if days > 0: result += "%d %s " % (days, "Day" if (days == 1) else "Days") if hours > 0: result += "%d %s " % (hours, "Hour" if (hours == 1) else "Hours") if minutes > 0: result += "%d %s " % (minutes, "Minute" if (minutes == 1) else "Minutes") if seconds > 0: result += "%d %s " % (seconds, "Second" if (seconds == 1) else "Seconds") return result.strip() def get_datetime_from_timestamp(timestamp): """Return datetime from unix timestamp""" try: return datetime.fromtimestamp(int(timestamp)) except: return None def get_rate_limits(response): """Returns a list of rate limit information from a given response's headers.""" periods = response.headers['X-RateLimit-Period'] if not periods: return [] rate_limits = [] periods = periods.split(',') limits = response.headers['X-RateLimit-Limit'].split(',') remaining = response.headers['X-RateLimit-Remaining'].split(',') reset = response.headers['X-RateLimit-
Reset'].split(',') for idx, period in enumerate(periods): rate_limit = {} limit_period = get_readable_time_string(period) rate_limit["peri
od"] = limit_period rate_limit["period_seconds"] = period rate_limit["request_limit"] = limits[idx] rate_limit["requests_remaining"] = remaining[idx] reset_datetime = get_datetime_from_timestamp(reset[idx]) rate_limit["reset"] = reset_datetime right_now = datetime.now() if (reset_datetime is not None) and (right_now < reset_datetime): # add 1 second because of rounding seconds_remaining = (reset_datetime - right_now).seconds + 1 else: seconds_remaining = 0 rate_limit["reset_in_seconds"] = seconds_remaining rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining) rate_limits.append(rate_limit) return rate_limits
chendx79/Python3HandlerSocket
pyhs/__init__.py
Python
mit
51
0
from .m
anager impor
t Manager __version__ = '0.2.4'
mtagle/airflow
airflow/providers/google/cloud/operators/mssql_to_gcs.py
Python
apache-2.0
3,144
0.000318
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ MsSQL to GCS operator. """ import decimal from airflow.providers.google.cloud.operators.sql_to_gcs import BaseSQLToGCSOperator from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook from airflow.utils.decorators import apply_defaults class MSSQLToGCSOperator(BaseSQLToGCSOperator): """Copy data from Microsoft SQL Server to Google Cloud Storage in JSON or CSV format. :param mssql_conn_id: Reference to a specific MSSQL hook. :type mssql_conn_id: str **Example**: The following operator will export data from the Customers table within the given MSSQL Database and then upload it to the 'mssql-export' GCS bucket (along with a schema file). :: export_customers = MsSqlToGoogleCloudStorageOperator( task_id='export_customers', sql='SELECT * FROM dbo.Customers;', bucket='mssql-export', filename='data/customers/export.json', schema_filename='schemas/export.json', mssql_conn_id='mssql_default', google_cloud_storage_conn_id='goo
gle_cloud_default', dag=dag ) """ ui_color = '#e0a98c' type_map = { 3: 'INTEGER', 4: 'TIMESTAMP', 5: 'NUMERIC' } @apply_defaults def __init__(self, mssql_conn_id='mssql_default', *args, **kwargs): super().__init__(*args, **kwargs) self.mssql_conn_id =
mssql_conn_id def query(self): """ Queries MSSQL and returns a cursor of results. :return: mssql cursor """ mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id) conn = mssql.get_conn() cursor = conn.cursor() cursor.execute(self.sql) return cursor def field_to_bigquery(self, field): return { 'name': field[0].replace(" ", "_"), 'type': self.type_map.get(field[1], "STRING"), 'mode': "NULLABLE", } @classmethod def convert_type(cls, value, schema_type): """ Takes a value from MSSQL, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. """ if isinstance(value, decimal.Decimal): return float(value) return value
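A minimal standalone illustration of the two conversion helpers on MSSQLToGCSOperator above; the type map and the Decimal handling are taken from the snippet, while the sample cursor-description tuple is invented:

import decimal

TYPE_MAP = {3: 'INTEGER', 4: 'TIMESTAMP', 5: 'NUMERIC'}  # MSSQLToGCSOperator.type_map

def field_to_bigquery(field):
    # field is one entry of cursor.description: (name, type_code, ...)
    return {'name': field[0].replace(" ", "_"),
            'type': TYPE_MAP.get(field[1], "STRING"),
            'mode': "NULLABLE"}

def convert_type(value):
    # Decimals are not JSON serialisable, so they are downcast to float.
    return float(value) if isinstance(value, decimal.Decimal) else value

print(field_to_bigquery(("customer id", 3)))   # {'name': 'customer_id', 'type': 'INTEGER', 'mode': 'NULLABLE'}
print(convert_type(decimal.Decimal("12.50")))  # 12.5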
alexryndin/ambari
ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
Python
apache-2.0
7,652
0.023523
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import json from stacks.utils.RMFTestCase import * from mock.mock import patch from only_for_platform import not_for_platform, PLATFORM_WINDOWS @not_for_platform(PLATFORM_WINDOWS) class TestKafkaBroker(RMFTestCase): COMMON_SERVICES_PACKAGE_DIR = "KAFKA/0.8.1/package" STACK_VERSION = "2.2" def test_configure_default(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py", classname = "KafkaBroker", command = "configure", config_file="default.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.assertResourceCalled('Directory', '/var/log/kafka', owner = 'kafka', group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('Directory', '/var/run/kafka', owner = 'kafka', group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config', owner = 'kafka', group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('Directory', '/tmp/log/dir', owner = 'kafka', create_parents = True, group = 'hadoop', mode = 0755, cd_access = 'a', recursive_ownership = True, ) @patch("os.path.islink") @patch("os.path.realpath") def test_configure_custom_paths_default(self, realpath_mock, islink_mock): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py", classname = "KafkaBroker", command = "configure", config_file="default_custom_path_config.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.assertResourceCalled('Directory', '/customdisk/var/log/kafka', owner = 'kafka', group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('Directory', '/customdisk/var/run/kafka', owner = 'kafka', group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config', owner = 'kafka',
group = 'hadoop', create_parents = True, mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertResourceCalled('D
irectory', '/tmp/log/dir', owner = 'kafka', create_parents = True, group = 'hadoop', mode = 0755, cd_access = 'a', recursive_ownership = True, ) self.assertTrue(islink_mock.called) self.assertTrue(realpath_mock.called) def test_pre_upgrade_restart(self): config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json" with open(config_file, "r") as f: json_content = json.load(f) version = '2.2.1.0-3242' json_content['commandParams']['version'] = version self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py", classname = "KafkaBroker", command = "pre_upgrade_restart", config_dict = json_content, stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES) self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,) self.assertNoMoreResources() @patch("resource_management.core.shell.call") def test_pre_upgrade_restart_23(self, call_mock): config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json" with open(config_file, "r") as f: json_content = json.load(f) version = '2.3.0.0-1234' json_content['commandParams']['version'] = version mocks_dict = {} self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py", classname = "KafkaBroker", command = "pre_upgrade_restart", config_dict = json_content, stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES, call_mocks = [(0, None, ''), (0, None)], mocks_dict = mocks_dict) self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,) self.assertResourceCalled("Link", "/etc/kafka/conf", to="/usr/hdp/current/kafka-broker/conf") self.assertNoMoreResources() self.assertEquals(1, mocks_dict['call'].call_count) self.assertEquals(1, mocks_dict['checked_call'].call_count) self.assertEquals( ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'), mocks_dict['checked_call'].call_args_list[0][0][0]) self.assertEquals( ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'), mocks_dict['call'].call_args_list[0][0][0])
chriso/gauged
gauged/drivers/__init__.py
Python
mit
1,960
0
""" Gauged https://github.com/chriso/gauged (MIT Licensed) Copyright 2014 (c) Chris O'Hara <[email protected]> """ from urlparse import urlparse, parse_qsl from urllib import unquote from .mysql import MySQLDriver from .sqlite import SQLiteDriver from .postgresql import PostgreSQLDriver def parse_dsn(dsn_string): """Parse a connection string and return the associated driver""" dsn = urlparse(dsn_string) scheme =
dsn.scheme.split('+')[0] username = password = host = port = None host = dsn.netloc if '@' in host: username, host = host.split('@') if ':' in username: username, password = username.split(':') password = unquote(password) username = unquote(username) if ':' in host: host, port = host.split(':') port = int(port) database = dsn.path.split('?')[0][1:] query = dsn.path.split('?')[1] if '
?' in dsn.path else dsn.query kwargs = dict(parse_qsl(query, True)) if scheme == 'sqlite': return SQLiteDriver, [dsn.path], {} elif scheme == 'mysql': kwargs['user'] = username or 'root' kwargs['db'] = database if port: kwargs['port'] = port if host: kwargs['host'] = host if password: kwargs['passwd'] = password return MySQLDriver, [], kwargs elif scheme == 'postgresql': kwargs['user'] = username or 'postgres' kwargs['database'] = database if port: kwargs['port'] = port if 'unix_socket' in kwargs: kwargs['host'] = kwargs.pop('unix_socket') elif host: kwargs['host'] = host if password: kwargs['password'] = password return PostgreSQLDriver, [], kwargs else: raise ValueError('Unknown driver %s' % dsn_string) def get_driver(dsn_string): driver, args, kwargs = parse_dsn(dsn_string) return driver(*args, **kwargs)
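A usage sketch for parse_dsn above with an invented MySQL connection string (the module targets Python 2, per its urlparse/urllib imports):

from gauged.drivers import parse_dsn

driver_cls, args, kwargs = parse_dsn('mysql://root:secret@localhost:3306/gauged')
print(driver_cls.__name__)    # MySQLDriver
print(sorted(kwargs.items()))
# [('db', 'gauged'), ('host', 'localhost'), ('passwd', 'secret'), ('port', 3306), ('user', 'root')]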
kurikaesu/arsenalsuite
cpp/apps/absubmit/nukesubmit/nukeStub.py
Python
gpl-2.0
877
0.019384
#!/usr/bin/python2.5 import sys import time import os import nuke def launchSubmit(): print("nukeStub(): launch submitter dialog") submitCmd = "/drd/software/int/bin/launcher.sh -p %s -d %s --launchBlocking farm -o EPA_CMDLINE python2.5 --arg '$ABSUBMIT/nukesubmit/nuke2AB.py'" % (os.environ['DRD_JOB'], os.environ['DRD_DEPT']) # root.name holds the path to the nuke script submitCmd += " %s" % nuke.value("root.name") submitCmd += " %s" % nuke.Root.firstFrame(nuke.root()) submitCmd += "
%s" % nuke.Root.lastFrame(nuke.root()) writeNodes = [i for i in nuke.allNodes() if i.Class() == "Write"] for i in writeNodes: submitCmd += " %s %s" % (i['name'].value(), nuke.filename(i)) print( "nukeStub(): %s" % submitCmd ) os.system(sub
mitCmd) menubar = nuke.menu("Nuke") m = menubar.addMenu("&Render") m.addCommand("Submit to Farm", "nukeStub.launchSubmit()", "Up")
Sotera/Datawake-Legacy
memex-datawake-stream/src/datawakestreams/extractors/website_bolt.py
Python
apache-2.0
293
0.010239
from extractors.extract_website import Ex
tractWebsite from datawakestreams.extractors.extractor_bolt import ExtractorBolt class WebsiteBolt(Extracto
rBolt): name ='website_extractor' def __init__(self): ExtractorBolt.__init__(self) self.extractor = ExtractWebsite()
monetario/core
monetario/serializers.py
Python
bsd-3-clause
5,724
0.002271
import pycountry from marshmallow import Schema, fields, ValidationError def validate_currency_symbol(val): if val not in [x.letter for x in pycountry.currencies.objects]: raise ValidationError('Symbol is not valid') class CategoryTypeField(fields.Field): def _serialize(self, value, attr, obj): return {'value': value, 'title': dict(obj.CATEGORY_TYPES).get(value)} class RecordTypeField(fields.Field): def _serialize(self, value, attr, obj): return {'value': value, 'title': dict(obj.RECORD_TYPES).get(value)} class PaymentMethodField(fields.Field): def _serialize(self, value, attr, obj): return {'value': value, 'title': dict(obj.PAYMENT_METHODS).get(value)} class GroupSchema(Schema): id = fields.Int(dump_only=True) name = fields.Str(required=True) class UserSchema(Schema): id = fields.Int(dump_only=True) email = fields.Email(required=True) first_name = fields.Str(required=True) last_name = fields.Str() password = fields.Str(load_only=True, required=True) active = fields.Bool() group = fields.Nested(GroupSchema, dump_only=True) invite_hash = fields.Str() date_created = fields.DateTime(dump_only=True) date_modified = fields.DateTime(dump_only=True) class CategorySchema(Schema): id = fields.Int(dump_only=True) name = fields.Str(required=True) category_type = CategoryTypeField() parent = fields.Nested('self', dump_only=True, exclude=('parent', )) parent_id = fields.Int(load_only=True, load_from='parent') colour = fields.Str(required=True) logo = fields.Str(required=True) class GroupCategorySchema(Schema): id = fields.Int(dump_only=True) name = fields.Str(required=True) category_type = CategoryTypeField() group = fields.Nested(GroupSchema, dump_only=True) parent = fields.Nested('self', dump_only=True, exclude=('parent', )) parent_id = fields.Int(load_only=True, load_from='parent') colour = fields.
Str(required=True) logo = fields.Str(required=True) class GroupCurrencySchema(Schema):
id = fields.Int(dump_only=True) name = fields.Str(required=True) symbol = fields.Str( required=True, validate=validate_currency_symbol ) date_modified = fields.DateTime() group = fields.Nested(GroupSchema, dump_only=True) class AccountSchema(Schema): id = fields.Int(dump_only=True) name = fields.Str(required=True) currency = fields.Nested(GroupCurrencySchema, dump_only=True) currency_id = fields.Int(required=True, load_only=True, load_from='currency') user = fields.Nested(UserSchema, dump_only=True) class TransactionSchema(Schema): id = fields.Int(dump_only=True) amount = fields.Float(required=True) source_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name')) source_account_id = fields.Int(required=True, load_only=True, load_from='source_account') target_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name')) target_account_id = fields.Int(required=True, load_only=True, load_from='target_account') user = fields.Nested( UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email') ) currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name')) currency_id = fields.Int(required=True, load_only=True, load_from='currency') description = fields.Str() date = fields.DateTime() class RecordSchema(Schema): id = fields.Int(dump_only=True) amount = fields.Float(required=True) description = fields.Str() record_type = RecordTypeField(required=True) payment_method = PaymentMethodField() date = fields.DateTime() user = fields.Nested( UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email') ) account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name')) account_id = fields.Int(required=True, load_only=True, load_from='account') currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name')) currency_id = fields.Int(required=True, load_only=True, load_from='currency') transaction = fields.Nested( TransactionSchema, dump_only=True, only=('id', 'source_account', 'target_account', 'amount', 'currency') ) category = fields.Nested( GroupCategorySchema, dump_only=True, only=('id', 'name', 'logo', 'colour') ) category_id = fields.Int(required=True, load_only=True, load_from='category') class AppSchema(Schema): id = fields.Int(dump_only=True) name = fields.Str(required=True) secret = fields.Str(required=True, dump_only=True) user = fields.Nested(UserSchema, dump_only=True) user_id = fields.Int(required=True, load_only=True, load_from='user') class TokenSchema(Schema): email = fields.Email(required=True) password = fields.Str(load_only=True, required=True) secret = fields.Str(required=True) class BalanceSchema(Schema): cash_flow = fields.Float(required=True) start_balance = fields.Float() end_balance = fields.Float() expense = fields.Float() income = fields.Float() date = fields.Date() record_type = fields.Int() class DateRangeFilterSchema(Schema): date_from = fields.Date() date_to = fields.Date() class CashFlowSchema(Schema): cash_flow = fields.Float(required=True) expense = fields.Float() income = fields.Float() date = fields.Date() class ExpenseSchema(Schema): amount = fields.Float(required=True) category_id = fields.Int() class IncomeSchema(Schema): amount = fields.Float(required=True) category_id = fields.Int()
fujicoin/electrum-fjc
electrum/tests/test_util.py
Python
mit
5,385
0.003714
from decimal import Decimal from electrum.util import
(format_satoshis, format_fee_satoshis, parse_URI, is_hash256_str, chunks) from . import SequentialTestCase class TestUtil(SequentialTestCase): def test_format_satoshis(self): self.assertEqual("0.00001234", format_satoshis(1234)) def test_format_satoshis_negative(self): self.assertEqual("-0.00001234", format_satoshis(-1234)) def test_format_fee_float(self): self.assertEqual("1.7", format_fee_satoshis(1700/1000)) def test_format
_fee_decimal(self): self.assertEqual("1.7", format_fee_satoshis(Decimal("1.7"))) def test_format_fee_precision(self): self.assertEqual("1.666", format_fee_satoshis(1666/1000, precision=6)) self.assertEqual("1.7", format_fee_satoshis(1666/1000, precision=1)) def test_format_satoshis_whitespaces(self): self.assertEqual(" 0.0001234 ", format_satoshis(12340, whitespaces=True)) self.assertEqual(" 0.00001234", format_satoshis(1234, whitespaces=True)) def test_format_satoshis_whitespaces_negative(self): self.assertEqual(" -0.0001234 ", format_satoshis(-12340, whitespaces=True)) self.assertEqual(" -0.00001234", format_satoshis(-1234, whitespaces=True)) def test_format_satoshis_diff_positive(self): self.assertEqual("+0.00001234", format_satoshis(1234, is_diff=True)) def test_format_satoshis_diff_negative(self): self.assertEqual("-0.00001234", format_satoshis(-1234, is_diff=True)) def _do_test_parse_URI(self, uri, expected): result = parse_URI(uri) self.assertEqual(expected, result) def test_parse_URI_address(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'}) def test_parse_URI_only_address(self): self._do_test_parse_URI('15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'}) def test_parse_URI_address_label(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?label=electrum%20test', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'label': 'electrum test'}) def test_parse_URI_address_message(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?message=electrum%20test', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'message': 'electrum test', 'memo': 'electrum test'}) def test_parse_URI_address_amount(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 30000}) def test_parse_URI_address_request_url(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?r=http://domain.tld/page?h%3D2a8628fc2fbe', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'r': 'http://domain.tld/page?h=2a8628fc2fbe'}) def test_parse_URI_ignore_args(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?test=test', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'test': 'test'}) def test_parse_URI_multiple_args(self): self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page', {'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 4000, 'label': 'electrum-test', 'message': u'electrum test', 'memo': u'electrum test', 'r': 'http://domain.tld/page', 'test': 'none'}) def test_parse_URI_no_address_request_url(self): self._do_test_parse_URI('bitcoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe', {'r': 'http://domain.tld/page?h=2a8628fc2fbe'}) def test_parse_URI_invalid_address(self): self.assertRaises(BaseException, parse_URI, 'bitcoin:invalidaddress') def test_parse_URI_invalid(self): self.assertRaises(BaseException, parse_URI, 'notbitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma') def test_parse_URI_parameter_polution(self): self.assertRaises(Exception, parse_URI, 'bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003&label=test&amount=30.0') def test_is_hash256_str(self): self.assertTrue(is_hash256_str('09a4c03e3bdf83bbe3955f907ee52da4fc12f4813d459bc75228b64ad08617c7')) 
self.assertTrue(is_hash256_str('2A5C3F4062E4F2FCCE7A1C7B4310CB647B327409F580F4ED72CB8FC0B1804DFA')) self.assertTrue(is_hash256_str('00' * 32)) self.assertFalse(is_hash256_str('00' * 33)) self.assertFalse(is_hash256_str('qweqwe')) self.assertFalse(is_hash256_str(None)) self.assertFalse(is_hash256_str(7)) def test_chunks(self): self.assertEqual([[1, 2], [3, 4], [5]], list(chunks([1, 2, 3, 4, 5], 2))) with self.assertRaises(ValueError): list(chunks([1, 2, 3], 0))
plivo/plivo-python
plivo/rest/client.py
Python
mit
13,434
0.001265
# -*- coding: utf-8 -*- """ Core client, used for all API requests. """ import os import platform from collections import namedtuple from plivo.base import ResponseObject from plivo.exceptions import (AuthenticationError, InvalidRequestError, PlivoRestError, PlivoServerError, ResourceNotFoundError, ValidationError) from plivo.resources import (Accounts, Addresses, Applications, Calls, Conferences, Endpoints, Identities, Messages, Powerpacks, Media, Lookup, Brand,Campaign, Numbers, Pricings, Recordings, Subaccounts, CallFeedback, MultiPartyCalls) from plivo.resources.live_calls import LiveCalls from plivo.resources.queued_calls import QueuedCalls from plivo.resources.regulatory_compliance import EndUsers, ComplianceDocumentTypes, ComplianceDocuments, \ ComplianceRequirements, ComplianceApplications from plivo.utils import is_valid_mainaccount, is_valid_subaccount from plivo.version import __version__ from requests import Request, Session AuthenticationCredentials = namedtuple('AuthenticationCredentials', 'auth_id auth_token') PLIVO_API = 'https://api.plivo.com' PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account']) # Will change these urls before putting this change in production API_VOICE = 'https://api.plivo.com' API_VOICE_BASE_URI = '/'.join([API_VOICE, 'v1/Account']) API_VOICE_FALLBACK_1 = 'https://api.plivo.com' API_VOICE_FALLBACK_2 = 'https://api.plivo.com' API_VOICE_BASE_URI_FALLBACK_1 = '/'.join([API_VOICE_FALLBACK_1, 'v1/Account']) API_VOICE_BASE_URI_FALLBACK_2 = '/'.join([API_VOICE_FALLBACK_2, 'v1/Account']) CALLINSIGHTS_BASE_URL = 'https://stats.plivo.com' def get_user_agent(): return 'plivo-python/%s (Python: %s)' % (__version__, platform.python_version()) def fetch_credentials(auth_id, auth_token): """Fetches the right credentials either from params or from environment""" if not (auth_id and auth_token): try: auth_id = os.environ['PLIVO_AUTH_ID'] auth_token = os.environ['PLIVO_AUTH_TOKEN'] except KeyError: raise AuthenticationError('The Plivo Python SDK ' 'could not find your aut
h credentials.') if not (is_valid_mainaccount(au
th_id) or is_valid_subaccount(auth_id)): raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id) return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token) class Client(object): def __init__(self, auth_id=None, auth_token=None, proxies=None, timeout=5): """ The Plivo API client. Deals with all the API requests to be made. """ self.base_uri = PLIVO_API_BASE_URI self.session = Session() self.session.headers.update({ 'User-Agent': get_user_agent(), 'Content-Type': 'application/json', 'Accept': 'application/json', }) self.session.auth = fetch_credentials(auth_id, auth_token) self.multipart_session = Session() self.multipart_session.headers.update({ 'User-Agent': get_user_agent(), 'Cache-Control': 'no-cache', }) self.multipart_session.auth = fetch_credentials(auth_id, auth_token) self.proxies = proxies self.timeout = timeout self.account = Accounts(self) self.subaccounts = Subaccounts(self) self.applications = Applications(self) self.calls = Calls(self) self.live_calls = LiveCalls(self) self.queued_calls = QueuedCalls(self) self.conferences = Conferences(self) self.endpoints = Endpoints(self) self.messages = Messages(self) self.lookup = Lookup(self) self.numbers = Numbers(self) self.powerpacks = Powerpacks(self) self.brand = Brand(self) self.campaign = Campaign(self) self.media = Media(self) self.pricing = Pricings(self) self.recordings = Recordings(self) self.addresses = Addresses(self) self.identities = Identities(self) self.call_feedback = CallFeedback(self) self.end_users = EndUsers(self) self.compliance_document_types = ComplianceDocumentTypes(self) self.compliance_documents = ComplianceDocuments(self) self.compliance_requirements = ComplianceRequirements(self) self.compliance_applications = ComplianceApplications(self) self.multi_party_calls = MultiPartyCalls(self) self.voice_retry_count = 0 def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.session.close() self.multipart_session.close() def process_response(self, method, response, response_type=None, objects_type=None): """Processes the API response based on the status codes and method used to access the API """ try: response_json = response.json( object_hook=lambda x: ResponseObject(x) if isinstance(x, dict) else x) if response_type: r = response_type(self, response_json.__dict__) response_json = r if 'objects' in response_json and objects_type: response_json.objects = [ objects_type(self, obj.__dict__) for obj in response_json.objects ] except ValueError: response_json = None if response.status_code == 400: if response_json is not None and 'error' in response_json: raise ValidationError(response_json.error) raise ValidationError( 'A parameter is missing or is invalid while accessing resource' 'at: {url}'.format(url=response.url)) if response.status_code == 401: if response_json and 'error' in response_json: raise AuthenticationError(response_json.error) raise AuthenticationError( 'Failed to authenticate while accessing resource at: ' '{url}'.format(url=response.url)) if response.status_code == 404: if response_json and 'error' in response_json: raise ResourceNotFoundError(response_json.error) raise ResourceNotFoundError( 'Resource not found at: {url}'.format(url=response.url)) if response.status_code == 405: if response_json and 'error' in response_json: raise InvalidRequestError(response_json.error) raise InvalidRequestError( 'HTTP method "{method}" not allowed to access resource at: ' '{url}'.format(method=method, url=response.url)) if response.status_code == 409: if 
response_json and 'error' in response_json: raise InvalidRequestError(response_json.error) raise InvalidRequestError( 'Conflict: ' '{url}'.format(url=response.url)) if response.status_code == 422: if response_json and 'error' in response_json: raise InvalidRequestError(response_json.error) raise InvalidRequestError( 'Unprocessable Entity: ' '{url}'.format(url=response.url)) if response.status_code == 500: if response_json and 'error' in response_json: raise PlivoServerError(response_json.error) raise PlivoServerError( 'A server error occurred while accessing resource at: ' '{url}'.format(url=response.url)) if method == 'DELETE': if response.status_code not in [200, 204]: raise PlivoRestError('Resource at {url} could not be ' 'deleted'.format(url=response.url)) elif response.status_code not in [
simonmonk/pi_magazine
04_analog_clock/analog_clock_24.py
Python
mit
1,066
0.025328
import time import RPi.GPIO as GPIO # Constants PULSE_LEN = 0.03 # length of clock motor pulse A_PIN = 18 # one motor drive pin B_PIN = 23 # second motor drive pin # Configure the GPIO pins GPIO.setmode(GPIO.BCM) GPIO.setup(A_PIN, GPIO.OUT) GPIO.setup(B_PIN, GPIO.OUT) # Global variables positive_polarity = True period = 2.0 # 2 second tick last_tick_time = 0 # the time at which the last tick occurred def tick(): # Alternate p
ositive and negative pulses global positive_polarity if positive_polarity: pulse(A_PIN, B_PIN) else: pulse(B_PIN, A_PIN) # Flip the polarity ready for the next tick positive_polarity = not positive_polarity def pulse(pos_pin, neg_pin): # Turn on the pulse GPIO.output(pos
_pin, True) GPIO.output(neg_pin, False) time.sleep(PULSE_LEN) # Turn the power off until the next tick GPIO.output(pos_pin, False) try: while True: t = time.time() if t > last_tick_time + period: # its time for the next tick tick() last_tick_time = t finally: print('Cleaning up GPIO') GPIO.cleanup()
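The tick/pulse logic above boils down to swapping which drive pin is pulsed high on every tick. A GPIO-free sketch of just that alternation; next_pulse_pins is an illustrative name:

A_PIN, B_PIN = 18, 23          # same drive pins as above
positive_polarity = True

def next_pulse_pins():
    # Return (high pin, low pin) for this tick, then flip for the next one.
    global positive_polarity
    pins = (A_PIN, B_PIN) if positive_polarity else (B_PIN, A_PIN)
    positive_polarity = not positive_polarity
    return pins

print([next_pulse_pins() for _ in range(4)])
# [(18, 23), (23, 18), (18, 23), (23, 18)]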
plotly/python-api
packages/python/plotly/plotly/graph_objs/area/_hoverlabel.py
Python
mit
17,818
0.000954
from plotly
.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Hoverlabel(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "area" _path_str = "area.hoverlabel" _valid_props = { "align", "alignsrc",
"bgcolor", "bgcolorsrc", "bordercolor", "bordercolorsrc", "font", "namelength", "namelengthsrc", } # align # ----- @property def align(self): """ Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines The 'align' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'right', 'auto'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["align"] @align.setter def align(self, val): self["align"] = val # alignsrc # -------- @property def alignsrc(self): """ Sets the source reference on Chart Studio Cloud for align . The 'alignsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["alignsrc"] @alignsrc.setter def alignsrc(self, val): self["alignsrc"] = val # bgcolor # ------- @property def bgcolor(self): """ Sets the background color of the hover labels for this trace The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val # bgcolorsrc # ---------- @property def bgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for bgcolor . The 'bgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bgcolorsrc"] @bgcolorsrc.setter def bgcolorsrc(self, val): self["bgcolorsrc"] = val # bordercolor # ----------- @property def bordercolor(self): """ Sets the border color of the hover labels for this trace. 
The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val # bordercolorsrc # -------------- @property def bordercolorsrc(self): """ Sets the source reference on Chart Studio Cloud for bordercolor . The 'bordercolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bordercolorsrc"]
ah-anssi/SecuML
SecuML/core/ActiveLearning/QueryStrategies/AnnotationQueries/AnnotationQueries.py
Python
gpl-2.0
3,837
0.001303
# SecuML # Copyright (C) 2016-2017 ANSSI # # SecuML is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # SecuML is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with SecuML. If not, see <http://www.gnu.org/licenses/>. import abc import numpy as np import os.path as path import pandas as pd import time from .AnnotationQuery import AnnotationQuery class AnnotationQueries(object): def __init__(self, iteration, label): self.iteration = iteration self.label = label self.annotation_queries = [] def run(self): self.predictions = self.getPredictedProbabilities() self.runModels() start_time = time.time() self.generateAnnotationQueries() self.generate_queries_time = time.time() - start_time self.exportAnnotationQueries() @abc.abstractmethod def runModels(self): return @abc.abstractmethod def generateAnnotationQueries(self): return def generateAnnotationQuery(self, instance_id, predicted_proba, suggested_label, suggested_family, confidence=None): return AnnotationQuery(instance_id, predicted_proba, suggested_label, suggested_family, confidence=confidence) def getPredictedProbabilities(self): models_conf = self.iteration.conf.models_conf if 'binary' in models_conf: classifier = self.iteration.update_model.models['binary'] predictions = classifier.testing_monitoring.predictions_monitoring.predictions else: test_instances = self.iteration.datasets.getTestInstances() num_instances = test_instances.numInstances() predictions = pd.DataFrame( np.zeros((num_instances, 4)), index=test_instances.ids.getIds(), columns=['predicted_proba', 'predicted_labels', 'ground_truth', 'scores']) predictions['predicted_proba'] = [0.5] * num_instances predictions['predicted_labels'] = [False] * num_instances predictions['ground_truth'] = test_instances.ground_truth.getLabels() predictions['scores'] = [0.5] * num_instances
return predictions def exportAnnotationQueries(self): iteration_dir = self.iteration.iteration_dir if iteration_dir is None: return filename = path.join(iteration_dir, 'toannotate_' +
self.label + '.csv') with open(filename, 'w') as f: for i, annotation_query in enumerate(self.annotation_queries): if i == 0: annotation_query.displayHeader(f) annotation_query.export(f) def annotateAuto(self): for annotation_query in self.annotation_queries: annotation_query.annotateAuto(self.iteration, self.label) def getManualAnnotations(self): for annotation_query in self.annotation_queries: annotation_query.getManualAnnotation(self.iteration) def checkAnnotationQueriesAnswered(self): for annotation_query in self.annotation_queries: if not annotation_query.checkAnswered(self.iteration): return False return True def getInstanceIds(self): return [annotation_query.instance_id for annotation_query in self.annotation_queries]
pastephens/pysal
pysal/spreg/probit.py
Python
bsd-3-clause
34,383
0.002501
"""Probit regression class and diagnostics.""" __author__ = "Luc Anselin [email protected], Pedro V. Amaral [email protected]" import numpy as np import numpy.linalg as la import scipy.optimize as op from scipy.stats import norm, chisqprob import scipy.sparse as SP import user_output as USER import summary_output as SUMMARY from utils import spdot, spbroadcast __all__ = ["Probit"] class BaseProbit(object): """ Probit class to do all the computations Parameters ---------- x : array nxk array of independent variables (assumed to be aligned with y) y : array nx1 array of dependent binary variable w : W PySAL weights instance or spatial weights sparse matrix aligned with y optim : string Optimization method. Default: 'newton' (Newton-Raphson). Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm) scalem : string Method to calculate the scale of the marginal effects. Default: 'phimean' (Mean of individual marginal effects) Alternative: 'xmean' (Marginal effects at variables mean) maxiter : int Maximum number of iterations until optimizer stops Attributes ---------- x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant y : array nx1 array of dependent variable betas : array kx1 array with estimated coefficients predy : array nx1 array of predicted y values n : int Number of observations k : int Number of variables vm : array Variance-covariance matrix (kxk) z_stat : list of tuples z statistic; each tuple contains the pair (statistic, p-value), where each is a float xmean : array Mean of the independent variables (kx1) predpc : float Percent of y correctly predicted logl : float Log-Likelihhod of the estimation scalem : string Method to calculate the scale of the marginal effects. scale : float Scale of the marginal effects. slopes : array Marginal effects of the independent variables (k-1x1) Note: Disregards the presence of dummies. slopes_vm : array Variance-covariance matrix of the slopes (k-1xk-1) LR : tuple Likelihood Ratio test of all coefficients = 0 (test statistics, p-value) Pinkse_error: float Lagrange Multiplier test against spatial error correlation. Implemented as presented in [Pinkse2004]_ KP_error : float Moran's I type test against spatial error correlation. Implemented as presented in [Kelejian2001]_ PS_error : float Lagrange Multiplier test against spatial error correlation. Implemented as presented in [Pinkse1998]_ warning : boolean if True Maximum number of iterations exceeded or gradient and/or function calls not changing. Examples -------- >>> import numpy as np >>> import pysal >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r') >>> y = np.array([dbf.by_col('CRIME')]).T >>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T >>> x = np.hstack((np.ones(y.shape),x)) >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read() >>> w.transform='r' >>> model = BaseProbit((y>40).astype(float), x, w=w) >>> np.around(model.betas, decimals=6) array([[ 3.353811], [-0.199653], [-0.029514]]) >>> np.around(model.vm, decimals=6) array([[ 0.852814, -0.043627, -0.008052], [-0.043627, 0.004114, -0.000193], [-0.008052, -0.000193, 0.00031 ]]) >>> tests = np.array([['Pinkse_error','KP_error','PS_error']]) >>> stats = np.array([[model.Pinkse_error[0],model.KP_erro
r[0],model.PS_error[0]]]) >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]]) >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6))) [['Pinkse_error' '3.131719' '0.076783'] ['KP_error' '1.721312' '0.085194'] ['PS_error' '2.558166'
'0.109726']] """ def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100): self.y = y self.x = x self.n, self.k = x.shape self.optim = optim self.scalem = scalem self.w = w self.maxiter = maxiter par_est, self.warning = self.par_est() self.betas = np.reshape(par_est[0], (self.k, 1)) self.logl = -float(par_est[1]) @property def vm(self): try: return self._cache['vm'] except AttributeError: self._cache = {} H = self.hessian(self.betas) self._cache['vm'] = -la.inv(H) except KeyError: H = self.hessian(self.betas) self._cache['vm'] = -la.inv(H) return self._cache['vm'] @vm.setter def vm(self, val): try: self._cache['vm'] = val except AttributeError: self._cache = {} self._cache['vm'] = val @property #could this get packaged into a separate function or something? It feels weird to duplicate this. def z_stat(self): try: return self._cache['z_stat'] except AttributeError: self._cache = {} variance = self.vm.diagonal() zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance) rs = {} for i in range(len(self.betas)): rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2) self._cache['z_stat'] = rs.values() except KeyError: variance = self.vm.diagonal() zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance) rs = {} for i in range(len(self.betas)): rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2) self._cache['z_stat'] = rs.values() return self._cache['z_stat'] @z_stat.setter def z_stat(self, val): try: self._cache['z_stat'] = val except AttributeError: self._cache = {} self._cache['z_stat'] = val @property def slopes_std_err(self): try: return self._cache['slopes_std_err'] except AttributeError: self._cache = {} self._cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal()) except KeyError: self._cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal()) return self._cache['slopes_std_err'] @slopes_std_err.setter def slopes_std_err(self, val): try: self._cache['slopes_std_err'] = val except AttributeError: self._cache = {} self._cache['slopes_std_err'] = val @property def slopes_z_stat(self): try: return self._cache['slopes_z_stat'] except AttributeError: self._cache = {} zStat = self.slopes.reshape( len(self.slopes),) / self.slopes_std_err rs = {} for i in range(len(self.slopes)): rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2) self._cache['s
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/aio/operations/_ddos_custom_policies_operations.py
Python
mit
20,544
0.004965
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class DdosCustomPoliciesOperations: """DdosCustomPoliciesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-07-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_i
nitial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def begin_delete( self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified DDoS custom policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. :type ddos_custom_policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, ddos_custom_policy_name=ddos_custom_policy_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def get( self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs: Any ) -> "_models.DdosCustomPolicy": """Gets information about the specified DDoS custom policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. 
:type ddos_custom_policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: DdosCustomPolicy, or the result of cls(response) :rtype: ~azure.mgmt.network.v20
JonathanRaiman/Dali
data/score_informed_transcription/midi/example_transpose_octave.py
Python
mit
1,038
0.010597
from .MidiOutFile import MidiOutFile from .MidiInFile import MidiI
nFile """ This is an example of the smallest possible type 0 midi file, where all the midi events are in the same track. """ class Transposer(MidiOutFile): "Transposes all notes by 1 octave" def _transp(self, ch, note): if ch != 9: # not the drums!
note += 12 if note > 127: note = 127 return note def note_on(self, channel=0, note=0x40, velocity=0x40): note = self._transp(channel, note) MidiOutFile.note_on(self, channel, note, velocity) def note_off(self, channel=0, note=0x40, velocity=0x40): note = self._transp(channel, note) MidiOutFile.note_off(self, channel, note, velocity) out_file = 'midiout/transposed.mid' midi_out = Transposer(out_file) #in_file = 'midiout/minimal_type0.mid' #in_file = 'test/midifiles/Lola.mid' in_file = 'test/midifiles/tennessee_waltz.mid' midi_in = MidiInFile(midi_out, in_file) midi_in.read()
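The Transposer._transp rule above, in isolation: shift every note except the drum channel up an octave and clamp to the 7-bit MIDI range. A small self-contained sketch; transpose_note is an illustrative name:

def transpose_note(channel, note, semitones=12):
    # Channel 9 carries percussion in General MIDI, so it is left alone;
    # anything else is shifted up and clamped to the valid 0-127 range.
    if channel == 9:
        return note
    return min(note + semitones, 127)

assert transpose_note(0, 60) == 72     # middle C up one octave
assert transpose_note(0, 120) == 127   # clamped at the top of the range
assert transpose_note(9, 36) == 36     # drum notes are unchanged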
Vladimir-Ivanov-Git/raw-packet
raw_packet/Scanners/nmap_scanner.py
Python
mit
5,743
0.002438
# region Description
"""
nmap_scanner.py: Scan local network with NMAP
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion

# region Import
from raw_packet.Utils.base import Base
import xml.etree.ElementTree as ET
import subprocess as sub
from tempfile import gettempdir
from os.path import isfile, join
from os import remove
from typing import Union, List, Dict, NamedTuple
from collections import namedtuple
# endregion

# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = '[email protected]'
__status__ = 'Development'
# endregion


# region Main class - NmapScanner
class NmapScanner:

    # region Variables
    _base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
    try:
        Info = namedtuple(typename='Info',
                          field_names='vendor, os, mac_address, ipv4_address, ports',
                          defaults=('', '', '', '', []))
    except TypeError:
        Info = namedtuple(typename='Info',
                          field_names='vendor, os, mac_address, ipv4_address, ports')
    # endregion

    # region Init
    def __init__(self, network_interface: str):
        self._your: Dict[str, Union[None, str]] = \
            self._base.get_interface_settings(interface_name=network_interface,
                                              required_parameters=['mac-address',
                                                                   'ipv4-address',
                                                                   'first-ipv4-address',
                                                                   'last-ipv4-address'])
        self.local_network: str = \
            self._your['first-ipv4-address'] + '-' + \
            self._your['last-ipv4-address'].split('.')[3]
        if self._base.get_platform().startswith('Darwin'):
            self._nmap_scan_result: str = '/tmp/nmap_scan.xml'
        else:
            self._nmap_scan_result: str = join(gettempdir(), 'nmap_scan.xml')
    # endregion

    # region Find devices in local network with nmap
    def scan(self, exit_on_failure: bool = True, quiet: bool = False) -> Union[None, List[NamedTuple]]:
        try:
            # region Variables
            network_devices: List[NamedTuple] = list()
            ipv4_address: str = ''
            mac_address: str = ''
            vendor: str = ''
            os: str = ''
            ports: List[int] = list()
            # endregion

            nmap_command: str = 'nmap ' + self.local_network + \
                                ' --open -n -O --osscan-guess -T5 -oX ' + self._nmap_scan_result
            if not quiet:
                self._base.print_info('Start nmap scan: ', nmap_command)
            if self._base.get_platform().startswith('Windows'):
                nmap_process = sub.Popen(nmap_command, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
            else:
                nmap_process = sub.Popen([nmap_command], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
            nmap_process.wait()

            assert isfile(self._nmap_scan_result), \
                'Not found nmap scan result file: ' + self._base.error_text(self._nmap_scan_result)

            nmap_report = ET.parse(self._nmap_scan_result)
            root_tree = nmap_report.getroot()
            for element in root_tree:
                try:
                    assert element.tag == 'host'
                    state = element.find('status').attrib['state']
                    assert state == 'up'

                    # Reset per-host fields so values from a previous host do not leak into this one
                    ipv4_address = ''
                    mac_address = ''
                    vendor = ''
                    os = ''
                    ports = list()

                    # region Address
                    for address in element.findall('address'):
                        if address.attrib['addrtype'] == 'ipv4':
                            ipv4_address = address.attrib['addr']
                        if address.attrib['addrtype'] == 'mac':
                            mac_address = address.attrib['addr'].lower()
                            try:
                                vendor = address.attrib['vendor']
                            except KeyError:
                                pass
                    # endregion

                    # region Open TCP ports
                    for ports_info in element.find('ports'):
                        if ports_info.tag == 'port':
                            ports.append(int(ports_info.attrib['portid']))
                    # endregion

                    # region OS
                    for os_info in element.find('os'):
                        if os_info.tag == 'osmatch':
                            try:
                                os = os_info.attrib['name']
                            except TypeError:
                                pass
                            break
                    # endregion

                    network_devices.append(self.Info(vendor=vendor, os=os, mac_address=mac_address,
                                                     ipv4_address=ipv4_address, ports=ports))
                except AssertionError:
                    pass

            remove(self._nmap_scan_result)
            assert len(network_devices) != 0, \
                'Could not find any devices on interface: ' + self._base.error_text(self._your['network-interface'])
            return network_devices

        except OSError:
            self._base.print_error('Something went wrong while trying to run ', 'nmap')
            if exit_on_failure:
                exit(2)

        except KeyboardInterrupt:
            self._base.print_info('Exit')
            exit(0)

        except AssertionError as Error:
            self._base.print_error(Error.args[0])
            if exit_on_failure:
                exit(1)

        return None
    # endregion
# endregion
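A short usage sketch of the scanner above (the interface name is an assumption, and Base(admin_only=True) means this has to run with root or administrator rights):

# Hypothetical driver for NmapScanner; 'eth0' is an assumed interface name.
if __name__ == '__main__':
    scanner = NmapScanner(network_interface='eth0')
    devices = scanner.scan(exit_on_failure=False, quiet=True) or []
    for device in devices:
        print(device.ipv4_address, device.mac_address, device.vendor, device.os, device.ports)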
googleads/google-ads-python
google/ads/googleads/v10/services/services/remarketing_action_service/transports/base.py
Python
apache-2.0
6,058
0.000495
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core  # type: ignore
from google.api_core import exceptions as core_exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import retry as retries  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.ads.googleads.v10.services.types import remarketing_action_service

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RemarketingActionServiceTransport(abc.ABC):
    """Abstract transport class for RemarketingActionService."""

    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    DEFAULT_HOST: str = "googleads.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(
                service_account.Credentials, "with_always_use_jwt_access"
            )
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.mutate_remarketing_actions: gapic_v1.method.wrap_method(
                self.mutate_remarketing_actions,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def mutate_remarketing_actions(
        self,
    ) -> Callable[
        [remarketing_action_service.MutateRemarketingActionsRequest],
        Union[
            remarketing_action_service.MutateRemarketingActionsResponse,
            Awaitable[
                remarketing_action_service.MutateRemarketingActionsResponse
            ],
        ],
    ]:
        raise NotImplementedError()


__all__ = ("RemarketingActionServiceTransport",)
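Concrete transports (gRPC, gRPC-AsyncIO) subclass this base and fill in the abstract pieces; a minimal illustrative fake, not the real transport, showing what a subclass has to provide:

# Illustrative only: an in-memory fake transport for tests, not part of the library.
class FakeRemarketingActionServiceTransport(RemarketingActionServiceTransport):
    def __init__(self, **kwargs):
        # Anonymous credentials keep the base __init__ from calling google.auth.default().
        kwargs.setdefault("credentials", ga_credentials.AnonymousCredentials())
        super().__init__(**kwargs)
        self._prep_wrapped_messages(DEFAULT_CLIENT_INFO)

    def close(self):
        pass  # nothing to release in the fake

    @property
    def mutate_remarketing_actions(self):
        # Return a callable matching the declared request/response types.
        def _call(request, timeout=None, metadata=None):
            return remarketing_action_service.MutateRemarketingActionsResponse()
        return _call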
physicalattraction/kerstpuzzel
src/Conundrum/key_cipher.py
Python
mit
1,290
0.000775
import math
import string

from Conundrum.utils import sanitize

letter_to_value = dict(zip('z' + string.ascii_lowercase, range(0, 27)))
value_to_letter = dict(zip(range(0, 27), 'z' + string.ascii_lowercase))


def encrypt(msg: str, key: str) -> str:
    msg = sanitize(msg)
    key = sanitize(key)
    repeat = int(math.ceil(len(msg) / len(key)))
    key = key * repeat
    return ''.join([value_to_letter[(letter_to_value[msg_letter] + letter_to_value[key_letter]) % 26]
                    for msg_letter, key_letter in zip(msg, key)])


def decrypt(msg: str, key: str) -> str:
    msg = sanitize(msg)
    key = sanitize(key)
    repeat = int(math.ceil(len(msg) / len(key)))
    key = key * repeat
    return ''.join([value_to_letter[(letter_to_value[msg_letter] - letter_to_value[key_letter]) % 26]
                    for msg_letter, key_letter in zip(msg, key)])


if __name__ == '__main__':
    # Used in Movies 1
    encrypted_msg = 'oape dhzoawx cz hny'
    guessed_key = 'plum scarlett green mustard'
    print(decrypt(encrypted_msg, guessed_key))

    # Used in Movies 3
    # decrypted_msg = 'metropolis'
    # film_key = 'Close Encounters Of The Third Kind'
    # print(encrypt(decrypted_msg, film_key))
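A quick round-trip check of the Vigenere-style pair above (illustrative; the exact strings depend on what Conundrum.utils.sanitize strips from the input):

# Hypothetical round trip: decrypt(encrypt(m, k), k) should return the sanitized message.
secret = encrypt('attack at dawn', 'lemon')
print(secret)
print(decrypt(secret, 'lemon'))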
jakubczaplicki/projecteuler
problem005.py
Python
mit
1,000
0.008
#!/usr/bin/env python
# pylint: disable=invalid-name
"""
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.

What is the smallest positive number that is evenly divisible by all
of the numbers from 1 to 20?
"""
import sys

from problembaseclass import ProblemBaseClass


class Problem5(ProblemBaseClass):
    """ @class Solution for Problem 5
        @brief
    """

    def __init__(self, range):
        self.result = None
        self.range = range

    def compute(self):
        notfound = True
        val = 0
        while (notfound):
            notfound = False
            val = val + 1
            # check every divisor from 1 up to and including self.range
            for n in range(1, self.range + 1):
                if (val % n):
                    notfound = True
        self.result = val


if __name__ == '__main__':
    problem = Problem5(10)
    problem.compute()
    print problem.result
    del problem

    problem = Problem5(20)
    problem.compute()
    print problem.result  # 232792560
    del problem
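Trial division works but slows down quickly as the range grows; the same answers drop out of a running least common multiple, since lcm(1..n) is by definition the smallest number every value in 1..n divides. A sketch, separate from the class above, with the gcd import hedged for Python 2/3:

try:
    from math import gcd
except ImportError:  # Python 2 fallback
    from fractions import gcd

def smallest_evenly_divisible(n):
    """Fold lcm over 1..n."""
    result = 1
    for k in range(2, n + 1):
        result = result * k // gcd(result, k)  # lcm(result, k)
    return result

# smallest_evenly_divisible(10) -> 2520
# smallest_evenly_divisible(20) -> 232792560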
elric/virtaal-debian-snapshots
virtaal/plugins/autocompletor.py
Python
gpl-2.0
10,937
0.002834
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""Contains the AutoCompletor class."""

import gobject
import re
try:
    from collections import defaultdict
except ImportError:
    class defaultdict(dict):
        def __init__(self, default_factory=lambda: None):
            self.__factory = default_factory

        def __getitem__(self, key):
            if key in self:
                return super(defaultdict, self).__getitem__(key)
            else:
                return self.__factory()

from virtaal.controllers.baseplugin import BasePlugin
from virtaal.views.widgets.textbox import TextBox


class AutoCompletor(object):
    """
    Does auto-completion of registered words in registered widgets.
    """

    wordsep_re = re.compile(r'\W+', re.UNICODE)

    MAX_WORDS = 10000
    DEFAULT_COMPLETION_LENGTH = 4  # The default minimum length of a word that may
                                   # be auto-completed.

    def __init__(self, main_controller, word_list=[], comp_len=DEFAULT_COMPLETION_LENGTH):
        """Constructor.

            @type  word_list: iterable
            @param word_list: A list of words that should be auto-completed."""
        self.main_controller = main_controller
        assert isinstance(word_list, list)
        self.comp_len = comp_len
        self._word_list = []
        self._word_freq = defaultdict(lambda: 0)
        self.add_words(word_list)
        self.widgets = set()

    def add_widget(self, widget):
        """Add a widget to the list of widgets to do auto-completion for."""
        if widget in self.widgets:
            return  # Widget already added

        if isinstance(widget, TextBox):
            self._add_text_box(widget)
            return

        raise ValueError("Widget type %s not supported." % (type(widget)))

    def add_words(self, words, update=True):
        """Add a word or words to the list of words to auto-complete."""
        for word in words:
            if self.isusable(word):
                self._word_freq[word] += 1
        if update:
            self._update_word_list()

    def add_words_from_units(self, units):
        """Collect all words from the given translation units to use for
            auto-completion.

            @type  units: list
            @param units: The translation units to collect words from.
            """
        for unit in units:
            target = unit.target
            if not target:
                continue
            self.add_words(self.wordsep_re.split(target), update=False)
            if len(self._word_freq) > self.MAX_WORDS:
                break

        self._update_word_list()

    def autocomplete(self, word):
        for w in self._word_list:
            if w.startswith(word):
                return w, w[len(word):]
        return None, u''

    def clear_widgets(self):
        """Release all registered widgets from the spell of auto-completion."""
        for w in set(self.widgets):
            self.remove_widget(w)

    def clear_words(self):
        """Remove all registered words; effectively turns off auto-completion."""
        # Reset the frequency map and the sorted word list.
        self._word_freq = defaultdict(lambda: 0)
        self._word_list = []

    def isusable(self, word):
        """Returns a value indicating if the given word should be kept as a
        suggestion for autocomplete."""
        return len(word) > self.comp_len + 2

    def remove_widget(self, widget):
        """Remove a widget (currently only L{TextBox}s are accepted) from
            the list of widgets to do auto-correction for.
            """
        if isinstance(widget, TextBox) and widget in self.widgets:
            self._remove_textbox(widget)

    def remove_words(self, words):
        """Remove a word or words from the list of words to auto-complete."""
        if isinstance(words, basestring):
            del self._word_freq[words]
            self._word_list.remove(words)
        else:
            for w in words:
                try:
                    del self._word_freq[w]
                    self._word_list.remove(w)
                except KeyError:
                    pass

    def _add_text_box(self, textbox):
        """Add the given L{TextBox} to the list of widgets to do auto-
            correction on."""
        if not hasattr(self, '_textbox_insert_ids'):
            self._textbox_insert_ids = {}
        handler_id = textbox.connect('text-inserted', self._on_insert_text)
        self._textbox_insert_ids[textbox] = handler_id
        self.widgets.add(textbox)

    def _on_insert_text(self, textbox, text, offset, elem):
        if not isinstance(text, basestring) or self.wordsep_re.match(text):
            return
        # We are only interested in single character insertions, otherwise we
        # react similarly for paste and similar events
        if len(text.decode('utf-8')) > 1:
            return

        prefix = unicode(textbox.get_text(0, offset) + text)
        postfix = unicode(textbox.get_text(offset))
        buffer = textbox.buffer

        # Quick fix to check that we don't autocomplete in the middle of a word.
        right_lim = len(postfix) > 0 and postfix[0] or ' '
        if not self.wordsep_re.match(right_lim):
            return

        lastword = self.wordsep_re.split(prefix)[-1]

        if len(lastword) >= self.comp_len:
            completed_word, word_postfix = self.autocomplete(lastword)
            if completed_word == lastword:
                return

            if completed_word:
                # Updating of the buffer is deferred until after this signal
                # and its side effects are taken care of. We abuse
                # gobject.idle_add for that.
                insert_offset = offset + len(text)

                def suggest_completion():
                    textbox.handler_block(self._textbox_insert_ids[textbox])
                    #logging.debug("textbox.suggestion = {'text': u'%s', 'offset': %d}" % (word_postfix, insert_offset))
                    textbox.suggestion = {'text': word_postfix, 'offset': insert_offset}
                    textbox.handler_unblock(self._textbox_insert_ids[textbox])

                    sel_iter_start = buffer.get_iter_at_offset(insert_offset)
                    sel_iter_end = buffer.get_iter_at_offset(insert_offset + len(word_postfix))
                    buffer.select_range(sel_iter_start, sel_iter_end)

                    return False

                gobject.idle_add(suggest_completion, priority=gobject.PRIORITY_HIGH)

    def _remove_textbox(self, textbox):
        """Remove the given L{TextBox} from the list of widgets to do
            auto-correction on.
            """
        if not hasattr(self, '_textbox_insert_ids'):
            return
        # Disconnect the "insert-text" event handler
        textbox.disconnect(self._textbox_insert_ids[textbox])

        self.widgets.remove(textbox)

    def _update_word_list(self):
        """Update and sort found words according to frequency."""
        wordlist = self._word_freq.items()
        wordlist.sort(key=lambda x: x[1], reverse=True)
        self._word_list = [items[0] for items in wordlist]


class Plugin(BasePlugin):
    description = _('Automatically complete long words while you type')
    display_name = _('AutoCompletor')
    version = 0.1

    # INITIALIZERS #
    def __init__(self, internal_name, main_controller):
        self.internal_name = internal_name
        self.main_controller = main_controller