| column | type | range |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–231 |
| language | string | 1 class (Python) |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | lengths 0–8.16k |
| middle | string | lengths 3–512 |
| suffix | string | lengths 0–8.17k |
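Each record below pairs these metadata fields with one source file split into contiguous `prefix`, `middle`, and `suffix` spans. A minimal sketch of consuming the rows, assuming the split is published for the Hugging Face `datasets` library (the dataset identifier here is a placeholder, not the real one):

```python
# A hedged sketch, not an official loader. "user/python-fim-corpus" is a
# placeholder dataset id; substitute the actual repository identifier.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train")

for row in ds:
    # prefix + middle + suffix are contiguous spans of one original file,
    # so concatenating them rebuilds the full source text.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(source))
```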

blorenz/indie-film-rentals | indiefilmrentals/products/processors.py | Python | bsd-3-clause | 169 | 0.017751
from indiefilmrentals.products.models import *
from shop_simplecategories.models import *
def categories(request):
    return {'categories': Category.objects.all()}

owlabs/incubator-airflow | docs/conf.py | Python | apache-2.0 | 17,740 | 0.001522
# -*- coding: utf-8 -*-
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from typing import Dict
import airflow
from airflow.configuration import default_config_yaml
try:
import sphinx_airflow_theme # pylint: disable=unused-import
airflow_theme_is_available = True
except ImportError:
airflow_theme_is_available = False
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'mesos',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# Hack to allow pieces of the code to behave differently while the docs
# are being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
]
autodoc_default_options = {
'show-inheritance': True,
'members': True
}
jinja_contexts = {
'config_ctx': {"configs": default_config_yaml()}
}
viewcode_follow_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
# copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_api/airflow/_vendor',
'_api/airflow/api',
'_api/airflow/bin',
'_api/airflow/config_templates',
'_api/airflow/configuration',
'_api/airflow/contrib/auth',
'_api/airflow/contrib/example_dags',
'_api/airflow/contrib/index.rst',
'_api/airflow/contrib/kubernetes',
'_api/airflow/contrib/task_runner',
'_api/airflow/contrib/utils',
'_api/airflow/dag',
'_api/airflow/default_login',
'_api/airflow/example_dags',
'_api/airflow/exceptions',
'_api/airflow/index.rst',
'_api/airflow/jobs',
    '_api/airflow/lineage',
'_api/airflow/logging_config',
'_api/airflow/macros',
'_api/airflow/migrations',
'_api/airflow/plugins_manager',
'_api/airflow/security',
'_api/airflow/serialization',
'_api/airflow/settings',
'_api/airflow/sentry',
'_api/airflow/stats',
'_api/airflow/task',
'_api/airflow/ti_deps',
'_api/airflow/utils',
'_api/airflow/version',
'_api/airflow/www',
'_api/airflow/www_rbac',
'_api/main',
    'autoapi_templates',
'howto/operator/gcp/_partials',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
# google-cloud-python
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
    'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),

NelisVerhoef/scikit-learn | doc/sphinxext/gen_rst.py | Python | bsd-3-clause | 40,198 | 0.000697
"""
Example generation for scikit-learn.
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance

smallyear/linuxLearn | salt/salt/proxy/junos.py | Python | apache-2.0 | 1,727 | 0.001158
# -*- coding: utf-8 -*-
'''
Interface with a Junos device via proxy-minion.
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import logging
# Import 3rd-party libs
import jnpr.junos
import jnpr.junos.utils
import jnpr.junos.utils.config
import json
HAS_JUNOS = True
__proxyenabled__ = ['junos']
thisproxy = {}
log = logging.getLogger(__name__)
def init(opts):
'''
Open the connection to the Junos device, login, and bind to the
Resource class
'''
log.debug('Opening connection to junos')
thisproxy['conn'] = jnpr.junos.Device(user=opts['proxy']['username'],
host=opts['proxy']['host'],
password=opts['proxy']['passwd'])
thisproxy['conn'].open()
thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config)
def conn():
return thisproxy['conn']
def facts():
return thisproxy['conn'].facts
def refresh():
return thisproxy['conn'].facts_refresh()
def proxytype():
'''
Returns the name of this proxy
'''
return 'junos'
def id(opts):
'''
Returns a unique ID for this proxy minion
'''
return thisproxy['conn'].facts['hostname']
def ping():
'''
Ping? Pong!
'''
    return thisproxy['conn'].connected
def shutdown(opts):
'''
This is called when the proxy-minion is exiting to make sure the
connection to the device is closed cleanly.
'''
log.debug('Proxy module {0} shutting down!!'.format(opts['id']))
try:
thisproxy['conn'].close()
except Exception:
pass
def rpc():
    return json.dumps(thisproxy['conn'].rpc.get_software_information())

Learning-from-our-past/Kaira | qtgui/xmlImport.py | Python | gpl-2.0 | 4,469 | 0.005147
from PyQt5.QtCore import pyqtSlot, QThread, pyqtSignal
import os
from PyQt5.QtWidgets import QFileDialog, QProgressDialog, QMessageBox
from PyQt5.QtCore import pyqtSlot, QObject
from books.soldiers import processData
import route_gui
from lxml import etree
import multiprocessing
import math
class XmlImport(QObject):
threadUpdateSignal = pyqtSignal(int, int, name="progressUpdate")
threadExceptionSignal = pyqtSignal(object, name="exceptionInProcess")
threadResultsSignal = pyqtSignal(dict, name="results")
finishedSignal = pyqtSignal(dict, str, name="processFinished")
def __init__(self, parent):
super(XmlImport, self).__init__(parent)
self.parent = parent
self.processCount = 0
self.result = {}
self.thread = QThread(parent = self.parent)
self.threadUpdateSignal.connect(self._updateProgressBarInMainThread)
self.threadExceptionSignal.connect(self._loadingFailed)
self.threadResultsSignal.connect(self._processFinished)
self.filepath = ""
def importOne(self, xmlEntry):
if self.processor is not None:
result = self.processor.extractOne(xmlEntry)
return result
else:
return None
@pyqtSlot()
def openXMLFile(self):
filename = QFileDialog.getOpenFileName(self.parent, "Open xml-file containing the data to be analyzed.",
".", "Person data files (*.xml);;All files (*)")
if filename[0] != "":
self.filepath = filename[0]
self.parent.setWindowTitle("Kaira " + filename[0])
self._analyzeOpenedXml(filename)
def _analyzeOpenedXml(self, file):
self.progressDialog = QProgressDialog(self.parent)
self.progressDialog.setCancelButton(None)
self.progressDialog.setLabelText("Extracting provided datafile...")
self.progressDialog.open()
self.progressDialog.setValue(0)
self.file = file
self.thread.run = self._runProcess
self.thread.start()
def _runProcess(self):
try:
xmlDataDocument = self._getXMLroot(self.file[0])
            # TODO: Read the XML metadata
try:
            # TODO: Multiprocessing support?
self.processor = route_gui.Router.get_processdata_class(xmlDataDocument.attrib["bookseries"])(self._processUpdateCallback)
result = self.processor.startExtractionProcess(xmlDataDocument, self.file[0])
self.threadResultsSignal.emit(result)
except KeyError:
raise MetadataException()
except Exception as e:
if "DEV" in os.environ and os.environ["DEV"]:
raise e
else:
print(e)
self.threadExceptionSignal.emit(e)
@pyqtSlot(int, int)
def _updateProgressBarInMainThread(self, i, max):
self.progressDialog.setRange(0, max)
self.progressDialog.setValue(i)
@pyqtSlot(object)
def _loadingFailed(self, e):
self.progressDialog.cancel()
import pymongo
errMessage = "Error in data-file. Extraction failed. Is the xml valid and in utf-8 format? More info: "
if isinstance(e, pymongo.errors.ServerSelectionTimeoutError):
errMessage = "Couldn't connect to database. Try going to '/mongodb/data/db' in application directory and deleting 'mongod.lock' file and restart application. More info: "
msgbox = QMessageBox()
        msgbox.information(self.parent, "Extraction failed", errMessage + str(e))
msgbox.show()
@pyqtSlot(dict)
def _processFinished(self, result):
self.result = result
self.finishedSignal.emit(self.result, self.filepath)
def _processUpdateCallback(self, i, max):
self.threadUpdateSignal.emit(i, max)
def _getXMLroot(self, filepath):
#read the data in XML-format to be processed
        parser = etree.XMLParser(encoding="utf-8")
tree = etree.parse(filepath, parser=parser) #ET.parse(filepath)
return tree.getroot()
class MetadataException(Exception):
def __init__(self):
self.msg = "ERROR: The document doesn't contain bookseries attribute in the beginning of the file. Couldn't import. Try " \
"to generate new xml-file from the source ocr-text or add the missing attribute to the file manually."
def __str__(self):
return repr(self.msg)

semiirs/ai_project | common/experience.py | Python | gpl-3.0 | 462 | 0.002165
import datetime
import json
class Experience:
def __init__(self, date, event):
self._date = date
self._event = event
@classmethod
def reconstruct_from_db_data_event(cls, date, event):
event = json.loads(event)
date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
exp = Experience(date, event)
return exp
def time_from_this_event(self, event):
return self._date - event._date
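A hedged round-trip sketch for the class above; the stored formats follow `reconstruct_from_db_data_event`, and the event payload is invented for illustration:

```python
import datetime
import json

# Serialize the way the DB stores these values, then rebuild the
# Experience from the stored strings. The payload is hypothetical.
stored_event = json.dumps({"kind": "observation", "value": 3})
stored_date = datetime.datetime(2020, 1, 1, 12, 0, 0, 500).strftime("%Y-%m-%d %H:%M:%S.%f")
exp = Experience.reconstruct_from_db_data_event(stored_date, stored_event)
```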

google/gnxi | oc_config_validate/oc_config_validate/models/macsec.py | Python | apache-2.0 | 391,144 | 0.007626
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class yc_config_openconfig_macsec__macsec_mka_policies_policy_config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-macsec - based on the path /macsec/mka/policies/policy/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration of the MKA policy
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__key_server_priority','__macsec_cipher_suite','__confidentiality_offset','__delay_protection','__include_icv_indicator','__sak_rekey_interval','__sak_rekey_on_live_peer_loss','__use_updated_eth_header',)
_yang_name = 'config'
_yang_namespace = 'http://openconfig.net/yang/macsec'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='string', is_config=True)
self.__key_server_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(16), is_leaf=True, yang_name="key-server-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='uint8', is_config=True)
self.__macsec_cipher_suite = YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'GCM_AES_128': {}, 'GCM_AES_256': {}, 'GCM_AES_XPN_128': {}, 'GCM_AES_XPN_256': {}},)), is_leaf=False, yang_name="macsec-cipher-suite", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='macsec-types:macsec-cipher-suite', is_config=True)
self.__confidentiality_offset = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'0_BYTES': {}, '30_BYTES': {}, '50_BYTES': {}},), default=six.text_type("0_BYTES"), is_leaf=True, yang_name="confidentiality-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='macsec-types:confidentiality-offset', is_config=True)
self.__delay_protection = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="delay-protection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='boolean', is_config=True)
self.__include_icv_indicator = YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="include-icv-indicator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='boolean', is_config=True)
self.__sak_rekey_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['0', '30..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="sak-rekey-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='uint32', is_config=True)
self.__sak_rekey_on_live_peer_loss = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="sak-rekey-on-live-peer-loss", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='boolean', is_config=True)
self.__use_updated_eth_header = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="use-updated-eth-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['macsec', 'mka', 'policies', 'policy', 'config']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /macsec/mka/policies/policy/config/name (string)
YANG Description: Name of the MKA policy.
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /macsec/mka/policies/policy/config/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
YANG Description: Name of the MKA policy.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=six.text_t

dmlc/tvm | python/tvm/relay/testing/mobilenet.py | Python | apache-2.0 | 7,444 | 0.000672
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Port of NNVM version of MobileNet to Relay.
"""
# pylint: disable=invalid-name
from tvm import relay
from . import layers
from .init import create_workload
def conv_block(
data,
name,
channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
epsilon=1e-5,
layout="NCHW",
):
"""Helper function to construct conv_bn-relu"""
# convolution + bn + relu
conv = layers.conv2d(
data=data,
channels=channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
name=name + "_conv",
)
bn = layers.batch_norm_infer(data=conv, epsilon=epsilon, name=name + "_bn")
act = relay.nn.relu(data=bn)
return act
def separable_conv_block(
data,
name,
depthwise_channels,
pointwise_channels,
kernel_size=(3, 3),
downsample=False,
padding=(1, 1),
epsilon=1e-5,
layout="NCHW",
dtype="float32",
):
"""Helper function to get a separable conv block"""
if downsample:
strides = (2, 2)
else:
strides = (1, 1)
# depthwise convolution + bn + relu
if layout == "NCHW":
        wshape = (depthwise_channels, 1) + kernel_size
elif layout == "NHWC":
wshape = kernel_size + (depthwise_channels, 1)
else:
        raise ValueError("Invalid layout: " + layout)
bn_axis = layout.index("C")
weight = relay.var(name + "_weight", shape=wshape, dtype=dtype)
conv1 = layers.conv2d(
data=data,
weight=weight,
channels=depthwise_channels,
groups=depthwise_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout, True),
name=name + "_depthwise_conv1",
)
bn1 = layers.batch_norm_infer(data=conv1, epsilon=epsilon, axis=bn_axis, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
# pointwise convolution + bn + relu
conv2 = layers.conv2d(
data=act1,
channels=pointwise_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
name=name + "_conv2",
)
bn2 = layers.batch_norm_infer(data=conv2, epsilon=epsilon, axis=bn_axis, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
return act2
def mobile_net(
num_classes=1000,
data_shape=(1, 3, 224, 224),
dtype="float32",
alpha=1.0,
is_shallow=False,
layout="NCHW",
):
"""Function to construct a MobileNet"""
data = relay.var("data", shape=data_shape, dtype=dtype)
body = conv_block(data, "conv_block_1", int(32 * alpha), strides=(2, 2), layout=layout)
body = separable_conv_block(
body, "separable_conv_block_1", int(32 * alpha), int(64 * alpha), layout=layout, dtype=dtype
)
body = separable_conv_block(
body,
"separable_conv_block_2",
int(64 * alpha),
int(128 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_3",
int(128 * alpha),
int(128 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_4",
int(128 * alpha),
int(256 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_5",
int(256 * alpha),
int(256 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_6",
int(256 * alpha),
int(512 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
if is_shallow:
body = separable_conv_block(
body,
"separable_conv_block_7",
int(512 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_8",
int(1024 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
else:
for i in range(7, 12):
body = separable_conv_block(
body,
"separable_conv_block_%d" % i,
int(512 * alpha),
int(512 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_12",
int(512 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_13",
int(1024 * alpha),
int(1024 * alpha),
layout=layout,
dtype=dtype,
)
pool = relay.nn.global_avg_pool2d(data=body, layout=layout)
flatten = relay.nn.batch_flatten(data=pool)
weight = relay.var("fc_weight")
bias = relay.var("fc_bias")
fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)
fc = relay.nn.bias_add(fc, bias)
softmax = relay.nn.softmax(data=fc)
return relay.Function(relay.analysis.free_vars(softmax), softmax)
def get_workload(
batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
):
"""Get benchmark workload for mobilenet
Parameters
----------
batch_size : int, optional
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape, cooperate with layout
dtype : str, optional
The data type
layout : str, optional
The data layout of image_shape and the operators
cooperate with image_shape
Returns
-------
mod : tvm.IRModule
The relay module that contains a MobileNet network.
params : dict of str to NDArray
The parameters.
"""
data_shape = tuple([batch_size] + list(image_shape))
net = mobile_net(
num_classes=num_classes,
data_shape=data_shape,
dtype=dtype,
alpha=1.0,
is_shallow=False,
layout=layout,
)
return create_workload(net)
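A hedged usage sketch for the workload builder above, assuming a TVM installation with Relay available:

```python
# Build the Relay module and randomly initialized parameters for a
# batch-1 MobileNet, then print the network in Relay text form.
mod, params = get_workload(batch_size=1, num_classes=1000, layout="NCHW")
print(mod["main"])
```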

thanm/devel-scripts | normalize-dwarf-objdump.py | Python | apache-2.0 | 10,730 | 0.014073
#!/usr/bin/python3
"""Filter to normalize/canonicalize objdump --dwarf=info dumps.
Reads stdin, normalizes / canonicalizes and/or strips the output of objdump
--dwarf to make it easier to "diff".
Support is provided for rewriting absolute offsets within the dump to relative
offsets. A chunk like
<2><2d736>: Abbrev Number: 5 (DW_TAG_variable)
<2d737> DW_AT_name : oy
<2d73a> DW_AT_decl_line : 23
<2d73b> DW_AT_location : 0x15eee8 (location list)
<2d73f> DW_AT_type : <0x2f1f2>
Would be rewritten as
<2><0>: Abbrev Number: 5 (DW_TAG_variable)
<0> DW_AT_name : oy
<0> DW_AT_decl_line : 23
<0> DW_AT_location : ... (location list)
<0> DW_AT_type : <0x10>
You can also request that all offsets and PC info be stripped, although that
can obscure some important differences. Abstract origin references are tracked
and annotated (unless disabled).
"""
import getopt
import os
import re
import sys
import script_utils as u
# Input and output file (if not specified, defaults to stdin/stdout)
flag_infile = None
flag_outfile = None
# Perform normalization
flag_normalize = True
# Compile units to be included in dump.
flag_compunits = {}
# Strip offsets if true
flag_strip_offsets = False
# Strip hi/lo PC and location lists
flag_strip_pcinfo = False
# Annotate abstract origin refs
flag_annotate_abstract = True
# Strip these
pcinfo_attrs = {"DW_AT_low_pc": 1, "DW_AT_high_pc": 1}
# Untracked DW refs
untracked_dwrefs = {}
# Line buffer
linebuf = None
linebuffered = False
#......................................................................
# Regular expressions to match:
# Begin-DIE preamble
bdiere = re.compile(r"^(\s*)\<(\d+)\>\<(\S+)\>\:(.*)$")
bdiezre = re.compile(r"^\s*Abbrev Number\:\s+0\s*$")
bdiebodre = re.compile(r"^\s*Abbrev Number\:\s+\d+\s+\(DW_TAG_(\S+)\)\s*$")
# Within-DIE regex
indiere = re.compile(r"^(\s*)\<(\S+)\>(\s+)(DW_AT_\S+)(\s*)\:(.*)$")
indie2re = re.compile(r"^(\s*)\<(\S+)\>(\s+)(Unknown\s+AT\s+value)(\s*)\:(.*)$")
# For grabbing dwarf ref from attr value
absore = re.compile(r"^\s*\<\S+\>\s+DW_AT_\S+\s*\:\s*\<0x(\S+)\>.*$")
# Attr value dwarf offset
attrdwoffre = re.compile(r"^(.*)\<0x(\S+)\>(.*)$")
def compute_reloff(absoff, origin):
"""Compute relative offset from absolute offset."""
oabs = int(absoff, 16)
if not flag_normalize:
return oabs
odec = int(origin, 16)
delta = oabs - odec
return delta
def abstorel(val, diestart):
"""Convert absolute to relative DIE offset."""
# FIXME: this will not handle backwards refs; that would
# require multiple passes.
m1 = attrdwoffre.match(val)
if m1:
absref = m1.group(2)
if absref in diestart:
val = re.sub(r"<0x%s>" % absref, r"<0x%x>" % diestart[absref], val)
u.verbose(3, "abs %s converted to rel %s" % (absref, val))
return (0, val)
return (1, absref)
return (2, None)
def munge_attrval(attr, oval, diestart):
"""Munge attr value."""
# Convert abs reference to rel reference.
# FIXME: this will not handle backwards refs; that would
# require multiple passes.
code, val = abstorel(oval, diestart)
if code == 1:
absref = val
if absref in untracked_dwrefs:
val = untracked_dwrefs[absref]
else:
n = len(untracked_dwrefs)
if flag_normalize:
unk = (" <untracked %d>" % (n+1))
else:
unk = (" <untracked 0x%s>" % absref)
untracked_dwrefs[absref] = unk
val = unk
if code == 2:
val = oval
if flag_strip_pcinfo:
if attr in pcinfo_attrs:
val = "<stripped>"
return val
def read_line(inf):
"""Read
|
an input line."""
global linebuffered
global linebuf
if linebuffered:
linebuffered = False
u.verbose(3, "buffered line is %s" % linebuf)
return linebuf
line = inf.readline()
u.verbose(3, "line is %s" % line.rstrip())
return line
def unread_line(line):
"""Unread an input line."""
global linebuffered
global linebuf
u.verbose(3, "unread_line on %s" % line.rstrip())
if linebuffered:
u.error("internal error: multiple line unread")
linebuffered = True
linebuf = line
def read_die(inf, outf):
"""Reads in and returns the next DIE."""
lines = []
indie = False
while True:
line = read_line(inf)
if not line:
break
m1 = bdiere.match(line)
if not indie:
if m1:
lines.append(line)
indie = True
continue
outf.write(line)
else:
if m1:
unread_line(line)
break
m2 = indiere.match(line)
if not m2:
m2 = indie2re.match(line)
if not m2:
unread_line(line)
break
lines.append(line)
u.verbose(2, "=-= DIE read:")
for line in lines:
u.verbose(2, "=-= %s" % line.rstrip())
return lines
def emit_die(lines, outf, origin, diename, diestart):
"""Emit body of DIE."""
# First line
m1 = bdiere.match(lines[0])
if not m1:
u.error("internal error: first line of DIE "
"should match bdiere: %s" % lines[0])
sp = m1.group(1)
depth = m1.group(2)
absoff = m1.group(3)
rem = m1.group(4)
off = compute_reloff(absoff, origin)
if flag_strip_offsets:
outf.write("%s<%s>:%s\n" % (sp, depth, rem))
else:
outf.write("%s<%s><%0x>:%s\n" % (sp, depth, off, rem))
# Remaining lines
for line in lines[1:]:
m2 = indiere.match(line)
if not m2:
m2 = indie2re.match(line)
if not m2:
u.error("internal error: m2 match failed on attr line")
sp1 = m2.group(1)
absoff = m2.group(2)
sp2 = m2.group(3)
attr = m2.group(4)
sp3 = m2.group(5)
rem = m2.group(6)
addend = ""
off = compute_reloff(absoff, origin)
u.verbose(3, "attr is %s" % attr)
# Special sauce if abs origin.
if attr == "DW_AT_abstract_origin":
m3 = absore.match(line)
if m3:
absoff = m3.group(1)
reloff = compute_reloff(absoff, origin)
if reloff in diename:
addend = "// " + diename[reloff]
else:
u.verbose(2, "absore() failed on %s\n", line)
# Post-process attr value
rem = munge_attrval(attr, rem, diestart)
# Emit
if flag_strip_offsets:
outf.write("%s%s%s:%s%s%s\n" % (sp1, sp2, attr,
sp3, rem, addend))
else:
outf.write("%s<%0x>%s%s:%s%s%s\n" % (sp1, off, sp2,
attr, sp3, rem, addend))
def attrval(lines, tattr):
"""Return the specified attr for this DIE (or empty string if no name)."""
for line in lines[1:]:
m2 = indiere.match(line)
if not m2:
m2 = indie2re.match(line)
if not m2:
u.error("attr match failed for %s" % line)
attr = m2.group(4)
if attr == tattr:
rem = m2.group(6)
return rem.strip()
return ""
def perform_filt(inf, outf):
"""Read inf and filter contents to outf."""
# Records DIE starts: hex string => new offset
diestart = {}
# Maps rel DIE offset to name. Note that not all DIEs have names.
diename = {}
# Origin (starting absolute offset)
origin = None
# Set to true if output is filtered off
filtered = False
if flag_compunits:
u.verbose(1, "Selected compunits:")
for cu in sorted(flag_compunits):
u.verbose(1, "%s" % cu)
# Read input
while True:
dielines = read_die(inf, outf)
if not dielines:
break
# Process starting line of DIE
line1 = dielines[0]
m1 = bdiere.match(line1)
if not m1:
u.error("internal error: first line of DIE should match bdiere")
absoff = m1.group(3)
rem = m1.group(4)
if not origin:
u.verbose(2, "origin set to %s" % absoff)
origin = absoff
off = compute_reloff(absoff, origin)
diestart[absoff] = off
# Handle zero terminators.
if bdiezre.match(rem):
if not filtered:
emit_die(dielines, outf, origin, diename, diestart)
continue
# See what flavor of DIE this is to adjust filtering.
m2 = bdiebodre.match(rem)
if not m2:
u.error("bdiebodre/bdiezre match failed on: '%s'" % rem)
tag = m2.group(1)
u.verbose(2, "=-= tag = %s" % tag)
if flag_compun

spektom/incubator-airflow | tests/providers/zendesk/hooks/test_zendesk.py | Python | apache-2.0 | 5,046 | 0.000396
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from zdesk import RateLimitError
from airflow.providers.zendesk.hooks.zendesk import ZendeskHook
class TestZendeskHook(unittest.TestCase):
@mock.patch("airflow.providers.zendesk.hooks.zendesk.time")
def test_sleeps_for_correct_interval(self, mocked_time):
sleep_time = 10
# To break out of the otherwise infinite tries
mocked_time.sleep = mock.Mock(side_effect=ValueError, return_value=3)
conn_mock = mock.Mock()
mock_response = mock.Mock()
        mock_response.headers.get.return_value = sleep_time
        conn_mock.call = mock.Mock(
side_effect=RateLimitError(msg="some message",
code="some code",
response=mock_response))
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_conn = mock.Mock(return_value=conn_mock)
with self.assertRaises(ValueError):
zendesk_hook.call("some_path", get_all_pages=False)
mocked_time.sleep.assert_called_once_with(sleep_time)
@mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
def test_returns_single_page_if_get_all_pages_false(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something',
'path': []})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=False)
mock_call.assert_called_once_with("path", None)
@mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
def test_returns_multiple_pages_if_get_all_pages_true(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something',
'path': []})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=True)
assert mock_call.call_count == 2
@mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
def test_zdesk_is_inited_correctly(self, mock_zendesk):
conn_mock = mock.Mock()
conn_mock.host = "conn_host"
conn_mock.login = "conn_login"
conn_mock.password = "conn_pass"
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_connection = mock.Mock(return_value=conn_mock)
zendesk_hook.get_conn()
mock_zendesk.assert_called_once_with(zdesk_url='https://conn_host', zdesk_email='conn_login',
zdesk_password='conn_pass', zdesk_token=True)
@mock.patch("airflow.providers.zendesk.hooks.zendesk.Zendesk")
def test_zdesk_sideloading_works_correctly(self, mock_zendesk):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something',
'tickets': [],
'users': [],
'groups': []})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
results = zendesk_hook.call(".../tickets.json",
query={"include": "users,groups"},
get_all_pages=False,
side_loading=True)
assert results == {'groups': [], 'users': [], 'tickets': []}

PnEcrins/GeoNature-atlas | atlas/modeles/repositories/vmSearchTaxonRepository.py | Python | gpl-3.0 | 1,550 | 0.000647
# -*- coding:utf-8 -*-
from sqlalchemy import desc, func
from atlas.modeles.entities.vmSearchTaxon import VmSearchTaxon
def listeTaxons(session):
"""
revoie un tableau de dict :
label = nom latin et nom francais concatene, value = cd_ref
|
TODO Fonction inutile à supprimer !!!
"""
req = session.query(VmSearchTaxon.search_name, VmSearchTaxon.cd_ref).all()
taxonList = list()
for r in req:
temp = {"label": r[0], "value": r[1]}
taxonList.append(temp)
return taxonList
def listeTaxonsSearch(session, search, limit=50):
"""
Recherche dans la VmSearchTaxon en ilike
Utilisé pour l'autocomplétion de la recherche de taxon
:query SQLA_Session session
:query st
|
r search : chaine de charactere pour la recherche
:query int limit: limite des résultats
**Returns:**
list: retourne un tableau {'label':'str': 'value': 'int'}
label = search_name
value = cd_ref
"""
req = session.query(
VmSearchTaxon.search_name,
VmSearchTaxon.cd_ref,
func.similarity(VmSearchTaxon.search_name, search).label("idx_trgm"),
).distinct()
search = search.replace(" ", "%")
req = (
req.filter(VmSearchTaxon.search_name.ilike("%" + search + "%"))
.order_by(desc("idx_trgm"))
.order_by(VmSearchTaxon.cd_ref == VmSearchTaxon.cd_nom)
.limit(limit)
)
data = req.all()
return [{"label": d[0], "value": d[1]} for d in data]

ryfx/modrana | core/backports/urllib3_python25/util.py | Python | gpl-3.0 | 11,071 | 0.000542
# urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from base64 import b64encode
from socket import error as SocketError
from hashlib import md5, sha1
from binascii import hexlify, unhexlify
import sys
from core.backports.collections import namedtuple
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
slots = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
auth, url = url.split('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url[1:].split(']', 1)
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
like resolve_cert_req

btimby/fulltext | fulltext/backends/__hwp.py | Python | mit | 544 | 0
from __future__ import absolute_import
from fulltext.backends import __html
from fulltext.util import run, assert_cmd_exists
from fulltext.util import BaseBackend
def cmd(path, **kwargs):
cmd = ['hwp5proc', 'xml']
    cmd.extend([path])
return cmd
def to_text_with_backend(html):
return __html.handle_fobj(html)
class Backend(BaseBackend):
def check(self, title):
assert_cmd_exists('hwp5proc')
def handle_path(self, path):
out = self.decode(run(*cmd(path)))
return to_text_with_backend(out)

nttks/edx-platform | common/lib/xmodule/xmodule/seq_module.py | Python | agpl-3.0 | 13,629 | 0.001761
"""
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import json
import logging
from pkg_resources import resource_string
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean, Dict
from xblock.fragment import Fragment
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['problem', 'video']
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
has_children = True
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
# Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
individual_start_days = Integer(
help=_("Number of days from the base date to the chapter starts"),
scope=Scope.settings
)
individual_start_hours = Integer(
help=_("Number of hours from the base date to the chapter starts"),
scope=Scope.settings
)
individual_start_minutes = Integer(
help=_("Number of minutes from the base date to the chapter starts"),
scope=Scope.settings
)
individual_due_days = Integer(
help=_("Number of days from the base date to the due"),
scope=Scope.settings
)
individual_due_hours = Integer(
help=_("Number of hours from the base date to the due"),
scope=Scope.settings
)
individual_due_minutes = Integer(
help=_("Number of minutes from the base date to the due"),
scope=Scope.settings
)
progress_restriction = Dict(
help=_("Settings for progress restriction"),
default={
"type": "No Restriction",
},
scope=Scope.settings
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
@property
def is_proctored_exam(self):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
return self.is_proctored_enabled
@is_proctored_exam.setter
def is_proctored_exam(self, value):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('credit')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
''' Layout module which lays out content in a temporal sequence
'''
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
try:
self.position = int(self.system.position)
except (ValueError, TypeError):
# Check for https://openedx.atlassian.net/browse/LMS-6496
warnings.warn(
"Sequential position cannot be converted to an integer: {pos!r}".format(
pos=self.system.position,
),
RuntimeWarning,
)
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
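    # Example (illustrative): an AJAX POST dispatched as
    #   handle_ajax('goto_position', {'position': u'3'})
    # stores position 3 and returns '{"success": true}'; a missing,
    # non-numeric, or non-positive value falls back to position 1.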
def student_view(self, context):
# If we're rendering this sequence, but no position is set yet,
# default the position to the first element
if self.position is None:
self.position = 1
## Returns a set of all types of all sub-children
contents = []
fragment = Fragment()
# Is this sequential part of a timed or proctored exam?
if self.is_time_limited:
            view_html = self._time_limited_student_view(context)
# Do we have an alternate rendering
# from the edx_proctoring subsystem?
if view_html:
fragment.add_content(view_html)
return fragment
for child in self.get_display_items():
progress = child.get_progress()
rendered_child = child.render(STUDENT_VIEW, context)
            fragment.add_frag_resources(rendered_child)
# `titles` is a list of titles to inject into the sequential tooltip display.
# We omit any blank titles to avoid blank lines in the tooltip display.
titles = [title.strip() for title in child.get_content_titles() if title.strip()]
childinfo = {
'content': rendered_child.content,
'title': "\n".join(titles),
'page_title': titles[0] if titles else '',
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
                'type': child.get_icon_class(),
            }
            contents.append(childinfo)
|
orf/django-rest-framework-jwt
|
setup.py
|
Python
|
mit
| 3,404
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
with open(os.path.join(package, '__init__.py'), 'rb') as init_py:
src = init_py.read().decode('utf-8')
return re.search("__version__ = ['\"]([^'\"]+)['\"]", src).group(1)
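# Sketch: for a package whose __init__.py contains the line
#   __version__ = '1.11.0'
# get_version('rest_framework_jwt') returns '1.11.0' (the version string
# here is illustrative, not an actual release).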
name = 'djangorestframework-jwt'
version = get_version('rest_framework_jwt')
package = 'rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/GetBlimp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = '[email protected]'
license = 'MIT'
install_requires = [
'PyJWT>=1.4.0,<2.0.0',
]
def read(*paths):
"""
Build a file path from paths and return the contents.
"""
with open(os.path.join(*paths), 'r') as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
    return {package: filepaths}
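# Sketch of what the two helpers above return for a layout like (illustrative):
#   rest_framework_jwt/__init__.py
#   rest_framework_jwt/locale/messages.po
# get_packages('rest_framework_jwt')     -> ['rest_framework_jwt']
# get_package_data('rest_framework_jwt') -> {'rest_framework_jwt':
#                                            ['locale/messages.po']}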
if sys.argv[-1] == 'publish':
if os.system('pip freeze | grep wheel'):
print('wheel not installed.\nUse `pip install wheel`.\nExiting.')
sys.exit()
if os.system('pip freeze | grep twine'):
print('twine not installed.\nUse `pip install twine`.\nExiting.')
sys.exit()
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('djangorestframework_jwt.egg-info')
print('You probably want to also tag the version now:')
print(" git tag -a {0} -m 'version {0}'".format(version))
print(' git push --tags')
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
long_description=read('README.rst'),
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
]
)
|
titilambert/home-assistant
|
homeassistant/components/template/switch.py
|
Python
|
apache-2.0
| 6,023
| 0.000332
|
"""Support for switches which integrates with other components."""
import logging
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SWITCHES,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
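# Minimal configuration sketch this schema accepts (illustrative values;
# the on/off actions are ordinary Home Assistant script sequences):
#
#   PLATFORM_SCHEMA({
#       'platform': 'template',
#       'switches': {
#           'skylight': {
#               'value_template': '{{ is_state("sensor.skylight", "on") }}',
#               'turn_on': [{'service': 'script.open_skylight'}],
#               'turn_off': [{'service': 'script.close_skylight'}],
#           },
#       },
#   })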
async def _async_create_entities(hass, config):
"""Create the Template switches."""
switches = []
for device, device_config in config[CONF_SWITCHES].items():
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
on_action = device_config[ON_ACTION]
off_action = device_config[OFF_ACTION]
unique_id = device_config.get(CONF_UNIQUE_ID)
switches.append(
SwitchTemplate(
hass,
device,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
)
)
return switches
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template switches."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class SwitchTemplate(TemplateEntity, SwitchEntity, RestoreEntity):
"""Representation of a Template switch."""
def __init__(
self,
hass,
device_id,
        friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
):
"""Initialize the Template switch."""
        super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
domain = __name__.split(".")[-2]
self._on_script = Script(hass, on_action, friendly_name, domain)
self._off_script = Script(hass, off_action, friendly_name, domain)
self._state = False
self._unique_id = unique_id
@callback
def _update_state(self, result):
super()._update_state(result)
if isinstance(result, TemplateError):
self._state = None
return
self._state = result.lower() in ("true", STATE_ON)
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template is None:
# restore state after startup
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state == STATE_ON
# no need to listen for events
else:
self.add_template_attribute(
"_state", self._template, None, self._update_state
)
await super().async_added_to_hass()
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this switch."""
return self._unique_id
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_turn_on(self, **kwargs):
"""Fire the on action."""
await self._on_script.async_run(context=self._context)
if self._template is None:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Fire the off action."""
await self._off_script.async_run(context=self._context)
if self._template is None:
self._state = False
self.async_write_ha_state()
@property
def assumed_state(self):
"""State is assumed, if no template given."""
return self._template is None
|
CyrusRoshan/lolbuddy
|
setup.py
|
Python
|
mit
| 674
| 0.001484
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
version = '0.2.4'
setup(
name='lolbuddy',
version=version,
description='a cli tool to update league of legends itemsets and ability order from champion.gg',
author='Cyrus Roshan',
author_email='[email protected]',
license='MIT',
keywords=['lol', 'league', 'league of legends', 'item', 'ability'],
    url='https://github.com/CyrusRoshan/lolbuddy',
packages=find_packages(),
package_data={},
install_requires=[
'requests-futures >= 0.9.5',
],
entry_points={
'console_scripts': [
'lolbuddy=lolbuddy:main',
],
},
)
|
KernelAnalysisPlatform/KlareDbg
|
static2/ida/remoteobj.py
|
Python
|
gpl-3.0
| 16,409
| 0.016942
|
#!/usr/bin/env python
# remoteobj v0.4, best yet!
# TODO: This will get wrecked by recursive sets/lists/dicts; need a more picklish method.
# TODO: Dict/sets/lists should get unpacked to wrappers that are local for read-only access,
# but update the remote for write access. Note that __eq__ will be an interesting override.
import marshal
import struct
import socket
import sys, exceptions, errno, traceback
from types import CodeType, FunctionType
from os import urandom
from hashlib import sha1
DEBUG = False
class Proxy(object):
def __init__(self, conn, info, _hash=None, parent=None):
object.__setattr__(self, '_proxyconn', conn)
object.__setattr__(self, '_proxyinfo', info)
object.__setattr__(self, '_proxyparent', parent)
def __getattribute__(self, attr):
t = object.__getattribute__(self, '_proxyinfo').getattr(attr)
if t:
# We need to retain parent for garbage collection purposes.
return Proxy(object.__getattribute__(self, '_proxyconn'), t, parent=self)
else:
return object.__getattribute__(self, '_proxyconn').get(self, attr)
def __getattr__(self, attr):
return object.__getattribute__(self, '__getattribute__')(attr)
def __setattr__(self, attr, val):
object.__getattribute__(self, '_proxyconn').set(self, attr, val)
def __delattr__(self, attr):
object.__getattribute__(self, '_proxyconn').callattr(self, '__delattr__', (attr,), {})
def __call__(self, *args, **kwargs):
return object.__getattribute__(self, '_proxyconn').call(self, args, kwargs)
# GC isn't useful anyway...
"""
def __del__(self):
if object.__getattribute__(self, '_proxyparent') is not None: return
if not marshal or not struct or not socket: return # Reduce spurious messages when quitting python
object.__getattribute__(self, '_proxyconn').delete(self)
"""
# hash and repr need to be handled specially, due to hash(type) != type.__hash__()
# (and the same for repr). Incidentally, we'll cache the hash.
def __hash__(self):
info = object.__getattribute__(self, '_proxyinfo')
if info.proxyhash is None:
info.proxyhash = object.__getattribute__(self, '_proxyconn').hash(self)
return info.proxyhash
def __repr__(self):
return object.__getattribute__(self, '_proxyconn').repr(self)
# Special methods don't always go through __getattribute__, so redirect them all there.
    for special in ('__str__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
                    '__cmp__', '__rcmp__', '__nonzero__', '__unicode__', '__len__',
                    '__getitem__', '__setitem__', '__delitem__', '__iter__', '__reversed__',
                    '__contains__', '__getslice__', '__setslice__', '__delslice__',
                    '__add__', '__sub__', '__mul__', '__floordiv__', '__mod__', '__divmod__',
                    '__pow__', '__lshift__', '__rshift__', '__and__', '__xor__', '__or__',
                    '__div__', '__truediv__', '__radd__', '__rsub__', '__rmul__', '__rdiv__',
                    '__rtruediv__', '__rfloordiv__', '__rmod__', '__rdivmod__', '__rpow__',
                    '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__',
                    '__iadd__', '__isub__', '__imul__', '__idiv__', '__itruediv__', '__ifloordiv__',
                    '__imod__', '__ipow__', '__ilshift__', '__irshift__', '__iand__', '__ixor__',
                    '__ior__', '__neg__', '__pos__', '__abs__', '__invert__', '__complex__',
                    '__int__', '__long__', '__float__', '__oct__', '__hex__', '__index__',
                    '__coerce__', '__enter__', '__exit__'):
        exec "def {special}(self, *args, **kwargs):\n\treturn object.__getattribute__(self, '_proxyconn').callattr(self, '{special}', args, kwargs)".format(special=special) in None, None
class ProxyInfo(object):
@classmethod
def isPacked(self, obj):
return type(obj) == tuple and len(obj) == 7 and obj[:2] == (StopIteration, Ellipsis)
@classmethod
def fromPacked(self, obj):
return self(obj[2], obj[3], obj[4] or '', obj[5], obj[6] or ())
def __init__(self, endpoint, remoteid, attrpath = '', proxyhash = None, lazyattrs = (), dbgnote = ''):
self.endpoint = endpoint
self.remoteid = remoteid
self.attrpath = attrpath
self.proxyhash = proxyhash
self.lazyattrs = set(lazyattrs)
self.dbgnote = dbgnote
def __repr__(self):
return 'ProxyInfo'+repr((self.endpoint, hex(self.remoteid))) + ('' if not self.dbgnote else ' <'+self.dbgnote+'>')
def packed(self):
return (StopIteration, Ellipsis, self.endpoint, self.remoteid, self.attrpath or None, self.proxyhash, None) # Don't pack lazyattrs
def getattr(self, attr):
if attr not in self.lazyattrs: return None
path = self.attrpath+'.'+attr if self.attrpath else attr
return type(self)(self.endpoint, self.remoteid, attrpath = path, lazyattrs = self.lazyattrs)
class Connection(object):
def __init__(self, sock, secret, endpoint = urandom(8).encode('hex')):
self.sock = sock
self.secret = secret
self.endpoint = endpoint
self.garbage = []
def __del__(self):
try: self.sock.close()
except: pass
def sendmsg(self, msg):
x = marshal.dumps(msg)
self.sock.sendall(struct.pack('<I', len(x)))
self.sock.sendall(x)
def recvmsg(self):
x = self.sock.recv(4)
if len(x) == 4:
y = struct.unpack('<I', x)[0]
z = self.sock.recv(y)
if len(z) == y:
return marshal.loads(z)
raise socket.error(errno.ECONNRESET, 'The socket was closed while receiving a message.')
# Note: must send after non-info_only packing, or objects will be left with +1 retain count in self.vended
def pack(self, val, info_only = False, isDictKey = False):
if type(val) in (bool, int, long, float, complex, str, unicode) or val is None or val is StopIteration or val is Ellipsis:
return val
elif type(val) == tuple:
return tuple(self.pack(i, info_only) for i in val)
elif type(val) == list:
return [self.pack(i, info_only) for i in val]
elif type(val) == set:
return {self.pack(i, info_only) for i in val}
elif type(val) == frozenset:
return frozenset(self.pack(i, info_only) for i in val)
elif type(val) == dict:
return {self.pack(k, info_only, isDictKey = True):self.pack(v, info_only) for k,v in val.iteritems()}
elif type(val) == Proxy:
return object.__getattribute__(val, '_proxyinfo').packed()
elif type(val) == CodeType:
return val
else:
if not info_only:
self.vended.setdefault(id(val), [val, 0])[1] += 1
t = hash(val) if isDictKey else None
return ProxyInfo(self.endpoint, id(val), proxyhash=t).packed()
def unpack(self, val, info_only = False):
if ProxyInfo.isPacked(val):
info = ProxyInfo.fromPacked(val)
try:
if self.endpoint == info.endpoint:
try:
obj = self.vended[info.remoteid][0]
except KeyError:
if not info_only:
raise Exception("Whoops, "+self.endpoint+" can't find reference to object "+repr(info.remoteid))
else:
info.dbgnote = 'missing local reference'
return info
if info.attrpath:
for i in info.attrpath.split('.'):
obj = getattr(obj, i)
return obj
else:
return Proxy(self, info) if not info_only else info
except:
if not info_only: raise
info.dbgnote = 'While unpacking, ' + ''.join(traceback.format_exc())
return info
elif type(val) == tuple:
return tuple(self.unpack(i, info_only) for i in val)
elif type(val) == list:
return [self.unpack(i, info_only) for i in val]
elif type(val) == set:
return {self.unpack(i, info_only) for i in val}
elif type(val) == frozenset:
return frozenset(self.unpack(i, info_only) for i in val)
elif type(val) == dict:
return {self.unpack(k, info_only):self.unpack(v, info_only) for k,v in val.iteritems()}
elif type(val) == CodeType:
return val
else:
return val
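    # Round-trip sketch (illustrative): pack() replaces any non-marshallable
    # object with a marshal-safe ProxyInfo tuple and retains it in
    # self.vended; unpack() resolves that tuple back to the local object on
    # the originating endpoint, or wraps it in a Proxy on the peer.
    #
    #   wire = conn.pack(some_obj)   # -> (StopIteration, Ellipsis, ...)
    #   conn.unpack(wire)            # -> some_obj again (same endpoint)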
def connectProxy(self):
self.vended = {}
self.sock.sendall('yo')
chal = urandom(20)
self.sock.sendall(chal)
if self.sock.recv(20) != sha1(self.secret+chal).digest():
print >> sys.stderr, "Server failed challenge!"
return None
self.sock.sendall(sha1(self.secret+self.sock.recv(20)).digest())
return
|
dtudares/hello-world
|
yardstick/yardstick/vTC/apexlake/experimental_framework/api.py
|
Python
|
apache-2.0
| 6,381
| 0
|
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import experimental_framework.benchmarking_unit as b_unit
from experimental_framework import heat_template_generation, common
class FrameworkApi(object):
@staticmethod
def init():
"""
Initializes the Framework
:return: None
"""
common.init(api=True)
# @staticmethod
# def get_available_test_cases():
# """
# Returns a list of available test cases.
# This list include eventual modules developed by the user, if any.
# Each test case is returned as a string that represents the full name
# of the test case and that can be used to get more information
# calling get_test_case_features(test_case_name)
#
# :return: list of strings
# """
# return b_unit.BenchmarkingUnit.get_available_test_cases()
@staticmethod
def get_test_case_features(test_case):
"""
Returns a list of features (description, requested parameters,
allowed values, etc.) for a specified test case.
:param test_case: name of the test case (string)
The string represents the test case and can be
obtained calling "get_available_test_cases()"
method.
:return: dict() containing the features of the test case
"""
if not isinstance(test_case, str):
raise ValueError('The provided test_case parameter has to be '
'a string')
benchmark = b_unit.BenchmarkingUnit.get_required_benchmarks(
[test_case])[0]
return benchmark.get_features()
@staticmethod
def execute_framework(
test_cases,
iterations,
heat_template,
heat_template_parameters,
deployment_configuration,
openstack_credentials
):
"""
Executes the framework according the inputs
:param test_cases: Test cases to be ran on the workload
(dict() of dict())
Example:
test_case = dict()
test_case['name'] = 'module.Class'
test_case['params'] = dict()
test_case['params']['throughput'] = '1'
test_case['params']['vlan_sender'] = '1007'
test_case['params']['vlan_receiver'] = '1006'
test_cases = [test_case]
:param iterations: Number of cycles to be executed (int)
:param heat_template: (string) File name of the heat template of the
workload to be deployed. It contains the
parameters to be evaluated in the form of
#parameter_name. (See heat_templates/vTC.yaml as
example).
:param heat_template_parameters: (dict) Parameters to be provided
as input to the heat template.
See http://docs.openstack.org/developer/heat/
template_guide/hot_guide.html - section
"Template input parameters" for further info.
        :param deployment_configuration: (dict[string] = list(strings))
Dictionary of parameters representing the
deployment configuration of the workload
The key is a string corresponding to the name of
the parameter, the value is a list of strings
representing the value to be assumed by a specific
param.
The parameters are user defined: they have to
correspond to the place holders (#parameter_name)
specified in the heat template.
:return: dict() Containing results
"""
common.init(api=True)
# Input Validation
common.InputValidation.validate_os_credentials(openstack_credentials)
credentials = openstack_credentials
msg = 'The provided heat_template does not exist'
if common.RELEASE == 'liberty':
heat_template = 'vTC_liberty.yaml'
else:
heat_template = 'vTC.yaml'
template = "{}{}".format(common.get_template_dir(), heat_template)
common.InputValidation.validate_file_exist(template, msg)
msg = 'The provided iterations variable must be an integer value'
common.InputValidation.validate_integer(iterations, msg)
msg = 'The provided heat_template_parameters variable must be a ' \
'dictionary'
common.InputValidation.validate_dictionary(heat_template_parameters,
msg)
log_msg = "Generation of all the heat templates " \
"required by the experiment"
common.LOG.info(log_msg)
heat_template_generation.generates_templates(heat_template,
deployment_configuration)
benchmarking_unit = \
b_unit.BenchmarkingUnit(
heat_template, credentials, heat_template_parameters,
iterations, test_cases)
try:
common.LOG.info("Benchmarking Unit initialization")
benchmarking_unit.initialize()
common.LOG.info("Benchmarking Unit Running")
results = benchmarking_unit.run_benchmarks()
finally:
common.LOG.info("Benchmarking Unit Finalization")
benchmarking_unit.finalize()
return results
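# Call sketch (illustrative; every value below is a placeholder):
#
#   FrameworkApi.init()
#   results = FrameworkApi.execute_framework(
#       test_cases=[{'name': 'module.Class',
#                    'params': {'throughput': '1'}}],
#       iterations=1,
#       heat_template='vTC.yaml',
#       heat_template_parameters={},
#       deployment_configuration={'vnic_type': ['normal', 'direct']},
#       openstack_credentials=credentials_dict,
#   )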
|
czpython/django-cms
|
cms/cms_toolbars.py
|
Python
|
bsd-3-clause
| 32,747
| 0.003237
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404
from django.db.models import Q
from django.utils.translation import override as force_language, ugettext_lazy as _
from cms.api import get_page_draft, can_change_page
from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING
from cms.models import Placeholder, Title, Page, PageType, StaticPlaceholder
from cms.toolbar.items import ButtonList, TemplateItem, REFRESH_PAGE
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils import get_language_from_request, page_permissions
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_tuple, get_language_dict
from cms.utils.page_permissions import (
user_can_change_page,
user_can_delete_page,
user_can_publish_page,
)
from cms.utils.urlutils import add_url_parameters, admin_reverse
from menus.utils import DefaultLanguageChanger
# Identifiers for search
ADMIN_MENU_IDENTIFIER = 'admin-menu'
LANGUAGE_MENU_IDENTIFIER = 'language-menu'
TEMPLATE_MENU_BREAK = 'Template Menu Break'
PAGE_MENU_IDENTIFIER = 'page'
PAGE_MENU_ADD_IDENTIFIER = 'add_page'
PAGE_MENU_FIRST_BREAK = 'Page Menu First Break'
PAGE_MENU_SECOND_BREAK = 'Page Menu Second Break'
PAGE_MENU_THIRD_BREAK = 'Page Menu Third Break'
PAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break'
PAGE_MENU_LAST_BREAK = 'Page Menu Last Break'
HISTORY_MENU_BREAK = 'History Menu Break'
MANAGE_PAGES_BREAK = 'Manage Pages Break'
ADMIN_SITES_BREAK = 'Admin Sites Break'
ADMINISTRATION_BREAK = 'Administration Break'
CLIPBOARD_BREAK = 'Clipboard Break'
USER_SETTINGS_BREAK = 'User Settings Break'
ADD_PAGE_LANGUAGE_BREAK = "Add page language Break"
REMOVE_PAGE_LANGUAGE_BREAK = "Remove page language Break"
COPY_PAGE_LANGUAGE_BREAK = "Copy page language Break"
TOOLBAR_DISABLE_BREAK = 'Toolbar disable Break'
SHORTCUTS_BREAK = 'Shortcuts Break'
@toolbar_pool.register
class PlaceholderToolbar(CMSToolbar):
"""
Adds placeholder edit buttons if placeholders or static placeholders are detected in the template
"""
def populate(self):
self.page = get_page_draft(self.request.current_page)
def post_template_populate(self):
super(PlaceholderToolbar, self).post_template_populate()
self.add_wizard_button()
def add_wizard_button(self):
from cms.wizards.wizard_pool import entry_choices
title = _("Create")
if self.page:
user = self.request.user
page_pk = self.page.pk
disabled = len(list(entry_choices(user, self.page))) == 0
else:
page_pk = ''
disabled = True
url = '{url}?page={page}&language={lang}&edit'.format(
url=reverse("cms_wizard_create"),
page=page_pk,
lang=self.toolbar.site_language,
)
self.toolbar.add_modal_button(title, url,
side=self.toolbar.RIGHT,
disabled=disabled,
on_close=REFRESH_PAGE)
@toolbar_pool.register
class BasicToolbar(CMSToolbar):
"""
Basic Toolbar for site and languages menu
"""
page = None
_language_menu = None
_admin_menu = None
def init_from_request(self):
self.page = get_page_draft(self.request.current_page)
def populate(self):
if not self.page:
self.init_from_request()
self.clipboard = self.request.toolbar.user_settings.clipboard
self.add_admin_menu()
self.add_language_menu()
def add_admin_menu(self):
if not self._admin_menu:
self._admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name)
# Users button
self.add_users_button(self._admin_menu)
# sites menu
sites_queryset = Site.objects.order_by('name')
if len(sites_queryset) > 1:
sites_menu = self._admin_menu.get_or_create_menu('sites', _('Sites'))
sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist'))
sites_menu.add_break(ADMIN_SITES_BREAK)
for site in sites_queryset:
sites_menu.add_link_item(site.name, url='http://%s' % site.domain,
active=site.pk == self.current_site.pk)
            # admin
            self._admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index'))
self._admin_menu.add_break(ADMINISTRATION_BREAK)
# cms users settings
self._admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change'))
self._admin_menu.add_break(USER_SETTINGS_BREAK)
# clipboard
if self.toolbar.edit_mode_active:
# True if the clipboard exists and there's plugins in it.
clipboard_is_bound = self.toolbar.clipboard_plugin
self._admin_menu.add_link_item(_('Clipboard...'), url='#',
extra_classes=['cms-clipboard-trigger'],
disabled=not clipboard_is_bound)
self._admin_menu.add_link_item(_('Clear clipboard'), url='#',
extra_classes=['cms-clipboard-empty'],
disabled=not clipboard_is_bound)
self._admin_menu.add_break(CLIPBOARD_BREAK)
# Disable toolbar
self._admin_menu.add_link_item(_('Disable toolbar'), url='?%s' % get_cms_setting('CMS_TOOLBAR_URL__DISABLE'))
self._admin_menu.add_break(TOOLBAR_DISABLE_BREAK)
self._admin_menu.add_link_item(_('Shortcuts...'), url='#',
extra_classes=('cms-show-shortcuts',))
self._admin_menu.add_break(SHORTCUTS_BREAK)
# logout
self.add_logout_button(self._admin_menu)
def add_users_button(self, parent):
User = get_user_model()
if User in admin.site._registry:
opts = User._meta
if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))):
user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name))
parent.add_sideframe_item(_('Users'), url=user_changelist_url)
def add_logout_button(self, parent):
# If current page is not published or has view restrictions user is redirected to the home page:
# * published page: no redirect
# * unpublished page: redirect to the home page
# * published page with login_required: redirect to the home page
# * published page with view permissions: redirect to the home page
page_is_published = self.page and self.page.is_published(self.current_lang)
if page_is_published and not self.page.login_required:
anon_can_access = page_permissions.user_can_view_page(
user=AnonymousUser(),
page=self.page,
site=self.current_site,
)
else:
anon_can_access = False
on_success = self.toolbar.REFRESH_PAGE if anon_can_access else '/'
# We'll show "Logout Joe Bloggs" if the name fields in auth.User are completed, else "Logout jbloggs". If
# anything goes wrong, it'll just be "Logout".
user_name = self.get_username()
logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout')
parent.add_ajax_item(
logout_menu_text,
action=admin_reverse('logout'),
active=True,
on_success=on_success,
method='GET',
)
def add_language_menu(self):
if settings.USE_I18N and not self._language_menu:
            self._language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'))
|
dgilland/yummly.py
|
yummly/models.py
|
Python
|
mit
| 8,795
| 0
|
"""Yummly data models.
"""
from inspect import getargspec
class Storage(dict):
"""An object that is like a dict except `obj.foo` can be used in addition
to `obj['foo']`.
Raises Attribute/Key errors for missing references.
>>> o = Storage(a=1, b=2)
>>> assert(o.a == o['a'])
>>> assert(o.b == o['b'])
>>> o.a = 2
>>> print o['a']
2
>>> x = o.copy()
>>> assert(x == o)
>>> del o.a
>>> print o.a
Traceback (most recent call last):
...
AttributeError: a
>>> print o['a']
Traceback (most recent call last):
...
KeyError: 'a'
>>> o._get_fields()
Traceback (most recent call last):
...
TypeError: ...
"""
def __getattr__(self, key):
if key in self:
return self[key]
else:
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
if key in self:
del self[key]
else:
raise AttributeError(key)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))
@classmethod
def _get_fields(cls):
"""Return class' __init__() args excluding `self`.
Assumes that calling class has actually implemented __init__(),
otherwise, this will fail.
"""
# For classes, first element of args == self which we don't want.
return getargspec(cls.__init__).args[1:]
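# Sketch of _get_fields() on a subclass with named __init__ arguments
# (illustrative; the models below all take **kargs, so it would return []):
#
#   class Point(Storage):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#   Point._get_fields()  # -> ['x', 'y']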
##################################################
# Get recipe related models
##################################################
class Recipe(Storage):
"""Recipe model."""
def __init__(self, **kargs):
self.id = kargs['id']
self.name = kargs['name']
self.rating = kargs.get('rating')
self.totalTime = kargs.get('totalTime') or 0
self.totalTimeInSeconds = kargs.get('totalTimeInSeconds') or 0
self.ingredientLines = kargs.get('ingredientLines') or []
self.numberOfServings = kargs.get('numberOfServings')
self.yields = kargs.get('yields')
self.attributes = kargs.get('attributes') or {}
self.source = RecipeSource(**(kargs.get('source') or {}))
self.attribution = Attribution(**(kargs.get('attribution') or {}))
# NOTE: For `flavors`, the keys are returned capitalized so normalize
# to lowercase since search results' flavor keys are lowercase.
flavors = kargs.get('flavors') or {}
self.flavors = Flavors(**{key.lower(): value
for key, value in flavors.iteritems()})
self.nutritionEstimates = [NutritionEstimate(**nute)
for nute in (kargs.get('nutritionEstimates')
or [])]
self.images = [RecipeImages(**imgs)
for imgs in (kargs.get('images') or [])]
class Flavors(Storage):
"""Flavors model."""
def __init__(self, **kargs):
self.salty = kargs.get('salty')
self.meaty = kargs.get('meaty')
self.piquant = kargs.get('piquant')
self.bitter = kargs.get('bitter')
self.sour = kargs.get('sour')
self.sweet = kargs.get('sweet')
class Attribution(Storage):
"""Attribution model."""
def __init__(self, **kargs):
self.html = kargs.get('html')
self.url = kargs.get('url')
self.text = kargs.get('text')
self.logo = kargs.get('logo')
class NutritionEstimate(Storage):
"""Nutrition estimate model."""
def __init__(self, **kargs):
self.attribute = kargs.get('attribute')
self.description = kargs.get('description')
self.value = kargs.get('value')
self.unit = NutritionUnit(**(kargs.get('unit') or {}))
class NutritionUnit(Storage):
"""Nutrition unit model."""
def __init__(self, **kargs):
self.id = kargs['id']
self.abbreviation = kargs.get('abbreviation')
self.plural = kargs.get('plural')
self.pluralAbbreviation = kargs.get('pluralAbbreviation')
class RecipeImages(Storage):
"""Recipe images model."""
def __init__(self, **kargs):
self.hostedLargeUrl = kargs.get('hostedLargeUrl')
self.hostedSmallUrl = kargs.get('hostedSmallUrl')
class RecipeSource(Storage):
"""Recipe source model."""
def __init__(self, **kargs):
self.sourceRecipeUrl = kargs.get('sourceRecipeUrl')
self.sourceSiteUrl = kargs.get('sourceSiteUrl')
self.sourceDisplayName = kargs.get('sourceDisplayName')
##################################################
# Search related models
##################################################
class SearchResult(Storage):
"""Search result model."""
def __init__(self, **kargs):
self.totalMatchCount = kargs['totalMatchCount']
self.criteria = SearchCriteria(**kargs['criteria'])
self.facetCounts = kargs['facetCounts']
self.matches = [SearchMatch(**match) for match in kargs['matches']]
self.attribution = Attribution(**kargs['attribution'])
class SearchMatch(Storage):
"""Search match model."""
def __init__(self, **kargs):
self.id = kargs['id']
self.recipeName = kargs['recipeName']
self.rating = kargs.get('rating')
self.totalTimeInSeconds = kargs.get('totalTimeInSeconds', 0)
self.ingredients = kargs.get('ingredients')
self.flavors = Flavors(**(kargs.get('flavors') or {}))
self.smallImageUrls = kargs.get('smallImageUrls')
self.sourceDisplayName = kargs.get('sourceDisplayName', '')
self.attributes = kargs.get('attributes')
class SearchCriteria(Storage):
"""Search criteria model."""
def __init__(self, **kargs):
self.maxResults = kargs.get('maxResults')
self.resultsToSkip = kargs.get('resultsToSkip')
self.terms = kargs.get('terms')
self.requirePictures = kargs.get('requirePictures')
self.facetFields = kargs.get('facetFields')
self.allowedIngredients = kargs.get('allowedIngredients')
self.excludedIngredients = kargs.get('excludedIngredients')
self.attributeRanges = kargs.get('attributeRanges', {})
self.allowedAttributes = kargs.get('allowedAttributes', [])
self.excludedAttributes = kargs.get('excludedAttributes', [])
self.allowedDiets = kargs.get('allowedDiets', [])
self.nutritionRestrictions = kargs.get('nutritionRestrictions', {})
##################################################
# Metadata related models
##################################################
class MetaAttribute(Storage):
"""Base class for metadata attributes."""
def __init__(self, **kargs):
self.id = kargs['id']
self.description = kargs['description']
self.localesAvailableIn = kargs['localesAvailableIn']
self.name = kargs['name']
self.searchValue = kargs['searchValue']
self.type = kargs['type']
class MetaHoliday(MetaAttribute):
"""Holiday metadata model."""
pass
class MetaCuisine(MetaAttribute):
"""Cuisine metadata model."""
pass
class MetaCourse(MetaAttribute):
"""Course metadata model."""
pass
class MetaTechnique(MetaAttribute):
"""Technique metadata model."""
pass
class MetaSource(Storage):
"""Source metadata model."""
def __init__(self, **kargs):
self.faviconUrl = kargs['faviconUrl']
self.description = kargs['description']
self.searchValue = kargs['searchValue']
class MetaBrand(Storage):
"""Brand metadata model."""
def __init__(self, **kargs):
self.faviconUrl = kargs['faviconUrl']
self.description = kargs['description']
self.searchValue = kargs['searchValue']
class MetaDiet(Storage):
"""Diet metadata model."""
def __init__(self, **kargs):
self.id = kargs['id']
self.localesAvailableIn = kargs['localesAvailableIn']
self.longDescription = kargs['longDescription']
        self.searchValue = kargs['searchValue']
        self.shortDescription = kargs['shortDescription']
        self.type = kargs['type']
|
verma-varsha/zulip
|
zerver/lib/digest.py
|
Python
|
apache-2.0
| 10,690
| 0.001403
|
from __future__ import absolute_import
from typing import Any, Callable, Dict, Iterable, List, Set, Tuple, Text
from collections import defaultdict
import datetime
import pytz
import six
from django.db.models import Q, QuerySet
from django.template import loader
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.lib.notifications import build_message_list, hash_util_encode, \
one_click_unsubscribe_link
from zerver.lib.send_email import send_future_email, FromAddress
from zerver.models import UserProfile, UserMessage, Recipient, Stream, \
Subscription, UserActivity, get_active_streams, get_user_profile_by_id, \
Realm
from zerver.context_processors import common_context
from zerver.lib.queue import queue_json_publish
from zerver.lib.logging_util import create_logger
logger = create_logger(__name__, settings.DIGEST_LOG_PATH, 'DEBUG')
VALID_DIGEST_DAY = 1 # Tuesdays
DIGEST_CUTOFF = 5
# Digests accumulate 4 types of interesting traffic for a user:
# 1. Missed PMs
# 2. New streams
# 3. New users
# 4. Interesting stream traffic, as determined by the longest and most
# diversely comment upon topics.
def inactive_since(user_profile, cutoff):
# type: (UserProfile, datetime.datetime) -> bool
# Hasn't used the app in the last DIGEST_CUTOFF (5) days.
most_recent_visit = [row.last_visit for row in
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def should_process_digest(realm_str):
# type: (str) -> bool
if realm_str in settings.SYSTEM_ONLY_REALMS:
# Don't try to send emails to system-only realms
return False
return True
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# type: (UserProfile, datetime.datetime) -> None
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
def enqueue_emails(cutoff):
# type: (datetime.datetime) -> None
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays.
if timezone_now().weekday() != VALID_DIGEST_DAY:
return
for realm in Realm.objects.filter(deactivated=False, show_digest_email=True):
if not should_process_digest(realm.string_id):
continue
user_profiles = UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, enable_digest_emails=True)
for user_profile in user_profiles:
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
logger.info("%s is inactive, queuing for potential digest" % (
user_profile.email,))
def gather_hot_conversations(user_profile, stream_messages):
# type: (UserProfile, QuerySet) -> List[Dict[str, Any]]
# Gather stream conversations of 2 types:
# 1. long conversations
# 2. conversations where many different people participated
#
# Returns a list of dictionaries containing the templating
# information for each hot conversation.
conversation_length = defaultdict(int) # type: Dict[Tuple[int, Text], int]
conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, Text], Set[Text]]
for user_message in stream_messages:
if not user_message.message.sent_by_human():
# Don't include automated messages in the count.
continue
key = (user_message.message.recipient.type_id,
user_message.message.subject)
conversation_diversity[key].add(
user_message.message.sender.full_name)
conversation_length[key] += 1
diversity_list = list(conversation_diversity.items())
diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)
length_list = list(conversation_length.items())
length_list.sort(key=lambda entry: entry[1], reverse=True)
# Get up to the 4 best conversations from the diversity list
# and length list, filtering out overlapping conversations.
    hot_conversations = [elt[0] for elt in diversity_list[:2]]
for candidate, _ in length_list:
if candidate not in hot_conversations:
hot_conversations.append(candidate)
if len(hot_conversations) >= 4:
break
# There was so much overlap between the diversity and length lists that we
    # still have < 4 conversations. Try to use remaining diversity items to pad
# out the hot conversations.
num_convos = len(hot_conversations)
if num_convos < 4:
hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])
hot_conversation_render_payloads = []
for h in hot_conversations:
stream_id, subject = h
users = list(conversation_diversity[h])
count = conversation_length[h]
# We'll display up to 2 messages from the conversation.
first_few_messages = [user_message.message for user_message in
stream_messages.filter(
message__recipient__type_id=stream_id,
message__subject=subject)[:2]]
teaser_data = {"participants": users,
"count": count - len(first_few_messages),
"first_few_messages": build_message_list(
user_profile, first_few_messages)}
hot_conversation_render_payloads.append(teaser_data)
return hot_conversation_render_payloads
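# Worked example (illustrative): with diversity ranking [A, B, C, D] and
# length ranking [C, A, E, F], the selection above yields [A, B, C, E]: the
# two most diverse topics first, then the longest topics not already in the
# list, capped at four.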
def gather_new_users(user_profile, threshold):
# type: (UserProfile, datetime.datetime) -> Tuple[int, List[Text]]
# Gather information on users in the realm who have recently
# joined.
if user_profile.realm.is_zephyr_mirror_realm:
new_users = [] # type: List[UserProfile]
else:
new_users = list(UserProfile.objects.filter(
realm=user_profile.realm, date_joined__gt=threshold,
is_bot=False))
user_names = [user.full_name for user in new_users]
return len(user_names), user_names
def gather_new_streams(user_profile, threshold):
# type: (UserProfile, datetime.datetime) -> Tuple[int, Dict[str, List[Text]]]
if user_profile.realm.is_zephyr_mirror_realm:
new_streams = [] # type: List[Stream]
else:
new_streams = list(get_active_streams(user_profile.realm).filter(
invite_only=False, date_created__gt=threshold))
    base_url = u"%s/#narrow/stream/" % (user_profile.realm.uri,)
streams_html = []
streams_plain = []
for stream in new_streams:
narrow_url = base_url + hash_util_encode(stream.name)
stream_link = u"<a href='%s'>%s</a>" % (narrow_url, stream.name)
streams_html.append(stream_link)
streams_plain.append(stream.name)
return len(new_streams), {"html": streams_html, "plain": streams_plain}
def enough_traffic(unread_pms, hot_conversations, new_streams, new_users):
# type: (Text, Text, int, int) -> bool
if unread_pms or hot_conversations:
# If you have any unread traffic, good enough.
return True
if new_streams and new_users:
# If you somehow don't have any traffic but your realm did get
# new streams and users, good enough.
return True
return False
def handle_digest_email(user_profile_id, cutoff):
# type: (int, float) -> None
user_profile = get_user_profile_by_id(user_profile_id)
    # We are disabling digest emails for soft deactivated users for the time being.
# TODO: Find an elegant way to generate digest emails for these users.
if user_profile.long_term_idle:
return None
# Convert from epoch seconds to a datetime object.
    cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc)
|
muraliselva10/cloudkitty
|
cloudkitty/tests/gabbi/rating/pyscripts/test_gabbi.py
|
Python
|
apache-2.0
| 1,187
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import os
from gabbi import driver
from cloudkitty.tests.gabbi import fixtures
from cloudkitty.tests.gabbi.rating.pyscripts import fixtures as py_fixtures
TESTS_DIR = 'gabbits'
def load_tests(loader, tests, pattern):
test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
return driver.build_tests(test_dir,
loader,
host=None,
intercept=fixtures.setup_app,
fixture_module=py_fixtures)
|
vals/google_devnull
|
tasks.py
|
Python
|
mit
| 361
| 0.00831
|
from time import sleep
from worker import delayable
import requests
@delayable
def add(x, y, delay=None):
# Simulate your work here, preferably something interesting so Python doesn't sleep
    sleep(delay or (x + y if 0 < x + y < 5 else 3))
return x + y
@delayable
def get(*args, **kwargs):
r = requests.get(*args, **kwargs)
return r.content
|
ajagnanan/docker-opencv-api
|
openface/images/collapse.py
|
Python
|
apache-2.0
| 465
| 0.004301
|
# Use this script to collapse a multi-level folder of images into one folder.
import fnmatch
import os
import shutil
folderName = 'unknown'
matches = []
for root, dirnames, filenames in os.walk(folderName):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
idx = 0
for match in matches:
print match
shutil.move('./' + match, './' + folderName + '/' + str(idx) + '.jpg')
    idx = idx + 1
|
ysrc/xunfeng
|
views/lib/QueryLogic.py
|
Python
|
gpl-3.0
| 2,319
| 0.002621
|
# -*- coding: UTF-8 -*-
import re
def mgo_text_split(query_text):
''' split text to support mongodb $text match on a phrase '''
    sep = r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./<>?]'
word_lst = re.split(sep, query_text)
text_query = ' '.join('\"{}\"'.format(w) for w in word_lst)
return text_query
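# Example (illustrative): mgo_text_split('apache/2.4.7') returns
# '"apache" "2" "4" "7"', so each fragment is quoted for MongoDB's $text
# phrase matching.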
# Search logic
def querylogic(list):
query = {}
if len(list) > 1 or len(list[0].split(':')) > 1:
for _ in list:
if _.find(':') > -1:
q_key, q_value = _.split(':', 1)
if q_key == 'port':
query['port'] = int(q_value)
elif q_key == 'banner':
zhPattern = re.compile(u'[\u4e00-\u9fa5]+')
contents = q_value
match = zhPattern.search(contents)
                # If there is no Chinese text, use the full-text index
if match:
query['banner'] = {"$regex": q_value, '$options': 'i'}
else:
text_query = mgo_text_split(q_value)
query['$text'] = {'$search': text_query, '$caseSensitive':True}
elif q_key == 'ip':
query['ip'] = {"$regex": q_value}
elif q_key == 'server':
query['server'] = q_value.lower()
elif q_key == 'title':
query['webinfo.title'] = {"$regex": q_value, '$options': 'i'}
elif q_key == 'tag':
query['webinfo.tag'] = q_value.lower()
elif q_key == 'hostname':
query['hostname'] = {"$regex": q_value, '$options': 'i'}
elif q_key == 'all':
filter_lst = []
for i in ('ip', 'banner', 'port', 'time', 'webinfo.tag', 'webinfo.title', 'server', 'hostname'):
filter_lst.append({i: {"$regex": q_value, '$options': 'i'}})
query['$or'] = filter_lst
else:
query[q_key] = q_value
else:
filter_lst = []
for i in ('ip', 'banner', 'port', 'time', 'webinfo.tag', 'webinfo.title', 'server', 'hostname'):
filter_lst.append({i: {"$regex": list[0], '$options': 'i'}})
query['$or'] = filter_lst
return query
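# Usage sketch (illustrative): the caller passes the query string already
# split into 'key:value' terms.
#
#   querylogic(['port:80', 'server:nginx'])
#   # -> {'port': 80, 'server': 'nginx'}
#
#   querylogic(['login'])  # a single bare term fans out as an $or regex
#   # -> {'$or': [{'ip': {'$regex': 'login', '$options': 'i'}}, ...]}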
|
sighill/shade_app
|
apis/raw/005_raw/005_cleaner.py
|
Python
|
mit
| 1,625
| 0.015423
|
# 005_cleaner.py
#####################################################################
##################################
# Import modules and add the working path for relative imports
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
##################################
# Initialize paths and file names
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file
raw_file = 'src'
##################################
# retrieving raw string
raw_string_with_tabs = open(work_dir + raw_file , 'r').read()
# replacing tabs with carriage return
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
# turning the string into a list
raw_list = raw_string_with_cr.splitlines()
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# going through string formatter
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in list_without_oddities:
ref_list.append( StringFormatter( line ) )
##################################
# Writing the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens')
|
kron4eg/django-btt
|
settings.py
|
Python
|
gpl-3.0
| 39
| 0
|
PER_PAGE = 50
ANNOUNCE_INTERVAL = 300
|
benjamincongdon/adept
|
tradingUI.py
|
Python
|
mit
| 3,586
| 0.037646
|
import pygame
import os
from buffalo import utils
from item import Item
# User interface for trading with NPCs.
# Similar to the crafting UI, with some minor differences.
# The biggest difference is that it only appears when you "talk to" (read:
# click on) a trader NPC, disappears when you leave that window, and only
# contains a limited number of trades.
class TradingUI:
BUTTON_SIZE = 32
PADDING = 6
def __init__(self, inventory, tradeSet):
self.tradeSet = tradeSet
self.inventory = inventory
self.surface = utils.empty_surface((228,500))
self.surface.fill((100,100,100,100))
self.pos = (utils.SCREEN_W / 2 + self.surface.get_width() / 2 + 350, utils.SCREEN_H / 2 - 150)
self.tileRects = list()
self.tileTrades = list()
self.updateTradeTable()
	def updateTradeTable(self):
self.surface = utils.empty_surface((228,500))
self.surface.fill((100,100,100,100))
self.tileRects = list()
self.tileTrades = list()
tradeTiles = list()
total_y = 0
for t in self.tradeSet:
newTile = self.generateTradeTile(t)
tradeTiles.append(newTile)
self.tileRects.append(pygame.Rect(0, total_y, newTile.get_width(), newTile.get_height()))
self.tileTrades.append(t)
total_y += newTile.get_height()
newSurface = utils.empty_surface((228, total_y))
newSurface.fill((100,100,100,255))
currY = 0
for surf in tradeTiles:
newSurface.blit(surf, (0, currY))
currY += surf.get_height()
self.surface = newSurface
def generateTradeTile(self, trade):
		y_length = 36 * (len(trade.price.keys()) / 3) + 78
newScreen = utils.empty_surface((228, y_length))
for num, item in enumerate(trade.price.keys()):
x = ((num % 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
y = ((num / 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
itemSurface = pygame.Surface.copy(Item(item, quantity = trade.price[item]).surface)
if self.inventory.getTotalItemQuantity(item) < trade.price[item]:
itemSurface.fill(pygame.Color(255,0,0,250)[0:3] + (0,), None, pygame.BLEND_RGBA_ADD)
newScreen.blit(itemSurface, (x,y))
for num, item in enumerate(trade.goods.keys()):
x = 192 - (((num % 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING)
y = ((num / 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
newScreen.blit(Item(item, quantity = trade.goods[item]).surface, (x,y))
path = os.path.join(os.path.join(*list(['assets'] + ['items'] + ["arrow.png"])))
arrowSurface = pygame.image.load(path)
newScreen.blit(arrowSurface,(114, (newScreen.get_height() / 2) - arrowSurface.get_height() / 2))
myfont = pygame.font.SysFont("monospace", 15)
color = (255,255,0)
if not trade.canTrade(self.inventory):
color = (255,0,0)
label = myfont.render(str(trade.name), 1, color)
newScreen.blit(label, (newScreen.get_width() - label.get_width() - 2, newScreen.get_height() - label.get_height() - 2))
pygame.draw.rect(newScreen, (0,0,0,255), pygame.Rect(0,0,228, y_length), 1)
return newScreen
def blit(self, dest, pos):
dest.blit(self.surface, pos)
def update(self):
pass
def mouseDown(self, pos):
for tile in self.tileRects:
if(tile.collidepoint(pos)):
clickedTrade = self.tileTrades[self.tileRects.index(tile)]
if not clickedTrade.canTrade(self.inventory):
return
for item in clickedTrade.price.keys():
self.inventory.removeItemQuantity(item, clickedTrade.price[item])
for item in clickedTrade.goods.keys():
newItem = Item(item)
newItem.quantity = clickedTrade.goods[item]
self.inventory.addItem(newItem)
self.inventory.update()
self.updateTradeTable()
return
|
albatros69/Divers
|
scrap_ffme.py
|
Python
|
gpl-3.0
| 3,373
| 0.006235
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime
from os import environ
from requests import get
from bs4 import BeautifulSoup, NavigableString
from icalendar import Calendar, Event
# To make TLS 1.0 work...
environ["OPENSSL_CONF"] = "openssl.cnf"
def scrape_url(url):
page = get(url)
result = [ ]
for div in BeautifulSoup(page.text, 'lxml').find_all('div', class_='infos_colonne_box'):
rows = div.find_all('tr')
if rows:
headers = [ ' '.join(x.stripped_strings) for x in rows[0] if not isinstance(x, NavigableString) ]
for row in rows[1:]:
result.append(
dict(zip(headers, [ x for x in row if not isinstance(x, NavigableString) ]))
)
return result
def create_event_formation(d):
event = Event()
dates = tuple(d['Date'].stripped_strings)
num = ''.join(d['N°'].stripped_strings)
event.add('summary', ' '.join(d['Nom'].stripped_strings))
event.add('dtstart', datetime.strptime(dates[0], '%d/%m/%y').date())
if len(dates) > 1:
event.add('dtend', datetime.strptime(dates[1], '%d/%m/%y').date())
event.add('location', ' '.join(d['Lieu'].stripped_strings).replace("\r\n", ' '))
event.add('uid', "%[email protected]" % (num,) )
event.add('description', 'http://www.ffme.fr/formation/fiche-evenement/%s.html' % (num, ))
return event
def create_event_compet(d):
event = Event()
nom_lieu = tuple(d['Nom de la compétition Lieu'].stripped_strings)
dates = tuple(d['Date'].stripped_strings)
link = 'http://www.ffme.fr'+d['Nom de la compétition Lieu'].a.get('href')
event.add('summary', nom_lieu[0])
event.add('location', nom_lieu[1])
event.add('dtstart', datetime.strptime(dates[0], '%d/%m/%y').date())
if len(dates) > 1:
event.add('dtend', datetime.strptime(dates[1], '%d/%m/%y').date())
event.add('uid', "%[email protected]" % (''.join(( a for a in link if a.isdigit())),) )
event.add('description', link)
return event
cal = Calendar()
cal.add('prodid', '-//Calendrier formations FFME//ffme.fr//')
cal.add('version', '2.0')
cal.add("X-W
|
R-CALNAME", "Calendrier formations FFME")
urls = ('http://www.ffme.fr/formation/calendrier-liste/FMT_ESCSAE.html',
        'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCSNE.html',
#'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCFCINI.html',
'http://www.ffme.fr/formation/calendrier-liste/FMT_ESCMONESP.html',
)
for u in urls:
for d in scrape_url(u):
cal.add_component(create_event_formation(d))
with open('cal_formation.ics', 'w') as f:
f.write(cal.to_ical().decode('utf-8'))
cal = Calendar()
cal.add('prodid', '-//Calendrier compétitions FFME//ffme.fr//')
cal.add('version', '2.0')
cal.add("X-WR-CALNAME", "Calendrier compétitions FFME")
url = 'http://www.ffme.fr/competition/calendrier-liste.html?DISCIPLINE=ESC&CPT_FUTUR=1'
page = 1
while True:
data = scrape_url(url + "&page=" + repr(page))
if not data:
break
for d in data:
cal.add_component(create_event_compet(d))
    page += 1
with open('cal_competition.ics', 'w') as f:
f.write(cal.to_ical().decode('utf-8'))
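# --- Editor's note: a minimal sketch, not from this repo, showing how
# scrape_url() pairs header cells with data cells via zip(); the HTML below is
# invented for illustration, not real FFME markup.
def _zip_demo():
    from bs4 import BeautifulSoup, NavigableString
    html = ('<table><tr><th>Date</th><th>Nom</th></tr>'
            '<tr><td>01/02/25</td><td>Stage initiateur</td></tr></table>')
    rows = BeautifulSoup(html, 'lxml').find_all('tr')
    headers = [' '.join(x.stripped_strings) for x in rows[0] if not isinstance(x, NavigableString)]
    # Each data row becomes a dict mapping a header to its <td> tag.
    return dict(zip(headers, [x for x in rows[1] if not isinstance(x, NavigableString)]))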
|
DonaldWhyte/multidimensional-search-fyp
|
scripts/read_multifield_dataset.py
|
Python
|
mit
| 1,612
| 0.030397
|
import sys
if __name__ == "__main__":
# Parse command line arguments
if len(sys.argv) < 2:
sys.exit("python {} <datasetFilename> {{<maxPoints>}}".format(sys.argv[0]))
datasetFilename = sys.argv[1]
if len(sys.argv) >= 3:
maxPoints = int(sys.argv[2])
else:
maxPoints = None
# Perform initial pass through file to determine line count (i.e. # of points)
lineCount = 0
with open(datasetFilename, "r") as f:
line = f.readline()
while line:
lineCount += 1
line = f.readline()
# Read first line and use to make assumption about the dimensionality of each point
numDimensions = 0
with open(datasetFilename, "r") as f:
firstLine = f.readline()
numDimensions = len(firstLine.split())
# If dimensionality of dataset is 0, print error message and exit
if numDimensions == 0:
sys.exit("Could not determine dimensionality of dataset")
    # Print the dataset header first (the pre-pass line count gives the number of points)
if maxPoints:
numPoints = min(lineCount, maxPoints)
else:
numPoints = lineCount
print("{}
|
{}".format(numDimensions, numPoints))
# Output dataset header which defines dimensionality of data and number of points
# Read entire file line-by-line, printing out each line as a point
with open(datasetFilename, "r") as f:
pointsRead = 0
line = f.readline()
while line:
fields = line.split()
floatFields = [ str(float(x)) for x in fields ]
print(" ".join(floatFields))
            # Stop reading the file if the maximum number of points has been read
pointsRead += 1
if maxPoints and pointsRead >= maxPoints:
break
# Read next line of file
line = f.readline()
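# --- Editor's note: hypothetical invocation (file names are placeholders):
#   python read_multifield_dataset.py volume.txt 1000 > volume.dat
# This prints "<numDimensions> <numPoints>" followed by at most 1000
# whitespace-separated points, each field normalised through float().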
|
valefranz/AI-Project-VacuumEnvironment
|
aima-ui-2a.py
|
Python
|
apache-2.0
| 17,529
| 0.000628
|
'''
aima-ui project
===============
This is just a graphical user interface for testing
agents in an AI college course.
'''
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.spinner import Spinner
from kivy.uix.popup import Popup
from kivy.uix.image import Image
from kivy.app import App
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle
from functools import partial
from agent_dir.agents import *
import agent_list
import env_list
from os import path
ALL_AGENTS = agent_list.load_agents()
ALL_MAPS = env_list.get_maps()
def check_img(img_name):
"""Check if the image is in img dir of the agents."""
return path.isfile(path.join("agents_dir", path.join("img", img_name)))
def memoize(func):
"""Memoize decorator."""
memo = {}
def helper(*args):
"""Helper function for memoize paradigm."""
if args not in memo:
memo[args] = func(*args)
return memo[args]
return helper
@memoize
def gen_popup(type_, text, dismiss=True):
"""Generate a popup."""
popup_layout = BoxLayout(orientation='vertical')
content = Button(text='Dismiss', size_hint=(1, .3))
popup_layout.add_widget(Label(text=text))
popup = Popup(title=type_,
content=popup_layout)
if dismiss:
popup_layout.add_widget(content)
content.bind(on_press=popup.dismiss)
return popup
class AimaUI(App):
"""Class to manage aima agents and environments."""
def __init__(self):
"""Initialize the user interface."""
App.__init__(self)
self.scoreA = 0
self.scoreB = 0
self.agentA = "Agent A"
self.agentAImgDef = "img/agent_1.png"
self.agentAImg = None
self.agentB = "Agent B"
self.agentBImgDef = "img/agent_2.png"
self.agentBImg = None
self.wall_img = Image(source="img/wall.png")
# self.trash_img = Image(source="img/trash.png")
self.trash_img = Image(source="img/cake.png")
self.map = None
self.running = False
self.env = None
self.counter_steps = 0
self.initialized = False
def __initialize_env(self):
"""Initialize aima environment."""
if self.env is not None:
del self.env
self.env = None
if self.map is not None:
self.env = ALL_MAPS[self.map]()
if self.agentA in ALL_AGENTS:
agent_A = TraceAgent(ALL_AGENTS[self.agentA]())
if agent_A.img is not None and check_img(agent_A.img):
self.agentAImg = Image(
source=path.join("agents_dir", path.join("img", agent_A.img)))
else:
self.agentAImg = Image(source=self.agentAImgDef)
self.env.add_thing(agent_A, location=self.env.start_from)
if self.agentB in ALL_AGENTS:
agent_B = TraceAgent(ALL_AGENTS[self.agentB]())
if agent_B.img is not None and check_img(agent_B.img):
self.agentBImg = Image(
source=path.join("agents_dir", path.join("img", agent_B.img)))
else:
self.agentBImg = Image(source=self.agentBImgDef)
self.env.add_thing(agent_B, location=self.env.start_from)
def get_scores(self):
"""Get agents' scores."""
return ("ScoreA = {0:d}".format(self.scoreA),
"ScoreB = {0
|
:d}".format(self.scoreB))
def update_canvas(self, labels, wid, *largs):
"""Update the canvas to respect the environment."""
wid.canvas.clear()
self.counter.text = str(self.counter_steps)
n_x, n_y = max([thing.location for thing in self.env.things])
tile_x = wid.width / float(n_x + 1)
        tile_y = wid.height / float(n_y + 1)
labelA, labelB = labels
with wid.canvas:
for thing in [thing for thing in self.env.things
if isinstance(thing, Dirt) or
isinstance(thing, Clean)]:
pos_x, pos_y = thing.location
if isinstance(thing, Dirt):
Color(0.5, 0, 0)
Rectangle(
pos=(
pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y),
size=(tile_x, tile_y))
Color(1, 1, 1, 1)
Rectangle(texture=self.trash_img.texture,
pos=(
pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y
),
size=(tile_x, tile_y))
elif isinstance(thing, Clean):
Color(0.1, 0.5, 0.1)
Rectangle(
pos=(
pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y),
size=(tile_x, tile_y))
for thing in [thing for thing in self.env.things
if isinstance(thing, Wall)]:
pos_x, pos_y = thing.location
Color(1, 1, 1, 1)
Rectangle(texture=self.wall_img.texture,
pos=(pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y),
size=(tile_x, tile_y))
for thing in [thing for thing in self.env.things
if isinstance(thing, ALL_AGENTS.get(self.agentA, Agent)) or
isinstance(thing, ALL_AGENTS.get(self.agentB, Agent))]:
pos_x, pos_y = thing.location
if self.agentA in ALL_AGENTS and\
isinstance(thing, ALL_AGENTS[self.agentA]):
self.scoreA = thing.performance
labelA.text = self.get_scores()[0]
Color(1, 1, 1, 1)
Rectangle(texture=self.agentAImg.texture,
pos=(pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y),
size=(tile_x, tile_y))
if self.agentB in ALL_AGENTS and\
isinstance(thing, ALL_AGENTS[self.agentB]):
self.scoreB = thing.performance
labelB.text = self.get_scores()[1]
Color(1, 1, 1, 1)
Rectangle(texture=self.agentBImg.texture,
pos=(pos_x * tile_x + wid.x,
pos_y * tile_y + wid.y),
size=(tile_x, tile_y))
def load_env(self, labels, wid, *largs):
"""Load and prepare the environment."""
self.running = False
self.counter_steps = 0
if self.map is None or self.map == "Maps":
gen_popup("Error!", "No map selected...").open()
return
elif self.agentA not in ALL_AGENTS and\
self.agentB not in ALL_AGENTS:
gen_popup("Error!", "You must choose at least one agent...").open()
return
self.__initialize_env()
self.initialized = True
self.update_canvas(labels, wid)
def running_step(self, labels, wid, n_step=None, *largs):
"""Run the program of the environment, called from run."""
if self.env is not None:
if n_step is not None:
if self.counter_steps == n_step:
self.running = False
self.btn_100step.state = "normal"
self.counter_steps = 0
return False
else:
self.counter_steps += 1
if not self.running:
return False
self.env.step()
self.update_canvas(labels, wid)
def btn_step(self, labels, wid, *largs):
"""Update the environment one step."""
if not self.initialized:
gen_popup("Er
|
obi1kenobi/pyre
|
WiFiMouse/wifimouse_driver.py
|
Python
|
mit
| 2,923
| 0.002053
|
import socket
import string
from driver import driver
class WiFiMouseDriver(driver):
ACTION_KEYS = {
'TAB': 'TAB',
'ENTER': 'RTN',
'ESCAPE': 'ESC',
'PAGE_UP': 'PGUP',
'PAGE_DOWN': 'PGDN',
'END': 'END',
'HOME': 'HOME',
'LEFT': 'LF',
'UP': 'UP',
'RIGHT': 'RT',
'DOWN': 'DW',
'BACK_SPACE': 'BAS',
'F1': 'F1',
'F2': 'F2',
'F3': 'F3',
'F4': 'F4',
'F5': 'F5',
'F6': 'F6',
'F7': 'F7',
'F8': 'F8',
'F9': 'F9',
'F10': 'F10',
'F11': 'F11',
'F12': 'F12',
'CONTROL': 'CTRL',
'ALT': 'ALT',
'SHIFT': 'SHIFT',
}
SERVER_PORT = 1978
def __init__(self, ip):
self._ip = ip
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((ip, WiFiMouseDriver.SERVER_PORT))
self._socket.setblocking(1)
def _send(self, data):
print "sending: " + data
self._socket.send(data)
def left_click(self):
self._send("mos 1c")
return self
def right_click(self):
self._send("mos 5R r d")
self._send("mos 5R r u")
return self
def move_mouse(self, deltaX, deltaY):
# maximum movement is 99 in any direction
currX = deltaX
if deltaX > 0:
while currX > 0:
moveX = min(currX, 99)
self._send("mos " + str(len(str(moveX)) + len(str(0)) + 3) + "m " + str(moveX) + " " + str(0))
currX -= moveX
elif deltaX < 0:
while currX < 0:
moveX = max(currX, -99)
self._send("mos " + str(len(str(moveX)) + len(str(0)) + 3) + "m " + str(moveX) + " " + str(0))
currX -= moveX
currY = deltaY
if deltaY > 0:
while currY > 0:
moveY = min(currY, 99)
self._send("mos " + str(len(str(0)) + len(str(moveY)) + 3) + "m " + str(0) + " " + str(moveY))
currY -= moveY
elif deltaY < 0:
while currY < 0:
moveY = max(currY, -99)
self._send("mos " + str(len(str(0)) + len(str(moveY)) + 3) + "m " + str(0) + " "
|
+ str(moveY))
currY -= moveY
return self
def typeText(self, text):
format = "key "
for char in text:
self._send(format + str(len(char)) + char)
return self
def press_action_key(self, name, shift=False, ctrl=False, alt=False):
if name not in WiFiMouseDriver.ACTION_KEYS:
raise ValueError('Unknown action key name: %s' % name)
format = "key "
command = str(WiFiMouseDriver.ACTION_KEYS[name])
self._send(format + str(len(command)) + command)
return self
def close(self):
self._socket.close()
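# --- Editor's note: a hypothetical usage sketch (IP address and call sequence
# invented for illustration). A 150px move is sent as two packets, "99 0" then
# "51 0", because the protocol caps each move at 99 per axis:
#   d = WiFiMouseDriver('192.168.1.50')
#   d.move_mouse(150, 0).left_click()
#   d.typeText('hi').press_action_key('ENTER')
#   d.close()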
|
Caerostris/marathon
|
tests/system/marathon_auth_common_tests.py
|
Python
|
apache-2.0
| 2,070
| 0.004831
|
"""
Authentication and authorization tests which require DC/OS Enterprise.
These currently run against root Marathon; we assume we will also want to
run them against MoM EE.
"""
import common
import dcos
import pytest
import shakedown
from urllib.parse import urljoin
from dcos import marathon
from shakedown import credentials, ee_version
@pytest.mark.skipif("ee_version() is None")
@pytest.mark.usefixtures('credentials')
def test_non_authenicated_user():
with shakedown.no_user():
        with pytest.raises(dcos.errors.DCOSAuthenticationException) as exc_info:
response = dcos.http.get(urljoin(shakedown.dcos_url(), 'service/marathon/v2/apps'))
error = exc_info.value
assert str(error) == "Authentication failed. Please run `dcos auth login`"
@pytest.mark.skipif("ee_version() is None")
@pytest.mark.usefixtures('credentials')
def test_non_authorized_user():
with shakedown.new_dcos_user('kenny', 'kenny'):
        with pytest.raises(dcos.errors.DCOSAuthorizationException) as exc_info:
response = dcos.http.get(urljoin(shakedown.dcos_url(), 'service/marathon/v2/apps'))
error = exc_info.value
assert str(error) == "You are not authorized to perform this operation"
@pytest.fixture(scope="function")
def billy():
shakedown.add_user('billy', 'billy')
    shakedown.set_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
shakedown.set_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
yield
shakedown.remove_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
shakedown.remove_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
shakedown.remove_user('billy')
@pytest.mark.skipif("ee_version() is None"
|
)
@pytest.mark.usefixtures('credentials')
def test_authorized_non_super_user(billy):
with shakedown.dcos_user('billy', 'billy'):
client = marathon.create_client()
        assert len(client.get_apps()) == 0
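# --- Editor's note: a self-contained sketch (not a Marathon test) of the
# pytest.raises pattern used above; the raised exception is read back through
# exc_info.value after the with-block exits.
import pytest

def test_raises_pattern():
    with pytest.raises(ValueError) as exc_info:
        int("not a number")
    assert "invalid literal" in str(exc_info.value)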
|
noam09/kodi
|
script.module.israeliveresolver/lib/interalSimpleDownloader.py
|
Python
|
gpl-3.0
| 10,978
| 0.017216
|
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import socket
from flvlib import tags
from flvlib import helpers
from flvlib.astypes import MalformedFLV
import zlib
from StringIO import StringIO
import hmac
import hashlib
addon_id = 'plugin.video.israelive'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
#F4Mversion=''
class interalSimpleDownloader():
outputfile =''
clientHeader=None
def __init__(self):
self.init_done=False
def thisme(self):
return 'aaaa'
def openUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
return response
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
#print 'Error in getUrl'
traceback.print_exc()
return None
def init(self, out_stream, url, proxy=None,g_stopEvent=None, maxbitRate=0):
try:
self.init_done=False
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.maxbitRate=maxbitRate
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.out_stream=out_stream
self.g_stopEvent=g_stopEvent
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
#print 'header recieved now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
#self.downloadInternal( url)
return True
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(self.url,dest_stream)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,url,dest_stream):
try:
url=self.url
fileout=dest_stream
self.status='bootstrap done'
while True:
response=self.openUrl(url)
buf="start"
firstBlock=True
try:
while (buf != None and len(buf) > 0):
if self.g_stopEvent and self.g_stopEvent.isSet():
return
buf = response.read(200 * 1024)
fileout.write(buf)
#print 'writing something..............'
fileout.flush()
try:
if firstBlock:
firstBlock=False
if self.maxbitRate and self.maxbitRate>0:# this is for being sports for time being
#print 'maxbitrate',self.maxbitRate
ec=EdgeClass(buf,url,'http://www.en.beinsports.net/i/PerformConsole_BEIN/player/bin-release/PerformConsole.swf',sendToken=False)
ec.switchStream(self.maxbitRate,"DOWN")
except:
traceback.print_exc()
response.close()
fileout.close()
#print time.asctime(), "Closing connection"
except socket.error, e:
#print time.asctime(), "Client Closed the connection."
try:
response.close()
fileout.close()
except Exception, e:
return
except Exception, e:
traceback.print_exc(file=sys.stdout)
response.close()
fileout.close()
except:
traceback.print_exc()
return
class EdgeClass():
def __init__(self, data, url, swfUrl, sendToken=False, switchStream=None):
self.url = url
self.swfUrl = swfUrl
self.domain = self.url.split('://')[1].split('/')[0]
self.control = 'http://%s/control/' % self.domain
self.onEdge = self.extractTags(data,onEdge=True)
self.sessionID=self.onEdge['session']
self.path=self.onEdge['streamName']
#print 'session',self.onEdge['session']
#print 'Edge variable',self.onEdge
#print 'self.control',self.control
#self.MetaData = self.extractTags(data,onMetaData=True)
if sendToken:
self.sendNewToken(self.onEdge['session'],self.onEdge['streamName'],self.swfUrl,self.control)
def getURL(self, url, post=False, sessionID=False, sessionToken=False):
try:
#print 'GetURL --> url = '+url
opener = urllib2.build_opener()
if sessionID and sessionToken:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ),
('x-Akamai-Streaming-SessionToken', sessionToken ),
('x-Akamai-Streaming-SessionID', sessionID ),
('Content-Type', 'text/xml' )]
elif sessionID:
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:14.0) Gecko/20100101 Firefox/14.0.1' ),
('x-Akamai-Streaming-SessionID', sessionID ),
|
Solthis/Fugen-2.0
|
data/indicators/lost_back_patients.py
|
Python
|
gpl-3.0
| 3,277
| 0.000305
|
# coding: utf-8
# Copyright 2017 Solthis.
#
# This file is part of Fugen 2.0.
#
# Fugen 2.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fugen 2.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
from dateutil.relativedelta import relativedelta
from data.indicators.patient_indicator import PatientIndicator
from data.indicators.lost_patients import LostPatients
from data.indicators.arv_started_patients import ArvStartedPatients
from data.indicators.dead_patients import ArvDeadPatientsDuringPeriod
from data.indicators.transferred_patients import ArvTransferredPatientsDuringPeriod
from utils import getFirstDayOfPeriod, getLastDayOfPeriod
class ArvLostBackPatients(PatientIndicator):
def under_arv(self):
return False
@classmethod
def get_key(cls):
return "ARV_LOST_BACK"
@classmethod
def get_display_label(cls):
return "Perdus de vue de retour dans le TARV"
    def filter_patients_dataframe(self, limit_date, start_date=None,
include_null_dates=False):
        lost_prev = LostPatients(self.fuchia_database)
arv_started = ArvStartedPatients(self.fuchia_database)
n_limit = limit_date - relativedelta(months=1)
n_start = start_date - relativedelta(months=1)
i = (lost_prev & arv_started)
prev_lost_patients = i.get_filtered_patients_dataframe(
getLastDayOfPeriod(n_limit.month, n_limit.year),
start_date=getFirstDayOfPeriod(n_start.month, n_start.year),
include_null_dates=include_null_dates
)
visits = self.filter_visits_by_category(
limit_date,
start_date=None,
include_null_dates=include_null_dates
)
c1 = (visits['visit_date'] >= start_date)
c2 = (visits['visit_date'] <= limit_date)
visits = visits[c1 & c2]
seen_id = pd.Index(visits['patient_id'].unique())
# Arv dead during the period must be re-included
arv_dead = ArvDeadPatientsDuringPeriod(self.fuchia_database)
dead = arv_dead.get_filtered_patients_dataframe(
limit_date,
start_date=start_date,
include_null_dates=include_null_dates
)
seen_id = seen_id.union(dead.index)
# Transferred during the period must be re-included
arv_trans = ArvTransferredPatientsDuringPeriod(self.fuchia_database)
trans = arv_trans.get_filtered_patients_dataframe(
limit_date,
start_date=start_date,
include_null_dates=include_null_dates
)
seen_id = seen_id.union(trans.index)
n_index = prev_lost_patients.index.intersection(seen_id)
return prev_lost_patients.loc[n_index], None
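# --- Editor's note: a toy sketch with synthetic data (not Fugen's real
# frames) of the pandas Index algebra used above: patients lost last month
# who are seen, dead or transferred this month count as "back".
import pandas as pd

prev_lost = pd.DataFrame(index=pd.Index([1, 2, 3], name='patient_id'))
seen_id = pd.Index([2]).union(pd.Index([3]))  # visits, dead and transferred ids
back = prev_lost.loc[prev_lost.index.intersection(seen_id)]
# back.index -> [2, 3]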
|
openstenoproject/plover
|
plover/engine.py
|
Python
|
gpl-2.0
| 20,307
| 0.00064
|
from collections import namedtuple, OrderedDict
from functools import wraps
from queue import Queue
import os
import shutil
import threading
from plover import log, system
from plover.dictionary.loading_manager import DictionaryLoadingManager
from plover.exception import DictionaryLoaderException
from plover.formatting import Formatter
from plover.misc import shorten_path
from plover.registry import registry
from plover.resource import ASSET_SCHEME, resource_filename
from plover.steno import Stroke
from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection
from plover.suggestions import Suggestions
from plover.translation import Translator
class StartingStrokeState(namedtuple('StartingStrokeState', 'attach capitalize space_char')):
def __new__(cls, attach=False, capitalize=False, space_char=' '):
return super().__new__(cls, attach, capitalize, space_char)
MachineParams = namedtuple('MachineParams', 'type options keymap')
class ErroredDictionary(StenoDictionary):
""" Placeholder for dictionaries that failed to load. """
def __init__(self, path, exception):
super().__init__()
self.enabled = False
self.readonly = True
self.path = path
self.exception = exception
def __eq__(self, other):
if not isinstance(other, ErroredDictionary):
return False
return (self.path, self.exception) == (other.path, other.exception)
def copy_default_dictionaries(dictionaries_files):
'''Recreate default dictionaries.
Each default dictionary is recreated if it's
in use by the current config and missing.
'''
for dictionary in dictionaries_files:
# Ignore assets.
if dictionary.startswith(ASSET_SCHEME):
continue
        # Nothing to do if dictionary file already exists.
if os.path.exists(dictionary):
continue
# Check it's actually a default dictionary.
basename = os.path.basename(dictionary)
if basename not in system.DEFAULT_DICTIONARIES:
continue
default_dictionary = os.path.join(system.DICTIONARIES_ROOT, basename)
log.info('recreating %s from %s', dictionary, default_dictionary)
shutil.copyfile(resource_filename(default_dictionary), dictionary)
def with_lock(func):
# To keep __doc__/__name__ attributes of the initial function.
@wraps(func)
def _with_lock(self, *args, **kwargs):
with self:
return func(self, *args, **kwargs)
return _with_lock
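# --- Editor's note: illustrative sketch (hypothetical method, not part of
# Plover) of what the decorator above buys: the wrapped call runs inside
# `with self:`, i.e. under the engine's re-entrant lock, while keeping the
# original __name__/__doc__ thanks to functools.wraps:
#   @with_lock
#   def some_engine_method(self, arg):
#       ...  # body executes while holding self._lock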
class StenoEngine:
HOOKS = '''
stroked
translated
machine_state_changed
output_changed
config_changed
dictionaries_loaded
send_string
send_backspaces
send_key_combination
add_translation
focus
configure
lookup
suggestions
quit
'''.split()
def __init__(self, config, controller, keyboard_emulation):
self._config = config
self._controller = controller
self._is_running = False
self._queue = Queue()
self._lock = threading.RLock()
self._machine = None
self._machine_state = None
self._machine_params = MachineParams(None, None, None)
self._formatter = Formatter()
self._formatter.set_output(Formatter.output_type(
self._send_backspaces,
self._send_string,
self._send_key_combination,
self._send_engine_command,
))
self._formatter.add_listener(self._on_translated)
self._translator = Translator()
self._translator.add_listener(log.translation)
self._translator.add_listener(self._formatter.format)
self._dictionaries = self._translator.get_dictionary()
self._dictionaries_manager = DictionaryLoadingManager()
self._running_state = self._translator.get_state()
self._keyboard_emulation = keyboard_emulation
self._hooks = { hook: [] for hook in self.HOOKS }
self._running_extensions = {}
def __enter__(self):
self._lock.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._lock.__exit__(exc_type, exc_value, traceback)
def _in_engine_thread(self):
raise NotImplementedError()
def _same_thread_hook(self, func, *args, **kwargs):
if self._in_engine_thread():
func(*args, **kwargs)
else:
self._queue.put((func, args, kwargs))
def run(self):
while True:
func, args, kwargs = self._queue.get()
try:
with self._lock:
if func(*args, **kwargs):
break
except Exception:
log.error('engine %s failed', func.__name__[1:], exc_info=True)
def _on_control_message(self, msg):
if msg[0] == 'command':
self._same_thread_hook(self._execute_engine_command,
*msg[1:], force=True)
else:
log.error('ignoring invalid control message: %r', msg)
def _stop(self):
self._controller.stop()
self._stop_extensions(self._running_extensions.keys())
if self._machine is not None:
self._machine.stop_capture()
self._machine = None
def _start(self):
self._set_output(self._config['auto_start'])
self._update(full=True)
self._controller.start(self._on_control_message)
def _set_dictionaries(self, dictionaries):
def dictionaries_changed(l1, l2):
if len(l1) != len(l2):
return True
for d1, d2 in zip(l1, l2):
if d1 is not d2:
return True
return False
if not dictionaries_changed(dictionaries, self._dictionaries.dicts):
# No change.
return
self._dictionaries = StenoDictionaryCollection(dictionaries)
self._translator.set_dictionary(self._dictionaries)
self._trigger_hook('dictionaries_loaded', self._dictionaries)
def _update(self, config_update=None, full=False, reset_machine=False):
original_config = self._config.as_dict()
# Update configuration.
if config_update is not None:
self._config.update(**config_update)
config = self._config.as_dict()
else:
config = original_config
# Create configuration update.
if full:
config_update = config
else:
config_update = {
option: value
for option, value in config.items()
if value != original_config[option]
}
# Save config if anything changed.
if config_update:
self._config.save()
# Update logging.
log.set_stroke_filename(config['log_file_name'])
log.enable_stroke_logging(config['enable_stroke_logging'])
log.enable_translation_logging(config['enable_translation_logging'])
# Update output.
self._formatter.set_space_placement(config['space_placement'])
self._formatter.start_attached = config['start_attached']
self._formatter.start_capitalized = config['start_capitalized']
self._translator.set_min_undo_length(config['undo_levels'])
# Update system.
system_name = config['system_name']
if system.NAME != system_name:
log.info('loading system: %s', system_name)
system.setup(system_name)
# Update machine.
update_keymap = False
start_machine = False
machine_params = MachineParams(config['machine_type'],
config['machine_specific_options'],
config['system_keymap'])
# Do not reset if only the keymap changed.
if self._machine_params is None or \
self._machine_params.type != machine_params.type or \
self._machine_params.options != machine_params.options:
reset_machine = True
if reset_machine:
if self._machine is not None:
self._
|
thisfred/breakfast
|
tests/test_attempt_12.py
|
Python
|
bsd-2-clause
| 21,250
| 0.000659
|
import ast
from collections import defaultdict
from contextlib import ExitStack, contextmanager
from functools import singledispatch
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from breakfast.position import Position
from breakfast.source import Source
from tests import make_source
QualifiedName = Tuple[str, ...]
class Node:
def __init__(self, parent: Optional["Node"]):
self.parent = parent
self.children: Dict[str, "Node"] = defaultdict(lambda: Node(parent=self))
self.occurrences: Set[Position] = set()
self.is_class = False
def add_occurrence(self, occurrence: Any):
self.occurrences.add(occurrence)
def __getitem__(self, name: str) -> "Node":
return self.children[name]
def __contains__(self, name: str) -> bool:
return name in self.children
def alias(self, other: "Node") -> None:
for name, value in other.children.items():
if name not in self.children:
self.children[name] = value
else:
self.children[name].alias(value)
other.children = self.children
self.occurrences |= other.occurrences
other.occurrences = self.occurrences
def flatten(
self,
prefix: Tuple[str, ...] = tuple(),
seen: Optional[Set[Position]] = None,
) -> Dict[Tuple[str, ...], List[Tuple[int, int]]]:
if not seen:
seen = set()
result = {}
next_values = []
for key, value in self.children.items():
new_prefix = prefix + (key,)
if value.occurrences:
occurrence = next(iter(value.occurrences))
if occurrence in seen:
continue
positions = [(o.row, o.column) for o in value.occurrences]
result[new_prefix] = positions
seen |= value.occurrences
next_values.append((new_prefix, value))
for new_prefix, value in next_values:
result.update(value.flatten(prefix=new_prefix, seen=seen))
return result
class State:
def __init__(self, position: Position):
self.position = position
self.root = Node(parent=None)
self.current_node = self.root
self.current_path: QualifiedName = tuple()
self.lookup_scopes = [self.root]
self.found: Optional[Node] = None
@contextmanager
def scope(self, name: str, lookup_scope: bool = False, is_class: bool = False):
previous_node = self.current_node
self.current_node = self.current_node[name]
self.current_node.is_class = is_class
if lookup_scope:
self.lookup_scopes.append(self.current_node)
self.current_path += (name,)
yield
self.current_node = previous_node
self.current_path = self.current_path[:-1]
if lookup_scope:
self.lookup_scopes.pop()
def add_occurrence(self, *, position: Optional[Position] = None) -> None:
if position:
self.current_node.occurrences.add(position)
if position == self.position:
self.found = self.current_node
print(
f"{self.current_path}: {[(o.row,o.column) for o in self.current_node.occurrences]}"
)
def alias(self, path: QualifiedName) -> None:
other_node = self.current_node
for name in path:
if name == "..":
if other_node.parent:
other_node = other_node.parent
else:
other_node = other_node[name]
self.current_node.alias(other_node)
def node_position(
node: ast.AST, source: Source, row_offset=0, column_offset=0
) -> Position:
return source.position(
row=(node.lineno - 1) + row_offset, column=node.col_offset + column_offset
)
def generic_visit(node: ast.AST, source: Source, state: State) -> None:
"""Called if no explicit visitor function exists for a node.
Adapted from NodeVisitor in:
https://github.com/python/cpython/blob/master/Lib/ast.py
"""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
visit(item, source, state)
elif isinstance(value, ast.AST):
visit(value, source, state)
@singledispatch
def visit(node: ast.AST, source: Source, state: State) -> None:
generic_visit(node, source, state)
@visit.register
def visit_module(node: ast.Module, source: Source, state: State) -> None:
with state.scope(source.module_name):
with state.scope(".", lookup_scope=True):
generic_visit(node, source, state)
@visit.register
def visit_name(node: ast.Name, source: Source, state: State) -> None:
position = node_position(node, source)
if isinstance(node.ctx, ast.Store):
with state.scope(node.id):
state.add_occurrence(position=position)
else:
if node.id not in state.current_node:
for scope in state.lookup_scopes[::-1]:
if node.id in scope or scope is state.root:
scope[node.id].alias(state.current_node[node.id])
break
with state.scope(node.id):
state.add_occurrence(position=node_position(node, source))
@singledispatch
def names_for(node: ast.AST) -> QualifiedName: # pylint: disable= unused-argument
return ()
@names_for.register
def names_for_name(node: ast.Name) -> QualifiedName:
return (node.id,)
@names_for.register
def names_for_attribute(node: ast.Attribute) -> QualifiedName:
return names_for(node.value) + (node.attr,)
@names_for.register
def names_for_call(node: ast.Call) -> QualifiedName:
return names_for(node.func) + ("()",)
def get_names(value: ast.AST) -> List[QualifiedName]:
if isinstance(value, ast.Tuple):
return [names_for(v) for v in value.elts]
return [names_for(value)]
@visit.register
def visit_assign(node: ast.Assign, source: Source, state: State) -> None:
for node_target in node.targets:
visit(node_target, source, state)
visit(node.value, source, state)
target_names = get_names(node.targets[0])
value_names = get_names(node.value)
for target, value in zip(target_names, value_names):
if target and value:
path: QualifiedName = ("..",)
with ExitStack() as stack:
for name in target:
stack.enter_context(state.scope(name))
stack.enter_context(state.scope("."))
path += ("..",)
state.alias(path + value + (".",))
def is_static_method(node: ast.FunctionDef) -> bool:
return any(
n.id == "staticmethod" for n in node.decorator_list if isinstance(n, ast.Name)
)
@visit.register
def visit_function_definition(
node: ast.FunctionDef, source: Source, state: State
) -> None:
is_method = state.lookup_scopes[-1] and state.lookup_scopes[-1].is_class
position = node_position(node, source, column_offset=len("def "))
with state.scope(node.name):
state.add_occurrence(position=position)
with state.scope("()"):
for i, arg in enumerate(node.args.args):
position = node_position(arg, source)
with state.scope(arg.arg):
state.add_occurrence(position=position)
                if i == 0 and is_method and not is_static_method(node):
with state.scope("."):
state.alias(("..", "..", "..", ".."))
generic_visit(node, source, state)
@visit.register
def visit_class(node: ast.ClassDef, source: Source, state: State) -> None:
position = node_position(node, source, column_offset=len("class "))
for base in node.bases:
visit(base, source, state)
with state.scope(node.name, lookup_scope=True, is_class=True):
state.add_occurrence(position=position)
with state.scope("()"):
with state.scope("."):
state.alias(("..", "..", "."))
|
CHT5/program-y
|
src/programy/config/brain.py
|
Python
|
mit
| 8,932
| 0.007053
|
"""
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.config.base import BaseConfigurationData
class BrainFileConfiguration(object):
def __init__(self, files, extension=".aiml", directories=False):
self._files = files
self._extension = extension
self._directories = directories
@property
def files(self):
return self._files
@property
def extension(self):
return self._extension
@property
def directories(self):
return self._directories
class BrainServiceConfiguration(object):
def __init__(self, name, data=None):
self._name = name.upper()
self._params = {}
if data is not None:
for key in data.keys():
self._params[key.upper()] = data[key]
@property
def name(self):
return self._name
@property
def path(self):
return self._params['PATH']
def parameters(self):
return self._params.keys()
def set_parameter(self, key, value):
self._params[key] = value
def parameter(self, name):
if name in self._params:
return self._params[name]
else:
return None
class BrainConfiguration(BaseConfigurationData):
DEFAULT_SUPRESS_WARNINGS = False
DEFAULT_ALLOW_SYSTEM_AIML = True
DEFAULT_ALLOW_LEARN_AIML = True
DEFAULT_ALLOW_LEARNF_AIML = True
def __init__(self):
self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS
self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML
self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML
self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML
self._aiml_files = None
self._set_files = None
self._map_files = None
self._denormal = None
self._normal = None
self._gender = None
self._person = None
self._person2 = None
self._predicates = None
self._pronouns = None
self._properties = None
self._triples = None
self._preprocessors = None
self._postprocessors = None
self._services = []
BaseConfigurationData.__init__(self, "brain")
def _get_brain_file_configuration(self, config_file, section, bot_root):
files = config_file.get_option(section, "files")
files = self.sub_bot_root(files, bot_root)
extension = config_file.get_option(section, "extension")
directories = config_file.get_option(section, "directories")
return BrainFileConfiguration(files, extension, directories)
def load_config_section(self, config_file, bot_root):
brain = config_file.get_section(self.section_name)
if brain is not None:
self._supress_warnings = config_file.get_option(brain, "supress_warnings", BrainConfiguration.DEFAULT_SUPRESS_WARNINGS)
self._allow_system_aiml = config_file.get_option(brain, "allow_system_aiml", BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML)
self._allow_learn_aiml = config_file.get_option(brain, "allow_learn_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML)
            self._allow_learnf_aiml = config_file.get_option(brain, "allow_learnf_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML)
files = config_file.get_section("files", brain)
if files is not None:
aiml = config_file.get_section("aiml", files)
self._aiml_files = self._get_brain_file_configuration(config_file, aiml, bot_root)
sets = config_file.get_section("sets", files)
self._set_files = self._get_brain_file_configuration(config_file, sets, bot_root)
maps = config_file.get_section("maps", files)
self._map_files = self._get_brain_file_configuration(config_file, maps, bot_root)
self._denormal = self._get_file_option(config_file, "denormal", files, bot_root)
self._normal = self._get_file_option(config_file, "normal", files, bot_root)
self._gender = self._get_file_option(config_file, "gender", files, bot_root)
self._person = self._get_file_option(config_file, "person", files, bot_root)
self._person2 = self._get_file_option(config_file, "person2", files, bot_root)
self._predicates = self._get_file_option(config_file, "predicates", files, bot_root)
self._pronouns = self._get_file_option(config_file, "pronouns", files, bot_root)
self._properties = self._get_file_option(config_file, "properties", files, bot_root)
self._triples = self._get_file_option(config_file, "triples", files, bot_root)
self._preprocessors = self._get_file_option(config_file, "preprocessors", files, bot_root)
                self._postprocessors = self._get_file_option(config_file, "postprocessors", files, bot_root)
else:
logging.warning("Config section [files] missing from Brain, default values not appropriate")
                raise Exception("Config section [files] missing from Brain")
services = config_file.get_section("services", brain)
if services is not None:
service_keys = config_file.get_child_section_keys("services", brain)
for name in service_keys:
service_data = config_file.get_section_data(name, services)
self._services.append(BrainServiceConfiguration(name, service_data))
else:
logging.warning("Config section [services] missing from Brain, no services loaded")
else:
logging.warning("Config section [%s] missing, using default values", self.section_name)
self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS
self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML
self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML
            self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML
@property
def supress_warnings(self):
return self._supress_warnings
@property
def allow_system_aiml(self):
return self._allow_system_aiml
@property
def allow_learn_aiml(self):
return self._allow_learn_aiml
@property
def allow_learnf_aiml(self):
return self._allow_learnf_aiml
@property
def aiml_files(self):
return self._aiml_files
@property
def set_files(self):
return self._set_files
@property
def map_files(self):
return self._map_files
@property
def denormal(self):
return self._denormal
@property
def
|
mindflayer/python-mocket
|
mocket/plugins/httpretty/__init__.py
|
Python
|
bsd-3-clause
| 3,152
| 0.000317
|
from mocket import Mocket, mocketize
from mocket.async_mocket import async_mocketize
from mocket.compat import byte_type, text_type
from mocket.mockhttp import Entry as MocketHttpEntry
from mocket.mockhttp import Request as MocketHttpRequest
from mocket.mockhttp import Response as MocketHttpResponse
def httprettifier_headers(headers):
return {k.lower().replace("_", "-"): v for k, v in headers.items()}
class Request(MocketHttpRequest):
@property
def body(self):
if self._body is None:
self._body = self.parser.recv_body()
return self._body
class Response(MocketHttpResponse):
def get_protocol_data(self, str_format_fun_name="lower"):
if "server" in self.headers and self.headers["server"] == "Python/Mocket":
self.headers["server"] = "Python/HTTPretty"
return super(Response, self).get_protocol_data(
str_format_fun_name=str_format_fun_name
)
def set_base_headers(self):
super(Response, self).set_base_headers()
self.headers = httprettifier_headers(self.headers)
original_set_base_headers = set_base_headers
def set_extra_headers(self, headers):
self.headers.update(headers)
class Entry(MocketHttpEntry):
request_cls = Request
response_cls = Response
activate = mocketize
httprettified = mocketize
async_httprettified = async_mocketize
enable = Mocket.enable
disable = Mocket.disable
reset = Mocket.reset
GET = Entry.GET
PUT = Entry.PUT
POST = Entry.POST
DELETE = Entry.DELETE
HEAD = Entry.HEAD
PATCH = Entry.PATCH
OPTIONS = Entry.OPTIONS
def register_uri(
method,
uri,
body="HTT
|
Pretty :)",
adding_headers=None,
forcing_headers=None,
status=200,
responses=None,
match_querystring=False,
priority=0,
**headers
):
headers = httprettifier_headers(headers)
if adding_headers is not None:
headers.update(httprettifier_headers(adding_headers))
if forcing_headers is not None:
def force_headers(self):
self.headers = httprettifier_headers(forcing_headers)
        Response.set_base_headers = force_headers
else:
Response.set_base_headers = Response.original_set_base_headers
if responses:
Entry.register(method, uri, *responses)
else:
Entry.single_register(
method,
uri,
body=body,
status=status,
headers=headers,
match_querystring=match_querystring,
)
class MocketHTTPretty:
Response = Response
def __getattr__(self, name):
if name == "last_request":
return Mocket.last_request()
if name == "latest_requests":
return Mocket.request_list()
return getattr(Entry, name)
HTTPretty = MocketHTTPretty()
HTTPretty.register_uri = register_uri
httpretty = HTTPretty
__all__ = (
"HTTPretty",
"activate",
"async_httprettified",
"httprettified",
"enable",
"disable",
"reset",
"Response",
"GET",
"PUT",
"POST",
"DELETE",
"HEAD",
"PATCH",
"register_uri",
"text_type",
"byte_type",
)
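# --- Editor's note: a hypothetical usage sketch of the shim above (URL and
# body invented; assumes the `requests` package is installed):
#   from mocket.plugins.httpretty import httprettified, httpretty
#
#   @httprettified
#   def test_fake_get():
#       httpretty.register_uri(httpretty.GET, 'http://example.com/api', body='{"ok": true}')
#       import requests
#       assert requests.get('http://example.com/api').text == '{"ok": true}'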
|
jawilson/home-assistant
|
homeassistant/components/smartthings/switch.py
|
Python
|
apache-2.0
| 1,911
| 0.000523
|
"""Support for switches through the SmartThings cloud API."""
from __future__ import annotations
from collections.abc import Sequence
from pysmartthings import Capability
from homeassistant.components.switch import SwitchEntity
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add switches for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsSwitch(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "switch")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
# Must be able to be turned on/off.
if Capability.switch in capabilities:
return [Capability.switch, Capability.energy_meter, Capability.power_meter]
return None
class SmartThingsSwitch(SmartThingsEntity, SwitchEntity):
"""Define a SmartThings switch."""
async def async_turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_on(self, **kwargs) -> None:
""
|
"Turn the switch on."""
await self._device.switch_on(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
|
OKThess/website
|
main/migrations/0063_event_date_end.py
|
Python
|
mit
| 469
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-30 17:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0062_auto_20171223_1552'),
]
operations = [
migrations.AddField(
            model_name='event',
name='date_end',
field=models.DateField(blank=True, default=None, null=True),
),
]
|
dpressel/baseline
|
baseline/pytorch/lm/model.py
|
Python
|
apache-2.0
| 11,032
| 0.001813
|
from baseline.pytorch.torchy import *
from eight_mile.pytorch.layers import TransformerEncoderStack, subsequent_mask, MultiHeadedAttention
from baseline.model import LanguageModel, register_model
from eight_mile.pytorch.serialize import load_tlm_npz
import torch.autograd
import os
class LanguageModelBase(nn.Module, LanguageModel):
def __init__(self):
super().__init__()
def save(self, outname):
torch.save(self, outname)
basename, _ = os.path.splitext(outname)
def create_loss(self):
return SequenceCriterion(LossFn=nn.CrossEntropyLoss)
@classmethod
def load(cls, filename, **kwargs):
device = kwargs.get('device')
if not os.path.exists(filename):
filename += '.pyt'
model = torch.load(filename, map_location=device)
model.gpu = False if device == 'cpu' else model.gpu
return model
def zero_state(self, batchsz):
return None
@property
def requires_state(self):
pass
def make_input(self, batch_dict, numpy_to_tensor=False):
example_dict = dict({})
for key in self.src_keys:
tensor = batch_dict[key]
if numpy_to_tensor:
tensor = torch.from_numpy(tensor)
if self.gpu:
tensor = tensor.cuda()
example_dict[key] = tensor
y = batch_dict.get('y')
if y is not None:
if numpy_to_tensor:
y = torch.from_numpy(y)
if self.gpu:
y = y.cuda()
example_dict['y'] = y
return example_dict
@classmethod
def create(cls, embeddings, **kwargs):
lm = cls()
lm.gpu = kwargs.get('gpu', True)
lm.tgt_key = kwargs.get('tgt_key')
if lm.tgt_key is None:
raise Exception('Need a `tgt_key` to know which source vocabulary should be used for destination ')
lm.src_keys = kwargs.get('src_keys', embeddings.keys())
lm.create_layers(embeddings, **kwargs)
checkpoint_name = kwargs.get('checkpoint')
if checkpoint_name is not None:
if checkpoint_name.endswith('npz'):
load_tlm_npz(lm, checkpoint_name)
else:
lm.load_state_dict(torch.load(checkpoint_name))
return lm
def create_layers(self, embeddings, **kwargs):
"""This method defines the model itself, and must be overloaded by derived classes
This function will update `self` with the layers required to execute the `call()` method
:param embeddings: The input feature indices
:param kwargs:
:return:
"""
def predict(self, batch_dict, **kwargs):
self.eval()
numpy_to_tensor = bool(kwargs.get('numpy_to_tensor', True))
batch_dict = self.make_input(batch_dict, numpy_to_tensor=numpy_to_tensor)
hidden = batch_dict.get('h')
step_softmax, _ = self(batch_dict, hidden)
return F.softmax(step_softmax, dim=-1)
class AbstractGeneratorLanguageModel(LanguageModelBase):
def create_layers(self, embeddings, **kwargs):
self.embeddings = self.init_embed(embeddings, **kwargs)
self.embeddings_proj = self.init_embeddings_proj(**kwargs)
self.generator = self.init_generate(**kwargs)
self.output_layer = self.init_output(embeddings, **kwargs)
def forward(self, input: Dict[str, TensorDef], hidden: TensorDef) -> Tuple[TensorDef, TensorDef]:
emb = self.embed(input)
output, hidden = self.generate(emb, hidden)
return self.output_layer(output), hidden
def embed(self, input):
embedded_dropout = self.embeddings(input)
return self.embeddings_proj(embedded_dropout)
def init_embed(self, embeddings: Dict[str, TensorDef], **kwargs) -> BaseLayer:
"""This method creates the "embedding" layer of the inputs, with an optional reduction
:param embeddings: A dictionary of embeddings
:Keyword Arguments: See below
* *embeddings_reduction* (defaults to `concat`) An operator to perform on a stack of embeddings
        * *embeddings_dropout* (defaults to 0.0) Dropout applied to the embeddings stack
:return: The output of the embedding stack followed by its reduction. This will typically be an output
with an additional dimension which is the hidden representation of the input
"""
reduction = kwargs.get('embeddings_reduction', 'concat')
embeddings_dropout = float(kwargs.get('embeddings_dropout', 0.0))
return EmbeddingsStack({k: embeddings[k] for k in self.src_keys}, embeddings_dropout, reduction=reduction)
def init_embeddings_proj(self, **kwargs):
input_sz = self.embeddings.output_dim
hsz = kwargs.get('hsz', kwargs.get('d_model'))
if hsz != input_sz:
proj = pytorch_linear(input_sz, hsz)
print('Applying a transform from {} to {}'.format(input_sz, hsz))
else:
proj = nn.Identity()
return proj
def init_generate(self, **kwargs):
pass
def generate(self, emb, hidden):
return self.generator((emb, hidden))
def init_output(self, embeddings, **kwargs):
self.vsz = embeddings[self.tgt_key].get_vsz()
hsz = kwargs.get('hsz', kwargs.get('d_model'))
unif = float(kwargs.get('unif', 0.0))
do_weight_tying = bool(kwargs.get('tie_weights', False))
output_bias = kwargs.get('output_bias', False)
if do_weight_tying:
output = WeightTieDense(embeddings[self.tgt_key], output_bias)
else:
output = pytorch_linear(hsz, self.vsz, unif)
return output
@register_model(task='lm', name='default')
class RNNLanguageModel(AbstractGeneratorLanguageModel):
def __init__(self):
super().__init__()
def zero_state(self, batchsz):
weight = next(self.parameters()).data
return (torch.autograd.Variable(weight.new(self.num_layers, batchsz, self.hsz).zero_()),
torch.autograd.Variable(weight.new(self.num_layers, batchsz, self.hsz).zero_()))
@property
def requires_state(self):
        return True
def init_generate(self, **kwargs):
pdrop = float(kwargs.get('dropout', 0.5))
self.num_layers = kwargs.get('layers', kwargs.get('num_layers', 1))
self.hsz = kwargs.get('hsz', kwargs.get('d_model'))
return WithDropoutOnFirst(LSTMEncoderWithState(self.hsz, self.hsz, self.num_layers, pdrop, batch_first=True),
pdrop,
kwargs.get('variational', False))
@register_model(task='lm', name='transformer')
class TransformerLanguageModel(AbstractGeneratorLanguageModel):
def __init__(self):
super().__init__()
@property
def requires_state(self):
        return False
def init_layer_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
module.weight.data.normal_(mean=0.0, std=self.weight_std)
if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
module.bias.data.zero_()
def init_generate(self, **kwargs):
pdrop = float(kwargs.get('dropout', 0.1))
layers = kwargs.get('layers', kwargs.get('num_layers', 1))
d_model = int(kwargs.get('d_model', kwargs.get('hsz')))
num_heads = kwargs.get('num_heads', 4)
d_ff = int(kwargs.get('d_ff', 4 * d_model))
rpr_k = kwargs.get('rpr_k')
d_k = kwargs.get('d_k')
scale = bool(kwargs.get('scale', True))
activation = kwargs.get('activation', 'gelu')
ffn_pdrop = kwargs.get('ffn_pdrop', 0.0)
layer_norm_eps = kwargs.get('layer_norm_eps', 1e-12)
layer_norms_after = kwargs.get('layer_norms_after', False)
layer_drop = kwargs.get('layer_drop', 0.0)
windowed_ra = kwargs.get('windowed_ra', False)
rpr_value_on = kwargs.get('rpr_value_on', True)
return TransformerEncoderStack(num_heads, d_model=d_model, pdrop=pdrop, scale=scale,
layers=layers, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k,
|
Raynes/quarantine
|
quarantine/__main__.py
|
Python
|
apache-2.0
| 719
| 0.002782
|
import click
from pycolorterm.pycolorterm import print_pretty
from quarantine.cdc import CDC
from sh import ErrorReturnCode
@click.group()
def cli():
pass
@cli.command()
@click.argument('name')
@click.argument('pip_args', nargs=-1)
def install(name, pip_args):
"""Install the package. Pip args specified with --."""
cdc = CDC(name)
try:
cdc.install(pip_args)
except ErrorReturnCode as e:
print_pretty("<FG_RED>Something went wrong! Rolling back...<END>")
cdc.uninstall()
@cli.command()
@click.argument('name')
def uninstall(name):
"""Uninstall
|
the package, environment and all."""
    cdc = CDC(name)
cdc.uninstall()
if __name__ == '__main__':
    cli()
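# --- Editor's note: hypothetical CLI usage (package name is a placeholder);
# pip arguments go after "--" per the install docstring:
#   quarantine install requests -- --upgrade
#   quarantine uninstall requests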
|
ksrajkumar/openerp-6.1
|
openerp/osv/orm.py
|
Python
|
agpl-3.0
| 245,233
| 0.004592
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Object Relational Mapping
#.apidoc module-mods: member-order: bysource
"""
Object relational mapping to database (postgresql) module
* Hierarchical structure
* Constraints consistency, validations
* Object meta Data depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default fields value
* Permissions optimisation
    * Persistent object: DB postgresql
    * Data conversions
    * Multi-level caching system
    * 2 different inheritance mechanisms
* Fields:
- classicals (varchar, integer, boolean, ...)
- relations (one2many, many2one, many2many)
- functions
"""
import calendar
import copy
import datetime
import itertools
import logging
import operator
import pickle
import re
import simplejson
import time
import types
from lxml import etree
import fields
import openerp
import openerp.netsvc as netsvc
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False whether
    the given name is valid or not.
    """
    return regex_object_name.match(name) is not None
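# Illustrative (a sketch, following regex_object_name defined above):
#   check_object_name('res.partner') -> True
#   check_object_name('Res Partner') -> False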
|
Alex-Chizhov/python_training
|
home_works/data/contacts.py
|
Python
|
apache-2.0
| 166
| 0.018072
|
from model.info_contact import Infos
testdata = [
    Infos(firstname="firstname1",lastname="lastname1"),
Infos(firstname="firstname2",lastname="lastname2")
]
|
vfulco/scalpel
|
lib/gravity/tae/match/__init__.py
|
Python
|
lgpl-3.0
| 94
| 0.031915
|
#__all__ = [ 'search', 'ham_distance', 'lev_distance', 'distance', 'distance_matrix' ]
| ||
b1-systems/kiwi
|
test/unit/storage/subformat/vhdx_test.py
|
Python
|
gpl-3.0
| 1,314
| 0
|
from mock import patch
import mock
from kiwi.storage.subformat.vhdx import DiskFormatVhdx
class TestDiskFormatVhdx:
@patch('platform.machine')
def setup(self, mock_machine):
mock_machine.return_value = 'x86_64'
xml_data = mock.Mock()
xml_data.get_name = mock.Mock(
return_value='some-disk-image'
)
self.xml_state = mock.Mock()
self.xml_state.xml_data = xml_data
self.xml_state.get_image_version = mock.Mock(
return_value='1.2.3'
)
self.disk_format = DiskFormatVhdx(
self.xml_state, 'root_dir', 'target_dir'
)
def test_post_init(self):
self.disk_format.post_init({'option': 'value'})
assert self.disk_format.options == [
'-o', 'option=value', '-o', 'subformat=dynamic'
]
@patch('kiwi.storage.subformat.vhdx.Command.run')
    def test_create_image_format(self, mock_command):
        self.disk_format.create_image_format()
mock_command.assert_called_once_with(
[
'qemu-img', 'convert', '-f', 'raw',
'target_dir/some-disk-image.x86_64-1.2.3.raw', '-O', 'vhdx',
'-o', 'subformat=dynamic',
'target_dir/some-disk-image.x86_64-1.2.3.vhdx'
]
)
|
Ma3X/boot-talker
|
codes/python/talk.py
|
Python
|
gpl-3.0
| 19,742
| 0.020819
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
DEBUG = True
observer = None
ser_port = None
s = 0
ser = None
#--------------------------------------------------------------------
import signal
import sys
import os
def signal_handler(signal, frame):
    global s, ser
print '\nYou pressed Ctrl+C!'
if s > 18:
print "MTK_Finalize"
serialPost(ser, "B7".decode("hex"))
time.sleep(0.1)
if ser.isOpen(): ser.close()
#sys.exit(0)
os._exit(0)
signal.signal(signal.SIGINT, signal_handler)
#--------------------------------------------------------------------
import os
import serial
from serial.tools import list_ports
def serial_ports():
"""
Returns a generator for all available serial ports
"""
if os.name == 'nt':
# windows
for i in range(256):
try:
s = serial.Serial(i)
s.close()
yield 'COM' + str(i + 1)
except serial.SerialException:
pass
else:
# unix
for port in list_ports.comports():
yield port[0]
#if __name__ == '__main__':
# print(list(serial_ports()))
#exit()
#--------------------------------------------------------------------
import serial, time, binascii
def serialPost(ser, data):
#time.sleep(0.5)
#data = chr(0x44)
print " -> " + binascii.b2a_hex(data)
ser.write(data)
#ser.flush()
def serialPostL(ser, data, slen, scnt):
sys.stdout.write("\r" + str(scnt) + " of " + str(slen) + " <- " + binascii.b2a_hex(data))
if slen == scnt: sys.stdout.write("\n")
#sys.stdout.flush()
ser.write(data)
def summ(block, length):
res = 0
for i in range(length):
res = res + ord(block[i])
#print str(res)
return chr(res & int(0xFF))
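# Example (illustrative): summ("\x01\x02\x03", 3) returns chr(0x06) -- the
# low byte of the sum of the first `length` bytes.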
def swapSerialData(data):
l = len(data)
#if l > 16:
# print "-> " + str(l) + " bytes"
#else:
# print "-> " + binascii.b2a_hex(data)
if len(data) > 0: ser.write(data)
n = 0
while n < 1:
n = ser.inWaiting()
#time.sleep(1)
data = ser.read(n)
l = len(data)
#print "RX is L: " + str(l) + " -> " + binascii.b2a_hex(data)
return data
#----- CONNECT TO PORT----------
def conn_port (ser_port):
print ser_port
print "module PySerial version: " + serial.VERSION
# if: error open serial port: (22, 'Invalid argument')
# http://superuser.com/questions/572034/how-to-restart-ttyusb
# cat /proc/tty/drivers
# lsmod | grep usbserial
# sudo modprobe -r pl2303 qcaux
# sudo modprobe -r usbserial
#import subprocess
#subprocess.call(['statserial', ser_port])
#subprocess.call(['setserial', '-G', ser_port])
# http://www.roman10.net/serial-port-communication-in-python/
# initialization and open the port
# possible timeout values:
# 1. None: wait forever, block call
# 2. 0: non-blocking mode, return immediately
# 3. x, x is bigger than 0, float allowed, timeout block call
global ser
ser = serial.Serial()
#ser.port = "COM29"
ser.port = ser_port
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS # number of bits per bytes
ser.parity = serial.PARITY_EVEN
ser.stopbits = serial.STOPBITS_ONE # number of stop bits
ser.timeout = None # block read
ser.rtscts = True # enable hardware (RTS/CTS) flow control (Hardware handshaking)
#ser.port = "/dev/ttyS0"
#ser.port = "/dev/ttyUSB0"
#ser.port = "2" # COM3
#ser.baudrate = 9600
#ser.parity = serial.PARITY_NONE # set parity check: no parity
#ser.timeout = 0 # non-block read
#ser.xonxoff = False # disable software flow control
#ser.rtscts = False # disable hardware (RTS/CTS) flow control
#ser.dsrdtr = False # disable hardware (DSR/DTR) flow control
#ser.writeTimeout = 2 # timeout for write
#data = chr(0x44) + chr(0x59)
#print "-> " + binascii.b2a_hex(data)
#exit()
try:
ser.open()
except Exception, e:
print "error open serial port: " + str(e)
print "for full reset serial device you must reload drivers:"
print " "
print " cat /proc/tty/drivers "
print " lsmod | grep usbserial "
print " sudo modprobe -r pl2303 qcaux "
print " sudo modprobe -r usbserial "
print " "
exit()
from hktool.bootload.samsung import sgh_e730
#loader1 = open("loader1.bin", "rb").read()
loader1 = sgh_e730.load_bootcode_first()
print "loader1.bin data size is: " + str(len(loader1))
ldr1_i = 0
ldr1_l = len(loader1)
ldr1_c = "4c00".decode("hex")
#loader2 = open("loader2.bin", "rb").read()
loader2 = sgh_e730.load_bootcode_second()
print "loader2.bin data size is: " + str(len(loader2))
ldr2_i = 0
ldr2_l = len(loader2)
#f = open("loader1.bin", "rb")
#try:
# byte = f.read(1)
# while byte != "":
# # Do stuff with byte.
# byte = f.read(1)
#except Exception, e1:
# print "error: " + str(e1)
# ser.close()
# import traceback
# traceback.print_exc()
#finally:
# f.close()
global s
if ser.isOpen():
try:
print 'Work with Samsung SGH-E730:'
print '- wait for SWIFT power on...'
ser.flushInput() # flush input buffer, discarding all its contents
ser.flushOutput() # flush output buffer, aborting current output
# and discard all that is in buffer
# write data
#ser.write("AT+CSQ=?\x0D")
#print("write data: AT+CSQ=?\x0D")
# steps
s = 0
serialPost(ser, "A0".decode("hex"))
while True:
n = 0
s += 1
while n < 1:
n = ser.inWaiting()
#time.sleep(1)
data = ser.read(n)
l = len(data)
#if s != 6 or ldr1_i == 0:
print "RX is L: " + str(l) + " <- " + binascii.b2a_hex(data)
if s == 1:
if data[l-1] == chr(0x5F):
serialPost(ser, chr(0x0A))
elif s == 2:
if data[l-1] == chr(0xF5):
serialPost(ser, chr(0x50))
elif s == 3:
#if l == 16:
# serialPost(ser, "4412345678".decode("hex") + data)
# -> AF
serialPost(ser, "05".decode("hex"))
elif s == 4:
#if data[l-1] == chr(0x4f):
# # set timeout to 1600 ms (10h)
# serialPost(ser, chr(0x54) + chr(0x10))
# # set timeout to 1600 ms (20h)
# #serialPost(ser, chr(0x54) + chr(0x20))
# -> FA
# A2 - read from memory
serialPost(ser, "A2".decode("hex"))
elif s == 5:
#if data[l-1] == chr(0x4f):
# serialPost(ser, "530000000c".decode("hex"))
# -> A2 - read command ACK
# 80 01 00 00 - Configuration Register: Hardware Version Register
serialPost(ser, "80010000".decode("hex"))
elif s == 6:
# -> 80 01 00 00
# 00 00 00 01 - read one byte
serialPost(ser, "00000001".decode("hex"))
#ldr1_i4 = 4*ldr1_i
#ldr1_i8 = 4*ldr1_i + 4
#if ldr1_i8 < ldr1_l:
# serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_i8], ldr1_l, ldr1_i8)
# s -= 1
#else:
# serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_l ], ldr1_l, ldr1_l )
#ldr1_i += 1
elif s == 7:
if l == 6: s += 1
elif s == 8:
# -> 00 00 00 01 - byte is read
# -> XX XX - byte:
serialPost(ser, "A2".decode("hex"))
#if data[l-1] == chr(0x4f):
# serialPost(ser, "530000000c".decode("hex"))
elif s == 9:
# -> A2
# 80 01 00 08 - Hardware Code Register
serialPost(ser, "80010008".decode("hex"))
#if data[l-1] == chr(0x4f):
# serialPost(ser, "4a".decode("hex"))
elif s == 10:
# -> 80 01 00 08
serialPost(ser, "00000001".decode("hex"))
#s = 20;
#if data[l-1] == chr(0xAB):
# # 0x00 -> Speed = 115200
# # 0x01 -> Speed = 230400
# # 0x02 -> Speed = 460800
                # 0x03 -> Speed = 921600
|
tomachalek/kontext
|
lib/plugins/abstract/chart_export.py
|
Python
|
gpl-2.0
| 3,124
| 0
|
# Copyright (c) 2017 Charles University in Prague, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
class UnknownFormatException(Exception):
pass
class AbstractChartExport(object):
"""
AbstractChartExport represents a single
format export (e.g. PDF, Excel).
"""
def get_content_type(self):
"""
return a content type identifier (e.g. 'application/json')
"""
raise NotImplementedError()
def get_format_name(self):
"""
Return a format identifier. It should be both
human-readable and unique within a single plug-in
installation. It means that in case of mixing of
        different AbstractChartExport implementations
it may be necessary to modify some names to
keep all the export functions available.
"""
raise NotImplementedError()
def get_suffix(self):
"""
Return a proper file suffix (e.g. 'xlsx' for Excel).
"""
raise NotImplementedError()
def export_pie_chart(self, data, title):
"""
Generate a PIE chart based on passed data and title.
The method is expected to return raw file data ready
to be downloaded by a client.
"""
raise NotImplementedError()
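# Illustrative only: a minimal hypothetical exporter subclass. The class name,
# the CSV format choice and the assumption that `data` is an iterable of
# (label, value) pairs are sketch-level assumptions, not part of the KonText sources.
class CSVChartExport(AbstractChartExport):
    def get_content_type(self):
        return 'text/csv'
    def get_format_name(self):
        return 'CSV'
    def get_suffix(self):
        return 'csv'
    def export_pie_chart(self, data, title):
        rows = ['label,value'] + ['%s,%s' % (label, value) for label, value in data]
        return '# %s\n%s' % (title, '\n'.join(rows))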
class AbstractChartExportPlugin(object):
"""
AbstractChartExportPlugin represents plug-in itself
which is expected to contain one or more implementations
of AbstractChartExport.
"""
def get_supported_types(self):
"""
Return a list of supported format names
(i.e. the values returned by AbstractChartExport.get_format_name()
of all the installed export classes).
"""
return []
def get_content_type(self, format):
"""
Return a content type for a specified format
(e.g. 'PDF' -> 'application/pdf')
arguments:
format -- format name (AbstractChartExport.get_format_name())
"""
raise NotImplementedError()
def get_suffix(self, format):
"""
Return a suffix for a specified format.
arguments:
format -- format name (AbstractChartExport.get_format_name())
"""
raise NotImplementedError()
def export_pie_chart(self, data, title, format):
"""
Export PIE chart data to a PIE chart of
a specified format.
arguments:
data -- chart data
title -- chart label
format -- format name (AbstractChartExport.get_format_name())
"""
raise NotImplementedError()
|
SiddharthSham/PetAteMyHW
|
wolfram.py
|
Python
|
gpl-3.0
| 692
| 0.023121
|
import config
#This module is used for calling the Wolfram Alpha API
#It defines a function that constructs an URL based on the query.
#NOTE: This module returns only the URL. This URL is passed in the bot.py file. Telegram takes care of the rest.
def query(query):
question = query.replace(" ","+") #plus encoding
return "http://api.wolframalpha.com/v1/simple?appid={}&i=".format(config.WOLFRAM) + question + "&format=image"
#returns ONLY the URL directly.
#Telegram's servers handle the requests by themselves for docs lesser than 20MB
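#Example (illustrative; assumes config.WOLFRAM holds a valid AppID):
#  query("time in Paris") -> "http://api.wolframalpha.com/v1/simple?appid=<AppID>&i=time+in+Paris&format=image"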
|
vesgel/quicknotes
|
plasmoid/contents/code/main.py
|
Python
|
gpl-3.0
| 1,970
| 0.001015
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011, Volkan Esgel
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# PyQt4 Section
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# PyKDE4 Section
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
# Application Section
from notemodel import NoteModel
from notedelegate import NoteDelegate
class QuickNotes(plasmascript.Applet):
def __init__(self, parent, args=None):
plasmascript.Applet.__init__(self, parent)
def init(self):
self.setHasConfigurationInterface(True)
self.setAspectRatioMode(Plasma.IgnoreAspectRatio)
"""
self.theme = Plasma.Svg(self)
self.theme.setImagePath("widgets/background")
        self.setBackgroundHints(Plasma.Applet.DefaultBackground)
"""
self.setBackgroundHints(Plasma.Applet.NoBackground)
self.__createMainLayout()
width = self.viewerSize.width() + 20
height = self.viewerSize.height() - 20
        self.resize(width, height)
def __createMainLayout(self):
self.mainLayout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
noteview = Plasma.TreeView(self.applet)
noteview.setStyleSheet("QTreeView { background: Transparent }")
nmodel = NoteModel(self.package().path(), noteview)
noteview.setModel(nmodel)
noteview.nativeWidget().setItemDelegate(NoteDelegate(self))
noteview.nativeWidget().setHeaderHidden(True)
noteview.nativeWidget().setIndentation(0)
self.mainLayout.addItem(noteview)
self.viewerSize = noteview.size()
self.applet.setLayout(self.mainLayout)
def CreateApplet(parent):
return QuickNotes(parent)
|
YannickJadoul/Parselmouth
|
pybind11/tests/test_kwargs_and_defaults.py
|
Python
|
gpl-3.0
| 10,048
| 0.001393
|
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import kwargs_and_defaults as m
def test_function_signatures(doc):
    assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str"
    assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str"
assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str"
assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None"
assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str"
assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
assert doc(m.args_function) == "args_function(*args) -> tuple"
assert (
doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
)
assert (
doc(m.KWClass.foo0)
== "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
)
assert (
doc(m.KWClass.foo1)
== "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
)
def test_named_arguments(msg):
assert m.kw_func0(5, 10) == "x=5, y=10"
assert m.kw_func1(5, 10) == "x=5, y=10"
assert m.kw_func1(5, y=10) == "x=5, y=10"
assert m.kw_func1(y=10, x=5) == "x=5, y=10"
assert m.kw_func2() == "x=100, y=200"
assert m.kw_func2(5) == "x=5, y=200"
assert m.kw_func2(x=5) == "x=5, y=200"
assert m.kw_func2(y=10) == "x=100, y=10"
assert m.kw_func2(5, 10) == "x=5, y=10"
assert m.kw_func2(x=5, y=10) == "x=5, y=10"
with pytest.raises(TypeError) as excinfo:
# noinspection PyArgumentList
m.kw_func2(x=5, y=10, z=12)
assert excinfo.match(
r"(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))"
+ "{3}$"
)
assert m.kw_func4() == "{13 17}"
assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"
assert m.kw_func_udl(x=5, y=10) == "x=5, y=10"
assert m.kw_func_udl_z(x=5) == "x=5, y=0"
def test_arg_and_kwargs():
args = "arg1_value", "arg2_value", 3
assert m.args_function(*args) == args
args = "a1", "a2"
kwargs = dict(arg3="a3", arg4=4)
assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)
def test_mixed_args_and_kwargs(msg):
mpa = m.mixed_plus_args
mpk = m.mixed_plus_kwargs
mpak = m.mixed_plus_args_kwargs
mpakd = m.mixed_plus_args_kwargs_defaults
assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))
assert mpa(1, 2.5) == (1, 2.5, ())
with pytest.raises(TypeError) as excinfo:
assert mpa(1)
assert (
msg(excinfo.value)
== """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with: 1
""" # noqa: E501 line too long
)
with pytest.raises(TypeError) as excinfo:
assert mpa()
assert (
msg(excinfo.value)
== """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with:
""" # noqa: E501 line too long
)
assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (
-2,
3.5,
{"e": 2.71828, "pi": 3.14159},
)
assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
7,
7.7,
(7.77, 7.777, 7.7777),
{"minusseven": -7},
)
assert mpakd() == (1, 3.14159, (), {})
assert mpakd(3) == (3, 3.14159, (), {})
assert mpakd(j=2.71828) == (1, 2.71828, (), {})
assert mpakd(k=42) == (1, 3.14159, (), {"k": 42})
assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
1,
1,
(2, 3, 5, 8),
{"then": 13, "followedby": 21},
)
# Arguments specified both positionally and via kwargs should fail:
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, i=1)
assert (
msg(excinfo.value)
== """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1; kwargs: i=1
""" # noqa: E501 line too long
)
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, 2, j=1)
assert (
msg(excinfo.value)
== """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1, 2; kwargs: j=1
""" # noqa: E501 line too long
)
def test_keyword_only_args(msg):
assert m.kw_only_all(i=1, j=2) == (1, 2)
assert m.kw_only_all(j=1, i=2) == (2, 1)
with pytest.raises(TypeError) as excinfo:
assert m.kw_only_all(i=1) == (1,)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
assert m.kw_only_all(1, 2) == (1, 2)
assert "incompatible function arguments" in str(excinfo.value)
assert m.kw_only_some(1, k=3, j=2) == (1, 2, 3)
assert m.kw_only_with_defaults(z=8) == (3, 4, 5, 8)
assert m.kw_only_with_defaults(2, z=8) == (2, 4, 5, 8)
assert m.kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
assert m.kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)
assert m.kw_only_mixed(1, j=2) == (1, 2)
assert m.kw_only_mixed(j=2, i=3) == (3, 2)
assert m.kw_only_mixed(i=2, j=3) == (2, 3)
assert m.kw_only_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {"extra": 7})
assert m.kw_only_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {"extra": 6})
assert m.kw_only_plus_more(2, k=3, extra=4) == (2, -1, 3, {"extra": 4})
with pytest.raises(TypeError) as excinfo:
assert m.kw_only_mixed(i=1) == (1,)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.register_invalid_kw_only(m)
assert (
msg(excinfo.value)
== """
arg(): cannot specify an unnamed argument after an kw_only() annotation
"""
)
def test_positional_only_args(msg):
assert m.pos_only_all(1, 2) == (1, 2)
assert m.pos_only_all(2, 1) == (2, 1)
with pytest.raises(TypeError) as excinfo:
m.pos_only_all(i=1, j=2)
assert "incompatible function arguments" in str(excinfo.value)
assert m.pos_only_mix(1, 2) == (1, 2)
assert m.pos_only_mix(2, j=1) == (2, 1)
with pytest.raises(TypeError) as excinfo:
m.pos_only_mix(i=1, j=2)
assert "incompatible function arguments" in str(excinfo.value)
assert m.pos_kw_only_mix(1, 2, k=3) == (1, 2, 3)
assert m.pos_kw_only_mix(1, j=2, k=3) == (1, 2, 3)
with pytest.raises(TypeError) as excinfo:
m.pos_kw_only_mix(i=1, j=2, k=3)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.pos_kw_only_mix(1, 2, 3)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.pos_only_def_mix()
assert "incompatible function arguments" in str(excinfo.value)
assert m.pos_only_def_mix(1) == (1, 2, 3)
assert m.pos_only_def_mix(1, 4) == (1, 4, 3)
assert m.pos_only_def_mix(1, 4, 7) == (1, 4, 7)
assert m.pos_only_def_mix(1, 4, k=7) == (1, 4, 7)
with pytest.raises(TypeError) as excinfo:
m.pos_only_def_mix(1, j=4)
assert "incompatible function arguments" in str(excinfo.value)
def test_signatures():
assert "kw_only_all(*, i: int, j: int) -> tuple\n" == m.kw_only_all.__doc__
assert "kw_only_mixed(i: int, *, j: int) -> tuple\n" == m.kw_only_mixed.__doc__
assert "pos_only_all(i: int, j: int, /) -> tuple\n" == m.pos_only_all.__doc__
assert "pos_only_mix(i: int, /, j: int) -> tuple\n" == m.pos_only_mix.__doc__
assert (
"pos_kw_only_mix(i: int, /, j: int, *, k: int) -> tuple\n"
== m.pos_kw_only_mix.__doc__
)
@pyte
|
vgripon/PyRat
|
imports/dummyplayer.py
|
Python
|
gpl-3.0
| 440
| 0.018182
|
import random
def preprocessing(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, piecesOfCheese, timeAllowed):
    return
def turn (mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
return
def postprocessing (mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
return
|
noox-/stbgui-1
|
lib/python/Plugins/Extensions/GraphMultiEPG/GraphMultiEpg.py
|
Python
|
gpl-2.0
| 40,917
| 0.034216
|
from skin import parseColor, parseFont, parseSize
from Components.config import config, ConfigClock, ConfigInteger, ConfigSubsection, ConfigYesNo, ConfigSelection, ConfigSelectionNumber
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.ActionMap import HelpableActionMap
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Components.EpgList import Rect
from Components.Sources.Event import Event
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.TimerList import TimerList
from Components.Renderer.Picon import getPiconName
from Components.Sources.ServiceEvent import ServiceEvent
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.EventView import EventViewEPGSelect
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEntry import TimerEntry
from Screens.EpgSelection import EPGSelection
from Screens.TimerEdit import TimerSanityConflict
from Screens.MessageBox import MessageBox
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from ServiceReference import ServiceReference, isPlayableForCur
from Tools.LoadPixmap import LoadPixmap
from Tools.Alternatives import CompareWithAlternatives
from Tools import Notifications
from enigma import eEPGCache, eListbox, ePicLoad, gFont, eListboxPythonMultiContent, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER, RT_WRAP,\
eSize, eRect, eTimer, getBestPlayableServiceReference
from GraphMultiEpgSetup import GraphMultiEpgSetup
from time import localtime, time, strftime
MAX_TIMELINES = 6
config.misc.graph_mepg = ConfigSubsection()
config.misc.graph_mepg.prev_time = ConfigClock(default = time())
config.misc.graph_mepg.prev_time_period = ConfigInteger(default = 120, limits = (60, 300))
config.misc.graph_mepg.ev_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 8, wraparound = True)
config.misc.graph_mepg.items_per_page = ConfigSelectionNumber(min = 3, max = 40, stepwidth = 1, default = 6, wraparound = True)
config.misc.graph_mepg.items_per_page_listscreen = ConfigSelectionNumber(min = 3, max = 60, stepwidth = 1, default = 12, wraparound = True)
config.misc.graph_mepg.default_mode = ConfigYesNo(default = False)
config.misc.graph_mepg.overjump = ConfigYesNo(default = True)
config.misc.graph_mepg.center_timeline = ConfigYesNo(default = False)
config.misc.graph_mepg.servicetitle_mode = ConfigSelection(default = "picon+servicename", choices = [
("servicename", _("Service name")),
("picon", _("Picon")),
("picon+servicename", _("Picon and service name")) ])
config.misc.graph_mepg.roundTo = ConfigSelection(default = "900", choices = [("900", _("%d minutes") % 15), ("1800", _("%d minutes") % 30), ("3600", _("%d minutes") % 60)])
config.misc.graph_mepg.OKButton = ConfigSelection(default = "info", choices = [("info", _("Show detailed event info")), ("zap", _("Zap to selected channel"))])
possibleAlignmentChoices = [
( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER ) , _("left")),
( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER ) , _("centered")),
( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER ) , _("right")),
( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER | RT_WRAP) , _("left, wrapped")),
( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER | RT_WRAP) , _("centered, wrapped")),
( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER | RT_WRAP) , _("right, wrapped"))]
config.misc.graph_mepg.event_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
config.misc.graph_mepg.servicename_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
listscreen = config.misc.graph_mepg.default_mode.value
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, selChangedCB = None, timer = None, time_epoch = 120, overjump_empty = True):
GUIComponent.__init__(self)
self.cur_event = None
self.cur_service = None
self.offs = 0
self.timer = timer
self.last_time = time()
self.onSelChanged = [ ]
if selChangedCB is not None:
self.onSelChanged.append(selChangedCB)
self.l = eListboxPythonMultiContent()
self.l.setBuildFunc(self.buildEntry)
self.setOverjump_Empty(overjump_empty)
self.epgcache = eEPGCache.getInstance()
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')) ]
self.time_base = None
self.time_epoch = time_epoch
self.list = None
self.select_rect = None
self.event_rect = None
self.service_rect = None
self.picon_size = None
self.currentlyPlaying = None
self.showPicon = False
self.showServiceTitle = True
self.picload = ePicLoad()
self.nowEvPix = None
self.othEvPix = None
self.selEvPix = None
self.recEvPix = None
self.curSerPix = None
self.foreColor = 0xffffff
self.foreColorSelected = 0xffc000
self.borderColor = 0x464445
self.backColor = 0x595959
self.backColorSelected = 0x808080
self.foreColorService = 0xffffff
self.foreColorServiceSelected = 0xffffff
self.backColorService = 0x000000
self.backColorServiceSelected = 0x508050
self.borderColorService = 0x000000
self.foreColorNow = 0xffffff
self.backColorNow = 0x505080
self.foreColorRec = 0xffffff
self.backColorRec = 0x805050
self.serviceFont = gFont("Regular", 20)
self.entryFontName = "Regular"
self.entryFontSize = 18
self.listHeight = None
self.listWidth = None
self.serviceBorderWidth = 1
self.serviceNamePadding = 0
self.eventBorderWidth = 1
self.eventNamePadding = 0
def applySkin(self, desktop, screen):
if self.skinAttributes is not None:
attribs = [ ]
for (attrib, value) in self.skinAttributes:
if attrib == "EntryForegroundColor":
self.foreColor = parseColor(value).argb()
elif attrib == "EntryForegroundColorSelected":
self.foreColorSelected = parseColor(value).argb()
elif attrib == "EntryBackgroundColor":
self.backColor = parseColor(value).argb()
elif attrib == "EntryBackgroundColorSelected":
self.backColorSelected = parseColor(value).argb()
elif attrib == "EntryBorderColor":
self.borderColor = parseColor(value).argb()
elif attrib == "EntryFont":
font = parseFont(value, ((1,1),(1,1)) )
self.entryFontName = font.family
self.entryFontSize = font.pointSize
elif attrib == "ServiceForegroundColor" or attrib == "ServiceNameForegroundColor":
self.foreColorService = parseColor(value).argb()
elif attrib == "ServiceForegroundColorSelected":
self.foreColorServiceSelected = parseColor(value).argb()
elif attrib == "ServiceBackgroundColor" or attrib == "ServiceNameBackgroundColor":
self.backColorService = parseColor(value).argb()
elif attrib == "ServiceBackgroundColorSelected":
self.backColorServiceSelected = parseColor(value).argb()
elif attrib == "ServiceBackgroundColorRecording" or attrib == "ServiceNameBackgroundColor":
self.backColorRec = parseColor(value).argb()
elif attrib == "ServiceForegroundColorRecording":
self.foreColorRec = parseColor(value).argb()
elif attrib == "ServiceBorderColor":
self.borderColorService = parseColor(value).argb()
elif attrib == "ServiceFont":
self.serviceFont = parseFont(value, ((1,1),(1,1)) )
elif attrib == "EntryBackgroundColorNow":
self.backColorNow = parseColor(value).argb()
elif attrib == "EntryForegroundColorNow":
self.foreColorNow = parseColor(value).argb()
elif attrib == "ServiceBorderWidth":
self.serviceBorderWidth = int(value)
elif attrib == "
|
hiuwo/acq4
|
acq4/pyqtgraph/parametertree/Parameter.py
|
Python
|
mit
| 29,533
| 0.009142
|
from ..Qt import QtGui, QtCore
import os, weakref, re
from ..pgcollections import OrderedDict
from .ParameterItem import ParameterItem
PARAM_TYPES = {}
PARAM_NAMES = {}
def registerParameterType(name, cls, override=False):
global PARAM_TYPES
if name in PARAM_TYPES and not override:
raise Exception("Parameter type '%s' already exists (use override=True to replace)" % name)
PARAM_TYPES[name] = cls
PARAM_NAMES[cls] = name
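# Illustrative usage (a sketch; "MyParameter" is a hypothetical subclass):
#   registerParameterType('mytype', MyParameter)
# makes Parameter.create(type='mytype', ...) resolve to MyParameter.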
class Parameter(QtCore.QObject):
"""
A Parameter is the basic unit of data in a parameter tree. Each parameter has
a name, a type, a value, and several other properties that modify the behavior of the
Parameter. Parameters may have parent / child / sibling relationships to construct
organized hierarchies. Parameters generally do not have any inherent GUI or visual
interpretation; instead they manage ParameterItem instances which take care of
display and user interaction.
Note: It is fairly uncommon to use the Parameter class directly; mostly you
will use subclasses which provide specialized type and data handling. The static
    method Parameter.create(...) is an easy way to generate instances of these subclasses.
For more Parameter types, see ParameterTree.parameterTypes module.
=================================== =========================================================
**Signals:**
sigStateChanged(self, change, info) Emitted when anything changes about this parameter at
all.
The second argument is a string indicating what changed
('value', 'childAdded', etc..)
The third argument can be any extra information about
the change
sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state
(but only if monitorChildren() is called)
the format of *changes* is [(param, change, info), ...]
sigValueChanged(self, value) Emitted when value is finished changing
sigValueChanging(self, value) Emitted immediately for all value changes,
including during editing.
sigChildAdded(self, child, index) Emitted when a child is added
sigChildRemoved(self, child) Emitted when a child is removed
sigRemoved(self) Emitted when this parameter is removed
sigParentChanged(self, parent) Emitted when this parameter's parent has changed
sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed
sigDefaultChanged(self, default) Emitted when this parameter's default value has changed
sigNameChanged(self, name) Emitted when this parameter's name has changed
sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed
=================================== =========================================================
"""
## name, type, limits, etc.
## can also carry UI hints (slider vs spinbox, etc.)
sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited
sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited
sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index
sigChildRemoved = QtCore.Signal(object, object) ## self, child
sigRemoved = QtCore.Signal(object) ## self
sigParentChanged = QtCore.Signal(object, object) ## self, parent
sigLimitsChanged = QtCore.Signal(object, object) ## self, limits
sigDefaultChanged = QtCore.Signal(object, object) ## self, default
sigNameChanged = QtCore.Signal(object, object) ## self, name
    sigOptionsChanged = QtCore.Signal(object, object)   ## self, {opt:val, ...}
## Emitted when anything changes about this parameter at all.
## The second argument is a string indicating what changed ('value', 'childAdded', etc..)
## The third argument can be any extra information about the change
sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info
## emitted when any child in the tree changes state
## (but only if monitorChildren() is called)
    sigTreeStateChanged = QtCore.Signal(object, object)  # self, changes
# changes = [(param, change, info), ...]
# bad planning.
#def __new__(cls, *args, **opts):
#try:
#cls = PARAM_TYPES[opts['type']]
#except KeyError:
#pass
#return QtCore.QObject.__new__(cls, *args, **opts)
@staticmethod
def create(**opts):
"""
Static method that creates a new Parameter (or subclass) instance using
opts['type'] to select the appropriate class.
All options are passed directly to the new Parameter's __init__ method.
Use registerParameterType() to add new class types.
"""
typ = opts.get('type', None)
if typ is None:
cls = Parameter
else:
cls = PARAM_TYPES[opts['type']]
return cls(**opts)
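    # Illustrative usage (a sketch; 'int' is one of the types registered in
    # pyqtgraph's parameterTypes module, the option names are assumptions):
    #   p = Parameter.create(name='count', type='int', value=3, default=0)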
def __init__(self, **opts):
"""
Initialize a Parameter object. Although it is rare to directly create a
Parameter instance, the options available to this method are also allowed
by most Parameter subclasses.
================= =========================================================
Keyword Arguments
name The name to give this Parameter. This is the name that
will appear in the left-most column of a ParameterTree
for this Parameter.
value The value to initially assign to this Parameter.
default The default value for this Parameter (most Parameters
provide an option to 'reset to default').
children A list of children for this Parameter. Children
may be given either as a Parameter instance or as a
dictionary to pass to Parameter.create(). In this way,
it is possible to specify complex hierarchies of
Parameters from a single nested data structure.
readonly If True, the user will not be allowed to edit this
Parameter. (default=False)
enabled If False, any widget(s) for this parameter will appear
disabled. (default=True)
visible If False, the Parameter will not appear when displayed
in a ParameterTree. (default=True)
renamable If True, the user may rename this Parameter.
(default=False)
removable If True, the user may remove this Parameter.
(default=False)
expanded If True, the Parameter will appear expanded when
displayed in a ParameterTree (its children will be
visible). (default=True)
================= =========================================================
"""
QtCore.QObject.__init__(self)
self.opts = {
'type': None,
'readonly': False,
'visible': True,
'enabled': True,
'renamable': False,
'removable': False,
'strictNaming': False, # forces name to be usable as a python variable
'expanded': True,
#'limits': None, ## This is a bad plan--each parameter type may have a different data type for limits.
}
self.opts.update(opts)
self.childs = []
self.names = {} ## map name:child
self.items = weakref.WeakKeyDictionary()
|
deffi/protoplot
|
protoplot/engine/item.py
|
Python
|
agpl-3.0
| 6,684
| 0.006882
|
from protoplot.engine.options_container import OptionsContainer
from protoplot.engine.tag import make_tags_list
from protoplot.engine.item_metaclass import ItemMetaclass # @UnusedImport
from protoplot.engine.item_container import ItemContainer
# TODO options should be resolved in the proper order. Here's the proposed
# resulting order for series:
# my_series .set(...)
# my_plot .series.all.set(...)
# my_page .plots.all.series.all.set(...)
# Page.all.plots.all.series.all.set(...)
# Plot .all.series.all.set(...)
# Series.all.set(...)
# For testability, a resolved option should probably store a complete list
# of values in order of priority.
class Item(metaclass=ItemMetaclass):
'''
Represents an item in the tree. Items typically contain (a) other items, and
(b) item containers.
An Item *instance* has the following attributes:
* An "options" property (of type OptionsContainer), which contains the
options for this specific instance.
* A "set" method as a shortcut for setting these options.
* A (potentially empty) set of tags to allow selective application of
options (a tag is similar to a class in CSS).
An Item *subclass* has the following (class) attributes:
* An item accessor which will return a template item instance for a given
tag specification, which can be a string or the empty slice to specify
the default template. (TODO really empty slice?)
* An "all" property as a shortcut for [:]
* A "set" method as a shortcut for [:].set
* A constructor taking options like the set method
Item subclasses should call the Item constructor with all *args and **kwargs
and define a register_options method to register the options, like so:
self.options.register("color", False, "black")
Note that the Item constructor, which runs before the Item subclass
constructor, sets the initial options. The options must already be
registered at this point, so this cannot be done by the Item subclass
constructor.
'''
def __init__(self, **kwargs):
'''
All kwargs will be used as options, except:
* tag => use as tag(s)
'''
# Create the tag list and remove the tag argument from kwargs.
if 'tag' in kwargs:
self.tags = make_tags_list(kwargs['tag'])
del kwargs['tag']
else:
self.tags = []
# Create the instance-level options and initialize them from the
# remaining kwargs.
#self.options = OptionsContainer(self.__class__.options)
self.options = OptionsContainer()
# Subclasses must override this method. We cannot do this in the
# subclass constructor because it must be done before setting the kwargs
# as options.
self.register_options()
self.options.set(**kwargs)
# Add the instance-level set method. See __set for an explanation.
self.set = self.__set
##############
## Children ##
##############
    def children(self):
        return [(name, attribute)
for name, attribute in self.__dict__.items()
if isinstance(attribute, Item)]
def containers(self):
return [(name, attribute)
for name, attribute in self.__dict__.items()
if isinstance(attribute, ItemContainer)]
#############
## Options ##
#############
def register_options(self):
raise NotImplementedError(
"Item subclasses must implement the register_options method")
def __set(self, **kwargs):
'''
A setter shortcut for the instance-level options.
This can't be called "set" because there is already a class method with
the same name (defined in the metaclass) and Python does not have
separate namespaces for class methods and instance methods. Therefore,
this method will be assigned to the name of "set" in the instance
namespace by the constructor.
'''
self.options.set(**kwargs)
def resolve_options(self, templates = None, inherited = None, indent="", verbose = False):
def p(*args, **kwargs):
if verbose:
print(indent, *args, **kwargs)
p("Resolve options for", self)
p("* Templates:", templates)
p("* Tags:", self.tags)
# Determine the applicable templates: the ones kindly selected by our
# parent, plus the matching templates from our own class.
templates = templates or []
templates = templates + type(self).matching_templates(self.tags)
template_option_containers = [t.options for t in templates]
inherited = inherited or dict()
# Determine the options for self
own_options = self.options.resolve(template_option_containers, inherited)
#print(indent+"* Own options: {}".format(own_options))
# Determine the options for direct children (recursively)
children_options = {}
for name, child in self.children():
p("* Child", name)
child_templates = [
getattr(template, name)
for template in templates
]
child_inherited = own_options
child_options = child.resolve_options(child_templates, child_inherited, indent = indent+" ", verbose = verbose)
children_options.update(child_options)
# Determine the options for children in containers (recursively)
containers_options = {}
for name, container in self.containers():
p("* Container", name, container)
template_containers = [
getattr(template, name)
for template in templates + [self]
]
p("* Template_containers", template_containers)
for child in container.items:
# Select the matching templates for the child
child_templates = []
for container in template_containers:
child_templates += container.matching_templates(child.tags)
child_inherited = own_options
child_options = child.resolve_options(child_templates, own_options, indent = indent+" ", verbose = verbose)
containers_options.update(child_options)
result = {}
result[self] = own_options
result.update(children_options)
result.update(containers_options)
return result
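# Illustrative only: a minimal hypothetical Item subclass (the name "Series"
# and its options are assumptions for demonstration, not protoplot sources);
# it registers its options exactly as the Item docstring prescribes.
class Series(Item):
    def register_options(self):
        self.options.register("color", False, "black")
        self.options.register("lineWidth", False, 1)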
|
nwjs/chromium.src
|
testing/unexpected_passes_common/result_output.py
|
Python
|
bsd-3-clause
| 21,925
| 0.007206
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Methods related to outputting script results in a human-readable format.
Also probably a good example of how to *not* write HTML.
"""
from __future__ import print_function
import collections
import logging
import sys
import tempfile
import six
from unexpected_passes_common import data_types
FULL_PASS = 'Fully passed in the following'
PARTIAL_PASS = 'Partially passed in the following'
NEVER_PASS = 'Never passed in the following'
HTML_HEADER = """\
<!DOCTYPE html>
<html>
<head>
<meta content="width=device-width">
<style>
.collapsible_group {
background-color: #757575;
border: none;
color: white;
font-size:20px;
outline: none;
text-align: left;
width: 100%;
}
.active_collapsible_group, .collapsible_group:hover {
background-color: #474747;
}
.highlighted_collapsible_group {
background-color: #008000;
border: none;
color: white;
font-size:20px;
outline: none;
text-align: left;
width: 100%;
}
.active_highlighted_collapsible_group, .highlighted_collapsible_group:hover {
background-color: #004d00;
}
.content {
background-color: #e1e4e8;
display: none;
padding: 0 25px;
}
button {
user-select: text;
}
h1 {
background-color: black;
color: white;
}
</style>
</head>
<body>
"""
HTML_FOOTER = """\
<script>
function OnClickImpl(element) {
let sibling = element.nextElementSibling;
if (sibling.style.display === "block") {
sibling.style.display = "none";
} else {
sibling.style.display = "block";
}
}
function OnClick() {
this.classList.toggle("active_collapsible_group");
OnClickImpl(this);
}
function OnClickHighlighted() {
this.classList.toggle("active_highlighted_collapsible_group");
OnClickImpl(this);
}
// Repeatedly bubble up the highlighted_collapsible_group class as long as all
// siblings are highlighted.
let found_element_to_convert = false;
do {
found_element_to_convert = false;
// Get an initial list of all highlighted_collapsible_groups.
let highlighted_collapsible_groups = document.getElementsByClassName(
"highlighted_collapsible_group");
let highlighted_list = [];
for (elem of highlighted_collapsible_groups) {
highlighted_list.push(elem);
}
// Bubble up the highlighted_collapsible_group class.
while (highlighted_list.length) {
elem = highlighted_list.shift();
if (elem.tagName == 'BODY') {
continue;
}
if (elem.classList.contains("content")) {
highlighted_list.push(elem.previousElementSibling);
continue;
}
if (elem.classList.contains("collapsible_group")) {
found_element_to_convert = true;
elem.classList.add("highlighted_collapsible_group");
elem.classList.remove("collapsible_group");
}
sibling_elements = elem.parentElement.children;
let found_non_highlighted_group = false;
for (e of sibling_elements) {
if (e.classList.contains("collapsible_group")) {
found_non_highlighted_group = true;
break
}
}
if (!found_non_highlighted_group) {
highlighted_list.push(elem.parentElement);
}
}
} while (found_element_to_convert);
// Apply OnClick listeners so [highlighted_]collapsible_groups properly
// shrink/expand.
let collapsible_groups = document.getElementsByClassName("collapsible_group");
for (element of collapsible_groups) {
element.addEventListener("click", OnClick);
}
highlighted_collapsible_groups = document.getElementsByClassName(
"highlighted_collapsible_group");
for (element of highlighted_collapsible_groups) {
element.addEventListener("click", OnClickHighlighted);
}
</script>
</body>
</html>
"""
SECTION_STALE = 'Stale Expectations (Passed 100% Everywhere, Can Remove)'
SECTION_SEMI_STALE = ('Semi Stale Expectations (Passed 100% In Some Places, '
'But Not Everywhere - Can Likely Be Modified But Not '
'Necessarily Removed)')
SECTION_ACTIVE = ('Active Expectations (Failed At Least Once Everywhere, '
'Likely Should Be Left Alone)')
SECTION_UNMATCHED = ('Unmatched Results (An Expectation Existed When The Test '
'Ran, But No Matching One Currently Exists)')
SECTION_UNUSED = ('Unused Expectations (Indicative Of The Configuration No '
'Longer Being Tested Or Tags Changing)')
MAX_BUGS_PER_LINE = 5
MAX_CHARACTERS_PER_CL_LINE = 72
def OutputResults(stale_dict,
semi_stale_dict,
active_dict,
unmatched_results,
unused_expectations,
output_format,
file_handle=None):
"""Outputs script results to |file_handle|.
Args:
stale_dict: A data_types.TestExpectationMap containing all the stale
expectations.
semi_stale_dict: A data_types.TestExpectationMap containing all the
semi-stale expectations.
active_dict: A data_types.TestExpectationmap containing all the active
expectations.
    unmatched_results: Any unmatched results found while filling
|test_expectation_map|, as returned by
queries.FillExpectationMapFor[Ci|Try]Builders().
unused_expectations: A dict from expectation file (str) to list of
unmatched Expectations that were pulled out of |test_expectation_map|
output_format: A string denoting the format to output to. Valid values are
"print" and "html".
file_handle: An optional open file-like object to output to. If not
specified, a suitable default will be used.
"""
assert isinstance(stale_dict, data_types.TestExpectationMap)
  assert isinstance(semi_stale_dict, data_types.TestExpectationMap)
assert isinstance(active_dict, data_types.TestExpectationMap)
logging.info('Outputting results in format %s', output_format)
stale_str_dict = _ConvertTestExpectationMapToStringDict(stale_dict)
semi_stale_str_dict = _ConvertTestExpectationMapToStringDict(semi_stale_dict)
active_str_dict = _ConvertTestExpectationMapToStringDict(active_dict)
unmatched_results_str_dict = _ConvertUnmatchedResultsToStringDict(
      unmatched_results)
unused_expectations_str_list = _ConvertUnusedExpectationsToStringDict(
unused_expectations)
if output_format == 'print':
file_handle = file_handle or sys.stdout
if stale_dict:
file_handle.write(SECTION_STALE + '\n')
RecursivePrintToFile(stale_str_dict, 0, file_handle)
if semi_stale_dict:
file_handle.write(SECTION_SEMI_STALE + '\n')
RecursivePrintToFile(semi_stale_str_dict, 0, file_handle)
if active_dict:
file_handle.write(SECTION_ACTIVE + '\n')
RecursivePrintToFile(active_str_dict, 0, file_handle)
if unused_expectations_str_list:
file_handle.write('\n' + SECTION_UNUSED + '\n')
RecursivePrintToFile(unused_expectations_str_list, 0, file_handle)
if unmatched_results_str_dict:
file_handle.write('\n' + SECTION_UNMATCHED + '\n')
RecursivePrintToFile(unmatched_results_str_dict, 0, file_handle)
elif output_format == 'html':
should_close_file = False
if not file_handle:
should_close_file = True
file_handle = tempfile.NamedTemporaryFile(delete=False,
suffix='.html',
mode='w')
file_handle.write(HTML_HEADER)
if stale_dict:
file_handle.write('<h1>' + SECTION_STALE + '</h1>\n')
_RecursiveHtmlToFile(stale_str_dict, file_handle)
if semi_stale_dict:
file_handle.write('<h1>' + SECTION_SEMI_STALE + '</h1>\n')
_RecursiveHtmlToFile(semi_stale_str_dict, file_handle)
if active_dict:
file_handle.write('<h1>' + SECTION_ACTIVE + '</h1>\n')
_RecursiveHtmlToFile(active_str_dict, file_handle)
if unused_expectations_str_list:
file_handle.write('\n<h1>' + SECTION_UNUSED + "</h1>\n")
_RecursiveHtmlToFile(unused_expectations_str_list, file_handle)
if unmatched_results_str_dict:
file_handle.write('\n<h1>' + SECTION_UNMATCHED + '</h1>\n')
      _RecursiveHtmlToFile(unmatched_results_str_dict, file_handle)
|
jaliste/sanaviron
|
sanaviron/ui/gradienteditor.py
|
Python
|
apache-2.0
| 10,331
| 0.00242
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gtk
from objects.gradientcolor import GradientColor
from objects.gradient import Gradient
from interfaces.signalizable import Signalizable
class GradientLine(gtk.Viewport):
def __init__(self, moving_callback=None, color_callback=None, gradient=None):
"""
        moving_callback - callback function to be called when changing position of the selected color (for spin widget)
gradient - editable gradient
"""
gtk.Viewport.__init__(self)
self.set_size_request(-1, 70)
self.set_shadow_type(gtk.SHADOW_NONE)
self.width = 0
self.height = 0
self._motion = False
self.selected = -1
self.x = 0
self.move = False
self.gradient = gradient
self.gradient.change_size(0, 0, 1, 0)
self.moving_callback = moving_callback
self.color_callback = color_callback
self.layout = gtk.Layout()
self.add(self.layout)
self.layout.set_events(0)
self.layout.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.layout.connect("button-press-event", self.press)
self.layout.add_events(gtk.gdk.EXPOSURE_MASK)
self.layout.connect("expose-event", self.expose)
self.layout.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.layout.connect("button-release-event", self.release)
self.layout.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.layout.connect("motion-notify-event", self.motion)
self.layout.add_events(gtk.gdk.ENTER_NOTIFY_MASK)
self.layout.connect("enter-notify-event", self.enter)
self.layout.add_events(gtk.gdk.LEAVE_NOTIFY_MASK)
self.layout.connect("leave-notify-event", self.leave)
def update(self):
self.queue_draw()
def set_position_for_selected(self, x):
self.gradient.set_position(self.selected, x)
def set_color_for_selected(self, color):
color.position = self.gradient.colors[self.selected].position
self.gradient.set_color(self.selected, color)
def motion(self, widget, event):
self._motion = True
self.x = event.x
if self.move:
if self.selected >= 0:
if self.moving_callback:
self.moving_callback(event.x / self.width)
self.set_position_for_selected(event.x / self.width)
self.gradient.update()
self.queue_draw()
return True
def enter(self, widget, event):
return True
def leave(self, widget, event):
self._motion = False
self.x = event.x
self.queue_draw()
return True
def press(self, widget, event):
        self.move = True
cnt = len(self.gradient.colors)
if cnt > 0:
for col in range(0, cnt):
if (self.gradient.colors[col].position > (event.x / self.width - 0.01)) and (
self.gradient.colors[col].position < (event.x / self.width + 0.01)):
self.selected = col
self.moving_callback(self.gradient.colors[col].position)
self.color_callback(self.gradient.colors[col])
break
else:
self.selected = -1
if self.selected == -1 or not cnt:
self.gradient.add_new_color(GradientColor(1, 1, 0.1, 1.0, event.x / self.width))
self.selected = len(self.gradient.colors)-1
self.moving_callback(self.gradient.colors[self.selected].position)
self.color_callback(self.gradient.colors[self.selected])
self.gradient.update()
self.queue_draw()
def release(self, widget, event):
self.move = False
self.queue_draw()
def expose(self, widget, event):
context = widget.bin_window.cairo_create()
self.width, self.height = widget.window.get_size()
context.save()
context.new_path()
#context.translate(0, 0)
if (self.width > 0) and (self.height > 0):
context.scale(self.width, self.height)
context.rectangle(0, 0, 1, 1)
context.set_source(self.gradient.gradient)
context.fill_preserve()
context.restore()
if self._motion and not self.move:
context.new_path()
dash = list()
context.set_dash(dash)
context.set_line_width(2)
context.move_to(self.x, 0)
context.line_to(self.x, 30)
context.move_to(self.x, self.height - 30)
context.line_to(self.x, self.height)
scol = sorted(self.gradient.colors,
key=lambda color: color.position) # better in __init__ and update when necessary
cnt = len(scol)
rx = self.x / self.width
index = 0
for col in scol:
if rx < col.position:
for c in range(0, cnt):
if self.gradient.colors[c].position == col.position:
index = c
break
break
r = self.gradient.colors[index].red
g = self.gradient.colors[index].green
b = self.gradient.colors[index].blue
l = 1 - (r + g + b) / 3.0
if l >= 0.5:
l = 1
else:
l = 0
r, g, b = l, l, l
context.set_source_rgba(r, g, b, 1.0)
context.stroke()
for color in range(len(self.gradient.colors)):
if color == self.selected:
delta = 10
else:
delta = 0
context.new_path()
pos = int(self.width * self.gradient.colors[color].position)
context.move_to(pos - 5, 0)
context.line_to(pos + 5, 0)
context.line_to(pos, 20)
context.line_to(pos - 5, 0)
context.set_source_rgb(self.gradient.colors[color].alpha, self.gradient.colors[color].alpha,
self.gradient.colors[color].alpha)
context.fill_preserve()
if delta:
context.move_to(pos, 20)
context.line_to(pos, 20 + delta)
context.set_source_rgb(0.44, 0.62, 0.81)
context.stroke()
class LinearGradientEditor(gtk.VBox, Signalizable):
def __init__(self):
gtk.VBox.__init__(self)
from canvas import Canvas
self.canvas = Canvas()
table = gtk.Table(4, 4, False)
self.pack_start(table)
self.combobox = gtk.combo_box_new_text()
table.attach(self.combobox, 1, 2, 0, 1, gtk.FILL, 0)
gradient = Gradient()
self.gl = GradientLine(self.moving_callback, self.color_callback, gradient)
table.attach(self.gl, 1, 2, 1, 2, gtk.FILL | gtk.EXPAND, 0)
new_color = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_NEW, gtk.ICON_SIZE_MENU)
new_color.add(image)
table.attach(new_color, 2, 3, 0, 1, 0, 0, 0)
button = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_GO_FORWARD, gtk.ICON_SIZE_MENU)
button.add(image)
button.connect("clicked", self.forward)
table.attach(button, 2, 3, 1, 2, 0, gtk.FILL, 0)
button = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_GO_BACK, gtk.ICON_SIZE_MENU)
button.add(image)
button.connect("clicked", self.back)
table.attach(button, 0, 1, 1, 2, 0, gtk.FILL, 0)
hbox = gtk.HBox()
label = gtk.Label(_("Color:"))
hbox.pack_start(label)
self.color_button = gtk.ColorButton()
self.color_button.set_use_alpha(True)
self.color_button.connect("color-set", self.set_gradient_color)
hbox.pack_start(self.color_button)
label = gtk.Label(_("Position:"))
hbox.pack_start(label)
self.sel_position = gtk.SpinButton(climb_rate=0.00001, digits=5)
self.sel_position.set_range(0.0, 1.0)
self.sel_position.set_wrap(True)
self.sel_position.set_increments(0.0
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/identitytoolkit/beta/tenant.py
|
Python
|
apache-2.0
| 10,862
| 0.002025
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2_grpc
from typing import List
class Tenant(object):
def __init__(
self,
name: str = None,
display_name: str = None,
allow_password_signup: bool = None,
enable_email_link_signin: bool = None,
disable_auth: bool = None,
enable_anonymous_user: bool = None,
mfa_config: dict = None,
test_phone_numbers: dict = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.display_name = display_name
self.allow_password_signup = allow_password_signup
self.enable_email_link_signin = enable_email_link_signin
self.disable_auth = disable_auth
self.enable_anonymous_user = enable_anonymous_user
self.mfa_config = mfa_config
self.test_phone_numbers = test_phone_numbers
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel())
request = tenant_pb2.ApplyIdentitytoolkitBetaTenantRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyIdentitytoolkitBetaTenant(request)
self.name = Primitive.from_proto(response.name)
self.display_name = Primitive.from_proto(response.display_name)
self.allow_password_signup = Primitive.from_proto(
            response.allow_password_signup
        )
self.enable_email_link_signin = Primitive.from_proto(
response.enable_email_link_signin
)
self.disable_auth = Primitive.from_proto(response.disable_auth)
self.enable_anonymous_user = Primitive.from_proto(
response.enable_anonymous_user
)
self.mfa_config = TenantMfaConfig.from_proto(response.mfa_config)
        self.test_phone_numbers = Primitive.from_proto(response.test_phone_numbers)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel())
request = tenant_pb2.DeleteIdentitytoolkitBetaTenantRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteIdentitytoolkitBetaTenant(request)
@classmethod
def list(self, project, service_account_file=""):
stub = tenant_pb2_grpc.IdentitytoolkitBetaTenantServiceStub(channel.Channel())
request = tenant_pb2.ListIdentitytoolkitBetaTenantRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListIdentitytoolkitBetaTenant(request).items
def to_proto(self):
resource = tenant_pb2.IdentitytoolkitBetaTenant()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
resource.mfa_config.CopyFrom(TenantMfaConfig.to_proto(self.mfa_config))
else:
resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
resource.test_phone_numbers = Primitive.to_proto(self.test_phone_numbers)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
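# Usage sketch (illustrative only, not from the original file; the project,
# display name, and credential path below are hypothetical):
#
#   tenant = Tenant(display_name="my-tenant", project="my-project",
#                   allow_password_signup=True,
#                   service_account_file="creds.json")
#   tenant.apply()  # upserts the tenant, then refreshes fields from the response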
class TenantMfaConfig(object):
def __init__(self, state: str = None, enabled_providers: list = None):
self.state = state
self.enabled_providers = enabled_providers
    @classmethod
|
debuggerman/gitstats.py
|
gitstats.py
|
Python
|
mit
| 344
| 0.031977
|
import urllib2
import base64
import json
from link import *
from GitFetcher import GitHubFetcher
username = "debuggerman"
password = "megadeth"
orgUrl = "https://api.github.com/orgs"
orgName = "coeus-solutions"
gitFetcher = GitHubFetcher(username = username, password = password, orgUrl = orgUrl, orgName = orgName)
gitFetcher.getOrgInfo()
|
pienkowb/omelette
|
omelette/fromage/test/data/node.py
|
Python
|
gpl-3.0
| 175
| 0.005714
|
from omelette.fromage.common import DrawableNode
class DrawableNode(DrawableNode):
def __init__(self, uml_object):
super(DrawableNode, self).__init__(uml_object)
|
arkadoel/AprendiendoPython
|
PrimerosPasos/dos.py
|
Python
|
gpl-3.0
| 579
| 0.018998
|
'''
Created on 20/10/2014
@author: fer
'''
if __name__ == '__main__':
    print('hello')
    x = 32  # integer
print x
    # string variable mensaje
    mensaje = "hello world"
print mensaje
    # boolean
    my_bool = True
    print my_bool
    # exponents
    calculo = 10**2
    print calculo
    print("The variable calculo is of type: %s" % type(calculo))
    print("Class %s" % type(calculo).__name__)
    ''' Type conversion '''
entero = int(3.999)
print entero
real = float(3)
print real
    cadena = str(32)
print type(cadena)
pass
|
runt18/nupic
|
src/nupic/frameworks/opf/metrics.py
|
Python
|
agpl-3.0
| 54,801
| 0.012992
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import numbers
import copy
import random
import numpy as np
from nupic.data.fieldmeta import FieldMetaType
import nupic.math.roc_utils as roc
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.utils import MovingAverage
from collections import deque
from operator import itemgetter
from safe_interpreter import SafeInterpreter
from io import BytesIO, StringIO
from functools import partial
###############################################################################
# Public Metric specification class
###############################################################################
class MetricSpec(object):
""" This class represents a single Metrics specification in the TaskControl
block
"""
_LABEL_SEPARATOR = ":"
def __init__(self, metric, inferenceElement, field=None, params=None):
"""
metric: A metric type name that identifies which metrics module is
to be constructed by the metrics factory method
opf.metrics.getModule(); e.g., "rmse"
inferenceElement: Some inference types (such as classification), can output
more than one type of inference (i.e. the predicted class
AND the predicted next step). This field specifies which
of these inferences to compute the metrics on
field: Field name on which this metric is to be collected
params: Custom parameters dict for the metrics module's constructor
"""
self.metric = metric
self.inferenceElement = inferenceElement
self.field = field
self.params = params
return
def __repr__(self):
return "{0!s}(metric={1!r}, inferenceElement={2!r}, field={3!r}, params={4!r})".format(self.__class__.__name__,
self.metric,
self.inferenceElement,
self.field,
self.params)
def getLabel(self, inferenceType=None):
""" Helper method that generates a unique label
for a MetricSpec / InferenceType pair. The label is formatted
as follows:
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
"""
result = []
if inferenceType is not None:
result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if params is not None:
      sortedParams = params.keys()
      sortedParams.sort()
      for param in sortedParams:
        # Don't include the customFuncSource - it is too long and unwieldy
if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
continue
value = params[param]
if isinstance(value, str):
result.extend(["{0!s}='{1!s}'".format(param, value)])
else:
result.extend(["{0!s}={1!s}".format(param, value)])
if self.field:
result.append("field={0!s}".format((self.field)) )
return self._LABEL_SEPARATOR.join(result)
@classmethod
def getInferenceTypeFromLabel(cls, label):
""" Extracts the PredicitonKind (temporal vs. nontemporal) from the given
metric label
Parameters:
-----------------------------------------------------------------------
label: A label (string) for a metric spec generated by getMetricLabel
(above)
Returns: An InferenceType value
"""
infType, _, _= label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
return None
return infType
def getModule(metricSpec):
"""
factory method to return an appropriate MetricsIface-based module
args:
metricSpec - an instance of MetricSpec.
metricSpec.metric must be one of:
rmse (root-mean-square error)
aae (average absolute error)
acc (accuracy, for enumerated types)
return:
an appropriate Metric module
"""
metricName = metricSpec.metric
if metricName == 'rmse':
return MetricRMSE(metricSpec)
if metricName == 'nrmse':
return MetricNRMSE(metricSpec)
elif metricName == 'aae':
return MetricAAE(metricSpec)
elif metricName == 'acc':
return MetricAccuracy(metricSpec)
elif metricName == 'avg_err':
return MetricAveError(metricSpec)
elif metricName == 'trivial':
return MetricTrivial(metricSpec)
elif metricName == 'two_gram':
return MetricTwoGram(metricSpec)
elif metricName == 'moving_mean':
return MetricMovingMean(metricSpec)
elif metricName == 'moving_mode':
return MetricMovingMode(metricSpec)
elif metricName == 'neg_auc':
return MetricNegAUC(metricSpec)
elif metricName == 'custom_error_metric':
return CustomErrorMetric(metricSpec)
elif metricName == 'multiStep':
return MetricMultiStep(metricSpec)
elif metricName == 'multiStepProbability':
return MetricMultiStepProbability(metricSpec)
elif metricName == 'ms_aae':
return MetricMultiStepAAE(metricSpec)
elif metricName == 'ms_avg_err':
return MetricMultiStepAveError(metricSpec)
elif metricName == 'passThruPrediction':
    return MetricPassThruPrediction(metricSpec)
elif metricName == 'altMAPE':
return MetricAltMAPE(metricSpec)
elif metricName == 'MAPE':
return MetricMAPE(metricSpec)
elif metricName == 'multi':
return MetricMulti(metricSpec)
elif metricName == 'negativeLogLikelihood':
return MetricNegativeLogLikelihood(metricSpec)
else:
raise Exception("Unsupported metric type: {0!s}".format(metricName))
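# Usage sketch (illustrative, not from the original file; the inference
# element name 'prediction' and field 'consumption' are hypothetical):
#
#   spec = MetricSpec('rmse', 'prediction', field='consumption',
#                     params={'window': 200})
#   metric = getModule(spec)
#
# The returned object implements the MetricsIface protocol below and is fed
# ground truth / prediction pairs by the OPF runtime.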
################################################################################
# Helper Methods and Classes #
################################################################################
class _MovingMode(object):
""" Helper class for computing windowed moving
mode of arbitrary values """
def __init__(self, windowSize = None):
"""
Parameters:
-----------------------------------------------------------------------
windowSize: The number of values that are used to compute the
moving average
"""
self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
def __call__(self, value):
if len(self._countDict) == 0:
pred = ""
else:
pred = max(self._countDict.items(), key = itemgetter(1))[0]
# Update count dict and history buffer
self._history.appendleft(value)
    if value not in self._countDict:
self._countDict[value] = 0
self._countDict[value] += 1
if len(self._history) > self._windowSize:
removeElem = self._history.pop()
self._countDict[removeElem] -= 1
assert(self._countDict[removeElem] > -1)
return pred
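# Worked example (illustrative, not from the original file): with
# windowSize=3, feeding the values 'A', 'A', 'B', 'B', 'B' returns
# '', 'A', 'A', 'A', 'B' -- each call reports the mode of the values seen
# *before* it, computed over the last three entries.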
def _isNumber(value):
return isinstance(value, (numbers.Number, np.number))
class MetricsIface(object):
"""
A Metrics module compares a prediction Y to corresponding ground truth X and returns
|
morphis/home-assistant
|
homeassistant/components/camera/synology.py
|
Python
|
apache-2.0
| 8,436
| 0.000119
|
"""
Support for Synology Surveillance Station Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.synology/
"""
import asyncio
import logging
import voluptuous as vol
import aiohttp
import async_timeout
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
CONF_URL, CONF_WHITELIST, CONF_VERIFY_SSL)
from homeassistant.components.camera import (
Camera, PLATFORM_SCHEMA)
from homeassistant.helpers.aiohttp_client import (
async_get_clientsession, async_create_clientsession,
async_aiohttp_proxy_stream)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Synology Camera'
DEFAULT_STREAM_ID = '0'
TIMEOUT = 5
CONF_CAMERA_NAME = 'camera_name'
CONF_STREAM_ID = 'stream_id'
QUERY_CGI = 'query.cgi'
QUERY_API = 'SYNO.API.Info'
AUTH_API = 'SYNO.API.Auth'
CAMERA_API = 'SYNO.SurveillanceStation.Camera'
STREAMING_API = 'SYNO.SurveillanceStation.VideoStream'
SESSION_ID = '0'
WEBAPI_PATH = '/webapi/'
AUTH_PATH = 'auth.cgi'
CAMERA_PATH = 'camera.cgi'
STREAMING_PATH = 'SurveillanceStation/videoStreaming.cgi'
CONTENT_TYPE_HEADER = 'Content-Type'
SYNO_API_URL = '{0}{1}{2}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
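# Illustrative configuration (hypothetical host and credentials, not from the
# original file) -- the schema above would validate, for example:
#
#   PLATFORM_SCHEMA({'platform': 'synology',
#                    'url': 'https://192.168.1.2:5001',
#                    'username': 'admin',
#                    'password': 'secret'})
#
# Optional keys (name, whitelist, verify_ssl) fall back to their defaults.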
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Setup a Synology IP Camera."""
verify_ssl = config.get(CONF_VERIFY_SSL)
websession_init = async_get_clientsession(hass, verify_ssl)
# Determine API to use for authentication
syno_api_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, QUERY_CGI)
query_payload = {
'api': QUERY_API,
'method': 'Query',
'version': '1',
'query': 'SYNO.'
}
query_req = None
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
query_req = yield from websession_init.get(
syno_api_url,
params=query_payload
)
query_resp = yield from query_req.json()
auth_path = query_resp['data'][AUTH_API]['path']
camera_api = query_resp['data'][CAMERA_API]['path']
camera_path = query_resp['data'][CAMERA_API]['path']
streaming_path = query_resp['data'][STREAMING_API]['path']
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", syno_api_url)
return False
finally:
if query_req is not None:
yield from query_req.release()
    # Authenticate to NAS to get a session id
syno_auth_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, auth_path)
session_id = yield from get_session_id(
hass,
websession_init,
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
syno_auth_url
)
# init websession
websession = async_create_clientsession(
hass, verify_ssl, cookies={'id': session_id})
# Use SessionID to get cameras in system
syno_camera_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, camera_api)
camera_payload = {
'api': CAMERA_API,
'method': 'List',
'version': '1'
}
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
camera_req = yield from websession.get(
syno_camera_url,
params=camera_payload
)
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", syno_camera_url)
return False
camera_resp = yield from camera_req.json()
cameras = camera_resp['data']['cameras']
yield from camera_req.release()
# add cameras
devices = []
for camera in cameras:
if not config.get(CONF_WHITELIST):
camera_id = camera['id']
snapshot_path = camera['snapshot_path']
device = SynologyCamera(
hass,
websession,
config,
camera_id,
camera['name'],
snapshot_path,
streaming_path,
camera_path,
auth_path
)
devices.append(device)
async_add_devices(devices)
@asyncio.coroutine
def get_session_id(hass, websession, username, password, login_url):
"""Get a session id."""
auth_payload = {
'api': AUTH_API,
'method': 'Login',
'version': '2',
        'account': username,
        'passwd': password,
        'session': 'SurveillanceStation',
        'format': 'sid'
}
auth_req = None
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
auth_req = yield from websession.get(
login_url,
params=auth_payload
)
auth_resp = yield from auth_req.json()
return auth_resp['data']['sid']
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", login_url)
return False
finally:
if auth_req is not None:
yield from auth_req.release()
class SynologyCamera(Camera):
"""An implementation of a Synology NAS based IP camera."""
def __init__(self, hass, websession, config, camera_id,
camera_name, snapshot_path, streaming_path, camera_path,
auth_path):
"""Initialize a Synology Surveillance Station camera."""
super().__init__()
self.hass = hass
self._websession = websession
self._name = camera_name
self._synology_url = config.get(CONF_URL)
self._camera_name = config.get(CONF_CAMERA_NAME)
self._stream_id = config.get(CONF_STREAM_ID)
self._camera_id = camera_id
self._snapshot_path = snapshot_path
self._streaming_path = streaming_path
self._camera_path = camera_path
self._auth_path = auth_path
def camera_image(self):
"""Return bytes of camera image."""
return run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop).result()
@asyncio.coroutine
def async_camera_image(self):
"""Return a still image response from the camera."""
image_url = SYNO_API_URL.format(
self._synology_url, WEBAPI_PATH, self._camera_path)
image_payload = {
'api': CAMERA_API,
'method': 'GetSnapshot',
'version': '1',
'cameraId': self._camera_id
}
try:
with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
response = yield from self._websession.get(
image_url,
params=image_payload
)
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", image_url)
return None
image = yield from response.read()
yield from response.release()
return image
@asyncio.coroutine
def handle_async_mjpeg_stream(self, request):
"""Return a MJPEG stream image response directly from the camera."""
streaming_url = SYNO_API_URL.format(
self._synology_url, WEBAPI_PATH, self._streaming_path)
streaming_payload = {
'api': STREAMING_API,
'method': 'Stream',
'version': '1',
'cameraId': self._camera_id,
'format': 'mjpeg'
}
        stream_coro = self._websession.get(streaming_url, params=streaming_payload)
|
akshaykurmi/reinforcement-learning
|
atari_breakout/per.py
|
Python
|
mit
| 4,539
| 0.000881
|
import os
import pickle
import numpy as np
from tqdm import tqdm
class SumTree:
def __init__(self, capacity):
self.capacity = capacity
self.tree = np.zeros(2 * capacity - 1, dtype=np.float32)
self.data = np.empty(capacity, dtype=object)
self.head = 0
@property
def total_priority(self):
return self.tree[0]
@property
def max_priority(self):
return np.max(self.tree[-self.capacity:])
@property
def min_priority(self):
return np.min(self.tree[-self.capacity:])
def _tree_to_data_index(self, i):
return i - self.capacity + 1
def _data_to_tree_index(self, i):
return i + self.capacity - 1
def add(self, priority, data):
tree_index = self._data_to_tree_index(self.head)
self.update_priority(tree_index, priority)
self.data[self.head] = data
self.head += 1
if self.head >= self.capacity:
self.head = 0
def update_priority(self, tree_index, priority):
delta = priority - self.tree[tree_index]
self.tree[tree_index] = priority
while tree_index != 0:
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += delta
def get_leaf(self, value):
parent = 0
while True:
left = 2 * parent + 1
right = left + 1
if left >= len(self.tree):
leaf = parent
break
else:
if value <= self.tree[left]:
parent = left
else:
value -= self.tree[left]
parent = right
data_index = self._tree_to_data_index(leaf)
return leaf, self.tree[leaf], self.data[data_index]
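# Index bookkeeping (worked example, not part of the original file): with
# capacity=4 the tree array holds 2 * 4 - 1 = 7 nodes; internal nodes occupy
# indices 0..2 and leaves 3..6, so data slot i maps to tree index
# i + 4 - 1 (slot 0 -> index 3) and tree index j maps back to slot j - 4 + 1.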
class PrioritizedExperienceReplay:
def __init__(self, capacity, initial_size, epsilon, alpha, beta, beta_annealing_rate, max_td_error, ckpt_dir):
self.tree = SumTree(capacity)
self.capacity = capacity
self.epsilon = epsilon
self.initial_size = initial_size
self.alpha = alpha
self.beta = beta
self.beta_annealing_rate = beta_annealing_rate
self.max_td_error = max_td_error
self.ckpt_dir = ckpt_dir
def add(self, transition):
max_priority = self.tree.max_priority
if max_priority == 0:
max_priority = self.max_td_error
self.tree.add(max_priority, transition)
    def sample(self, batch_size):
self.beta = np.min([1., self.beta + self.beta_annealing_rate])
        priority_segment = self.tree.total_priority / batch_size
min_probability = self.tree.min_priority / self.tree.total_priority
max_weight = (min_probability * batch_size) ** (-self.beta)
samples, sample_indices, importance_sampling_weights = [], [], []
for i in range(batch_size):
value = np.random.uniform(priority_segment * i, priority_segment * (i + 1))
index, priority, transition = self.tree.get_leaf(value)
sample_probability = priority / self.tree.total_priority
importance_sampling_weights.append(((batch_size * sample_probability) ** -self.beta) / max_weight)
sample_indices.append(index)
samples.append(transition)
return sample_indices, samples, importance_sampling_weights
def update_priorities(self, tree_indices, td_errors):
td_errors += self.epsilon
clipped_errors = np.minimum(td_errors, self.max_td_error)
priorities = clipped_errors ** self.alpha
for tree_index, priority in zip(tree_indices, priorities):
self.tree.update_priority(tree_index, priority)
def load_or_instantiate(self, env):
if os.path.exists(os.path.join(self.ckpt_dir, "memory.pkl")):
self.load()
return
state = env.reset()
for _ in tqdm(range(self.initial_size), desc="Initializing replay memory", unit="transition"):
action = env.action_space.sample()
next_state, reward, done, info = env.step(action)
transition = (state, action, reward, next_state, done)
self.add(transition)
state = next_state
if done:
state = env.reset()
def load(self):
with open(os.path.join(self.ckpt_dir, "memory.pkl"), "rb") as f:
self.tree = pickle.load(f)
def save(self):
with open(os.path.join(self.ckpt_dir, "memory.pkl"), "wb") as f:
pickle.dump(self.tree, f)
|
instinct-vfx/rez
|
src/rez/cli/plugins.py
|
Python
|
apache-2.0
| 1,176
| 0
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Get a list of a package's plugins.
"""
from __future__ import print_function
def setup_parser(parser, completions=False):
parser.add_argument(
"--paths", type=str, default=None,
help="set package search path")
PKG_action = parser.add_argument(
"PKG", type=str,
help="package to list plugins for")
if completions:
from rez.cli._complete_util import PackageFamilyCompleter
        PKG_action.completer = PackageFamilyCompleter
def command(opts, parser, extra_arg_groups=None):
from rez.package_search import get_plugins
from rez.config import config
import os
import os.path
import sys
config.override("warn_none", True)
if opts.paths is None:
pkg_paths = None
else:
pkg_paths = opts.paths.split(os.pathsep)
pkg_paths = [os.path.expanduser(x) for x in pkg_paths if x]
pkgs_list = get_plugins(package_name=opts.PKG, paths=pkg_paths)
if pkgs_list:
print('\n'.join(pkgs_list))
else:
print("package '%s' has no plugins." % opts.PKG, file=sys.stderr)
|
joachimmetz/dfvfs
|
tests/vfs/apfs_file_entry.py
|
Python
|
apache-2.0
| 30,722
| 0.002181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using pyfsapfs."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.resolver import resolver
from dfvfs.vfs import apfs_attribute
from dfvfs.vfs import apfs_file_entry
from dfvfs.vfs import apfs_file_system
from tests import test_lib as shared_test_lib
class APFSFileEntryTest(shared_test_lib.BaseTestCase):
"""Tests the APFS file entry."""
# pylint: disable=protected-access
_IDENTIFIER_A_DIRECTORY = 16
_IDENTIFIER_A_FILE = 17
_IDENTIFIER_A_LINK = 20
_IDENTIFIER_ANOTHER_FILE = 19
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['apfs.raw'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
self._apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/apfs1',
parent=test_raw_path_spec)
self._apfs_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/',
parent=self._apfs_container_path_spec)
self._file_system = apfs_file_system.APFSFileSystem(
self._resolver_context, self._apfs_path_spec)
self._file_system.Open()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testInitialize(self):
"""Tests the __init__ function."""
file_entry = apfs_file_entry.APFSFileEntry(
self._resolver_context, self._file_system, self._apfs_path_spec)
self.assertIsNotNone(file_entry)
# TODO: add tests for _GetDirectory
# TODO: add tests for _GetLink
# TODO: add tests for _GetStat
# TODO: add tests for _GetSubFileEntries
def testAccessTime(self):
"""Test the access_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.access_time)
def testAddedTime(self):
"""Test the added_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.added_time)
def testChangeTime(self):
"""Test the change_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.change_time)
def testCreationTime(self):
"""Test the creation_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.creation_time)
def testModificationTime(self):
"""Test the modification_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.modification_time)
def testName(self):
"""Test the name property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'another_file')
def testSize(self):
"""Test the size property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.size, 22)
def testGetAttributes(self):
"""Tests the _GetAttributes function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, identifier=self._IDENTIFIER_A_FILE,
location='/a_directory/a_file', parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry._attributes)
file_entry._GetAttributes()
self.assertIsNotNone(file_entry._attributes)
self.assertEqual(len(file_entry._attributes), 1)
test_attribute = file_entry._attributes[0]
self.assertIsInstance(test_attribute, apfs_attribute.APFSExtendedAttribute)
self.assertEqual(test_attribute.name, 'myxattr')
test_attribute_value_data = test_attribute.read()
self.assertEqual(test_attribute_value_data, b'My extended attribute')
def testGetStat(self):
"""Tests the _GetStat function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_object = file_entry._GetStat()
self.assertIsNotNone(stat_object)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
self.assertEqual(stat_object.size, 22)
self.assertEqual(stat_object.mode, 420)
self.assertEqual(stat_object.uid, 99)
self.assertEqual(stat_object.gid, 99)
self.assertEqual(stat_object.atime, 1642144781)
self.assertEqual(stat_object.atime_nano, 2174301)
self.assertEqual(stat_object.ctime, 1642144781)
self.assertEqual(stat_object.ctime_nano, 2206372)
self.assertEqual(stat_object.crtime, 1642144781)
self.assertEqual(stat_object.crtime_nano, 2206372)
self.assertEqual(stat_object.mtime, 1642144781)
self.assertEqual(stat_object.mtime_nano, 2174301)
def testGetStatAttribute(self):
"""Tests the _GetStatAttribute function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._apfs_container_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_attribute = file_entry._GetStatAttribute()
self.assertIsNotNone(stat_attribute)
self.assertEqual(stat_attribute.group_identifier, 99)
self.assertEqual(stat_attribute.inode_number, 19)
self.assertEqual(stat_attribute.mode, 0o100644)
# TODO: implement number of hard links support
|
bikashgupta11/javarobot
|
src/main/resources/jython/Lib/xml/etree/ElementTree.py
|
Python
|
gpl-3.0
| 56,932
| 0.001194
|
#
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
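##
# Quick illustration (not part of the original module): build a two-element
# tree and serialize it.
#
#   root = Element("config", {"version": "1"})
#   entry = SubElement(root, "entry")
#   entry.text = "value"
#   print tostring(root)  # <config version="1"><entry>value</entry></config>
##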
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this; might be a better idea to look
# for tag/attrib/text attributes
return isinstance(element, Element) or hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
        return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
|
azumimuo/family-xbmc-addon
|
plugin.video.showboxarize/resources/lib/sources_pl/filister.py
|
Python
|
gpl-2.0
| 5,852
| 0.003418
|
# -*- coding: utf-8 -*-
'''
Flixnet Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['filiser.tv']
self.base_link = 'http://filiser.tv/'
self.url_transl = 'embed?salt=%s'
self.search_link = 'szukaj?q=%s'
self.episode_link = '-Season-%01d-Episode-%01d'
def do_search(self, title, year, is_movie_search):
try:
url = urlparse.urljoin(self.base_link, self.search_link)
url = url % urllib.quote(title)
result = client.request(url)
result = result.decode('utf-8')
result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})
result = client.parseDOM(result[0], 'li')
result = [(client.parseDOM(i, 'a', ret='href')[0],
client.parseDOM(i, 'div', attrs={'class': 'title'})[0],
(client.parseDOM(i, 'div', attrs={'class': 'title_org'}) + [None])[0],
client.parseDOM(i, 'div', attrs={'class': 'info'})[0],
) for i in result]
search_type = 'Film' if is_movie_search else 'Serial'
cleaned_title = cleantitle.get(title)
# filter by name
result = [x for x in result if cleaned_title == cleantitle.get(self.get_first_not_none([x[2], x[1]]))]
# filter by type
result = [x for x in result if x[3].startswith(search_type)]
# filter by year
result = [x for x in result if x[3].endswith(str(year))]
if len(result) > 0:
return result[0][0]
else:
return
except :
return
def get_first_not_none(self, collection):
return next(item for item in collection if item is not None)
def movie(self, imdb, title, localtitle, year):
return self.do_search(title, year, True)
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
return self.do_search(tvshowtitle, year, False)
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
result = client.request(url)
result = client.parseDOM(result, 'ul', attrs={'data-season-num': season})[0]
result = client.parseDOM(result, 'li')
for i in result:
s = client.parseDOM(i, 'a', attrs={'class': 'episodeNum'})[0]
e = int(s[7:-1])
if e == int(episode):
return client.parseDOM(i, 'a', attrs={'class': 'episodeNum'}, ret='href')[0]
except :
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = client.request(url)
result = client.parseDOM(result, 'div', attrs={'id': 'links'})
attr = client.parseDOM(result, 'ul', ret='data-type')
result = client.parseDOM(result, 'ul')
for x in range(0, len(result)):
transl_type = attr[x]
links = result[x]
sources += self.extract_sources(transl_type, links)
return sources
except:
return sources
def get_lang_by_type(self, lang_type):
if lang_type == 'DUBBING':
return 'pl', 'Dubbing'
elif lang_type == 'NAPISY_PL':
return 'pl', 'Napisy'
if lang_type == 'LEKTOR_PL':
return 'pl', 'Lektor'
elif lang_type == 'POLSKI':
return 'pl', None
return 'en', None
def extract_sources(self, transl_type, links):
sources = []
data_refs = client.parseDOM(links, 'li', ret='data-ref')
result = client.parseDOM(links, 'li')
lang, info = self.get_lang_by_type(transl_type)
for i in range(0, len(result)):
            el = result[i]
host = client.parseDOM(el, 'span', attrs={'class': 'host'})[0]
quality = client.parseDOM(el, 'span', attrs={'class': 'quality'})[0]
q = 'SD'
if quality.endswith('720p'):
q = 'HD'
elif quality.endswith('1080p'):
q = '1080p'
sources.append({'source': host, 'quality': q, 'language': lang, 'url': data_refs[i], 'info': info, 'direct': False, 'debridonly': False})
return sources
def resolve(self, url):
try:
url_to_exec = urlparse.urljoin(self.base_link, self.url_transl) % url
result = client.request(url_to_exec)
m = re.search("(?<=var url = ')(.*\n?)(?=')", result)
result_url = m.group(0)
result_url = result_url.replace('#WIDTH', '100')
result_url = result_url.replace('#HEIGHT', '100')
return result_url
except:
return
|
eahneahn/free
|
djangoproject/gh_frespo_integration/services/github_services.py
|
Python
|
agpl-3.0
| 5,113
| 0.008801
|
from gh_frespo_integration.utils import github_adapter
from gh_frespo_integration.models import *
from django.conf import settings
import logging
from datetime import timedelta
__author__ = 'tony'
logger = logging.getLogger(__name__)
def get_repos_and_configs(user):
repos = []
    github_username = user.github_username()
if github_username:
repos = github_adapter.fetch_repos(github_username)
for repo_dict in repos:
gh_id = repo_dict['id']
repodb = get_repodb_by_githubid(gh_id)
if repodb:
                user_repo_config = get_repo_config_by_repo_and_user(repodb, user)
if user_repo_config:
repo_dict['add_links'] = user_repo_config.add_links
repo_dict['new_only'] = user_repo_config.new_only
return repos
def get_repodb_by_githubid(gh_id):
repos = Repo.objects.filter(gh_id = gh_id)
if repos.count() > 1:
logger.error('Database inconsistency: multiple repos found with gh_id:%s'%gh_id)
elif repos.count() == 1:
return repos[0]
else:
return None
def get_repo_config_by_repo_and_user(repo, user):
configs = UserRepoConfig.objects.filter(repo__id = repo.id, user__id = user.id)
if configs.count() > 1:
logger.error('Database inconsistency: multiple configs found for repo:%s / user:%s'%(repo.id, user.id))
elif configs.count() == 1:
return configs[0]
else:
return None
def update_user_configs(user, dict):
github_username = user.github_username()
if github_username:
repos = github_adapter.fetch_repos(github_username)
my_repo_ids = []
for repo_dict in repos:
gh_id = repo_dict['id']
repodb = get_repodb_by_githubid(gh_id)
if not repodb:
owner = repo_dict['owner']['login']
owner_type = repo_dict['owner']['type']
name = repo_dict['name']
repodb = Repo.newRepo(owner, owner_type, name, gh_id, user)
repodb.save()
config = get_repo_config_by_repo_and_user(repodb, user)
if not config:
config = UserRepoConfig.newConfig(user, repodb)
config.add_links = dict.has_key('check_addlink_%s' % gh_id)
# config.new_only = dict.has_key('check_newonly_%s' % gh_id)
config.new_only = True
config.save()
my_repo_ids.append(gh_id)
UserRepoConfig.objects.filter(user__id = user.id).exclude(repo__gh_id__in = my_repo_ids).delete()
def add_sponsorthis_comments():
configs = UserRepoConfig.objects.filter(add_links = True)
logger.debug('starting sponsor_this routine...')
for config in configs:
repo_owner = config.repo.owner
repo_name = config.repo.name
last_ran = None
logger.debug('processing repo_config %s (%s/%s)' % (config.id, config.repo.owner, config.repo.name))
if config.new_only or config.already_did_old:
last_ran = config.last_ran - timedelta(hours=1)
logger.debug('will list issues after %s' % last_ran)
else:
logger.debug('will list all issues')
config.update_last_ran()
try:
issues = github_adapter.fetch_issues(repo_owner, repo_name, last_ran)
logger.debug('issues are fetched')
for issue in issues:
_add_comment_if_not_already(config, int(issue['number']), repo_owner, repo_name)
if not config.new_only:
config.set_already_did_old()
except BaseException as e:
logger.error("Error adding comments repository %s/%s: %s" % (repo_owner, repo_name, e))
logger.debug('sponsor_this ended successfully')
def _add_comment_if_not_already(repo_config, issue_number, repo_owner, repo_name):
issue_already_commented = get_issue_already_commented(repo_config.repo, issue_number)
if not issue_already_commented:
body = u"""Do you care about this issue? To get it fixed quickly, [offer a cash incentive to developers on FreedomSponsors.org](%s/core/issue/sponsor?trackerURL=https://github.com/%s/%s/issues/%s).
If you can only give US$5, offering just that will invite other people to do the same. Sharing the cost will soon add up!""" % (settings.SITE_HOME, repo_owner, repo_name, issue_number)
github_adapter.bot_comment(repo_owner, repo_name, issue_number, body)
issue_already_commented = IssueAlreadyCommented.newIssueAlreadyCommented(repo_config.repo, issue_number)
issue_already_commented.save()
logger.info('commented on issue %s of %s/%s' % (issue_number, repo_owner, repo_name))
else:
logger.debug('NOT commenting on issue %s of %s/%s because it was already commented on' % (issue_number, repo_owner, repo_name))
def get_issue_already_commented(repo, number):
iacs = IssueAlreadyCommented.objects.filter(repo__id = repo.id, number = number)
if iacs.count() > 1:
logger.error('Database inconsistency: multiple issue_already_commented found for repo:%s / number:%s'%(repo.id, number))
elif iacs.count() == 1:
return iacs[0]
else:
return None
|
vijeshm/eezyReport
|
eezyReport.py
|
Python
|
mit
| 16,797
| 0.006727
|
from lxml import etree
import os
from BeautifulSoup import BeautifulSoup
from itertools import chain
def replacements(text):
text = text.replace('>', '\\textgreater ')
text = text.replace('<', '\\textless ')
text = text.replace('&', '\&')
text = text.replace('_', '\_')
text = text.replace('%', '\%')
text = text.replace('[', '\lbrack')
text = text.replace(']', '\\rbrack')
return text
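# Example (illustrative): replacements("50% of a_b") returns "50\% of a\_b",
# so raw user text can be embedded in the generated LaTeX without breaking it.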
def fillContent(tex, srchStr, insStr):
insStr = replacements(insStr)
insIndex = tex.index(srchStr)
tex = tex[:insIndex+len(srchStr)] + insStr + tex[insIndex+len(srchStr):]
return tex
def convertToTex(text, figInTabular=False):
text = replacements(text)
soup = BeautifulSoup(text)
contents = soup.contents[0].contents
retTxt = ''
for content in contents:
if str(type(content)) == "<class 'BeautifulSoup.NavigableString'>":
content = content.replace('\\newline', '~\\\\')
content = content.replace('\\newpara', '~\\\\\\\\')
content = content.replace('\\backslash', '\\textbackslash')
content = content.replace('|', '\\textbar ')
retTxt += content
elif str(type(content)) == "<class 'BeautifulSoup.Tag'>":
if content.name == 'b':
retTxt += '\\textbf{' + convertToTex(str(content)) + '}'
elif content.name == 'u':
retTxt += '\underline{' + convertToTex(str(content)) + '}'
elif content.name == 'i':
retTxt += '\\textit{' + convertToTex(str(content)) + '}'
elif content.name == 'ul':
retTxt += '\n\\begin{itemize}'
for item in content.contents:
if str(type(item)) == "<class 'BeautifulSoup.Tag'>":
retTxt += '\n \item ' + convertToTex(str(item))
retTxt += '\n\end{itemize}\n'
elif content.name == 'chapter':
attrs = dict(content.attrs)
            if not attrs.has_key('name'):
                print "One of the chapters does not have a 'name' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif attrs['name'] == '':
                print "One of the chapters has an empty name. Please correct it and re-run."
                exit(0)
else:
retTxt += '\\begin{projChapter}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projChapter}'
elif content.name == 'section':
attrs = dict(content.attrs)
            if not attrs.has_key('name'):
                print "One of the sections does not have a 'name' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif attrs['name'] == '':
                print "One of the sections has an empty name. Please correct it and re-run."
                exit(0)
else:
                retTxt += '\\begin{projSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSection}'
elif content.name == 'subsection':
attrs = dict(content.attrs)
            if not attrs.has_key('name'):
                print "One of the subsections does not have a 'name' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif attrs['name'] == '':
                print "One of the subsections has an empty name. Please correct it and re-run."
                exit(0)
            else:
retTxt += '\\begin{projSubSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSubSection}'
elif content.name == 'img':
props = dict(content.attrs)
            if not props.has_key('id'):
                print "One of the images does not have an 'id' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif not props.has_key('src'):
                print "One of the images does not have a 'src' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif not props.has_key('caption'):
                print "One of the images does not have a 'caption' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif not props.has_key('scale'):
                print "One of the images does not have a 'scale' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif props['id'] == '':
                print "One of the images has an empty 'id'. Please correct it and re-run."
                exit(0)
            elif props['src'] == '':
                print "One of the images has an empty 'src'. Please correct it and re-run."
                exit(0)
            elif props['scale'] == '':
                print "The scaling factor for one of the images hasn't been defined. Please correct it and re-run."
                exit(0)
else:
if figInTabular:
retTxt += '\\raisebox{-\\totalheight}{\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\label{' + props['id'] + '}}\n'
else:
retTxt += '\\begin{figure}[ht!]\n\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\caption{' + props['caption'] + '}\n\label{' + props['id'] + '}\n\end{figure}\n'
elif content.name == 'ref':
props = dict(content.attrs)
if not props.has_key('type'):
print "One of the references doesnt have a 'type' attribute. Please correct it and re-run."
exit(0)
elif props['type'] == '':
print "One of the references has an empty string for 'type'. Please correct it and re-run."
exit(0)
else:
if props['type'] == 'figure':
retTxt += 'Figure \\ref{' + content.text + '}'
elif props['type'] == 'table':
retTxt += 'Table \\ref{' + content.text +'}'
elif content.name == 'table':
props = dict(content.attrs)
            if not props.has_key('id'):
                print "One of the tables does not have an 'id' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif not props.has_key('alignments'):
                print "One of the tables does not have an 'alignments' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
            elif not props.has_key('caption'):
                print "One of the tables does not have a 'caption' attribute, or it is misspelled. Please correct it and re-run."
                exit(0)
elif props['id'] == '':
print "One of the tables has an empty 'id'. Please correct it and re-run."
exit(0)
elif props['alignments'] == '':
print "One of the tables has an empty 'alignments'. Please correct it and re-run."
exit(0)
else:
alignments = props['alignments']
retTxt += '\\begin{table}[h]\\begin{center}\\begin{tabular}{' + alignments + '}'
for horizontal in content.contents:
if str(type(horizontal)) == "<class 'BeautifulSoup.Tag'>":
if horizontal.name == "tr":
cols = horizontal.contents
numOfCols = len(cols)
for i in range(numOfCols):
if str(type(cols[i])) == "<class 'BeautifulSoup.Tag'>":
retTxt += convertToTex(str(cols[i]), figInTabular=True)
print str(cols[i])
|
j00bar/django-widgy
|
widgy/contrib/page_builder/admin.py
|
Python
|
apache-2.0
| 165
| 0
|
from django.contrib import admin
from widgy.admin import WidgyAdmin
from widgy.contrib.page_builder.models import Callout
admin.site.register(Callout, WidgyAdmin)
|
gitireadme/gitireadme.server
|
gitireadme/article/models.py
|
Python
|
apache-2.0
| 391
| 0.023018
|
from django.db import models
from gitireadme.utils import getUploadToPath
import datetime
class Article(models.Model):
    name = models.CharField(max_length=255,blank=True,null=True)
path = models.CharField(max_length=255,blank=True,null=True)
class ArticleAlias(models.Model):
repo = models.CharField(max_length=255,blank=True,null=True)
article = models.ForeignKey(Article)
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/python/util/deprecation_test.py
|
Python
|
apache-2.0
| 28,547
| 0.004134
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecation tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_silence(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn():
pass
_fn()
self.assertEqual(1, mock_warning.call_count)
with deprecation.silence():
_fn()
self.assertEqual(1, mock_warning.call_count)
_fn()
self.assertEqual(2, mock_warning.call_count)
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("", instructions)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("07-04-2016", instructions)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, None)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, "")
@test.mock.patch.object(logging, "warning", autospec=True)
def test_no_date(self, mock_warning):
date = None
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed in a future version."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % instructions, _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(
args[0], r"deprecated and will be removed")
self._assert_subset(set(["in a future version", instructions]),
set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date
|
, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"
|
ken1277725/pythonweb-STT
|
server/server.py
|
Python
|
mit
| 1,001
| 0.034965
|
# coding=UTF-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
#import tornado
from tornado import ioloop , web , httpserver , websocket , options
#import handler function
import handler
import os
#set server settings
server_settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"xsrf_cookies": True,
"autoreload": True,
#"login_url": "/accounts/login",
"debug":True,
"template_path":os.path.join(os.path.dirname(__file__),"templates"),
}
#the handlers list
handlers=[
(r"/?",handler.MainHandler),
(r"/upload",handler.WavFileHandler)
]
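# Hedged sketch of what a handler in handler.py might look like; handler.py
# itself is not shown here, so the method body below is an assumption:
#
# class MainHandler(web.RequestHandler):
#     def get(self):
#         self.render("index.html")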
options.define("port", defa
|
ult=8080, help="the application will be run on the given port", type=int)
if __name__ == "__main__":
options.parse_command_line()
app_server = httpserver.HTTPServer(web.Application(handlers,**server_settings))
app_server.listen(options.options.port)
ioloop.IOLoop.current().start()
|
sdpython/cvxpy
|
cvxpy/tests/test_lin_ops.py
|
Python
|
gpl-3.0
| 5,078
| 0
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.lin_ops.lin_utils import *
from cvxpy.lin_ops.lin_op import *
from cvxpy.expressions.constants import Parameter
import cvxpy.interface as intf
import numpy as np
import scipy.sparse as sp
import unittest
from cvxpy.tests.base_test import BaseTest
import sys
PY2 = sys.version_info < (3, 0)
class test_lin_ops(BaseTest):
""" Unit tests for the lin_ops module. """
    def test_variables(self):
"""Test creating a variable.
"""
var = create_var((5, 4), var_id=1)
self.assertEqual(var.size, (5, 4))
        self.assertEqual(var.data, 1)
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, VARIABLE)
def test_param(self):
"""Test creating a parameter.
"""
A = Parameter(5, 4)
var = create_param(A, (5, 4))
self.assertEqual(var.size, (5, 4))
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, PARAM)
def test_constant(self):
"""Test creating a constant.
"""
# Scalar constant.
size = (1, 1)
mat = create_const(1.0, size)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SCALAR_CONST)
assert mat.data == 1.0
# Dense matrix constant.
size = (5, 4)
mat = create_const(np.ones(size), size)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, DENSE_CONST)
assert (mat.data == np.ones(size)).all()
# Sparse matrix constant.
size = (5, 5)
mat = create_const(sp.eye(5), size, sparse=True)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SPARSE_CONST)
assert (mat.data.todense() == sp.eye(5).todense()).all()
def test_add_expr(self):
"""Test adding lin expr.
"""
size = (5, 4)
x = create_var(size)
y = create_var(size)
# Expanding dict.
add_expr = sum_expr([x, y])
self.assertEqual(add_expr.size, size)
assert len(add_expr.args) == 2
def test_get_vars(self):
"""Test getting vars from an expression.
"""
size = (5, 4)
x = create_var(size)
y = create_var(size)
A = create_const(np.ones(size), size)
# Expanding dict.
add_expr = sum_expr([x, y, A])
vars_ = get_expr_vars(add_expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_neg_expr(self):
"""Test negating an expression.
"""
size = (5, 4)
var = create_var(size)
expr = neg_expr(var)
assert len(expr.args) == 1
self.assertEqual(expr.size, size)
self.assertEqual(expr.type, NEG)
def test_eq_constr(self):
"""Test creating an equality constraint.
"""
size = (5, 5)
x = create_var(size)
y = create_var(size)
lh_expr = sum_expr([x, y])
value = np.ones(size)
rh_expr = create_const(value, size)
constr = create_eq(lh_expr, rh_expr)
self.assertEqual(constr.size, size)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_leq_constr(self):
"""Test creating a less than or equal constraint.
"""
size = (5, 5)
x = create_var(size)
y = create_var(size)
lh_expr = sum_expr([x, y])
value = np.ones(size)
rh_expr = create_const(value, size)
constr = create_leq(lh_expr, rh_expr)
self.assertEqual(constr.size, size)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_sum_entries(self):
"""Test sum entries op.
"""
size = (5, 5)
x = create_var(size)
expr = sum_entries(x)
self.assertEqual(expr.size, (1, 1))
self.assertEqual(len(expr.args), 1)
self.assertEqual(expr.type, lo.SUM_ENTRIES)
|
xuru/pyvisdk
|
pyvisdk/enums/virtual_disk_adapter_type.py
|
Python
|
mit
| 239
| 0
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
VirtualDiskAdapterType = Enum(
'busLogic',
'ide',
'lsiLogic',
)
|
shedskin/shedskin
|
examples/bh.py
|
Python
|
gpl-3.0
| 20,915
| 0.001004
|
"""
A Python implementation of the _bh_ Olden benchmark.
The Olden benchmark implements the Barnes-Hut benchmark
that is described in:
J. Barnes and P. Hut, "A hierarchical o(N log N) force-calculation algorithm",
Nature, 324:446-449, Dec. 1986
The original code in the Olden benchmark suite is derived from the
ftp://hubble.ifa.hawaii.edu/pub/barnes/treecode
source distributed by Barnes.
This code comes from the third Java version.
This uses copy() instead of Vec3.clone(), and it's adapted for ShedSkin.
"""
from time import clock
from sys import stderr, maxint, argv
from copy import copy
from math import sqrt, pi, floor
class Random(object):
"""
Basic uniform random generator: Minimal Standard in Park and
Miller (1988): "Random Number Generators: Good Ones Are Hard to
Find", Comm. of the ACM, 31, 1192-1201.
Parameters: m = 2^31-1, a=48271.
Adapted from Pascal code by Jesper Lund:
    http://www.gnu-pascal.de/crystal/gpc/en/mail1390.html
"""
__slots__ = ["seed"]
m = maxint
a = 48271
q = m / a
r = m % a
def __init__(self, the_seed):
self.seed = the_seed
def uniform(self, min, max):
k = self.seed / Random.q
self.seed = Random.a * (self.seed - k * Random.q) - Random.r * k
if self.seed < 1:
self.seed += Random.m
r = float(self.seed) / Random.m
return r * (max - min) + min
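# Hedged usage sketch (the seed value 1 is arbitrary): the generator is
# deterministic, so the same seed always reproduces the same sequence.
# rnd = Random(1)
# x = rnd.uniform(0.0, 1.0)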
class Vec3(object):
"""
A class representing a three dimensional vector that implements
several math operations. To improve speed we implement the
    vector as an array of doubles rather than use the existing
    code in the java.util class.
"""
__slots__ = ["d0", "d1", "d2"]
# The number of dimensions in the vector
NDIM = 3
def __init__(self):
"""Construct an empty 3 dimensional vector for use in Barnes-Hut algorithm."""
self.d0 = 0.0
self.d1 = 0.0
self.d2 = 0.0
def __getitem__(self, i):
"""
Return the value at the i'th index of the vector.
@param i the vector index
@return the value at the i'th index of the vector.
"""
if i == 0:
return self.d0
elif i == 1:
return self.d1
else:
return self.d2
def __setitem__(self, i, v):
"""
Set the value of the i'th index of the vector.
@param i the vector index
@param v the value to store
"""
if i == 0:
self.d0 = v
elif i == 1:
self.d1 = v
else:
self.d2 = v
def __iadd__(self, u):
"""
Add two vectors and the result is placed in self vector.
@param u the other operand of the addition
"""
self.d0 += u.d0
self.d1 += u.d1
self.d2 += u.d2
return self
def __isub__(self, u):
"""
Subtract two vectors and the result is placed in self vector.
        This vector contains the first operand.
@param u the other operand of the subtraction.
"""
self.d0 -= u.d0
self.d1 -= u.d1
self.d2 -= u.d2
return self
def __imul__(self, s):
"""
Multiply the vector times a scalar.
        @param s the scalar value
"""
self.d0 *= s
self.d1 *= s
self.d2 *= s
return self
def __idiv__(self, s):
"""
Divide each element of the vector by a scalar value.
@param s the scalar value.
"""
self.d0 /= s
self.d1 /= s
self.d2 /= s
return self
def add_scalar(self, u, s):
self.d0 = u.d0 + s
self.d1 = u.d1 + s
self.d2 = u.d2 + s
def subtraction2(self, u, v):
"""
Subtract two vectors and the result is placed in self vector.
@param u the first operand of the subtraction.
        @param v the second operand of the subtraction
"""
self.d0 = u.d0 - v.d0
self.d1 = u.d1 - v.d1
self.d2 = u.d2 - v.d2
def mult_scalar2(self, u, s):
"""
Multiply the vector times a scalar and place the result in self vector.
@param u the vector
@param s the scalar value
"""
self.d0 = u.d0 * s
self.d1 = u.d1 * s
self.d2 = u.d2 * s
def dot(self):
"""
Return the dot product of a vector.
@return the dot product of a vector.
"""
return self.d0 * self.d0 + self.d1 * self.d1 + self.d2 * self.d2
def __repr__(self):
return "%.17f %.17f %.17f " % (self.d0, self.d1, self.d2)
class HG(object):
"""
A sub class which is used to compute and save information during the
gravity computation phase.
"""
__slots__ = ["pskip", "pos0", "phi0", "acc0"]
def __init__(self, b, p):
"""
        Create an object.
@param b the body object
@param p a vector that represents the body
"""
# Body to skip in force evaluation
self.pskip = b
        # Point at which to evaluate the field
self.pos0 = copy(p)
# Computed potential at pos0
self.phi0 = 0.0
# computed acceleration at pos0
self.acc0 = Vec3()
class Node(object):
"""A class that represents the common fields of a cell or body data structure."""
# highest bit of coord
IMAX = 1073741824
# potential softening parameter
EPS = 0.05
def __init__(self):
"""Construct an empty node"""
self.mass = 0.0 # mass of the node
self.pos = Vec3() # Position of the node
def load_tree(self, p, xpic, l, root):
raise NotImplementedError()
def hack_cofm(self):
raise NotImplementedError()
def walk_sub_tree(self, dsq, hg):
raise NotImplementedError()
@staticmethod
def old_sub_index(ic, l):
i = 0
for k in xrange(Vec3.NDIM):
if (int(ic[k]) & l) != 0:
i += Cell.NSUB >> (k + 1)
return i
def __repr__(self):
return "%f : %f" % (self.mass, self.pos)
def grav_sub(self, hg):
"""Compute a single body-body or body-cell interaction"""
dr = Vec3()
dr.subtraction2(self.pos, hg.pos0)
drsq = dr.dot() + (Node.EPS * Node.EPS)
drabs = sqrt(drsq)
phii = self.mass / drabs
hg.phi0 -= phii
mor3 = phii / drsq
dr *= mor3
hg.acc0 += dr
return hg
class Body(Node):
"""A class used to representing particles in the N-body simulation."""
def __init__(self):
"""Create an empty body."""
Node.__init__(self)
self.vel = Vec3()
self.acc = Vec3()
self.new_acc = Vec3()
self.phi = 0.0
def expand_box(self, tree, nsteps):
"""
Enlarge cubical "box", salvaging existing tree structure.
@param tree the root of the tree.
@param nsteps the current time step
"""
rmid = Vec3()
inbox = self.ic_test(tree)
while not inbox:
rsize = tree.rsize
rmid.add_scalar(tree.rmin, 0.5 * rsize)
for k in xrange(Vec3.NDIM):
if self.pos[k] < rmid[k]:
rmin = tree.rmin[k]
tree.rmin[k] = rmin - rsize
tree.rsize = 2.0 * rsize
if tree.root is not None:
ic = tree.int_coord(rmid)
if ic is None:
raise Exception("Value is out of bounds")
k = Node.old_sub_index(ic, Node.IMAX >> 1)
newt = Cell()
newt.subp[k] = tree.root
tree.root = newt
inbox = self.ic_test(tree)
def ic_test(self, tree):
"""Check the bounds of the body and return True if it isn't in the correct bounds."""
pos0 = self.pos[0]
|
DiegoCorrea/ouvidoMusical
|
config/data/oneMillionSongs/mining.py
|
Python
|
mit
| 2,978
| 0.003359
|
from random import sample
import os
songList = []
songDict = None
userPlayList = []
directory = ''
def getSongs(name, limit):
global songList
global directory
print('*'*30)
    print('* Mining ', str(limit), ' songs *')
print('*'*30)
status = 0
if not os.path.exists(directory):
os.makedirs(directory)
toSaveFile = open(
'config/data/oneMillionSongs/sets/' + str(name) + '/songs.csv',
'w+'
)
toSaveFile.write('id,title\n')
songSet = sample(
set(
open(
'config/data/oneMillionSongs/originalCleanEntry/songs.csv',
'r+'
)
), limit
)
for line in songSet:
if (status % 1000 == 0):
print ("-> [", status, "]")
lineSplit = line.split(',')
songList.append(lineSplit[0])
toSaveFile.write(lineSplit[0] + ',' + lineSplit[1] + '\n')
if (status > limit):
break
status += 1
    print ('- Total songs: ', len(songList))
toSaveFile.close()
    print ('- Finishing the script!')
def getPlayCount(name, limit, userLimit):
global songDict
global userPlayList
global directory
print ('*'*30)
    print ('* Fetching the list of people who listened to the songs *')
print ('*'*30)
status = 0
if not os.path.exists(directory):
os.makedirs(directory)
toSaveFile = open(
'config/data/oneMillionSongs/sets/' + str(name) + '/playCount.csv',
'w+'
)
toSaveFile.write('user_id,song_id,play_count\n')
for line in open(
'config/data/oneMillionSongs/originalCleanEntry/playCount.csv',
'r+'
):
status += 1
if status == 1:
continue
if (status % 1000 == 0):
print ("-> [", status, "]")
lineSplit = line.split(',')
if (lineSplit[1] not in songDict):
continue
if (len(userPlayList) >= userLimit and lineSplit[0] not in userPlayList):
continue
if lineSplit[0] not in userPlayList:
userPlayList.append(lineSplit[0])
toSaveFile.write(line)
userDict = set(userPlayList)
    print ('- Total users: ', len(userDict))
usersToSaveFile = open(
'config/data/oneMillionSongs/sets/' + str(name) + '/users.csv',
'w+'
)
usersToSaveFile.write('id\n')
for user in userDict:
usersToSaveFile.write(user + "\n")
toSaveFile.close()
usersToSaveFile.close()
    print ('- Finishing the script!')
def start(name, limit, userLimit=None):
global directory
global songDict
directory = 'config/data/oneMillionSongs/sets/' + str(name)
getSongs(name, limit)
    songDict = set(songList)
getPlayCount(name, limit, userLimit)
##########
def main():
start(name="thousand", limit=1000)
start(name="two_thousand", limit=2000)
start(name="three_thousand", limit=3000)
start(name="ten_thousand", limit=10000)
|
jbzdak/migration_manager
|
migration_manager/manager.py
|
Python
|
bsd-2-clause
| 2,915
| 0.009262
|
# -*- coding: utf-8 -*-
from django.core import exceptions
from django.utils.importlib import import_module
__author__ = 'jb'
MIGRATION_MANAGERS = {}
DEFAULT_MANAGER = None
def load_manager(path):
"""
Code taken from django.
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
try:
mw_module, mw_classname = path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a manager module' % path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing manager %s: "%s"' % (mw_module, e))
try:
manager_instance = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Manager module "%s" does not define a "%s" object' % (mw_module, mw_classname))
return manager_instance
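# Hedged usage sketch (the dotted path below is hypothetical):
# manager_cls = load_manager('myapp.migration_managers.DefaultManager')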
def initialize():
from django.conf import settings
global MIGRATION_MANAGERS, DEFAULT_MANAGER
MANAGERS = getattr(settings, "MIGRATION_MANAGERS", None)
if MANAGERS is not None:
for k, v in MANAGERS:
MIGRATION_MANAGERS[k] = load_manager(v)
DEFAULT_MANAGER = MIGRATION_MANAGERS[MANAGERS[0][0]]
initialize()
|
reubano/ckanutils
|
manage.py
|
Python
|
mit
| 2,296
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A script to manage development tasks """
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import path as p
from manager import Manager
from subprocess import call
manager = Manager()
_basedir = p.dirname(__file__)
@manager.command
def clean():
"""Remove Python file and build artifacts"""
call(p.join(_basedir, 'helpers', 'clean'), shell=True)
@manager.command
def check():
"""Check staged changes for lint errors"""
call(p.join(_basedir, 'helpers', 'check-stage'), shell=True)
@manager.arg('where', 'w', help='Modules to check')
@manager.command
def lint(where=None):
"""Check
|
style with flake8"""
    call('flake8 %s' % (where if where else ''), shell=True)
@manager.command
def pipme():
"""Install requirements.txt"""
call('pip install -r requirements.txt', shell=True)
@manager.command
def require():
"""Create requirements.txt"""
cmd = 'pip freeze -l | grep -vxFf dev-requirements.txt > requirements.txt'
call(cmd, shell=True)
@manager.arg('where', 'w', help='test path', default=None)
@manager.arg(
'stop', 'x', help='Stop after first error', type=bool, default=False)
@manager.command
def test(where=None, stop=False):
"""Run nose and script tests"""
opts = '-xv' if stop else '-v'
opts += 'w %s' % where if where else ''
call([p.join(_basedir, 'helpers', 'test'), opts])
@manager.command
def register():
"""Register package with PyPI"""
call('python %s register' % p.join(_basedir, 'setup.py'), shell=True)
@manager.command
def release():
"""Package and upload a release"""
sdist()
wheel()
upload()
@manager.command
def build():
"""Create a source distribution and wheel package"""
sdist()
wheel()
@manager.command
def upload():
"""Upload distribution files"""
call('twine upload %s' % p.join(_basedir, 'dist', '*'), shell=True)
@manager.command
def sdist():
"""Create a source distribution package"""
call(p.join(_basedir, 'helpers', 'srcdist'), shell=True)
@manager.command
def wheel():
"""Create a wheel package"""
call(p.join(_basedir, 'helpers', 'wheel'), shell=True)
if __name__ == '__main__':
manager.main()
|
rhambach/TEMareels
|
runscripts/_set_pkgdir.py
|
Python
|
mit
| 674
| 0.01632
|
"""
Unique place for all runscripts to set the module path.
(i.e. if you have a copy of TEMareels in some place on your
hard disk but not in PYTHONPATH).
NOTE: it is recommended to keep a copy of all TEMareels
module files with your data/analysis, as future versions
will not necessarily be backwards compatible.
Copyright (c) 2013, rhambach.
This file is part of the TEMareels package and released
under the MIT-Licence. See LICENCE file for details.
"""
# location of the TEMareels package on the hard disk
# (if not specified in PYTHONPATH)
pkgdir = '../..';
import sys
from os.path import abspath;
sys.path.insert(0,abspath(pkgdir));
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/mapbox/layer/_sourceattribution.py
|
Python
|
mit
| 521
| 0
|
import _plotly_utils.basevalidators
class SourceattributionValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="sourceattribution",
parent_name="layout.mapbox.layer",
**kwargs
):
super(SourceattributionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("ed
|
it_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
Gustry/inasafe
|
safe/datastore/test/test_geopackage.py
|
Python
|
gpl-3.0
| 5,299
| 0.000189
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Clipper test suite.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
import sys
from tempfile import mktemp
from qgis.core import QgsVectorLayer, QgsRasterLayer
from PyQt4.QtCore import QFileInfo
from osgeo import gdal
from safe.test.utilities import (
get_qgis_app,
    load_test_vector_layer,
standard_data_path)
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.datastore.geopackage import GeoPackage
# Decorator for expecting fails in windows but not other OS's
# Probably we should move this somewhere in utils for easy re-use...TS
def expect_failure_in_windows(exception):
"""Marks test to expect a fail in windows - call assertRaises internally.
..versionadded:: 4.0.0
"""
def test_decorator(fn):
def test_decorated(self, *args, **kwargs):
if sys.platform.startswith('win'):
self.assertRaises(exception, fn, self, *args, **kwargs)
return test_decorated
return test_decorator
class TestGeoPackage(unittest.TestCase):
"""Test the GeoPackage datastore."""
def setUp(self):
pass
def tearDown(self):
pass
@unittest.skipIf(
int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
'GDAL 2.0 is required for geopackage.')
def test_create_geopackage(self):
"""Test if we can store geopackage."""
# Create a geopackage from an empty file.
path = QFileInfo(mktemp() + '.gpkg')
self.assertFalse(path.exists())
data_store = GeoPackage(path)
path.refresh()
self.assertTrue(path.exists())
# Let's add a vector layer.
layer_name = 'flood_test'
layer = standard_data_path('hazard', 'flood_multipart_polygons.shp')
vector_layer = QgsVectorLayer(layer, 'Flood', 'ogr')
result = data_store.add_layer(vector_layer, layer_name)
self.assertTrue(result[0])
# We should have one layer.
layers = data_store.layers()
self.assertEqual(len(layers), 1)
self.assertIn(layer_name, layers)
# Add the same layer with another name.
layer_name = 'another_vector_flood'
result = data_store.add_layer(vector_layer, layer_name)
self.assertTrue(result[0])
# We should have two layers.
layers = data_store.layers()
self.assertEqual(len(layers), 2)
self.assertIn(layer_name, layers)
# Test the URI of the new layer.
expected = path.absoluteFilePath() + '|layername=' + layer_name
self.assertEqual(data_store.layer_uri(layer_name), expected)
# Test a fake layer.
self.assertIsNone(data_store.layer_uri('fake_layer'))
# Test to add a raster
layer_name = 'raster_flood'
layer = standard_data_path('hazard', 'classified_hazard.tif')
raster_layer = QgsRasterLayer(layer, layer_name)
result = data_store.add_layer(raster_layer, layer_name)
self.assertTrue(result[0])
# We should have 3 layers inside.
layers = data_store.layers()
self.assertEqual(len(layers), 3)
# Check the URI for the raster layer.
expected = 'GPKG:' + path.absoluteFilePath() + ':' + layer_name
self.assertEqual(data_store.layer_uri(layer_name), expected)
# Add a second raster.
layer_name = 'big raster flood'
self.assertTrue(data_store.add_layer(raster_layer, layer_name))
self.assertEqual(len(data_store.layers()), 4)
# Test layer without geometry
layer = load_test_vector_layer(
'gisv4', 'impacts', 'exposure_summary_table.csv')
tabular_layer_name = 'breakdown'
result = data_store.add_layer(layer, tabular_layer_name)
self.assertTrue(result[0])
@unittest.skipIf(
int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
'GDAL 2.0 is required for geopackage.')
@expect_failure_in_windows(AssertionError)
def test_read_existing_geopackage(self):
"""Test we can read an existing geopackage."""
path = standard_data_path('other', 'jakarta.gpkg')
import os
path = os.path.normpath(os.path.normcase(os.path.abspath(path)))
geopackage = QFileInfo(path)
data_store = GeoPackage(geopackage)
# We should have 3 layers in this geopackage.
self.assertEqual(len(data_store.layers()), 3)
# Test we can load a vector layer.
roads = QgsVectorLayer(
data_store.layer_uri('roads'),
'Test',
'ogr'
)
self.assertTrue(roads.isValid())
# Test we can load a raster layers.
# This currently fails on windows...
# So we have decorated it with expected fail on windows
# Should pass on other platforms.
path = data_store.layer_uri('flood')
flood = QgsRasterLayer(path, 'flood')
self.assertTrue(flood.isValid())
if __name__ == '__main__':
unittest.main()
|
orbitfold/tardis
|
tardis/plasma/properties/radiative_properties.py
|
Python
|
bsd-3-clause
| 11,605
| 0.003188
|
import logging
import numpy as np
import pandas as pd
import numexpr as ne
from astropy import units as u, constants as const
from tardis.plasma.properties.base import ProcessingPlasmaProperty
from tardis.plasma.properties.util import macro_atom
logger = logging.getLogger(__name__)
__all__ = ['StimulatedEmissionFactor', 'TauSobolev', 'BetaSobolev',
'TransitionProbabilities', 'LTEJBlues']
class StimulatedEmissionFactor(ProcessingPlasmaProperty):
"""
Attributes
----------
stimulated_emission_factor : Numpy Array, dtype float
Indexed by lines, columns as zones.
"""
outputs = ('stimulated_emission_factor',)
latex_formula = ('1-\\dfrac{g_{lower}n_{upper}}{g_{upper}n_{lower}}',)
def __init__(self, plasma_parent=None, nlte_species=None):
super(StimulatedEmissionFactor, self).__init__(plasma_parent)
self._g_upper = None
self._g_lower = None
try:
self.nlte_species = self.plasma_parent.nlte_species
except:
self.nlte_species = nlte_species
def get_g_lower(self, g, lines_lower_level_index):
if self._g_lower is None:
g_lower = np.array(g.ix[lines_lower_level_index],
dtype=np.float64)
self._g_lower = g_lower[np.newaxis].T
return self._g_lower
def get_g_upper(self, g, lines_upper_level_index):
if self._g_upper is None:
g_upper = np.array(g.ix[lines_upper_level_index],
dtype=np.float64)
self._g_upper = g_upper[np.newaxis].T
return self._g_upper
def get_metastable_upper(self, metastability, lines_upper_level_index):
if getattr(self, '_meta_stable_upper', None) is None:
self._meta_stable_upper = metastability.values[
lines_upper_level_index][np.newaxis].T
return self._meta_stable_upper
def calculate(self, g, level_number_density, lines_lower_level_index,
lines_upper_level_index, metastability, lines):
n_lower = level_number_density.values.take(lines_lower_level_index,
axis=0, mode='raise')
n_upper = level_number_density.values.take(lines_upper_level_index,
axis=0, mode='raise')
g_lower = self.get_g_lower(g, lines_lower_level_index)
g_upper = self.get_g_upper(g, lines_upper_level_index)
meta_stable_upper = self.get_metastable_upper(metastability,
lines_upper_level_index)
stimulated_emission_factor = ne.evaluate('1 - ((g_lower * n_upper) / '
'(g_upper * n_lower))')
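        # numexpr evaluates the whole elementwise expression in one pass,
        # avoiding the temporary arrays a plain numpy expression would create.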
stimulated_emission_factor[n_lower == 0.0] = 0.0
stimulated_emission_factor[np.isneginf(stimulated_emission_factor)]\
= 0.0
stimulated_emission_factor[meta_stable_upper &
(stimulated_emission_factor < 0)] = 0.0
if self.nlte_species:
nlte_lines_mask = \
np.zeros(stimulated_emission_factor.shape[0]).astype(bool)
for species in self.nlte_species:
nlte_lines_mask |= (lines.atomic_number == species[0]) & \
(lines.ion_number == species[1])
stimulated_emission_factor[(stimulated_emission_factor < 0) &
nlte_lines_mask[np.newaxis].T] = 0.0
return stimulated_emission_factor
class TauSobolev(ProcessingPlasmaProperty):
"""
Attributes
----------
tau_sobolev : Pandas DataFrame, dtype float
Sobolev optical depth for each line. Indexed by line.
Columns as zones.
"""
outputs = ('tau_sobolevs',)
latex_name = ('\\tau_{\\textrm{sobolev}}',)
latex_formula = ('\\dfrac{\\pi e^{2}}{m_{e} c}f_{lu}\\lambda t_{exp}\
n_{lower} \\Big(1-\\dfrac{g_{lower}n_{upper}}{g_{upper}n_{lower}}\\Big)',)
def __init__(self, plasma_parent):
super(TauSobolev, self).__init__(plasma_parent)
self.sobolev_coefficient = (((np.pi * const.e.gauss ** 2) /
(const.m_e.cgs * const.c.cgs))
* u.cm * u.s / u.cm**3).to(1).value
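        # The unit factors cancel, so .to(1) doubles as a dimensional
        # consistency check and .value yields a plain dimensionless float.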
def calculate(self, lines, level_number_density, lines_lower_level_index,
time_explosion, stimulated_emission_factor, j_blues,
f_lu, wavelength_cm):
f_lu = f_lu.values[np.newaxis].T
wavelength = wavelength_cm.values[np.newaxis].T
n_lower = level_number_density.values.take(lines_lower_level_index,
axis=0, mode='raise')
tau_sobolevs = (self.sobolev_coefficient * f_lu * wavelength *
time_explosion * n_lower * stimulated_emission_factor)
if (np.any(np.isnan(tau_sobolevs)) or
np.any(np.isinf(np.abs(tau_sobolevs)))):
raise ValueError(
'Some tau_sobolevs are nan, inf, -inf in tau_sobolevs.'
' Something went wrong!')
return pd.DataFrame(tau_sobolevs, index=lines.index,
columns=np.array(level_number_density.columns))
class BetaSobolev(ProcessingPlasmaProperty):
"""
Attributes
----------
beta_sobolev : Numpy Array, dtype float
"""
outputs = ('beta_sobolev',)
latex_name = ('\\beta_{\\textrm{sobolev}}',)
def calculate(self, tau_sobolevs):
if getattr(self, 'beta_sobolev', None) is None:
beta_sobolev = np.zeros_like(tau_sobolevs.values)
else:
beta_sobolev = self.beta_sobolev
macro_atom.calculate_beta_sobolev(
tau_sobolevs.values.ravel(),
beta_sobolev.ravel())
return beta_sobolev
class TransitionProbabilities(ProcessingPlasmaProperty):
"""
Attributes
----------
transition_probabilities : Pandas DataFrame, dtype float
"""
outputs = ('transition_probabilities',)
def __init__(self, plasma_parent):
super(TransitionProbabilities, self).__init__(plasma_parent)
self.initialize = True
    def calculate(self, atomic_data, beta_sobolev, j_blues,
stimulated_emission_factor, tau_sobolevs):
        # I wonder why?
        # Not sure who wrote this, but the answer is that when the plasma is
        # first initialised (before the first iteration, without temperature
# values etc.) there are no j_blues values so this just prevents
# an error. Aoife.
if len(j_blues) == 0:
return None
macro_atom_data = self._get_macro_atom_data(atomic_data)
if self.initialize:
self.initialize_macro_atom_transition_type_filters(atomic_data,
macro_atom_data)
self.transition_probability_coef = (
self._get_transition_probability_coefs(macro_atom_data))
self.initialize = False
transition_probabilities = self._calculate_transition_probability(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor)
transition_probabilities = pd.DataFrame(transition_probabilities,
index=macro_atom_data.transition_line_id,
columns=tau_sobolevs.columns)
return transition_probabilities
def _calculate_transition_probability(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
transition_probabilities = np.empty((self.transition_probability_coef.shape[0], beta_sobolev.shape[1]))
#trans_old = self.calculate_transition_probabilities(macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor)
transition_type = macro_atom_data.transition_type.values
lines_idx = macro_atom_data.lines_idx.values
tpos = macro_atom_data.transition_probability.values
#optimized_calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transition_type, lines_idx, self.block_references, transition_probabilities)
macro_atom.calculate_transition_probabilities(tpos, beta_sobolev, j_blues, stimulated_emission_factor, transit
|
tanium/pytan
|
EXAMPLES/PYTAN_API/invalid_get_action_single_by_name.py
|
Python
|
mit
| 3,103
| 0.0029
|
#!/usr/bin/env python
"""
Get an action by name (name is not a supported selector for action)
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["objtype"] = u'action'
kwargs["name"] = u'Distribute Tanium Standard Utilities'
print "...CALLING: handler.get() with args: {}".format(kwargs)
try:
handler.get(**kwargs)
except Exception as e:
print "...EXCEPTION: {}".format(e)
# this should throw an exception of type: pytan.exceptions.HandlerError
# uncomment to see full exception
# traceback.print_exc(file=sys.stdout)
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get() with args: {'objtype': u'action', 'name': u'Distribute Tanium Standard Utilities'}
...EXCEPTION: Getting a action requires at least one filter: ['id']
'''
'''STDERR from running this:
'''
|
denmojo/pygrow
|
grow/preprocessors/sass_preprocessor.py
|
Python
|
mit
| 3,649
| 0.002192
|
from . import base
from grow.common import utils
from protorpc import messages
import logging
import os
import re
if utils.is_appengine():
sass = None # Unavailable on Google App Engine.
else:
import sass
SUFFIXES = frozenset(['sass', 'scss'])
SUFFIX_PATTERN = re.compile('[.](' + '|'.join(map(re.escape, SUFFIXES)) + ')$')
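# e.g. SUFFIX_PATTERN.search('main.scss') matches while 'main.css' does not;
# names starting with '_' are treated as Sass partials and skipped below.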
class Config(messages.Message):
sass_dir = messages.StringField(1)
out_dir = messages.StringField(2)
suffix = messages.StringField(3, default='.min.css')
output_style = messages.StringField(4, default='compressed')
source_comments = messages.BooleanField(5)
    image_path = messages.StringField(6)
class SassPreprocessor(base.BasePreprocessor):
    KIND = 'sass'
Config = Config
def run(self, build=True):
sass_dir = os.path.abspath(os.path.join(self.root, self.config.sass_dir.lstrip('/')))
out_dir = os.path.abspath(os.path.join(self.root, self.config.out_dir.lstrip('/')))
self.build_directory(sass_dir, out_dir)
def build_directory(self, sass_path, css_path, _root_sass=None, _root_css=None):
if sass is None:
raise utils.UnavailableError('The Sass compiler is not available in this environment.')
if self.config.image_path:
image_path = os.path.abspath(os.path.join(self.root, self.config.image_path.lstrip('/')))
else:
image_path = None
_root_sass = sass_path if _root_sass is None else _root_sass
_root_css = css_path if _root_css is None else _root_css
result = {}
if not os.path.isdir(css_path):
os.makedirs(css_path)
for name in os.listdir(sass_path):
if not SUFFIX_PATTERN.search(name) or name.startswith('_'):
continue
sass_fullname = os.path.join(sass_path, name)
if os.path.isfile(sass_fullname):
basename = os.path.splitext(name)[0]
css_fullname = os.path.join(css_path, basename) + self.config.suffix
try:
kwargs = {
'filename': sass_fullname,
'include_paths': [_root_sass],
'output_style': self.config.output_style,
}
                    if image_path is not None:
                        kwargs['image_path'] = image_path
css = sass.compile(**kwargs)
except sass.CompileError as e:
logging.error(str(e))
return result
with open(css_fullname, 'w') as css_file:
if isinstance(css, unicode):
css = css.encode('utf-8')
css_file.write(css)
result[sass_fullname] = css_fullname
elif os.path.isdir(sass_fullname):
css_fullname = os.path.join(css_path, name)
subresult = self.build_directory(sass_fullname, css_fullname,
_root_sass, _root_css)
result.update(subresult)
for sass_path, out_path in result.iteritems():
self.logger.info(
'Compiled: {} -> {}'.format(sass_path.replace(self.root, ''),
out_path.replace(self.root, '')))
return result
def list_watched_dirs(self):
return [self.config.sass_dir]
|
laserson/hdfs
|
doc/conf.py
|
Python
|
mit
| 10,413
| 0.006914
|
# -*- coding: utf-8 -*-
#
# hdfs documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 6 16:04:56 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = ['numpy', 'pandas', 'requests_kerberos']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
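# Stubbing these heavy optional dependencies lets autodoc import the hdfs
# package on documentation build machines (e.g. Read the Docs) that lack them.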
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HdfsCLI'
copyright = u'2014'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import hdfs
# The short X.Y version.
version = hdfs.__version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = hdfs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Autodoc
autoclass_content = 'both'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hdfsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'hdfs.tex', u'hdfs Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hdfs', u'hdfs documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hdfs', u'hdfs documentation',
u'Author', 'hdfs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
|
hwkns/macguffin
|
files/utils.py
|
Python
|
gpl-3.0
| 5,724
| 0.001747
|
from __future__ import print_function, unicode_literals, division, absolute_import
import os
import sys
import string
import random
import hashlib
import logging
import subprocess
from io import StringIO
import config
def valid_path(path):
"""
Returns an expanded, absolute path, or None if the path does not exist.
"""
path = os.path.expanduser(path)
if not os.path.exists(path):
return None
return os.path.abspath(path)
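# e.g. valid_path('~/does-not-exist') returns None (hypothetical path), while
# an existing path comes back expanded and absolute.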
def get_paths(args):
"""
Returns expanded, absolute paths for all valid paths in a list of arguments.
"""
assert isinstance(args, list)
valid_paths = []
for path in args:
abs_path = valid_path(path)
if abs_path is not None:
valid_paths.append(abs_path)
return valid_paths
def split_path(path):
"""
Returns a normalized list of the path's components.
"""
path = os.path.normpath(path)
return [x for x in path.split(os.path.sep) if x]
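# Illustrative behavior (POSIX separators assumed):
#   split_path('/foo/bar/baz.txt') -> ['foo', 'bar', 'baz.txt']
#   split_path('foo//bar/')        -> ['foo', 'bar']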
def generate_id(size=10, chars=string.ascii_uppercase + string.digits):
"""
Generate a string of random alphanumeric characters.
"""
return ''.join(random.choice(chars) for i in range(size))
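# Example (output is random; the value shown is illustrative only):
#   generate_id() -> 'G7K2Q9ZX4B'  # 10 characters drawn from A-Z and 0-9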
def list_contents(rar_file_path):
"""
Returns a list of the archive's contents.
"""
assert os.path.isfile(rar_file_path) and rar_file_path.endswith('.rar')
contents = []
count = 0
command = '"{unrar}" v -- "{file}"'
command = command.format(unrar=config.UNRAR_PATH, file=rar_file_path)
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
output = e.output.decode(encoding='utf-8')
msg = 'Error while listing archive contents: "{error_string}"'
raise FileUtilsError(msg.format(error_string=output.strip()))
else:
output = StringIO(output.decode(encoding='utf-8'))
parse = False
for line in output.readlines():
line_list = line.strip().split()
# If the line is not empty...
if line_list:
# This marks the start and end of the section we want to parse
if line_list[0] == '-------------------------------------------------------------------------------':
parse = not parse
count = 0
# If we're in the section of the output we want to parse...
elif parse:
# Parse every other line (only the file paths)
if count % 2 == 0:
contents.append(line_list[0])
count += 1
return contents
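# Usage sketch (hypothetical archive path; assumes config.UNRAR_PATH points
# at a working unrar binary):
#   contents = list_contents('/downloads/release/archive.rar')
#   # contents is a list of archive-relative file paths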
def unrar(rar_file_path, destination_dir=None):
"""
Get a list of the archive's contents, then extract the archive and return the list.
"""
assert os.path.isfile(rar_file_path) and rar_file_path.endswith('.rar')
if not destination_dir:
destination_dir = os.path.split(rar_file_path)[0]
# Get a list of the archive's contents
contents = list_contents(rar_file_path)
extracted_files = []
# Extract the archive
command = '"{unrar}" x -o+ -- "{file}" "{destination}"'
command = command.format(unrar=config.UNRAR_PATH, file=rar_file_path, destination=destination_dir)
logging.debug(command)
try:
subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
output = e.output.decode(encoding='utf-8')
msg = 'Error while extracting!\n{error_string}'
        raise FileUtilsError(msg.format(error_string=output.strip()))
for relative_path in contents:
path = os.path.join(destination_dir, relative_path)
        # Recursively extract until there are no RAR files left
if path.endswith('.rar'):
extracted_files += unrar(path)
else:
extracted_files.append(path)
# Return the list of paths
return extracted_files
def sha1(data):
"""
Return the SHA-1 hash of the given data.
"""
assert isinstance(data, (bytes, bytearray))
sha1_hash = hashlib.sha1()
sha1_hash.update(data)
return sha1_hash.digest()
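# Known test vector for a quick sanity check: sha1(b'abc') yields the
# 20-byte digest whose hex form is a9993e364706816aba3e25717850c26c9cd0d89d.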
def set_log_file_name(file_name):
"""
Set the file name for log output.
"""
# Remove all logging handlers from the root logger
logger = logging.getLogger('')
for handler in list(logger.handlers):
logger.removeHandler(handler)
handler.flush()
handler.close()
# Configure console logging
console_log_format = logging.Formatter('%(module)-15s: %(levelname)-8s %(message)s')
console_log_handler = logging.StreamHandler(sys.stdout)
console_log_handler.setFormatter(console_log_format)
console_log_handler.setLevel(logging.INFO)
logger.addHandler(console_log_handler)
# Configure disk logging
if file_name:
log_path = os.path.join(config.LOG_DIR, file_name)
disk_log_format = logging.Formatter('%(asctime)s %(module)-15s: %(levelname)-8s %(message)s')
disk_log_handler = logging.FileHandler(filename=log_path, mode='w', encoding='utf-8')
disk_log_handler.setFormatter(disk_log_format)
disk_log_handler.setLevel(logging.DEBUG)
logger.addHandler(disk_log_handler)
logger.setLevel(logging.DEBUG)
# Set logging level for the requests lib to warning+
requests_log = logging.getLogger('requests')
requests_log.setLevel(logging.WARNING)
# Log system info and Python version for debugging purposes
logging.debug('Python {version}'.format(version=sys.version))
logging.debug('System platform: {platform}'.format(platform=sys.platform))
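# Usage sketch (hypothetical log file name; files land in config.LOG_DIR):
#   set_log_file_name('macguffin.log')  # INFO+ to stdout, DEBUG+ to disk
#   set_log_file_name(None)             # console-only logging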
class FileUtilsError(Exception):
pass
|
home-assistant/home-assistant
|
homeassistant/components/tplink/config_flow.py
|
Python
|
apache-2.0
| 5,772
| 0.001213
|
"""Config flow for TP-Link."""
from __future__ import annotations
from typing import Any
from kasa import SmartDevice, SmartDeviceException
from kasa.discover import Discover
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_MAC
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import DiscoveryInfoType
from . import async_discover_devices
from .const import DOMAIN
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for tplink."""
VERSION = 1
def __init__(self) -> None:
"""Initialize the config flow."""
self._discovered_devices: dict[str, SmartDevice] = {}
self._discovered_device: SmartDevice | None = None
async def async_step_dhcp(self, discovery_info: dhcp.DhcpServiceInfo) -> FlowResult:
"""Handle discovery via dhcp."""
return await self._async_handle_discovery(
discovery_info.ip, discovery_info.macaddress
)
async def async_step_discovery(
self, discovery_info: DiscoveryInfoType
) -> FlowResult:
"""Handle discovery."""
return await self._async_handle_discovery(
discovery_info[CONF_HOST], discovery_info[CONF_MAC]
)
async def _async_handle_discovery(self, host: str, mac: str) -> FlowResult:
"""Handle any discovery."""
await self.async_set_unique_id(dr.format_mac(mac))
self._abort_if_unique_id_configured(updates={CONF_HOST: host})
self._async_abort_entries_match({CONF_HOST: host})
self.context[CONF_HOST] = host
for progress in self._async_in_progress():
if progress.get("context", {}).get(CONF_HOST) == host:
return self.async_abort(reason="already_in_progress")
try:
self._discovered_device = await self._async_try_connect(
host, raise_on_progress=True
)
except SmartDeviceException:
return self.async_abort(reason="cannot_connect")
return await self.async_step_discovery_confirm()
async def async_step_discovery_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Confirm discovery."""
assert self._discovered_device is not None
if user_input is not None:
return self._async_create_entry_from_device(self._discovered_device)
self._set_confirm_only()
placeholders = {
"name": self._discovered_device.alias,
"model": self._discovered_device.model,
"host": self._discovered_device.host,
}
self.context["title_placeholders"] = placeholders
return self.async_show_form(
step_id="discovery_confirm", description_placeholders=placeholders
)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
errors = {}
if user_input is not None:
if not (host := user_input[CONF_HOST]):
return await self.async_step_pick_device()
try:
                device = await self._async_try_connect(host, raise_on_progress=False)
            except SmartDeviceException:
errors["base"] = "cannot_connect"
else:
return self._async_create_entry_from_device(device)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Optional(CONF_HOST, default=""): str}),
errors=errors,
)
async def async_step_pick_device(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the step to pick discovered device."""
if user_input is not None:
mac = user_input[CONF_DEVICE]
await self.async_set_unique_id(mac, raise_on_progress=False)
return self._async_create_entry_from_device(self._discovered_devices[mac])
configured_devices = {
entry.unique_id for entry in self._async_current_entries()
}
self._discovered_devices = await async_discover_devices(self.hass)
devices_name = {
formatted_mac: f"{device.alias} {device.model} ({device.host}) {formatted_mac}"
for formatted_mac, device in self._discovered_devices.items()
if formatted_mac not in configured_devices
}
# Check if there is at least one device
if not devices_name:
return self.async_abort(reason="no_devices_found")
return self.async_show_form(
step_id="pick_device",
data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}),
)
@callback
def _async_create_entry_from_device(self, device: SmartDevice) -> FlowResult:
"""Create a config entry from a smart device."""
self._abort_if_unique_id_configured(updates={CONF_HOST: device.host})
return self.async_create_entry(
title=f"{device.alias} {device.model}",
data={
CONF_HOST: device.host,
},
)
async def _async_try_connect(
self, host: str, raise_on_progress: bool = True
) -> SmartDevice:
"""Try to connect."""
self._async_abort_entries_match({CONF_HOST: host})
device: SmartDevice = await Discover.discover_single(host)
await self.async_set_unique_id(
dr.format_mac(device.mac), raise_on_progress=raise_on_progress
)
return device
|
Show-Me-the-Code/python
|
pylyria/0001/0001_1.py
|
Python
|
mit
| 651
| 0.012939
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Problem 0001: As an independent Apple Store App developer, you want to run a
# limited-time promotion and generate activation codes (or coupons) for your
# app. How do you generate 200 activation codes (or coupons) with Python?
import random
import string
def activation_code(chars = string.ascii_uppercase + string.digits, length=16):
return ''.join([random.choice(chars) for i in range(length)])
if __name__ == '__main__':
    code_collection = set()
    # Keep drawing until there are exactly 200 unique codes; the original
    # `for i in range(200)` loop could finish with fewer after a collision.
    while len(code_collection) < 200:
        code_collection.add(activation_code())
|
ethanfrey/aiojson
|
aiojson/echo_server.py
|
Python
|
bsd-3-clause
| 1,771
| 0.000565
|
"""
Simple http server to create streams for asyncio tests
"""
import asyncio
import aiohttp
from aiohttp import web
import codecs
from utils.streamdecoder import DecodingStreamReader
async def get_data(host, port):
url = 'http://{}:{}/'.format(host, port)
decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
async with aiohttp.get(url) as r:
stream = DecodingStreamReader(r.content)
while not stream.at_eof():
data = await stream.read(7)
print(data, end='')
class UTF8Server:
    def __init__(self):
self.app = web.Application()
self.app.router.add_route('GET', '/', self.hello)
self.handler = self.app.make_handler()
async def hello(self, request):
return web.Response(body=b'M\xc3\xa4dchen mit Bi\xc3\x9f\n')
# return web.Response(body=b'\xc3\x84\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4\xc3\xa4h!\n')
def start_server(self, loop, host, port):
setup = loop.create_server(self.handler, host, port)
self.srv = loop.run_until_complete(setup)
return self.srv
def stop_server(self, loop):
self.srv.close()
loop.run_until_complete(self.srv.wait_closed())
loop.run_until_complete(self.handler.finish_connections(1.0))
loop.run_until_complete(self.app.finish())
if __name__ == '__main__':
HOST = '127.0.0.1'
PORT = '56789'
loop = asyncio.get_event_loop()
server = UTF8Server()
server.start_server(loop, HOST, PORT)
print("serving on", server.srv.sockets[0].getsockname())
try:
task = asyncio.ensure_future(get_data(HOST, PORT))
loop.run_until_complete(task)
finally:
server.stop_server(loop)
loop.close()
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/twisted/persisted/journal/base.py
|
Python
|
gpl-3.0
| 66
| 0.015152
|
../../../../../../share/pyshared/twisted/persisted/journal/base.py
|
mFoxRU/cwaveplay
|
cwp/wavelets/ricker.py
|
Python
|
mit
| 215
| 0
|
__author__ = 'mFoxRU'
import scipy.signal as sps
from abstractwavelet import AbstractWavelet
class Ricker(AbstractWavelet):
name = 'Ricker(MHAT)'
params = {}
    def fn(self):
return sps.ricker
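# Usage sketch (assumes callers evaluate fn()'s result like
# scipy.signal.ricker(points, a)):
#   samples = Ricker().fn()(100, 4.0)  # 100-point Ricker wavelet, width a=4.0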
|
tdyas/pants
|
src/python/pants/option/options_bootstrapper.py
|
Python
|
apache-2.0
| 9,939
| 0.004628
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import os
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Type
from pants.base.build_environment import get_default_pants_config_file
from pants.option.config import Config
from pants.option.custom_types import ListValueComponent
from pants.option.global_options import GlobalOptions
from pants.option.optionable import Optionable
from pants.option.options import Options
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.util.dirutil import read_file
from pants.util.memo import memoized_method, memoized_property
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import ensure_text
# This is a temporary hack that allows us to note the fact that we're in v2-exclusive mode
# in a static location, as soon as we know it. This way code that cannot access options
# can still use this information to customize behavior. Again, this is a temporary hack
# to provide a better v2 experience to users who are not (and possibly never have been)
# running v1, and should go away ASAP.
class IsV2Exclusive:
def __init__(self):
self._value = False
def set(self):
self._value = True
def __bool__(self):
return self._value
is_v2_exclusive = IsV2Exclusive()
@dataclass(frozen=True)
class OptionsBootstrapper:
"""Holds the result of the first stage of options parsing, and assists with parsing full
options."""
env_tuples: Tuple[Tuple[str, str], ...]
bootstrap_args: Tuple[str, ...]
args: Tuple[str, ...]
config: Config
@staticmethod
def get_config_file_paths(env, args) -> List[str]:
"""Get the location of the config files.
The locations are specified by the --pants-config-files option. However we need to load the
config in order to process the options. This method special-cases --pants-config-files
in order to solve this chicken-and-egg problem.
Note that, obviously, it's not possible to set the location of config files in a config file.
Doing so will have no effect.
"""
# This exactly mirrors the logic applied in Option to all regular options. Note that we'll
# also parse --pants-config as a regular option later, but there's no harm in that. In fact,
# it's preferable, so that any code that happens to want to know where we read config from
# can inspect the option.
flag = "--pants-config-files="
evars = [
"PANTS_GLOBAL_PANTS_CONFIG_FILES",
"PANTS_PANTS_CONFIG_FILES",
"PANTS_CONFIG_FILES",
]
path_list_values = []
default = get_default_pants_config_file()
if Path(default).is_file():
path_list_values.append(ListValueComponent.create(default))
for var in evars:
if var in env:
path_list_values.append(ListValueComponent.create(env[var]))
break
for arg in args:
# Technically this is very slightly incorrect, as we don't check scope. But it's
# very unlikely that any task or subsystem will have an option named --pants-config-files.
# TODO: Enforce a ban on options with a --pants- prefix outside our global options?
if arg.startswith(flag):
path_list_values.append(ListValueComponent.create(arg[len(flag) :]))
return ListValueComponent.merge(path_list_values).val
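    # Note: the sources above are merged in append order -- the default
    # config file (if present), then the first PANTS_*CONFIG_FILES-style
    # env var found, then any --pants-config-files= flags from the
    # command line.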
@staticmethod
def parse_bootstrap_options(
env: Mapping[str, str], args: Sequence[str], config: Config
) -> Options:
bootstrap_options = Options.create(
env=env, config=config, known_scope_infos=[GlobalOptions.get_scope_info()], args=args,
)
def register_global(*args, **kwargs):
## Only use of Options.register?
bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs)
GlobalOptions.register_bootstrap_options(register_global)
opts = bootstrap_options.for_global_scope()
if opts.v2 and not opts.v1 and opts.backend_packages == []:
is_v2_exclusive.set()
return bootstrap_options
@classmethod
def create(
cls, env: Optional[Mapping[str, str]] = None, args: Optional[Sequence[str]] = None,
) -> "OptionsBootstrapper":
"""Parses the minimum amount of configuration necessary to create an OptionsBootstrapper.
:param env: An environment dictionary, or None to use `os.environ`.
:param args: An args array, or None to use `sys.argv`.
"""
env = {
k: v for k, v in (os.environ if env is None else env).items() if k.startswith("PANTS_")
}
args = tuple(sys.argv if args is None else args)
flags = set()
short_flags = set()
# We can't use pants.engine.fs.FileContent here because it would cause a circular dep.
@dataclass(frozen=True)
class FileContent:
path: str
content: bytes
def filecontent_for(path: str) -> FileContent:
return FileContent(ensure_text(path), read_file(path, binary_mode=True),)
def capture_the_flags(*args: str, **kwargs) -> None:
for arg in args:
flags.add(arg)
if len(arg) == 2:
short_flags.add(arg)
elif kwargs.get("type") == bool:
flags.add(f"--no-{arg[2:]}")
GlobalOptions.register_bootstrap_options(capture_the_flags)
def is_bootstrap_option(arg: str) -> bool:
components = arg.split("=", 1)
if components[0] in flags:
return True
for flag in short_flags:
if arg.startswith(flag):
return True
return False
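        # e.g. "--pants-config-files=cfg.toml" matches on the portion before
        # "=", while short flags are matched by prefix, so a fused form like
        # "-ldebug" (hypothetical) would also be caught.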
# Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
# Stop before '--' since args after that are pass-through and may have duplicate names to our
# bootstrap options.
bargs = tuple(
filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != "--", args))
)
config_file_paths = cls.get_config_file_paths(env=env, args=args)
config_files_products = [filecontent_for(p) for p in config_file_paths]
pre_bootstrap_config = Config.load_file_contents(config_files_products)
initial_bootstrap_options = cls.parse_bootstrap_options(env, bargs, pre_bootstrap_config)
bootstrap_option_values = initial_bootstrap_options.for_global_scope()
# Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
# from (typically pants.toml), then config override, then rcfiles.
full_config_paths = pre_bootstrap_config.sources()
if bootstrap_option_values.pantsrc:
rcfiles = [
os.path.expanduser(str(rcfile)) for rcfile in bootstrap_option_values.pantsrc_files
]
existing_rcfiles = list(filter(os.path.exists, rcfiles))
full_config_paths.extend(existing_rcfiles)
full_config_files_products = [filecontent_for(p) for p in full_config_paths]
post_bootstrap_config = Config.load_file_contents(
full_config_files_products, seed_values=bootstrap_option_values.as_dict(),
)
        env_tuples = tuple(sorted(env.items(), key=lambda x: x[0]))
return cls(
env_tuples=env_tuples, bootstrap_args=bargs, args=args, config=post_bootstrap_config
)
@memoized_property
def env(self) -> Dict[str, str]:
return dict(self.env_tuples)
@memoized_property
def bootstrap_options(self) -> Options:
"""The post-bootstra
|
p options, computed from the env, args, and fully discovered Config.
Re-computing options after Config has been fully expanded allows us to pick up bootstrap values
(such as backends) from a
|
MShel/PyChatBot
|
storage/Redis.py
|
Python
|
gpl-3.0
| 288
| 0
|
import redis
class RedisAdapter:
storage = None
host = None
port = None
def __init__(self, host, port):
if not self.storage:
self.storage = redis.StrictRedis(host=host, port=int(port), db=0)
def get_storage(self):
return self.storage
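# Usage sketch (hypothetical connection details):
#   adapter = RedisAdapter('localhost', 6379)
#   adapter.get_storage().set('greeting', 'hello')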
|
oVirt/ovirt-engine-sdk-tests
|
src/utils/dictutils.py
|
Python
|
apache-2.0
| 1,025
| 0.002927
|
#
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DictUtils(object):
'''
Provides dict services
'''
@staticmethod
    def exclude(dct, keys=[]):
        """
        Removes given items from the dict
        @param dct: the dict to look at
        @param keys: the keys of items to pop
        @return: updated dict
        """
        if dct:
            for key in keys:
                if key in dct:
                    dct.pop(key)
        return dct
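# Example:
#   DictUtils.exclude({'a': 1, 'b': 2}, keys=['b']) -> {'a': 1}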
|
B3AU/waveTree
|
sklearn/waveTree/setup.py
|
Python
|
bsd-3-clause
| 672
| 0.001488
|
import os
import numpy
from numpy.distutils.misc_util import Configuration
def configuration(parent_package="", top_path=None):
    config = Configuration("tree", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_tree",
sources=["_tree.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_subpackage("tests")
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
    setup(**configuration().todict())
|
JianpingZeng/xcc
|
xcc/java/utils/lit/lit/main.py
|
Python
|
bsd-3-clause
| 21,621
| 0.005041
|
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time
import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
self.progressBar = progressBar
self.completed = 0
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def update(self, test):
self.completed += 1
if self.opts.incremental:
update_incremental_cache(test)
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
shouldShow = test.result.code.isFailure or \
self.opts.showAllOutput or \
(not self.opts.quiet and not self.opts.succinct)
if not shouldShow:
return
if self.progressBar:
self.progressBar.clear()
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
# Show the test failure output, if requested.
if (test.result.code.isFailure and self.opts.showOutput) or \
self.opts.showAllOutput:
if test.result.code.isFailure:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Ensure the output is flushed.
sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
try:
import json
except ImportError:
lit_config.fatal('test output unsupported with Python 2.5')
# Construct the data we will write.
data = {}
# Encode the current lit version as a schema version.
data['__version__'] = lit.__versioninfo__
data['elapsed'] = testing_time
# FIXME: Record some information on the lit configuration used?
# FIXME: Record information from the individual test suites?
# Encode the tests.
data['tests'] = tests_data = []
for test in run.tests:
test_data = {
'name' : test.getFullName(),
'code' : test.result.code.name,
'output' : test.result.output,
'elapsed' : test.result.elapsed }
# Add test metrics, if present.
if test.result.metrics:
test_data['metrics'] = metrics_data = {}
for key, value in test.result.metrics.items():
metrics_data[key] = value.todata()
tests_data.append(test_data)
# Write the output.
f = open(output_path, 'w')
try:
json.dump(data, f, indent=2, sort_keys=True)
f.write('\n')
finally:
f.close()
def update_incremental_cache(test):
if not test.result.code.isFailure:
return
fname = test.getFilePath()
os.utime(fname, None)
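# Touching the failing test's file pairs with sort_by_incremental_cache
# (below), which sorts on negative mtime so the most recently failed tests
# run first on the next incremental invocation.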
def sort_by_incremental_cache(run):
def sortIndex(test):
fname = test.getFilePath()
try:
return -os.path.getmtime(fname)
except:
return 0
run.tests.sort(key = lambda t: sortIndex(t))
def main(builtinParameters = {}):
# Use processes by default on Unix platforms.
isWindows = platform.system() == 'Windows'
useProcessesIsDefault = not isWindows
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
parser.add_option("", "--version", dest="show_version",
help="Show version and exit",
action="store_true", default=False)
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_option("-D", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
group = OptionGroup(parser, "Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
group.add_option("-q", "--quiet", dest="quiet",
help="Suppress no error output",
action="store_true", default=False)
group.add_option("-s", "--succinct", dest="succinct"
|
,
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
help="Show test output for failures",
action="store_true", default=False)
group.add_option("-a", "--show-all", dest="showAllOutput",
help="Display all commandlines and output",
action="store_true", default=False)
group.add_option("-o", "--output", dest="output_path",
help="Write test results to the provided path",
action="store", type=str, metavar="PATH")
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
group.add_option("", "--show-unsupported", dest="show_unsupported",
help="Show unsupported tests",
action="store_true", default=False)
group.add_option("", "--show-xfail", dest="show_xfail",
help="Show tests that were expected to fail",
action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--path", dest="path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
group.add_option("", "--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
help=("Write XUnit-compatible XML test reports to the"
" specified file"), default=None)
group.add_option("", "--timeout", dest="max