code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
Module to handle sending error reports.
"""
import roam
import roam.config
import roam.utils
# Error reporting is optional: it needs the third-party ``raven`` (Sentry)
# client. When the import fails we disable reporting instead of crashing.
errorreporting = False
try:
    from raven import Client
    errorreporting = True
except ImportError:
    errorreporting = False
    roam.utils.warning("Error reporting disabled due to import error")
def can_send():
    """Tell whether the user has opted in to online error reporting.

    :return: True when the ``online_error_reporting`` setting is enabled,
        False otherwise (and by default when the setting is absent).
    """
    enabled = roam.config.settings.get("online_error_reporting", False)
    return enabled
def send_exception(exinfo):
    """Ship an exception report to the hosted Sentry endpoint.

    Nothing is sent unless the user has opted in (``can_send``) and the
    raven client imported successfully at module load.

    :param exinfo: exc_info-style description of the exception to report.
    """
    if not (can_send() and errorreporting):
        return
    client = Client(
        dsn='http://681cb73fc39247d0bfa03437a9b53b61:[email protected]/17',
        release=roam.__version__
    )
    roam.utils.info("Sending error report.")
    client.captureException(exinfo)
| skeenp/Roam | src/roam/errors.py | Python | gpl-2.0 | 854 |
# Descargar e instalar el paquete NumPy de http://sourceforge.net/projects/numpy/
# Descargar e instalar el paquete SciPy de http://sourceforge.net/projects/scipy/
import scipy
import scipy.stats.distributions as distributions
import math
import os
import time
import threading
class Execute(threading.Thread):
    """Thread that runs a shell command and records its wall-clock duration."""

    def __init__(self, comando):
        threading.Thread.__init__(self)
        self.comando = comando
        self.execution_time = 0

    def get_execution_time(self):
        """Return the elapsed time of the last run(), in milliseconds."""
        return self.execution_time

    def run(self):
        # Time the whole os.system() call, including process startup.
        start = time.time()
        os.system(self.comando)
        self.execution_time = (time.time() - start) * 1000
def confidence(samples, confidence_level):
    """Determine the confidence interval for a given set of samples.

    Returns ``(interval, mean, sdev, interval_percentage)`` where
    ``interval`` is the ``(low, high)`` Student-t confidence interval,
    ``sdev`` is the population standard deviation (ddof=0, matching the
    default of the removed ``scipy.std`` alias) and ``interval_percentage``
    is the interval size as a percentage of the mean.

    Fix: ``scipy.mean``/``scipy.std`` were top-level NumPy aliases removed
    in SciPy 1.0; the statistics are now computed directly.
    """
    n = len(samples)
    mean = math.fsum(samples) / n
    # Population standard deviation (ddof=0), as scipy.std used to return.
    sdev = math.sqrt(math.fsum((s - mean) ** 2 for s in samples) / n)
    df = n - 1
    # Two-sided Student-t critical value for the requested confidence level.
    t = distributions.t.ppf((1 + confidence_level) / 2.0, df)
    half_width = t * sdev / math.sqrt(n)
    interval = (interval_low, interval_high) = (mean - half_width, mean + half_width)
    interval_size = interval_high - interval_low
    interval_percentage = interval_size / mean * 100.0
    return (interval, mean, sdev, interval_percentage)
def startup(command, confidence_level, p_iterations, break_if_interval_percentage_is):
    """Measure startup time of the chat benchmark until the confidence
    interval is tight enough or p_iterations is reached.

    NOTE(review): the ``command`` parameter is unused - the java server and
    client commands are hardcoded below (the commented-out block shows the
    generic timing path). Presumably intentional for this benchmark; confirm.
    """
    execution_times = []
    for i in range(1, p_iterations+1):
        # Launch server (port 4500) and client concurrently, wait for both.
        t1 = Execute("java encryption.chat.Program 4500")
        t2 = Execute("java encryption.chat.Program 127.0.0.1 4500")
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        #print "Values %s %s %s" % (t1.execution_time, t2.execution_time, t3.execution_time)
        # Combined wall-clock time of both processes, in milliseconds.
        execution_time = t1.execution_time + t2.execution_time
        #before = time.time()
        #os.system(command)
        #after = time.time()
        #execution_time = (after-before)*1000
        print "Iteration %s. Times in millis %s." % (i, execution_time)
        execution_times.append(execution_time)
        # Stop early once the confidence interval is narrow enough.
        interval,mean,sdev,interval_percentage = confidence(execution_times, confidence_level)
        if interval_percentage <= break_if_interval_percentage_is:
            break
    return interval, mean, sdev, interval_percentage
def steady(command, confidence_level, p_iterations, break_if_interval_percentage_is, max_bench_invocations, k, CoV):
command += " " + str(max_bench_invocations) + " " + str(k) + " " + str(CoV)
execution_times = []
for i in range(1, p_iterations+1):
execution_time = os.system(command)
print "Iteration %s. Times in millis %s." % (i, execution_time)
execution_times.append(execution_time)
interval,mean,sdev,interval_percentage = confidence(execution_times, confidence_level)
if interval_percentage <= break_if_interval_percentage_is:
break
return interval, mean, sdev, interval_percentage
if __name__ == "__main__":
    # Parameters
    # Run both phases with 95% confidence, at most 30 iterations, stopping
    # once the interval is within 2% of the mean.
    print "------STARTUP------"
    interval, mean, sdev, interval_percentage = startup("java Benchmark", 0.95, 30, 2)
    print "Results Startup:"
    print "Interval:", interval
    print "Mean:", mean
    print "Standard deviation:", sdev
    print "Interval percentage:", interval_percentage
    print "------STEADY-STATE------"
    # Extra args: max_bench_invocations=30, k=10, CoV=0.02.
    interval, mean, sdev, interval_percentage = steady("java Benchmark", 0.95, 30, 2, 30, 10, 0.02)
    print "Results Steady-state:"
    print "Interval:", interval
    print "Mean:", mean
    print "Standard deviation:", sdev
    print "Interval percentage:", interval_percentage
| ComputationalReflection/weaveJ | Benchmarks/Real Applications/weaveJ/Communication_Encryption/bin/bench.py | Python | mit | 3,494 |
#!/usr/bin/python
#
# PyODConverter (Python OpenDocument Converter) v1.0.0 - 2008-05-05
#
# This script converts a document from one office format to another by
# connecting to an OpenOffice.org instance via Python-UNO bridge.
#
# Copyright (C) 2008 Mirko Nasato <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl-2.1.html
# - or any later version.
#
DEFAULT_OPENOFFICE_PORT = 8100
import uno
from os.path import abspath, isfile, splitext
from com.sun.star.beans import PropertyValue
from com.sun.star.task import ErrorCodeIOException
from com.sun.star.connection import NoConnectException
FAMILY_TEXT = "Text"
FAMILY_SPREADSHEET = "Spreadsheet"
FAMILY_PRESENTATION = "Presentation"
FAMILY_DRAWING = "Drawing"
FILTER_MAP = {
"pdf": {
FAMILY_TEXT: "writer_pdf_Export",
FAMILY_SPREADSHEET: "calc_pdf_Export",
FAMILY_PRESENTATION: "impress_pdf_Export",
FAMILY_DRAWING: "draw_pdf_Export"
},
"html": {
FAMILY_TEXT: "HTML (StarWriter)",
FAMILY_SPREADSHEET: "HTML (StarCalc)",
FAMILY_PRESENTATION: "impress_html_Export"
},
"odt": { FAMILY_TEXT: "writer8" },
"doc": { FAMILY_TEXT: "MS Word 97" },
"rtf": { FAMILY_TEXT: "Rich Text Format" },
"txt": { FAMILY_TEXT: "Text" },
"ods": { FAMILY_SPREADSHEET: "calc8" },
"xls": { FAMILY_SPREADSHEET: "MS Excel 97" },
"odp": { FAMILY_PRESENTATION: "impress8" },
"ppt": { FAMILY_PRESENTATION: "MS PowerPoint 97" },
"swf": { FAMILY_PRESENTATION: "impress_flash_Export" }
}
# see http://wiki.services.openoffice.org/wiki/Framework/Article/Filter
# for more available filters
class DocumentConversionException(Exception):
    """Raised when a document cannot be converted."""

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message
class DocumentConverter:
    """Converts documents through a running OpenOffice.org instance reached
    over the UNO bridge. This module targets Python 2 (py2 raise syntax)."""

    def __init__(self, port=DEFAULT_OPENOFFICE_PORT):
        # Resolve the remote component context of a headless OpenOffice
        # listening on localhost:<port> and obtain its Desktop service.
        localContext = uno.getComponentContext()
        resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext)
        try:
            context = resolver.resolve("uno:socket,host=localhost,port=%s;urp;StarOffice.ComponentContext" % port)
        except NoConnectException:
            raise DocumentConversionException, "failed to connect to OpenOffice.org on port %s" % port
        self.desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)

    def convert(self, inputFile, outputFile):
        """Load inputFile (hidden), refresh it, and store it as outputFile.

        The export filter is chosen from outputFile's extension and the
        loaded document's family. The document is always closed, even when
        storing fails.
        """
        inputUrl = self._toFileUrl(inputFile)
        outputUrl = self._toFileUrl(outputFile)
        document = self.desktop.loadComponentFromURL(inputUrl, "_blank", 0, self._toProperties(Hidden=True))
        try:
            # Update indexes/fields; some document types lack refresh().
            document.refresh()
        except AttributeError:
            pass
        outputExt = self._getFileExt(outputFile)
        filterName = self._filterName(document, outputExt)
        try:
            document.storeToURL(outputUrl, self._toProperties(FilterName=filterName))
        finally:
            document.close(True)

    def _filterName(self, document, outputExt):
        """Map (document family, output extension) to an OOo filter name.

        Raises DocumentConversionException for unknown extensions or
        unsupported family/extension combinations.
        """
        family = self._detectFamily(document)
        try:
            filterByFamily = FILTER_MAP[outputExt]
        except KeyError:
            raise DocumentConversionException, "unknown output format: '%s'" % outputExt
        try:
            return filterByFamily[family]
        except KeyError:
            raise DocumentConversionException, "unsupported conversion: from '%s' to '%s'" % (family, outputExt)

    def _detectFamily(self, document):
        """Classify the loaded document as text/spreadsheet/presentation/drawing."""
        if document.supportsService("com.sun.star.text.GenericTextDocument"):
            # NOTE: a GenericTextDocument is either a TextDocument, a WebDocument, or a GlobalDocument
            # but this further distinction doesn't seem to matter for conversions
            return FAMILY_TEXT
        if document.supportsService("com.sun.star.sheet.SpreadsheetDocument"):
            return FAMILY_SPREADSHEET
        if document.supportsService("com.sun.star.presentation.PresentationDocument"):
            return FAMILY_PRESENTATION
        if document.supportsService("com.sun.star.drawing.DrawingDocument"):
            return FAMILY_DRAWING
        raise DocumentConversionException, "unknown document family: %s" % document

    def _getFileExt(self, path):
        """Return the lowercase extension of path without the leading dot.

        NOTE(review): splitext() returns '' (never None) for extension-less
        paths, so the guard below is always true and '' is returned then.
        """
        ext = splitext(path)[1]
        if ext is not None:
            return ext[1:].lower()

    def _toFileUrl(self, path):
        # Absolute file:// URL, as required by loadComponentFromURL.
        return uno.systemPathToFileUrl(abspath(path))

    def _toProperties(self, **args):
        """Convert keyword arguments into a tuple of UNO PropertyValue."""
        props = []
        for key in args:
            prop = PropertyValue()
            prop.Name = key
            prop.Value = args[key]
            props.append(prop)
        return tuple(props)
if __name__ == "__main__":
    # Command-line entry point: python pyodconverter.py <input> <output>.
    # Exit codes: 255 usage error, 1 conversion/IO failure (py2 syntax).
    from sys import argv, exit
    if len(argv) < 3:
        print "USAGE: python %s <input-file> <output-file>" % argv[0]
        exit(255)
    if not isfile(argv[1]):
        print "no such input file: %s" % argv[1]
        exit(1)
    try:
        converter = DocumentConverter()
        converter.convert(argv[1], argv[2])
    except DocumentConversionException, exception:
        print "ERROR!" + str(exception)
        exit(1)
    except ErrorCodeIOException, exception:
        print "ERROR! ErrorCodeIOException %d" % exception.ErrCode
        exit(1)
| itkin/proselytism | lib/proselytism/converters/open_office/odconverters/pyodconverter.py | Python | mit | 5,347 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
if __name__ == '__main__':
sys.path.append('../../')
import json
import logging
import sqlite3
from gchelpers.ip.GeoDbManager import GeoDbManager
from gchelpers.dt import DateTimeHandler
GEO_MANAGER = GeoDbManager()
def splitpath(path, n):
    """Return the last ``n + 1`` components of *path*, joined with os.sep.

    *path* may use forward or backward slashes. If it has fewer than
    ``n + 1`` components, the whole path is rejoined instead.
    """
    parts = re.split('[\\\/]', path)
    start = -(n + 1)
    if abs(start) > len(parts):
        # Not enough components: rejoin the entire path.
        return os.path.join(parts[0], *parts[1:])
    return os.path.join(parts[start], *parts[start + 1:])
def RegisterSQLiteFunctions(dbh):
    """Register this module's custom SQL functions on a sqlite3 connection.

    :param dbh: an open sqlite3 Connection.
    """
    # Surface tracebacks from the Python callbacks instead of hiding them.
    sqlite3.enable_callback_tracebacks(True)
    dbh.create_function("REGEXP", 2, Regexp)
    dbh.create_function('Basename',1,Basename)
    dbh.create_function('BasenameN',2,BasenameN)
    dbh.create_function("GetRegMatch", 3, GetRegMatch)
    dbh.create_function("GetRegMatchArray", 3, GetRegMatchArray)
    dbh.create_function("RemoveNewLines", 1, RemoveNewLines)
    dbh.create_function("DtFormat", 2, DtFormat)
    dbh.create_function("DtFormatTz", 4, DtFormatTz)
    if GEO_MANAGER.DB_ATTACHED:
        # IP lookup is only available when a GeoIP database is attached.
        dbh.create_function("GetIpInfo", 1, GetIpInfo)
def DtFormatTz(dtstringin, newformat, current_tz_str, new_tz_str):
    """Reformat a datetime string, converting it between timezones.

    Returns None for empty/None input. Parsing, conversion and formatting
    are delegated to the project's DateTimeHandler helpers.
    """
    if not dtstringin:
        return None
    parsed = DateTimeHandler.DatetimeFromString(dtstringin)
    converted = DateTimeHandler.ConvertDatetimeTz(parsed, current_tz_str, new_tz_str)
    return DateTimeHandler.StringFromDatetime(converted, newformat)
def DtFormat(dtstringin, newformat):
    """Reformat a datetime string using *newformat*.

    Returns None for empty/None input. Parsing and formatting are delegated
    to the project's DateTimeHandler helpers.
    """
    if not dtstringin:
        return None
    parsed = DateTimeHandler.DatetimeFromString(dtstringin)
    return DateTimeHandler.StringFromDatetime(parsed, newformat)
def Regexp(pattern, input):
    """SQLite REGEXP implementation: True when *pattern* matches *input*.

    NULL input and invalid patterns both yield False (errors are printed).
    """
    if input is None:
        return False
    try:
        return re.search(pattern, input) is not None
    except Exception as error:
        print(u'ERROR: {}'.format(str(error)))
        return False
def Basename(fullname):
    """Get the base name of a fullname string.

    Returns '' for falsy input; on any basename failure the input is
    returned unchanged.

    Bug fix: the except path referenced an undefined name ``filename``
    (NameError); it now falls back to ``fullname``. The bare ``except:``
    was also narrowed to ``Exception``.
    """
    value = ''
    if fullname:
        try:
            value = os.path.basename(fullname)
        except Exception:
            value = fullname
    return value
def BasenameN(fullname, n):
    """Return the trailing ``n + 1`` components of *fullname* (see splitpath).

    NULL input passes through as None.
    """
    if fullname is None:
        return None
    return splitpath(fullname, n)
def GetIpInfo(ip_address):
    """Look up geo information for *ip_address* and return it JSON-encoded.

    NULL input passes through as None.
    """
    if ip_address is None:
        return None
    return json.dumps(GEO_MANAGER.GetIpInfo(ip_address))
def RemoveNewLines(input):
    """Strip every LF and CR character from *input* (None passes through)."""
    if input is None:
        return None
    return input.replace("\n", "").replace("\r", "")
def GetRegMatch(input, group, pattern):
    """Return capture *group* of the first *pattern* match in *input*.

    Returns None when *input* is NULL or the pattern does not match.
    """
    if input is None:
        return None
    match = re.search(pattern, input)
    if match:
        return match.group(group)
    return None
def GetRegMatchArray(input, group, pattern):
    """Return a JSON array of capture *group* from every *pattern* match.

    NULL input yields the JSON string "[]".

    Fix: removed the redundant ``if len(hits) > 0`` branch - both paths
    returned the identical ``json.dumps(hits)`` value.
    """
    hits = []
    if input is not None:
        hits = [m.group(group) for m in re.finditer(pattern, input)]
    return json.dumps(hits)
def test1():
    """Exercise splitpath with a deep Windows-style forensic image path."""
    n = 2
    fullname = "Partition 1\\TEST_P1 [NTFS]\\[root]\\testfolder002\\testfolder001\\testfile088.png"
    splitname = splitpath(fullname,n)
    print splitname

def test2():
    """Exercise splitpath when the path has fewer parts than requested."""
    n = 2
    fullname = "testfolder001\\testfile088.png"
    splitname = splitpath(fullname,n)
    print splitname
if __name__ == '__main__':
test1()
test2() | devgc/GcHelpers | gchelpers/db/SqliteCustomFunctions.py | Python | apache-2.0 | 4,308 |
"""cubic_spline.py
Implementations of the natural (and maybe clamped) cubic spline algorithms
"""
from typing import List, Tuple
def natural_cubic_spline(n: int, x: List[float], a: List[float]) -> List[Tuple[float, float, float, float]]:
    """Compute natural cubic spline coefficients (Burden & Faires alg. 3.4).

    :param n: number of intervals, ``len(x) - 1``
    :param x: knot abscissae (strictly increasing)
    :param a: knot ordinates
    :return: one ``(a, b, c, d)`` coefficient tuple per interval, for
        ``S_j(x) = a + b*h + c*h**2 + d*h**3`` with ``h = x - x[j]``.
    """
    b = [0.0] * (n + 1)
    c = [0.0] * (n + 1)
    d = [0.0] * (n + 1)
    u = [0.0] * n
    l = [0.0] * (n + 1)
    z = [0.0] * (n + 1)
    # step 1: interval widths
    h = [x[i + 1] - x[i] for i in range(n)]

    def calculate_matrix_entry(i):
        # Right-hand side of the tridiagonal system for the c coefficients.
        return (3.0 / h[i]) * (a[i+1] - a[i]) - (3.0/h[i-1]) * (a[i] - a[i-1])
    # step 2
    alpha = [calculate_matrix_entry(i) for i in range(1, n)]
    # Forward sweep; natural boundary conditions force c[0] = c[n] = 0.
    l[0] = 1
    u[0] = 0
    z[0] = 0
    for i in range(1, n):
        l[i] = 2 * (x[i+1] - x[i-1]) - (h[i-1] * u[i-1])
        u[i] = h[i]/l[i]
        z[i] = (alpha[i-1] - h[i-1] * z[i-1]) / l[i]
    l[n] = 1
    z[n] = 0
    c[n] = 0
    # Back substitution for c, then b and d per interval.
    for j in range(n-1, -1, -1):
        c[j] = z[j] - u[j]*c[j+1]
        b[j] = (a[j+1] - a[j])/h[j] - h[j]*(c[j+1] + 2*c[j])/3
        d[j] = (c[j+1] - c[j]) / (3*h[j])
    # Post-process coefficient lists through the project helper; semantics
    # are defined in the utility module (presumably rounding/cleanup).
    from utility import cleanup_list
    a = cleanup_list(a)
    b = cleanup_list(b)
    c = cleanup_list(c)
    d = cleanup_list(d)
    return list(zip(a[:-1], b[:-1], c[:-1], d[:-1]))
def clamped_cubic_spline(n: int, x: List[float], a: List[float], primes: Tuple[float, float]):
    """Yield clamped cubic spline coefficients (Burden & Faires alg. 3.5).

    :param n: number of intervals, ``len(x) - 1``
    :param x: knot abscissae (strictly increasing)
    :param a: knot ordinates
    :param primes: ``(f'(x[0]), f'(x[n]))`` endpoint derivatives (the clamp)
    :return: generator of ``(a, b, c, d)`` tuples, one per interval.
    """
    b = [0.0] * (n + 1)
    c = [0.0] * (n + 1)
    d = [0.0] * (n + 1)
    u = [0.0] * n
    l = [0.0] * (n + 1)
    z = [0.0] * (n + 1)
    alpha = [0.0] * (n + 1)
    fpo, fpn = primes
    h = [x[i+1] - x[i] for i in range(n)]
    # Boundary right-hand sides encode the clamped end derivatives.
    alpha[0] = 3*(a[1] - a[0]) / h[0] - 3 * fpo
    alpha[n] = 3 * fpn - 3 * (a[-1] - a[-2])/h[-1]

    def calculate_alpha_entry(i: int):
        # Interior right-hand side of the tridiagonal system.
        alpha[i] = (3 / h[i]) * (a[i+1] - a[i]) - (3 / h[i-1]) * (a[i] - a[i-1])
    for i in range(1, n):
        calculate_alpha_entry(i)
    # Forward sweep of the tridiagonal solve.
    l[0] = 2 * h[0]
    u[0] = 0.5
    z[0] = alpha[0] / l[0]
    for i in range(1, n):
        l[i] = 2 * (x[i+1] - x[i-1]) - h[i-1] * u[i-1]
        u[i] = h[i]/l[i]
        z[i] = (alpha[i] - h[i-1] * z[i-1]) / l[i]
    l[n] = h[n-1] * (2 - u[n-1])
    z[n] = (alpha[n] - h[n-1] * z[n-1]) / l[n]
    c[n] = z[n]
    # Back substitution for c, then b and d per interval.
    for j in range(n - 1, -1, -1):
        c[j] = z[j] - u[j]*c[j+1]
        b[j] = (a[j+1] - a[j]) / h[j] - h[j] * (c[j+1] + 2 * c[j]) / 3
        d[j] = (c[j+1] - c[j]) / (3 * h[j])
    # Post-process coefficient lists through the project helper; semantics
    # are defined in the utility module (presumably rounding/cleanup).
    from utility import cleanup_list
    a = cleanup_list(a)
    b = cleanup_list(b)
    c = cleanup_list(c)
    d = cleanup_list(d)
    for j in range(n):
        yield (a[j], b[j], c[j], d[j])
def spline_function(a, b, c, d, x0, x):
    """Evaluate the cubic ``a + b*h + c*h**2 + d*h**3`` at x, with h = x - x0."""
    dx = x - x0
    return a + b * dx + c * dx ** 2 + d * dx ** 3
def evaluate(x_data, y_data, splines, panels=100):
    """Sample a piecewise cubic spline for plotting.

    :param x_data: original knot abscissae
    :param y_data: original knot ordinates
    :param splines: ``(a, b, c, d)`` tuples, one per interval
    :param panels: samples per interval
    :return: ``(curve_x, curve_y)`` lists including the original knots.

    Bug fix: the original inner loop never used its index and appended the
    single point ``x0 + diff`` on every pass, sampling one location
    ``panels`` times; it now steps across the subinterval.
    """
    curve_x = []
    curve_y = []
    for idx, spline in enumerate(splines):
        x0, x1 = x_data[idx], x_data[idx + 1]
        diff = round((x1 - x0) / panels, 5)
        curve_x.append(x0)
        curve_y.append(y_data[idx])
        for i in range(1, panels + 1):
            xi = x0 + diff * i
            curve_x.append(round(xi, 5))
            curve_y.append(round(spline_function(*spline, x0, xi), 5))
    curve_x.append(x_data[-1])
    curve_y.append(y_data[-1])
    return curve_x, curve_y
if __name__ == '__main__':
    # Demo: fit a natural cubic spline to sample data and plot the curve.
    import matplotlib.pyplot as plt
    x_points = [
        0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
        7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3,
    ]
    y_points = [
        1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1, 2.25,
        2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 0.25,
    ]
    n = len(x_points) - 1
    splines = natural_cubic_spline(n, x_points, y_points)
    graph_x = []
    graph = []
    subinterval = 400
    for idx, spline in enumerate(splines):
        x0, x1 = x_points[idx], x_points[idx + 1]
        diff = round((x1-x0) / subinterval, 5)
        graph_x.append(x0)
        graph.append(y_points[idx])
        # NOTE(review): x0 + diff is appended on every pass (i is unused),
        # so each subinterval samples one point repeatedly - presumably
        # x0 + diff*(i+1) was intended; confirm before relying on the plot.
        for i in range(subinterval):
            graph_x.append(round(x0 + diff, 5))
            result = spline_function(*spline, x0, x0 + diff)
            graph.append(round(result, 5))
    graph_x.append(x_points[-1])
    graph.append(y_points[-1])
    # plt.plot(x_points, y_points, 'bo')
    plt.plot(graph_x, graph, 'b-')
    # plt.axis([0, 14, -6, 4])
    plt.show()
| Jokiva/Computational-Physics | lecture 8/cubic_spline.py | Python | gpl-3.0 | 4,623 |
# -*- coding: utf-8 -*-
# Module: default
# Author: asciidisco
# Created on: 24.07.2017
# License: MIT https://goo.gl/5bMj3H
"""Setup"""
import os
import re
import sys
from setuptools import find_packages, setup
REQUIRED_PYTHON_VERSION = (2, 7)
PACKAGES = find_packages()
INSTALL_DEPENDENCIES = []
SETUP_DEPENDENCIES = []
TEST_DEPENDENCIES = [
'nose',
'Kodistubs',
'httpretty',
'mock',
]
EXTRA_DEPENDENCIES = {
'dev': [
'nose',
'flake8',
'codeclimate-test-reporter',
'pylint',
'mccabe',
'pycodestyle',
'pyflakes',
'Kodistubs',
'httpretty',
'mock',
'requests',
'pyDes',
'radon',
'Sphinx',
'sphinx_rtd_theme',
'm2r',
'kodi-release-helper',
'dennis',
'blessings',
'demjson',
'restructuredtext_lint',
'yamllint',
]
}
def get_addon_data():
    """Load the Kodi plugin metadata from the adjacent addon.xml.

    :return: dict with keys id, author, name, version, desc, email, source.
    :raises AttributeError: when a required attribute is missing from the XML.

    Fix: the file is opened in binary mode, so on Python 3 ``read()`` yields
    bytes and the str patterns below would raise TypeError; decode once so
    the function works on both Python 2 and 3.
    """
    root_dir = os.path.dirname(os.path.abspath(__file__))
    pathname = os.path.join(root_dir, 'addon.xml')
    with open(pathname, 'rb') as addon_xml:
        addon_xml_contents = addon_xml.read().decode('utf-8')

    def _first_group(pattern):
        # First capture group of pattern within the addon.xml contents.
        return re.search(pattern, addon_xml_contents).group(1)

    return {
        'id': _first_group(r'(?<!xml )id="(.+?)"'),
        'author': _first_group(r'(?<!xml )provider-name="(.+?)"'),
        'name': _first_group(r'(?<!xml )name="(.+?)"'),
        'version': _first_group(r'(?<!xml )version="(.+?)"'),
        'desc': _first_group(r'(?<!xml )description lang="en_GB">(.+?)<'),
        'email': _first_group(r'(?<!xml )email>(.+?)<'),
        'source': _first_group(r'(?<!xml )source>(.+?)<'),
    }
# Refuse to run on interpreters older than the minimum supported version.
if sys.version_info < REQUIRED_PYTHON_VERSION:
    sys.exit('Python >= 2.7 is required. Your version:\n' + sys.version)


if __name__ == '__main__':
    # All package metadata is sourced from addon.xml so it is defined once.
    ADDON_DATA = get_addon_data()
    setup(
        name=ADDON_DATA.get('name'),
        version=ADDON_DATA.get('version'),
        author=ADDON_DATA.get('author'),
        author_email=ADDON_DATA.get('email'),
        description=ADDON_DATA.get('desc'),
        packages=PACKAGES,
        include_package_data=True,
        install_requires=INSTALL_DEPENDENCIES,
        setup_requires=SETUP_DEPENDENCIES,
        tests_require=TEST_DEPENDENCIES,
        extras_require=EXTRA_DEPENDENCIES,
        test_suite='nose.collector',
    )
| asciidisco/plugin.video.netflix | setup.py | Python | mit | 2,870 |
import sys
import report
import reportclient
from abrtcli.i18n import _
from abrtcli.match import match_get_problems
from . import Command
class Report(Command):
    """abrt-cli 'report' subcommand: report matched problems via libreport."""
    aliases = ['e']
    name = 'report'
    description = 'report problem'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared filter/match arguments plus report-specific flags.
        self.add_filter_arguments()
        self.add_match_argument()
        self.parser.add_argument('-d', '--delete', action='store_true',
                                 help=_('remove after reporting'))
        self.parser.add_argument('--unsafe', action='store_true',
                                 help=_('ignore unreportable state'))

    def run(self, arguments):
        """Report every problem matching the given patterns/filters.

        Exits with status 1 on the first unreportable problem unless
        --unsafe was given.
        """
        problems = match_get_problems(arguments.patterns,
                                      authenticate=arguments.authenticate,
                                      executables=arguments.executables,
                                      components=arguments.components,
                                      since=arguments.since,
                                      until=arguments.until,
                                      n_latest=arguments.n_latest,
                                      not_reported=arguments.not_reported)
        for problem in problems:
            # Refuse problems flagged as unreportable unless overridden.
            if problem.not_reportable and not arguments.unsafe:
                if reportclient.verbose > 0:
                    print(problem.not_reportable_reason)
                print(_('Problem \'{0}\' cannot be reported').format(problem.short_id))
                sys.exit(1)
            flags = report.LIBREPORT_WAIT | report.LIBREPORT_RUN_CLI
            if arguments.unsafe:
                flags |= report.LIBREPORT_IGNORE_NOT_REPORTABLE
            # Take ownership of the dump directory before reporting.
            problem.chown()
            print(_("Reporting problem %s\n" % (problem.short_id)))
            report.report_problem_in_dir(problem.path, flags)
            if arguments.delete:
                problem.delete(problem.path)
| martinky82/abrt | src/cli/abrtcli/cli/report.py | Python | gpl-2.0 | 1,967 |
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from service.market import Market
from gui.builtinMarketBrowser.searchBox import SearchBox
from gui.builtinMarketBrowser.itemView import ItemView
from gui.builtinMarketBrowser.metaButton import MetaButton
from gui.builtinMarketBrowser.marketTree import MarketTree
from logbook import Logger
pyfalog = Logger(__name__)
class MarketBrowser(wx.Panel):
    """Market browser panel: search box, market tree / item list splitter,
    and a row of meta-group toggle buttons."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        pyfalog.debug("Initialize marketBrowser")
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(vbox)
        # Add a search box on top
        self.search = SearchBox(self)
        vbox.Add(self.search, 0, wx.EXPAND)
        self.splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
        vbox.Add(self.splitter, 1, wx.EXPAND)
        # Grab market service instance and create child objects
        self.sMkt = Market.getInstance()
        self.searchMode = False
        self.marketView = MarketTree(self.splitter, self)
        self.itemView = ItemView(self.splitter, self)
        self.splitter.SplitHorizontally(self.marketView, self.itemView)
        self.splitter.SetMinimumPaneSize(250)
        # Setup our buttons for metaGroup selection
        # Same fix as for search box on macs,
        # need some pixels of extra space or everything clips and is ugly
        p = wx.Panel(self)
        box = wx.BoxSizer(wx.HORIZONTAL)
        p.SetSizer(box)
        vbox.Add(p, 0, wx.EXPAND)
        self.metaButtons = []
        btn = None
        # One toggle button per meta group; the button keeps its group name
        # in btn.metaName and is also exposed as an attribute on self.
        for name in list(self.sMkt.META_MAP.keys()):
            btn = MetaButton(p, wx.ID_ANY, name.capitalize(), style=wx.BU_EXACTFIT)
            setattr(self, name, btn)
            box.Add(btn, 1, wx.ALIGN_CENTER)
            btn.Bind(wx.EVT_TOGGLEBUTTON, self.toggleMetaButton)
            btn.metaName = name
            self.metaButtons.append(btn)
        # Make itemview to set toggles according to list contents
        self.itemView.setToggles()
        # NOTE(review): btn.GetSize() assumes META_MAP is non-empty; btn
        # would be None otherwise - confirm that invariant holds.
        p.SetMinSize((wx.SIZE_AUTO_WIDTH, btn.GetSize()[1] + 5))

    def toggleMetaButton(self, event):
        """Process clicks on toggle buttons"""
        mstate = wx.GetMouseState()
        clickedBtn = event.EventObject
        # Cmd/Ctrl-click toggles one button within the multi-selection;
        # a plain click makes the clicked button the only active one.
        if mstate.cmdDown:
            activeBtns = [btn for btn in self.metaButtons if btn.GetValue()]
            if activeBtns:
                clickedBtn.setUserSelection(clickedBtn.GetValue())
                self.itemView.filterItemStore()
            else:
                # Do 'nothing' if we're trying to turn last active button off
                # Keep button in the same state
                clickedBtn.setUserSelection(True)
        else:
            for btn in self.metaButtons:
                btn.setUserSelection(btn == clickedBtn)
            self.itemView.filterItemStore()

    def jump(self, item):
        """Jump the market tree to the given item."""
        self.marketView.jump(item)
| bsmr-eve/Pyfa | gui/marketBrowser.py | Python | gpl-3.0 | 3,729 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, replicas=None):
        """
        ExtensionsV1beta1ScaleSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'replicas': 'int'
        }

        self.attribute_map = {
            'replicas': 'replicas'
        }

        self._replicas = replicas

    @property
    def replicas(self):
        """
        Gets the replicas of this ExtensionsV1beta1ScaleSpec.
        desired number of instances for the scaled object.

        :return: The replicas of this ExtensionsV1beta1ScaleSpec.
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this ExtensionsV1beta1ScaleSpec.
        desired number of instances for the scaled object.

        :param replicas: The replicas of this ExtensionsV1beta1ScaleSpec.
        :type: int
        """
        self._replicas = replicas

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3 and removes this
        # method's hard dependency on six.iteritems.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: comparing against a non-model object (e.g. an int) used
        # to raise AttributeError on ``other.__dict__``; unrelated types
        # now simply compare unequal.
        if not isinstance(other, ExtensionsV1beta1ScaleSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| skuda/client-python | kubernetes/client/models/extensions_v1beta1_scale_spec.py | Python | apache-2.0 | 3,076 |
def count_factor(n, factor=0):
    """Return the number of positive divisors of *n*, added to *factor*.

    Counts divisor pairs (i, n // i) up to sqrt(n).

    Bug fix: a perfect-square root was counted twice (e.g. n=16 returned 6
    instead of 5); it now contributes a single divisor.
    """
    for i in range(1, int(n**0.5)+1):
        if n % i == 0:
            factor += 1 if i * i == n else 2
    return factor
def nth_triangular_number(n):
    """Return the n-th triangular number, n*(n+1)/2.

    Fix: the original used float division (``int(n + (n*(n-1))/2)``), which
    loses precision for large n; pure integer arithmetic is exact.
    """
    return n * (n + 1) // 2
def find_triangular_number_over(k, n=0):
    """Return the first triangular number with more than *k* divisors,
    searching upward from triangular index *n*."""
    while True:
        candidate = nth_triangular_number(n)
        if count_factor(candidate) > k:
            return candidate
        n += 1
def main():
    """Print the solution to Project Euler problem 12 (first triangular
    number with more than 500 divisors)."""
    print(find_triangular_number_over(500))

if __name__ == "__main__":
    main()
| higee/project_euler | 11-20/12.py | Python | mit | 439 |
from pyfmodex.enums import SOUNDGROUP_BEHAVIOR
def test_max_audible(sound_group):
    """max_audible defaults to -1 (no limit) and round-trips a new value."""
    assert sound_group.max_audible == -1
    sound_group.max_audible = 5
    assert sound_group.max_audible == 5

def test_max_audible_behavior(sound_group):
    """max_audible_behavior defaults to FAIL and round-trips MUTE."""
    new_behavior = SOUNDGROUP_BEHAVIOR.MUTE
    assert sound_group.max_audible_behavior is SOUNDGROUP_BEHAVIOR.FAIL
    sound_group.max_audible_behavior = new_behavior
    assert sound_group.max_audible_behavior is new_behavior

def test_mute_fade_speed(sound_group):
    """mute_fade_speed defaults to 0.0 and round-trips a new value."""
    assert sound_group.mute_fade_speed == 0.0
    sound_group.mute_fade_speed = 0.5
    assert sound_group.mute_fade_speed == 0.5

def test_name(sound_group):
    """The fixture group carries the name it was created with (bytes)."""
    assert sound_group.name == b"test group"

def test_num_playing(sound_group):
    """A fresh group has no playing sounds."""
    assert sound_group.num_playing == 0

def test_num_sounds(sound_group):
    """A fresh group contains no sounds."""
    assert sound_group.num_sounds == 0

def test_get_sound(sound, sound_group):
    """A sound assigned to the group is retrievable by index."""
    sound.sound_group = sound_group
    assert sound_group.get_sound(0) == sound

def test_system_object(initialized_system, sound_group):
    """The group references the system it was created from."""
    assert sound_group.system_object == initialized_system

def test_volume(sound_group):
    """volume defaults to 1.0 and round-trips a new value."""
    assert sound_group.volume == 1.0
    sound_group.volume = 0.5
    assert sound_group.volume == 0.5

def test_release(sound_group):
    """Releasing the group must not raise."""
    sound_group.release()
def test_stop(sound_group):
sound_group.stop() | tyrylu/pyfmodex | tests/test_sound_group.py | Python | mit | 1,345 |
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-12 23:40:36
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-02-19 12:44:46
from __future__ import print_function, division, absolute_import
from marvin.tests.api import MarvinAPITester
import unittest
class TestCubeView(MarvinAPITester):
    """API tests for the CubeView index route."""

    def test_get_cube_success(self):
        # GET the index endpoint and expect the static greeting payload.
        url = self.get_url('CubeView:index')
        self._load_page('get', url, params={'release': 'MPL-5'})
        data = 'this is a cube'
        self._assert_success(data)
class TestGetCube(TestCubeView):
    """API tests for the getCube route: parameter validation and success."""

    def test_get_plateifu_no_release(self):
        # GET without the required release parameter must fail validation.
        errmsg = 'Missing data for required field.'
        url = self.get_url('getCube')
        self._route_no_valid_params(url, 'release', errmsg=errmsg)

    def test_post_plateifu_no_release(self):
        # POST without the required release parameter must fail validation.
        errmsg = 'Missing data for required field.'
        url = self.get_url('getCube')
        self._route_no_valid_params(url, 'release', 'post', errmsg=errmsg)

    def test_post_plateifu_bad_name(self):
        # A name that does not match the plateifu/mangaid pattern is rejected.
        errmsg = 'String does not match expected pattern.'
        url = self.get_url('getCube').format(name='badname')
        self._route_no_valid_params(url, 'name', 'post', params={'release': 'MPL-5'}, errmsg=errmsg)

    def test_post_plateifu_short_name(self):
        # Names shorter than 4 characters are rejected.
        errmsg = 'Shorter than minimum length 4.'
        url = self.get_url('getCube').format(name='84')
        self._route_no_valid_params(url, 'name', 'post', params={'release': 'MPL-5'}, errmsg=errmsg)

    def _plateifu_success(self, reqtype):
        # Shared success path: fetch the tester's cube and verify metadata.
        url = self.get_url('getCube').format(name=self.plateifu)
        data = {'plateifu': self.plateifu, 'mangaid': self.mangaid, 'ra': self.ra, 'dec': self.dec,
                'redshift': self.redshift}
        self._load_page(reqtype, url, params={'release': 'MPL-5'})
        self._assert_success(data)

    def test_get_plateifu_success(self):
        self._plateifu_success('get')

    def test_post_plateifu_success(self):
        self._plateifu_success('post')
if __name__ == '__main__':
verbosity = 2
unittest.main(verbosity=verbosity)
| bretthandrews/marvin | python/marvin/tests/api/test_cube.py | Python | bsd-3-clause | 2,178 |
"""
Sphinx extension to create links to Contour documents (typically requirements
items).
:contour:`1412342`
links to https://www.contourhosted.com/perspective.req?projectId=2271&docId=1412342
when CONTOUR_PROJECT_ID = 2271 in Sphinx's conf.py.
"""
import urllib
from docutils import nodes, utils
CONTOUR_DOC_FORMAT = 'https://www.contourhosted.com/perspective.req?projectId={project_id}&docId={doc_id}'
LINK_TEXT_FORMAT = 'Contour Item {doc_id}'
def contour_doc(name, rawtext, text, lineno, inliner, options=None,
                content=None):
    """Docutils role: turn :contour:`docId` into a link to the hosted
    Contour document, using the CONTOUR_PROJECT_ID Sphinx config value.

    Fix: the mutable default arguments ``options={}``/``content=[]`` were
    replaced with None sentinels (shared-state hazard); callers see the
    same behavior.

    NOTE(review): ``urllib.quote`` is Python 2 only (Python 3 moved it to
    ``urllib.parse.quote``) - this module appears to target Python 2.
    """
    options = options if options is not None else {}
    content = content if content is not None else []
    env = inliner.document.settings.env
    project_id = env.config.CONTOUR_PROJECT_ID
    if not project_id:
        raise Exception(
            'CONTOUR_PROJECT_ID is not set in conf.py. Set it to '
            'the numeric projectId as seen in email links.'
        )
    ref = CONTOUR_DOC_FORMAT.format(
        project_id = project_id,
        doc_id = urllib.quote(text, safe=''),
    )
    link_text = LINK_TEXT_FORMAT.format(doc_id=utils.unescape(text))
    node = nodes.reference(rawtext, link_text, refuri=ref, **options)
    return [node],[]
# Register the :contour: directive
def setup(app):
app.add_config_value('CONTOUR_PROJECT_ID',
None,
'env')
app.add_role('contour', contour_doc)
| sprin/sphinx-contour-docs | contour_docs/contour_docs.py | Python | mit | 1,340 |
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module.
Example usage:
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, cmd_on_exit="look", persistent=True)
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function definitions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The `persistent` keyword will make the menu survive a server reboot.
It is `False` by default. Note that if using persistent mode, every
node and callback in the menu must be possible to be *pickled*, this
excludes e.g. callables that are class methods or functions defined
dynamically or as part of another function. In non-persistent mode
no such restrictions exist.
The menu is defined in a module (this can be the same module as the
command definition too) with function definitions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
both one or two arguments interchangeably.
The menu tree itself is available on the caller as
`caller.ndb._menutree`. This makes it a convenient place to store
temporary state variables between nodes, since this NAttribute is
deleted when the menu is exited.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the
second element in the tuple is a help text to display at this
node when the user enters the menu help command there.
options (tuple, dict or None): (
{'key': name, # can also be a list of aliases. A special key is
# "_default", which marks this option as the default
# fallback when no other option matches the user input.
'desc': description, # optional description
'goto': nodekey, # node to go to when chosen
'exec': nodekey}, # node or callback to trigger as callback when chosen.
# If a node key is given, the node will be executed once
# but its return values are ignored. If a callable is
# given, it must accept one or two args, like any node.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). It needs not have return values.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `Menu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called before moving on to node2. Using `help` will
show the help text, otherwise a list of available commands while in menu
mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from __future__ import print_function
from builtins import object, range
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import logger
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import ANSIString, strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# Display width for menu nodes.
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH

# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT

# Return messages

# i18n
from django.utils.translation import ugettext as _

# User-facing error/help strings; wrapped in ugettext so they can be translated.
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")

# Shown to the user when saving a persistent menu fails; the menu then
# silently falls back to non-persistent mode.
_ERROR_PERSISTENT_SAVING = \
"""
{error}
|rThe menu state could not be saved for persistent mode. Switching
to non-persistent mode (which means the menu session won't survive
an eventual server reload).|n
"""

# Logged server-side alongside the traceback on a persistent-save failure.
# NOTE(review): the text says "EvEditor" although this is EvMenu -- looks
# like a copy-paste slip, but it is a runtime string so it is left as-is.
_TRACE_PERSISTENT_SAVING = \
    "EvMenu persistent-mode error. Commonly, this is because one or " \
    "more of the EvEditor callbacks could not be pickled, for example " \
    "because it's a class method or is defined inside another function."
class EvMenuError(RuntimeError):
    """Raised by the menu machinery on internal failures (e.g. a missing
    or broken menu node)."""
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
# NOTE: the class docstring below doubles as the in-game help entry for this
# command, so it is intentionally left unchanged.
class CmdEvMenuNode(Command):
    """
    Menu options.
    """
    # Triggered both on empty input and on input matching no other command,
    # so every keypress while the menu cmdset is active funnels through here.
    key = _CMD_NOINPUT
    aliases = [_CMD_NOMATCH]
    locks = "cmd:all()"
    help_category = "Menu"

    def func(self):
        """
        Implement all menu commands.
        """
        caller = self.caller
        # The live menu is stored as an NAttribute on the caller (or, as a
        # fallback, on the session driving this command).
        menu = caller.ndb._menutree or self.session.ndb._menutree
        if not menu:
            # check if there is a saved menu available
            # (set by EvMenu.__init__ when persistent=True; NAttributes do
            # not survive a reload but Attributes do)
            saved_options = caller.attributes.get("_menutree_saved")
            if saved_options:
                startnode = caller.attributes.get("_menutree_saved_startnode")
                if startnode:
                    # resume at the last visited node, not the original start
                    saved_options[1]["startnode"] = startnode
                # this will create a completely new menu call
                EvMenu(caller, *saved_options[0], **saved_options[1])
                return
        if not menu:
            # no live menu and nothing saved to restore - this is a bug state
            err = "Menu object not found as %s.ndb._menutree!" % (caller)
            caller.msg(err)
            raise EvMenuError(err)
        # delegate the actual input handling to the menu's pluggable parser
        menu._input_parser(menu, self.raw_string, caller)
class EvMenuCmdSet(CmdSet):
    """
    The Menu cmdset replaces the current cmdset.
    """
    key = "menu_cmdset"
    # priority/mergetype here are defaults; EvMenu.__init__ overwrites them
    # from its cmdset_priority/cmdset_mergetype arguments before adding.
    priority = 1
    mergetype = "Replace"
    # block object/exit commands while in the menu, but keep channels working
    no_objs = True
    no_exits = True
    no_channels = False

    def at_cmdset_creation(self):
        """
        Called when creating the set.
        """
        # the single catch-all command that drives the whole menu
        self.add(CmdEvMenuNode())
# These are default node formatters

def dedent_strip_nodetext_formatter(nodetext, has_options, caller=None):
    """Default node-text formatter.

    Removes the common leading indentation from `nodetext` and strips
    surrounding whitespace, so triple-quoted node strings display cleanly.

    Args:
        nodetext (str): Raw text returned by the node function.
        has_options (bool): Whether the node has options (unused here).
        caller (Object, Player or Session, optional): Menu user (unused).

    Returns:
        str: The dedented, stripped node text.
    """
    cleaned = dedent(nodetext)
    return cleaned.strip()
def dedent_nodetext_formatter(nodetext, has_options, caller=None):
    """Node-text formatter that only removes common leading indentation.

    Unlike `dedent_strip_nodetext_formatter`, surrounding blank lines and
    trailing whitespace are preserved.

    Args:
        nodetext (str): Raw text returned by the node function.
        has_options (bool): Whether the node has options (unused here).
        caller (Object, Player or Session, optional): Menu user (unused).

    Returns:
        str: The dedented node text.
    """
    return dedent(nodetext)
def evtable_options_formatter(optionlist, caller=None):
    """
    Formats the option list display.

    Args:
        optionlist (list): List of `(key, desc)` string pairs, one per
            visible menu option.
        caller (Object, Player or Session, optional): Menu user (unused).

    Returns:
        unicode: The options rendered as a multi-column EvTable string;
            empty string if there are no options.

    Notes:
        Keys are wrapped in MXP link tags (`|lc...|lt...|le`) so clients
        supporting MXP can click options directly.
    """
    if not optionlist:
        return ""

    # column separation distance
    colsep = 4

    nlist = len(optionlist)

    # get the widest option line in the table.
    table_width_max = -1
    table = []
    for key, desc in optionlist:
        if not (key or desc):
            continue
        # widest combined key+desc line (keys/descs may be multi-line)
        table_width_max = max(table_width_max,
                              max(m_len(p) for p in key.split("\n")) +
                              max(m_len(p) for p in desc.split("\n")) + colsep)
        raw_key = strip_ansi(key)
        if raw_key != key:
            # already decorations in key definition
            table.append(ANSIString(" |lc%s|lt%s|le: %s" % (raw_key, key, desc)))
        else:
            # add a default white color to key
            table.append(ANSIString(" |lc%s|lt|w%s|n|le: %s" % (raw_key, raw_key, desc)))
    # how many columns of the widest entry fit in the client width
    ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of ncols
    nlastcol = nlist % ncols # number of elements left in last row

    # get the amount of rows needed (start with 4 rows)
    nrows = 4
    while nrows * ncols < nlist:
        nrows += 1
    ncols = nlist // nrows # number of full columns
    nlastcol = nlist % nrows # number of elements in last column

    # get the final column count
    ncols = ncols + 1 if nlastcol > 0 else ncols
    if ncols > 1:
        # only extend if longer than one column
        # (pad with blanks so every column has exactly nrows entries)
        table.extend([" " for i in range(nrows - nlastcol)])

    # build the actual table grid
    table = [table[icol * nrows : (icol * nrows) + nrows] for icol in range(0, ncols)]

    # adjust the width of each column
    for icol in range(len(table)):
        col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
        table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]

    # format the table into columns
    # NOTE(review): `unicode` is the Python 2 builtin; this line would
    # NameError under Python 3 -- confirm before porting.
    return unicode(EvTable(table=table, border="none"))
def underline_node_formatter(nodetext, optionstext, caller=None):
    """Default node formatter: frame the node with '_' ruler lines.

    A ruler as wide as the widest line (of either the node text or the
    options block) is drawn above the node text and between the text and
    the options.

    Args:
        nodetext (str): Formatted node text.
        optionstext (str): Formatted options block.
        caller (Object, Player or Session, optional): Menu user (unused).

    Returns:
        str: The assembled node display.
    """
    text_width = max(m_len(line) for line in nodetext.split("\n"))
    opts_width = max(m_len(line) for line in optionstext.split("\n"))
    total_width = max(text_width, opts_width)
    ruler = "_" * total_width
    # no top ruler if the node text is empty; no rulers at all if both empty
    top = ruler + "\n\n" if text_width else ""
    bottom = "\n" + ruler + "\n\n" if total_width else ""
    return top + nodetext + bottom + optionstext
def null_node_formatter(nodetext, optionstext, caller=None):
    """Minimalistic node formatter: node text, a blank line, the options.

    Args:
        nodetext (str): Formatted node text.
        optionstext (str): Formatted options block.
        caller (Object, Player or Session, optional): Menu user (unused).

    Returns:
        str: The assembled node display with no frames or rulers.
    """
    return "{}\n\n{}".format(nodetext, optionstext)
def evtable_parse_input(menuobject, raw_string, caller):
    """
    Process one line of user input for the current menu node.

    Args:
        menuobject (EvMenu): The active EvMenu instance.
        raw_string (str): The incoming raw string from the menu command.
        caller (Object, Player or Session): The entity using the menu.

    Notes:
        Explicit option keys take precedence over the built-in
        look/help/quit commands, which in turn take precedence over the
        node's hidden "_default" option. A node that offers no options
        at all closes the menu after handling the input.
    """
    user_input = raw_string.strip().lower()

    selection = menuobject.options.get(user_input)
    if selection is not None:
        # an explicit option key - this wins over the built-in commands
        goto_node, exec_node = selection
        menuobject.callback_goto(exec_node, goto_node, raw_string)
    elif menuobject.auto_look and user_input in ("look", "l"):
        menuobject.display_nodetext()
    elif menuobject.auto_help and user_input in ("help", "h"):
        menuobject.display_helptext()
    elif menuobject.auto_quit and user_input in ("quit", "q", "exit"):
        menuobject.close_menu()
    elif menuobject.default:
        # fall back to the node's catch-all "_default" option
        goto_node, exec_node = menuobject.default
        menuobject.callback_goto(exec_node, goto_node, raw_string)
    else:
        caller.msg(_HELP_NO_OPTION_MATCH)

    if not (menuobject.options or menuobject.default):
        # no options - we are at the end of the menu.
        menuobject.close_menu()
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
    """
    This object represents an operational menu. It is initialized from
    a menufile.py instruction.

    The instance lives on `caller.ndb._menutree` for the duration of the
    menu and is deleted again by `close_menu`.
    """
    def __init__(self, caller, menudata, startnode="start",
                 cmdset_mergetype="Replace", cmdset_priority=1,
                 auto_quit=True, auto_look=True, auto_help=True,
                 cmd_on_exit="look",
                 nodetext_formatter=dedent_strip_nodetext_formatter,
                 options_formatter=evtable_options_formatter,
                 node_formatter=underline_node_formatter,
                 input_parser=evtable_parse_input,
                 persistent=False):
        """
        Initialize the menu tree and start the caller onto the first node.

        Args:
            caller (Object, Player or Session): The user of the menu.
            menudata (str, module or dict): The full or relative path to the module
                holding the menu tree data. All global functions in this module
                whose name doesn't start with '_ ' will be parsed as menu nodes.
                Also the module itself is accepted as input. Finally, a dictionary
                menu tree can be given directly. This must then be a mapping
                `{"nodekey":callable,...}` where `callable` must accept the same
                arguments as and return the data expected of a menu node. This
                allows for dynamic menu creation.
            startnode (str, optional): The starting node name in the menufile.
            cmdset_mergetype (str, optional): 'Replace' (default) means the menu
                commands will be exclusive - no other normal commands will
                be usable while the user is in the menu. 'Union' means the
                menu commands will be integrated with the existing commands
                (it will merge with `merge_priority`), if so, make sure that
                the menu's command names don't collide with existing commands
                in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
                be overloaded by the menu cmdset. Other cmdset mergetypes
                have little purpose for the menu.
            cmdset_priority (int, optional): The merge priority for the
                menu command set. The default (1) is usually enough for most
                types of menus.
            auto_quit (bool, optional): Allow user to use "q", "quit" or
                "exit" to leave the menu at any point. Recommended during
                development!
            auto_look (bool, optional): Automatically make "look" or "l" to
                re-show the last node. Turning this off means you have to handle
                re-showing nodes yourself, but may be useful if you need to
                use "l" for some other purpose.
            auto_help (bool, optional): Automatically make "help" or "h" show
                the current help entry for the node. If turned off, eventual
                help must be handled manually, but it may be useful if you
                need 'h' for some other purpose, for example.
            cmd_on_exit (callable, str or None, optional): When exiting the menu
                (either by reaching a node with no options or by using the
                in-built quit command (activated with `allow_quit`), this
                callback function or command string will be executed.
                The callback function takes two parameters, the caller then the
                EvMenu object. This is called after cleanup is complete.
                Set to None to not call any command.
            nodetext_formatter (callable, optional): This callable should be on
                the form `function(nodetext, has_options, caller=None)`, where `nodetext` is the
                node text string and `has_options` a boolean specifying if there
                are options associated with this node. It must return a formatted
                string. `caller` is optionally a reference to the user of the menu.
            options_formatter (callable, optional): This callable should be on
                the form `function(optionlist, caller=None)`, where ` optionlist is a list
                of option dictionaries, like
                [{"key":..., "desc",..., "goto": ..., "exec",...}, ...]
                Each dictionary describes each possible option. Note that this
                will also be called if there are no options, and so should be
                able to handle an empty list. This should
                be formatted into an options list and returned as a string,
                including the required separator to use between the node text
                and the options. If not given the default EvMenu style will be used.
                `caller` is optionally a reference to the user of the menu.
            node_formatter (callable, optional): This callable should be on the
                form `func(nodetext, optionstext, caller=None)` where the arguments are strings
                representing the node text and options respectively (possibly prepared
                by `nodetext_formatter`/`options_formatter` or by the default styles).
                It should return a string representing the final look of the node. This
                can e.g. be used to create line separators that take into account the
                dynamic width of the parts. `caller` is optionally a reference to the
                user of the menu.
            input_parser (callable, optional): This callable is responsible for parsing the
                options dict from a node and has the form `func(menuobject, raw_string, caller)`,
                where menuobject is the active `EvMenu` instance, `input_string` is the
                incoming text from the caller and `caller` is the user of the menu.
                It should use the helper method of the menuobject to goto new nodes, show
                help texts etc. See the default `evtable_parse_input` function for help
                with parsing.
            persistent (bool, optional): Make the Menu persistent (i.e. it will
                survive a reload). This will make the Menu cmdset persistent. Use
                with caution - if your menu is buggy you may end up in a state
                you can't get out of! Also note that persistent mode requires
                that all formatters, menu nodes and callables are possible to
                *pickle*.

        Raises:
            EvMenuError: If the start/end node is not found in menu tree.

        Notes:
            In persistent mode, all nodes, formatters and callbacks in
            the menu must be possible to be *pickled*, this excludes
            e.g. callables that are class methods or functions defined
            dynamically or as part of another function. In
            non-persistent mode no such restrictions exist.
        """
        self._startnode = startnode
        self._menutree = self._parse_menudata(menudata)
        self._nodetext_formatter = nodetext_formatter
        self._options_formatter = options_formatter
        self._node_formatter = node_formatter
        self._input_parser = input_parser
        self._persistent = persistent

        if startnode not in self._menutree:
            raise EvMenuError("Start node '%s' not in menu tree!" % startnode)

        # public variables made available to the command
        self.caller = caller
        self.auto_quit = auto_quit
        self.auto_look = auto_look
        self.auto_help = auto_help
        if isinstance(cmd_on_exit, str):
            # a string is treated as an in-game command to execute on exit
            self.cmd_on_exit = lambda caller, menu: caller.execute_cmd(cmd_on_exit)
        elif callable(cmd_on_exit):
            self.cmd_on_exit = cmd_on_exit
        else:
            self.cmd_on_exit = None
        # per-node state; (re)populated by goto()
        self.default = None
        self.nodetext = None
        self.helptext = None
        self.options = None

        # store ourself on the object
        self.caller.ndb._menutree = self

        if persistent:
            # save the menu to the database so CmdEvMenuNode can re-create
            # it after a server reload (everything saved must be picklable)
            try:
                caller.attributes.add("_menutree_saved",
                        ((menudata, ),
                         {"startnode": startnode,
                          "cmdset_mergetype": cmdset_mergetype,
                          "cmdset_priority": cmdset_priority,
                          "auto_quit": auto_quit, "auto_look": auto_look, "auto_help": auto_help,
                          "cmd_on_exit": cmd_on_exit,
                          "nodetext_formatter": nodetext_formatter, "options_formatter": options_formatter,
                          "node_formatter": node_formatter, "input_parser": input_parser,
                          "persistent": persistent,}))
                caller.attributes.add("_menutree_saved_startnode", startnode)
            except Exception as err:
                # could not pickle the menu data - degrade gracefully to
                # a non-persistent menu instead of failing outright
                caller.msg(_ERROR_PERSISTENT_SAVING.format(error=err))
                logger.log_trace(_TRACE_PERSISTENT_SAVING)
                persistent = False

        # set up the menu command on the caller
        menu_cmdset = EvMenuCmdSet()
        menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
        menu_cmdset.priority = int(cmdset_priority)
        self.caller.cmdset.add(menu_cmdset, permanent=persistent)

        # start the menu
        self.goto(self._startnode, "")

    def _parse_menudata(self, menudata):
        """
        Parse a menufile for node functions and store in dictionary
        map. Alternatively, accept a pre-made mapping dictionary of
        node functions.

        Args:
            menudata (str, module or dict): The python.path to the menufile,
                or the python module itself. If a dict, this should be a
                mapping nodename:callable, where the callable must match
                the criteria for a menu node.

        Returns:
            menutree (dict): A {nodekey: func}
        """
        if isinstance(menudata, dict):
            # This is assumed to be a pre-loaded menu tree.
            return menudata
        else:
            # a python path of a module
            module = mod_import(menudata)
            # every module-global function not starting with "_" is a node
            return dict((key, func) for key, func in module.__dict__.items()
                        if isfunction(func) and not key.startswith("_"))

    def _format_node(self, nodetext, optionlist):
        """
        Format the node text + option section

        Args:
            nodetext (str): The node text
            optionlist (list): List of (key, desc) pairs.

        Returns:
            string (str): The options section, including
                all needed spaces.

        Notes:
            This will adjust the columns of the options, first to use
            a maximum of 4 rows (expanding in columns), then gradually
            growing to make use of the screen space.
        """
        # handle the node text
        nodetext = self._nodetext_formatter(nodetext, len(optionlist), self.caller)

        # handle the options
        optionstext = self._options_formatter(optionlist, self.caller)

        # format the entire node
        return self._node_formatter(nodetext, optionstext, self.caller)

    def _execute_node(self, nodename, raw_string):
        """
        Execute a node.

        Args:
            nodename (str): Name of node.
            raw_string (str): The raw default string entered on the
                previous node (only used if the node accepts it as an
                argument)

        Returns:
            nodetext, options (tuple): The node text (a string or a
                tuple and the options tuple, if any.

        Raises:
            EvMenuError: If the node is missing or could not be unpacked.
        """
        try:
            node = self._menutree[nodename]
        except KeyError:
            self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
            raise EvMenuError
        try:
            # the node should return data as (text, options)
            if len(getargspec(node).args) > 1:
                # a node accepting raw_string
                nodetext, options = node(self.caller, raw_string)
            else:
                # a normal node, only accepting caller
                nodetext, options = node(self.caller)
        except KeyError:
            self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
            raise EvMenuError
        except Exception:
            self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
            raise
        return nodetext, options

    def display_nodetext(self):
        # echo the current node display to the user
        self.caller.msg(self.nodetext)

    def display_helptext(self):
        # echo the current node's help entry to the user
        self.caller.msg(self.helptext)

    def callback_goto(self, callback, goto, raw_string):
        # run the option's "exec" callback (if any) before moving to "goto"
        if callback:
            self.callback(callback, raw_string)
        if goto:
            self.goto(goto, raw_string)

    def callback(self, nodename, raw_string):
        """
        Run a node as a callback. This makes no use of the return
        values from the node.

        Args:
            nodename (str or callable): Name of node or a direct callable.
            raw_string (str): The raw default string entered on the
                previous node (only used if the node accepts it as an
                argument)
        """
        if callable(nodename):
            # this is a direct callable - execute it directly
            try:
                if len(getargspec(nodename).args) > 1:
                    # callable accepting raw_string
                    nodename(self.caller, raw_string)
                else:
                    # normal callable, only the caller as arg
                    nodename(self.caller)
            except Exception:
                self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
                raise
        else:
            # nodename is a string; lookup as node
            try:
                # execute the node; we make no use of the return values here.
                self._execute_node(nodename, raw_string)
            except EvMenuError:
                return

    def goto(self, nodename, raw_string):
        """
        Run a node by name

        Args:
            nodename (str): Name of node.
            raw_string (str): The raw default string entered on the
                previous node (only used if the node accepts it as an
                argument)
        """
        try:
            # execute the node, make use of the returns.
            nodetext, options = self._execute_node(nodename, raw_string)
        except EvMenuError:
            return

        if self._persistent:
            # remember this node so a reload resumes the menu here
            self.caller.attributes.add("_menutree_saved_startnode", nodename)

        # validation of the node return values
        helptext = ""
        if hasattr(nodetext, "__iter__"):
            # NOTE(review): on Python 2 a str has no __iter__, so this picks
            # out (text, helptext) tuples; under Python 3 a plain str would
            # also pass this test -- confirm before porting.
            if len(nodetext) > 1:
                nodetext, helptext = nodetext[:2]
            else:
                nodetext = nodetext[0]
        nodetext = "" if nodetext is None else str(nodetext)
        # a single option dict is accepted as shorthand for a one-tuple
        options = [options] if isinstance(options, dict) else options

        # this will be displayed in the given order
        display_options = []
        # this is used for lookup
        self.options = {}
        self.default = None
        if options:
            for inum, dic in enumerate(options):
                # fix up the option dicts
                keys = make_iter(dic.get("key"))
                if "_default" in keys:
                    # hidden catch-all option - not shown in the listing
                    keys = [key for key in keys if key != "_default"]
                    desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
                    goto, execute = dic.get("goto", None), dic.get("exec", None)
                    self.default = (goto, execute)
                else:
                    # visible option - its 1-based number is always an alias
                    keys = list(make_iter(dic.get("key", str(inum+1).strip()))) + [str(inum+1)]
                    desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
                    goto, execute = dic.get("goto", None), dic.get("exec", None)
                if keys:
                    display_options.append((keys[0], desc))
                    for key in keys:
                        if goto or execute:
                            # lookup keys are matched case/space-insensitively
                            self.options[strip_ansi(key).strip().lower()] = (goto, execute)

        self.nodetext = self._format_node(nodetext, display_options)

        # handle the helptext
        if helptext:
            self.helptext = helptext
        elif options:
            self.helptext = _HELP_FULL if self.auto_quit else _HELP_NO_QUIT
        else:
            self.helptext = _HELP_NO_OPTIONS if self.auto_quit else _HELP_NO_OPTIONS_NO_QUIT

        self.display_nodetext()

    def close_menu(self):
        """
        Shutdown menu; occurs when reaching the end node or using the quit command.
        """
        self.caller.cmdset.remove(EvMenuCmdSet)
        del self.caller.ndb._menutree
        if self._persistent:
            # remove the saved state so the menu won't respawn on reload
            self.caller.attributes.remove("_menutree_saved")
            self.caller.attributes.remove("_menutree_saved_startnode")
        if self.cmd_on_exit is not None:
            # fire the exit callback (set up in __init__) after cleanup
            self.cmd_on_exit(self.caller, self)
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
# NOTE: the class docstring below is shown to the player as the command's
# help entry, so it is intentionally left unchanged.
class CmdGetInput(Command):
    """
    Enter your data and press return.
    """
    # catch both no-match and empty input so any line the user types
    # while the input cmdset is active is routed here
    key = _CMD_NOMATCH
    aliases = _CMD_NOINPUT

    def func(self):
        "This is called when user enters anything."
        caller = self.caller
        # callback/prompt were stashed on the caller by get_input()
        callback = caller.ndb._getinputcallback
        prompt = caller.ndb._getinputprompt
        result = self.raw_string
        # a falsy return from the callback means "we are done"
        ok = not callback(caller, prompt, result)
        if ok:
            # only clear the state if the callback does not return
            # anything
            del caller.ndb._getinputcallback
            del caller.ndb._getinputprompt
            caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
    """
    This stores the input command
    """
    key = "input_cmdset"
    # replace the normal cmdset so all input goes to CmdGetInput
    priority = 1
    mergetype = "Replace"
    no_objs = True
    no_exits = True
    no_channels = False

    def at_cmdset_creation(self):
        "called once at creation"
        self.add(CmdGetInput())
def get_input(caller, prompt, callback):
    """
    Request a one-off line of input from `caller`.

    Shows `prompt` and installs a temporary cmdset that routes the next
    line(s) of input into `callback`.

    Args:
        caller (Player or Object): The entity being asked the question.
            This should usually be an object controlled by a user.
        prompt (str): Text shown to the user so they know input is
            expected.
        callback (callable): Called as `callback(caller, prompt, result)`
            when the user replies. Return nothing/False to clean up and
            exit the prompt; return True to keep the prompt active and
            accept further input.

    Raises:
        RuntimeError: If `callback` is not callable.
    """
    if not callable(callback):
        raise RuntimeError("get_input: input callback is not callable.")
    # stash the prompt state on the caller for CmdGetInput to pick up
    caller.ndb._getinputprompt = prompt
    caller.ndb._getinputcallback = callback
    caller.cmdset.add(InputCmdSet)
    caller.msg(prompt)
#------------------------------------------------------------
#
# test menu structure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
"""
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yL{nook", "l"),
"desc": "Look and see a custom message.",
"goto": "test_look_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_look_node(caller):
text = ""
options = {"key": ("{yL{nook", "l"),
"desc": "Go back to the previous menu.",
"goto": "test_start_node"}
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
This node's has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click |lclook|lthere|le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
set), and so gets assigned a number automatically. You can infact
-always- use numbers (1...N) to refer to listed options also if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
# NOTE: the class docstring below is the in-game help entry for this
# command, so it is intentionally left unchanged.
class CmdTestMenu(Command):
    """
    Test menu

    Usage:
      testmenu <menumodule>

    Starts a demo menu from a menu node definition module.
    """
    key = "testmenu"

    def func(self):
        # require the path of the menu-node module as argument
        if not self.args:
            self.caller.msg("Usage: testmenu menumodule")
            return
        # start menu
        EvMenu(self.caller, self.args.strip(), startnode="test_start_node", persistent=True, cmdset_mergetype="Replace")
| ergodicbreak/evennia | evennia/utils/evmenu.py | Python | bsd-3-clause | 36,641 |
from sqlalchemy import MetaData, Table, inspect
from sqlalchemy.schema import CreateTable
from rs_sqla_test_utils.utils import clean, compile_query
def table_to_ddl(engine, table):
    """Render `table` as a CREATE TABLE statement in `engine`'s dialect."""
    compiled = CreateTable(table).compile(engine)
    return str(compiled)
def test_view_reflection(redshift_engine):
    """A plain view reflects both its definition and its columns."""
    table_ddl = "CREATE TABLE my_table (col1 INTEGER, col2 INTEGER)"
    view_query = "SELECT my_table.col1, my_table.col2 FROM my_table"
    view_ddl = "CREATE VIEW my_view AS %s" % view_query
    # Close the connection when done instead of leaking it for the rest
    # of the test run; reflection below uses the engine directly.
    conn = redshift_engine.connect()
    try:
        conn.execute(table_ddl)
        conn.execute(view_ddl)
    finally:
        conn.close()
    insp = inspect(redshift_engine)
    view_definition = insp.get_view_definition('my_view')
    # the reflected definition should round-trip to the original query
    assert(clean(compile_query(view_definition)) == clean(view_query))
    view = Table('my_view', MetaData(),
                 autoload=True, autoload_with=redshift_engine)
    assert(len(view.columns) == 2)
def test_late_binding_view_reflection(redshift_engine):
    """A WITH NO SCHEMA BINDING view reflects its full DDL and columns."""
    table_ddl = "CREATE TABLE my_table (col1 INTEGER, col2 INTEGER)"
    view_query = "SELECT my_table.col1, my_table.col2 FROM public.my_table"
    view_ddl = ("CREATE VIEW my_late_view AS "
                "%s WITH NO SCHEMA BINDING" % view_query)
    # Close the connection when done instead of leaking it for the rest
    # of the test run; reflection below uses the engine directly.
    conn = redshift_engine.connect()
    try:
        conn.execute(table_ddl)
        conn.execute(view_ddl)
    finally:
        conn.close()
    insp = inspect(redshift_engine)
    view_definition = insp.get_view_definition('my_late_view')
    # For some reason, Redshift returns the entire DDL for late binding views.
    assert(clean(compile_query(view_definition)) == clean(view_ddl))
    view = Table('my_late_view', MetaData(),
                 autoload=True, autoload_with=redshift_engine)
    assert(len(view.columns) == 2)
| graingert/redshift_sqlalchemy | tests/test_reflection_views.py | Python | mit | 1,676 |
class PREPARED(DataClassification):
    # Classification identifier used throughout the type system.
    name="PREPARED"
    usage = 'Applies to all "prepared" data.'
    # Parent classification in the type hierarchy.
    parent = "UNPREPARED"
    # Matches datasets whose PHU contains any keyword matching the regex,
    # with any value.
    # NOTE(review): the key regex `.*?PREPAR*?` matches "PREPA" followed by
    # zero or more "R"s (likely intended `.*?PREPARE.*?`) -- confirm against
    # the classification-library conventions before changing.
    requirement = PHU( {'{re}.*?PREPAR*?': ".*?" })

# Register an instance with the global list of known classifications.
newtypes.append(PREPARED())
| pyrrho314/recipesystem | trunk/dontload-astrodata_Gemini/ADCONFIG_Gemini/classifications/status/gemdtype.PREPARED.py | Python | mpl-2.0 | 218 |
#!/usr/bin/python
from magnum import *
# Simulation world: a 10x10x5 mesh of 5 nm cells (50x50x25 nm volume)
# holding a Co free layer stacked above a high-anisotropy Co fixed layer.
world = World(
    RectangularMesh((10, 10, 5), (5e-9, 5e-9, 5e-9)),
    Body("freelayer", Material.Co(), Cuboid((0e-9, 0e-9, 25e-9), (50e-9, 50e-9, 20e-9))),
    Body("fixedlayer", Material.Co(k1=1e7), Cuboid((0e-9, 0e-9, 15e-9), (50e-9, 50e-9, 0e-9)))
)

# Spin-torque polarization direction (unit vector along +x) and the
# torque prefactor a_j (units per the magnum API -- TODO confirm).
p = 1.0, 0.0, 0.0
a_j = -31830

solver = Solver(world, log=True)
# apply the macro spin-torque term to the free layer only
solver.setMacroSpinTorque("freelayer", p, a_j)
# static applied (Zeeman) field along +x -- presumably in A/m; verify units
solver.setZeeman((139260, 0.0, 0.0))
# log the evolution to an OOMMF-style data table
solver.addStepHandler(DataTableStepHandler("macro-spintorque.odt"))
# initial magnetization, then integrate 20 ns of simulated time
solver.setM((8e5, 8e5, 0))
solver.solve(condition.Time(20e-9))
| MicroMagnum/MicroMagnum | examples/macro-spintorque/macro-spintorque.py | Python | gpl-3.0 | 580 |
import json
import hashlib
import requests
from optional_django.serializers import JSONEncoder
from .exceptions import ReactRenderingError
from . import conf
from .exceptions import RenderServerError
class RenderedComponent(object):
    """Result of a server-side render: the produced markup plus the
    serialized props that generated it. Coercing the object to a string
    yields the markup, so it can be embedded directly in templates."""

    def __init__(self, markup, props):
        self.markup, self.props = markup, props

    def __str__(self):
        return self.markup

    def __unicode__(self):
        # Python 2 text coercion (``unicode`` does not exist on Python 3)
        return unicode(self.markup)
class RenderServer(object):
    """Client for a react render server reachable over HTTP."""

    def __init__(self, url):
        # base URL of the render service
        self.url = url

    def render(self, path, props=None, to_static_markup=False):
        """
        Ask the render server to render the component at `path`.

        `props` (dict or None) are JSON-serialized and passed to the
        component; `to_static_markup` requests renderToStaticMarkup
        instead of renderToString. Returns a RenderedComponent.
        Raises RenderServerError on transport/protocol problems and
        ReactRenderingError if the server reports a rendering error.
        """
        if props is not None:
            serialized_props = json.dumps(props, cls=JSONEncoder)
        else:
            serialized_props = None

        if not conf.settings.RENDER:
            # rendering disabled by configuration: hand back empty markup
            # so the props can still be mounted client-side
            return RenderedComponent('', serialized_props)

        options = {
            'path': path,
            'serializedProps': serialized_props,
            'toStaticMarkup': to_static_markup
        }
        serialized_options = json.dumps(options)
        # hash of the payload, sent as a query param (e.g. for caching)
        options_hash = hashlib.sha1(serialized_options.encode('utf-8')).hexdigest()

        try:
            res = requests.post(
                self.url,
                data=serialized_options,
                headers={'content-type': 'application/json'},
                params={'hash': options_hash}
            )
        except requests.ConnectionError:
            raise RenderServerError('Could not connect to render server at {}'.format(self.url))

        if res.status_code != 200:
            raise RenderServerError(
                'Unexpected response from render server at {} - {}: {}'.format(self.url, res.status_code, res.text)
            )

        obj = res.json()

        markup = obj.get('markup', None)
        err = obj.get('error', None)

        if err:
            # prefer the structured message+stack form when available
            if 'message' in err and 'stack' in err:
                raise ReactRenderingError(
                    'Message: {}\n\nStack trace: {}'.format(err['message'], err['stack'])
                )
            raise ReactRenderingError(err)

        if markup is None:
            raise ReactRenderingError('Render server failed to return markup. Returned: {}'.format(obj))

        return RenderedComponent(markup, serialized_props)
# Module-level singleton used throughout the package, pointed at the
# configured render-server URL.
render_server = RenderServer(conf.settings.RENDER_URL)
| arceduardvincent/python-react | react/render_server.py | Python | mit | 2,316 |
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools.matchers import HasLength
from vitrage.evaluator.actions.base import ActionType
from vitrage.evaluator.actions.recipes.action_steps import EXECUTE_EXTERNAL
from vitrage.evaluator.actions.recipes.action_steps import EXECUTION_ENGINE
from vitrage.evaluator.actions.recipes.execute_mistral import ExecuteMistral
from vitrage.evaluator.actions.recipes.execute_mistral import MISTRAL
from vitrage.evaluator.actions.recipes.execute_mistral import WORKFLOW
from vitrage.evaluator.template_data import ActionSpecs
from vitrage.tests.base import BaseTest
from vitrage.tests.base import IsEmpty
class RaiseAlarmRecipeTest(BaseTest):
    # NOTE(review): despite the name (apparently copied from the raise-alarm
    # recipe tests) this class exercises the execute-mistral recipe.

    # noinspection PyPep8Naming
    @classmethod
    def setUpClass(cls):
        super(RaiseAlarmRecipeTest, cls).setUpClass()
        # Properties handed to the Mistral workflow execution step.
        cls.props = {EXECUTION_ENGINE: MISTRAL,
                     WORKFLOW: 'wf_4',
                     'host': 'host5',
                     'state': 'ok'}
        cls.action_spec = ActionSpecs(0,
                                      ActionType.EXECUTE_MISTRAL,
                                      {},
                                      cls.props)

    def test_get_do_recipe(self):
        """The 'do' recipe must be a single execute_external step that
        carries the configured engine and workflow."""
        # Test Action
        action_steps = ExecuteMistral.get_do_recipe(self.action_spec)

        # Test Assertions
        # expecting for one step: [execute_external]
        self.assertThat(action_steps, HasLength(1))

        self.assertEqual(EXECUTE_EXTERNAL, action_steps[0].type)
        execute_external_step_params = action_steps[0].params
        self.assertIsNotNone(execute_external_step_params)
        self.assertLessEqual(2, len(execute_external_step_params))

        execution_engine = execute_external_step_params[EXECUTION_ENGINE]
        self.assertEqual(self.props[EXECUTION_ENGINE], execution_engine)

        workflow = execute_external_step_params[WORKFLOW]
        self.assertEqual(self.props[WORKFLOW], workflow)

    def test_get_undo_recipe(self):
        """Executing a Mistral workflow has no undo; recipe must be empty."""
        # Test Action
        action_steps = ExecuteMistral.get_undo_recipe(self.action_spec)

        # Test Assertions
        # expecting for zero steps (no undo for this action)
        self.assertThat(action_steps, IsEmpty())
| openstack/vitrage | vitrage/tests/unit/evaluator/recipes/test_execute_mistral.py | Python | apache-2.0 | 2,750 |
import sys
import random
import numpy as np
import torch
from torchtext import data
from args import get_args
from SST1 import SST1Dataset
from utils import clean_str_sst
args = get_args()
torch.manual_seed(args.seed)

# Fall back to CPU when CUDA was not requested.
if not args.cuda:
    args.gpu = -1
if torch.cuda.is_available() and args.cuda:
    print("Note: You are using GPU for training")
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(args.seed)
if torch.cuda.is_available() and not args.cuda:
    print("Warning: You have Cuda but do not use it. You are using CPU for training")

# Seed every RNG in play so evaluation is reproducible.
np.random.seed(args.seed)
random.seed(args.seed)

# This script only evaluates; a pre-trained checkpoint is mandatory.
if not args.trained_model:
    print("Error: You need to provide a option 'trained_model' to load the model")
    sys.exit(1)

# NOTE(review): only SST-1 is handled; any other --dataset value leaves
# TEXT/LABEL undefined and the build_vocab calls below will fail.
if args.dataset == 'SST-1':
    TEXT = data.Field(batch_first=True, lower=True, tokenize=clean_str_sst)
    LABEL = data.Field(sequential=False)
    train, dev, test = SST1Dataset.splits(TEXT, LABEL)

# Vocabularies are built from the training split only.
TEXT.build_vocab(train, min_freq=2)
LABEL.build_vocab(train)

# Deterministic (unshuffled) iterators for dev/test; the train iterator is
# constructed but unused during evaluation.
train_iter = data.Iterator(train, batch_size=args.batch_size, device=args.gpu, train=True, repeat=False,
                           sort=False, shuffle=True)
dev_iter = data.Iterator(dev, batch_size=args.batch_size, device=args.gpu, train=False, repeat=False,
                         sort=False, shuffle=False)
test_iter = data.Iterator(test, batch_size=args.batch_size, device=args.gpu, train=False, repeat=False,
                          sort=False, shuffle=False)

config = args
config.target_class = len(LABEL.vocab)
config.words_num = len(TEXT.vocab)
config.embed_num = len(TEXT.vocab)

print("Label dict:", LABEL.vocab.itos)

# Load the checkpoint onto the requested device.
if args.cuda:
    model = torch.load(args.trained_model, map_location=lambda storage, location: storage.cuda(args.gpu))
else:
    model = torch.load(args.trained_model, map_location=lambda storage, location: storage)
def predict(dataset_iter, dataset, dataset_name):
    """Run the loaded global ``model`` over one split and print its accuracy."""
    print("Dataset: {}".format(dataset_name))
    model.eval()
    dataset_iter.init_epoch()

    n_correct = 0
    for batch in dataset_iter:
        scores = model(batch)
        predictions = torch.max(scores, 1)[1].view(batch.label.size())
        n_correct += (predictions.data == batch.label.data).sum()

    print("no. correct {} out of {}".format(n_correct, len(dataset)))
    accuracy = 100. * n_correct / len(dataset)
    print("{} accuracy: {:8.6f}%".format(dataset_name, accuracy))
# Report accuracy on the validation split.
predict(dataset_iter=dev_iter, dataset=dev, dataset_name="valid")
# Report accuracy on the held-out test split.
predict(dataset_iter=test_iter, dataset=test, dataset_name="test") | Impavidity/kim_cnn | main.py | Python | mit | 2,631 |
# modified by Yu Huang
from controllers.pid_controller import PIDController
import math
import numpy
class MovingToPoint2(PIDController):
"""FollowPath (i.e. move to next point) steers the robot to a predefined position in the world."""
def __init__(self, params):
    """Initialize internal variables.

    ``params`` must expose ``path`` (a sequence of way-points); the target
    way-point is read from ``params.path[1]`` by ``get_heading_angle``.
    """
    PIDController.__init__(self, params)
    self.params = params  # kept so heading computations can read the path
def get_heading_angle(self, state):
    """Angle from the robot to the next path point, in the robot's frame,
    normalized to [-pi, pi)."""
    # Target way-point (the next point on the supervisor-provided path).
    goal_x, goal_y = self.params.path[1][0], self.params.path[1][1]
    # Current robot pose in world coordinates.
    robot_x, robot_y, robot_theta = state.pose
    # World-frame bearing to the goal, then re-expressed relative to the
    # robot's own heading and wrapped into [-pi, pi).
    world_bearing = math.atan2(goal_y - robot_y, goal_x - robot_x)
    return (world_bearing - robot_theta + math.pi) % (2 * math.pi) - math.pi
def get_heading(self, state):
    """Heading toward the goal as a homogeneous direction vector [x, y, 1]."""
    angle = self.get_heading_angle(state)
    return numpy.array([math.cos(angle), math.sin(angle), 1])
def execute(self, state, dt):
v, w = PIDController.execute(self, state, dt)
#print 'Move to point ', (self.params.ga_path[point_cnt][0], self.params.ga_path[point_cnt][1])
return v/2, w | ZhuangER/robot_path_planning | controllers/movingtopoint2.py | Python | mit | 1,295 |
# -*- coding: utf-8 -*-
"""
Tests for gdcdatamodel.gdc_postgres_admin module
"""
import logging
import unittest
from psqlgraph import (
Edge,
Node,
PsqlGraphDriver,
)
from sqlalchemy.exc import ProgrammingError
from gdcdatamodel import gdc_postgres_admin as pgadmin
from gdcdatamodel import models
# Configure root logging once at import time so test loggers emit output.
logging.basicConfig()
class TestGDCPostgresAdmin(unittest.TestCase):
    """Exercises the gdc_postgres_admin CLI entry points (graph-create,
    graph-grant, graph-revoke) against a local Postgres instance."""

    logger = logging.getLogger('TestGDCPostgresAdmin')
    logger.setLevel(logging.INFO)

    host = 'localhost'
    user = 'postgres'
    database = 'automated_test'

    # CLI arguments shared by every pgadmin invocation below.
    base_args = [
        '-H', host,
        '-U', user,
        '-D', database,
    ]

    g = PsqlGraphDriver(host, user, '', database)

    root_con_str = "postgres://{user}:{pwd}@{host}/{db}".format(
        user=user, host=host, pwd='', db=database)

    engine = pgadmin.create_engine(root_con_str)

    @classmethod
    def tearDownClass(cls):
        """Recreate the database for tests that follow.
        """
        cls.create_all_tables()

        # Re-grant permissions to test user
        for scls in Node.get_subclasses() + Edge.get_subclasses():
            statment = ("GRANT ALL PRIVILEGES ON TABLE {} TO test"
                        .format(scls.__tablename__))
            cls.engine.execute('BEGIN; %s; COMMIT;' % statment)

    @classmethod
    def drop_all_tables(cls):
        # Best-effort drop; failures (e.g. table already absent) are logged,
        # not fatal.
        for scls in Node.get_subclasses():
            try:
                cls.engine.execute("DROP TABLE {} CASCADE"
                                   .format(scls.__tablename__))
            except Exception as e:
                cls.logger.warning(e)

    @classmethod
    def create_all_tables(cls):
        # Drive the CLI exactly as a user would: pgadmin graph-create ...
        parser = pgadmin.get_parser()
        args = parser.parse_args([
            'graph-create', '--delay', '1', '--retries', '0'
        ] + cls.base_args)
        pgadmin.main(args)

    @classmethod
    def drop_a_table(cls):
        # Helper for tests that need a partially-dropped schema.
        cls.engine.execute('DROP TABLE edge_clinicaldescribescase')
        cls.engine.execute('DROP TABLE node_clinical')

    def startTestRun(self):
        # NOTE(review): startTestRun is a unittest.TestResult hook, not a
        # TestCase one, so the runner never calls this; setUp below performs
        # the same cleanup anyway.
        self.drop_all_tables()

    def setUp(self):
        # Start every test from a clean schema.
        self.drop_all_tables()

    def test_args(self):
        # The parser must at least accept the canonical argument set.
        parser = pgadmin.get_parser()
        parser.parse_args(['graph-create'] + self.base_args)

    def test_create_single(self):
        """Test simple table creation"""
        pgadmin.main(pgadmin.get_parser().parse_args([
            'graph-create', '--delay', '1', '--retries', '0'
        ] + self.base_args))

        self.engine.execute('SELECT * from node_case')

    def test_create_double(self):
        """Test idempotency of table creation"""
        # NOTE(review): despite the name, graph-create is only invoked once
        # here (after setUp dropped the tables) -- confirm intent.
        pgadmin.main(pgadmin.get_parser().parse_args([
            'graph-create', '--delay', '1', '--retries', '0'
        ] + self.base_args))

        self.engine.execute('SELECT * from node_case')

    def test_priv_grant_read(self):
        """Test ability to grant read but not write privs"""
        self.create_all_tables()

        try:
            self.engine.execute("CREATE USER pytest WITH PASSWORD 'pyt3st'")
            self.engine.execute("GRANT USAGE ON SCHEMA public TO pytest")
            g = PsqlGraphDriver(self.host, 'pytest', 'pyt3st', self.database)

            #: If this failes, this test (not the code) is wrong!
            with self.assertRaises(ProgrammingError):
                with g.session_scope():
                    g.nodes().count()

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-grant', '--read=pytest',
            ] + self.base_args))

            # Reads now succeed...
            with g.session_scope():
                g.nodes().count()

            # ...but writes must still be denied.
            with self.assertRaises(ProgrammingError):
                with g.session_scope() as s:
                    s.merge(models.Case('1'))

        finally:
            # Always remove the throwaway role, even on assertion failure.
            self.engine.execute("DROP OWNED BY pytest; DROP USER pytest")

    def test_priv_grant_write(self):
        """Test ability to grant read/write privs"""
        self.create_all_tables()

        try:
            self.engine.execute("CREATE USER pytest WITH PASSWORD 'pyt3st'")
            self.engine.execute("GRANT USAGE ON SCHEMA public TO pytest")
            g = PsqlGraphDriver(self.host, 'pytest', 'pyt3st', self.database)

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-grant', '--write=pytest',
            ] + self.base_args))

            # Both reads and writes succeed after a write grant.
            with g.session_scope() as s:
                g.nodes().count()
                s.merge(models.Case('1'))

        finally:
            self.engine.execute("DROP OWNED BY pytest; DROP USER pytest")

    def test_priv_revoke_read(self):
        """Test ability to revoke read privs"""
        self.create_all_tables()

        try:
            self.engine.execute("CREATE USER pytest WITH PASSWORD 'pyt3st'")
            self.engine.execute("GRANT USAGE ON SCHEMA public TO pytest")
            g = PsqlGraphDriver(self.host, 'pytest', 'pyt3st', self.database)

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-grant', '--read=pytest',
            ] + self.base_args))

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-revoke', '--read=pytest',
            ] + self.base_args))

            # After the revoke, neither reads nor writes are allowed.
            with self.assertRaises(ProgrammingError):
                with g.session_scope() as s:
                    g.nodes().count()
                    s.merge(models.Case('1'))

        finally:
            self.engine.execute("DROP OWNED BY pytest; DROP USER pytest")

    def test_priv_revoke_write(self):
        """Test ability to revoke read/write privs"""
        self.create_all_tables()

        try:
            self.engine.execute("CREATE USER pytest WITH PASSWORD 'pyt3st'")
            self.engine.execute("GRANT USAGE ON SCHEMA public TO pytest")
            g = PsqlGraphDriver(self.host, 'pytest', 'pyt3st', self.database)

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-grant', '--write=pytest',
            ] + self.base_args))

            pgadmin.main(pgadmin.get_parser().parse_args([
                'graph-revoke', '--write=pytest',
            ] + self.base_args))

            # Reads keep working; only the write privilege was revoked.
            with g.session_scope() as s:
                g.nodes().count()

            with self.assertRaises(ProgrammingError):
                with g.session_scope() as s:
                    s.merge(models.Case('1'))

        finally:
            self.engine.execute("DROP OWNED BY pytest; DROP USER pytest")
| NCI-GDC/gdcdatamodel | test/test_gdc_postgres_admin.py | Python | apache-2.0 | 6,451 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.db import models
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from cms.models.fields import PlaceholderField
from adminsortable.fields import SortableForeignKey
from parler.models import TranslatableModel, TranslatedFields
from aldryn_translation_tools.models import TranslationHelperMixin
from aldryn_common.admin_fields.sortedm2m import SortedM2MModelField
from allink_core.allink_base.models.choices import GENDER_CHOICES
from allink_core.allink_base.models import AllinkManualEntriesMixin, AllinkTranslatedAutoSlugifyMixin, AllinkContactFieldsModel
from allink_core.allink_base.models import AllinkBaseModel, AllinkBaseImage, AllinkBaseAppContentPlugin, AllinkAddressFieldsModel
from allink_apps.people.managers import AllinkPeopleManager
class People(TranslationHelperMixin, AllinkTranslatedAutoSlugifyMixin, TranslatableModel, AllinkContactFieldsModel, AllinkAddressFieldsModel, AllinkBaseModel):
    """Translatable "person" entry for the people app.

    Feel free to add app-specific fields. Slug generation is driven by
    ``slug_source_field_name`` (here: the person's full name).
    """
    slug_source_field_name = 'full_name'

    # Name fields live on the shared (untranslated) model.
    firstname = models.CharField(
        _(u'Firstname'),
        max_length=255,
        default=''
    )
    lastname = models.CharField(
        _(u'Lastname'),
        max_length=255,
        default=''
    )
    preview_image = FilerImageField(
        verbose_name=_(u'Preview Image'),
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='%(app_label)s_%(class)s_preview_image',
    )
    translations = TranslatedFields(
        # to be removed in release 0.0.8, or when all old projects are up to date
        old_firstname=models.CharField(
            _(u'Firstname'),
            max_length=255,
            default=''
        ),
        # to be removed in release 0.0.8, or when all old projects are up to date
        old_lastname=models.CharField(
            _(u'Lastname'),
            max_length=255,
            default=''
        ),
        job_title=models.CharField(
            _(u'Unit'),
            max_length=255,
            default=''
        ),
        job_function=models.CharField(
            _(u'Function'),
            max_length=255,
            default=''
        ),
        text=HTMLField(
            _(u'Text'),
            blank=True,
            null=True
        ),
        slug=models.SlugField(
            _(u'Slug'),
            max_length=255,
            default='',
            blank=True,
            help_text=_(u'Leave blank to auto-generate a unique slug.')
        ),
    )
    company_name = models.CharField(
        _(u'Company Name'),
        max_length=255,
        blank=True,
        null=True
    )
    gender = models.IntegerField(
        _(u'Gender'),
        choices=GENDER_CHOICES,
        null=True
    )

    # CMS placeholders for the person's detail page.
    header_placeholder = PlaceholderField(u'people_header', related_name='%(app_label)s_%(class)s_header_placeholder')
    content_placeholder = PlaceholderField(u'people_content', related_name='%(app_label)s_%(class)s_content_placeholder')

    objects = AllinkPeopleManager()

    class Meta:
        app_label = 'people'
        verbose_name = _('Person')
        verbose_name_plural = _('People')

    @property
    def full_name(self):
        return u'{} {}'.format(self.firstname, self.lastname)

    @property
    def title(self):
        # NOTE(review): identical to full_name; presumably kept for a shared
        # template API -- confirm before consolidating.
        return u'{} {}'.format(self.firstname, self.lastname)

    @property
    def units(self):
        # Comma-joined names of the categories with identifier 'units'.
        units = []
        for unit in self.categories.filter(identifier='units'):
            units.append(unit.name)
        return ','.join(units)
# APP CONTENT PLUGIN
class PeopleAppContentPlugin(AllinkManualEntriesMixin, AllinkBaseAppContentPlugin):
    """App-content CMS plugin listing People entries.

    Subclasses must specify ``data_model`` and a ``manual_entries``
    sorted M2M field; when manual entries are selected, category
    filtering is ignored.
    """

    # (field key, display/query configuration) pairs offered in the admin
    # as filter options for the rendered list.
    FILTER_FIELD_CHOICES = (
        ('categories', {
            'verbose': _(u'Unit'),
            'query_filter': {'identifier': 'units'},
        }),
        # at the moment we can't handle two different categories at the same time
        # ('categories', {
        #     'verbose': _(u'Location'),
        #     'query_filter': {'tag': 'locations'},
        #     # if locations have not been generated automatically
        #     # PROJECT_CATEGORY_IDENTIFIERS has to be set in settings accordingly ()
        #     # 'query_filter': {'identifier': 'locations'},
        # }),
        ('job_function', {
            'verbose': _(u'Job Function'),
            'query_filter': {},
        }),
        ('lastname', {
            'verbose': _(u'Lastname'),
            'query_filter': {},
        }),
        ('job_title', {
            'verbose': _(u'Job Title'),
            'query_filter': {},
        }),
    )

    data_model = People

    manual_entries = SortedM2MModelField(
        '{}.{}'.format(data_model._meta.app_label, data_model._meta.model_name), blank=True,
        help_text=_('Select and arrange specific entries, or, leave blank to select all. (If '
                    'manual entries are selected the category filtering will be ignored.)')
    )
class PeopleImage(AllinkBaseImage):
    # Gallery image attached to one person; SortableForeignKey provides the
    # admin drag-ordering, and the first image acts as the preview image.
    people = SortableForeignKey(People, verbose_name=_(u'Images'), help_text=_(u'The first image will be used as preview image.'), blank=True, null=True)
| allink/allink-apps | people/models.py | Python | bsd-3-clause | 5,649 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton import Message, Endpoint
from system_test import main_module, TIMEOUT
from system_test import unittest
class Timeout(object):
    """Scheduled-timer callback that tells its owning handler the deadline
    has passed."""

    def __init__(self, owner):
        self.owner = owner

    def on_timer_task(self, event):
        # Invoked by the reactor when the scheduled delay elapses.
        self.owner.timeout()
class DrainMessagesHandler(MessagingHandler):
    """Send 10 messages; after the 4th delivery the receiver issues
    drain(20), so all 10 messages must arrive and the leftover credit must
    be cancelled (credit == 0) for the test to succeed."""

    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        self.error = "Unexpected Exit"  # cleared only on success

    def timeout(self):
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_link_flow(self, event):
        if event.link.is_sender and event.link.credit \
                and event.link.state & Endpoint.LOCAL_ACTIVE \
                and event.link.state & Endpoint.REMOTE_ACTIVE:
            self.on_sendable(event)

        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked and we can
        # declare that the test is successful
        if self.received_count == 10 and event.link.credit == 0:
            self.error = None
            self.timer.cancel()
            self.receiver.close()
            self.sender.close()
            self.conn.close()

    def on_sendable(self, event):
        if self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1

    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1

            if self.received_count < 4:
                event.receiver.flow(1)
            elif self.received_count == 4:
                # We are issuing a drain of 20. This means that we will receive all the 10 messages
                # that the sender is sending. The router will also send back a response flow frame with
                # drain=True but I don't have any way of making sure that the response frame reached the
                # receiver
                event.receiver.drain(20)

    def run(self):
        Container(self).run()
class DrainOneMessageHandler(DrainMessagesHandler):
    """Variant of DrainMessagesHandler that drains with credit 1 after the
    4th message, so exactly one further message (5 total) should arrive."""

    def __init__(self, address):
        super(DrainOneMessageHandler, self).__init__(address)

    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1

            if self.received_count < 4:
                event.receiver.flow(1)
            elif self.received_count == 4:
                # We are issuing a drain of 1 after we receive the 4th message.
                # This means that going forward, we will receive only one more message.
                event.receiver.drain(1)

            # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
            # messages. That along with 5 messages received (4 earlier messages and 1 extra message for drain=1)
            # indicates that the drain worked and we can declare that the test is successful
            if self.received_count == 5 and event.link.credit == 0:
                self.error = None
                self.timer.cancel()
                self.receiver.close()
                self.sender.close()
                self.conn.close()
class DrainNoMessagesHandler(MessagingHandler):
    """Drain when no message is ever sent: the outstanding credit must be
    cancelled (credit drops to 0) without any delivery arriving."""

    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.error = "Unexpected Exit"  # cleared only on success

    def timeout(self):
        self.error = "Timeout Expired"
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_sendable(self, event):
        # Nothing is ever sent; immediately drain the single issued credit.
        self.receiver.drain(1)

    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            # Drain completed with no deliveries -- success.
            self.error = None
            self.timer.cancel()
            self.conn.close()

    def run(self):
        Container(self).run()
class DrainNoMoreMessagesHandler(MessagingHandler):
    """Send and receive one message, then drain: the drain must cancel the
    remaining credit (credit == 0) because no further message is coming."""

    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMoreMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.sent = 0
        self.rcvd = 0
        self.error = "Unexpected Exit"  # cleared only on success

    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d" % (self.sent, self.rcvd)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)

    def on_sendable(self, event):
        # Send exactly one message.
        if self.sent == 0:
            msg = Message(body="Hello World")
            event.sender.send(msg)
            self.sent += 1

    def on_message(self, event):
        self.rcvd += 1

    def on_settled(self, event):
        # The lone delivery settled; now drain the link.
        self.receiver.drain(1)

    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()

    def run(self):
        Container(self).run()
class DrainMessagesMoreHandler(MessagingHandler):
    """
    Make sure the clients can send/receive after going through a drain cycle.
    Send phase
       1. Sender sending first 10 messages
       2. Sender paused waiting for drain to finish
       3. Sender is sending second 10 messages
       4. Sender is done.
    Receive phase
       1. Receiver receiving first four messages; At #4 receiver issues drain 4,20
       2. Reciever receives messages 5..10.
          When 10 messages have been received and link credit =0 the drain is done
          Receiver issues 10 credits
       3. Receiver recieves messages 11..20.
       4. Receiver is done
    At issue in DISPATCH-1055 is that the 10 credits issued in Receive step 2
    are never propagated across a link route to the 'broker'.
    This code is instantiated with and without the link route to demonstrate that
    it works properly when the 'test-router' is handling the drain by itself
    and that it fails only on the link route.
    """

    def __init__(self, address, route_name):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesMoreHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        self.error = "Unexpected Exit"  # cleared only on success
        self.send_phase = 1
        self.recv_phase = 1
        self.route_name = route_name
        self.verbose_printing = False

    def show_state(self):
        # Debug snapshot of the send/receive state machines. (Removed a
        # redundant str(...) wrapper around the concatenation.)
        return ("send_phase:" + str(self.send_phase)
                + ", sent_count:" + str(self.sent_count)
                + ", recv_phase:" + str(self.recv_phase)
                + ", receive_count:" + str(self.received_count)
                + ", receiver_credit:" + str(self.receiver.credit)
                + ", sender_credit:" + str(self.sender.credit))

    def printme(self, text):
        # Gated debug print. The parameter was renamed from ``str``, which
        # shadowed the builtin that show_state() relies on.
        if self.verbose_printing:
            print(text + " " + self.show_state())

    def timeout(self):
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, source=self.route_name)
        self.sender = event.container.create_sender(self.conn, target=self.route_name)
        self.receiver.flow(1)

    def on_link_flow(self, event):
        if event.link.is_sender and event.link.credit \
                and event.link.state & Endpoint.LOCAL_ACTIVE \
                and event.link.state & Endpoint.REMOTE_ACTIVE:
            self.on_sendable(event)

        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked.
        if self.send_phase == 2 and self.received_count == 10 and event.link.credit == 0:
            self.printme("sender transitions to phase 3 - drain completed, send new flow now")
            self.receiver.flow(10)
            self.send_phase = 3
            if event.link.is_sender and event.link.credit \
                    and event.link.state & Endpoint.LOCAL_ACTIVE \
                    and event.link.state & Endpoint.REMOTE_ACTIVE:
                self.on_sendable(event)

        self.printme(("sender " if event.link.is_sender else "receiver ") + "exit on_link_flow:")

    def on_sendable(self, event):
        if event.link.is_sender and self.send_phase == 1 and self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
            if self.sent_count == 10:
                self.printme("sender transitions to phase 2 - wait for drain to finish")
                self.send_phase = 2
        elif event.link.is_sender and self.send_phase == 3 and self.sent_count < 20:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
            if self.sent_count == 20:
                self.printme("sender transitions to phase 4 - done sending")
                self.send_phase = 4
        self.printme(("sender " if event.link.is_sender else "receiver ") + "exit on_sendable:")

    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1

            if self.recv_phase == 1 and self.received_count < 4:
                event.receiver.flow(1)
            elif self.recv_phase == 1 and self.received_count == 4:
                # We are issuing a drain of 20. This means that we will receive all the 10 messages
                # that the sender is sending. The router will also send back a response flow frame with
                # drain=True but I don't have any way of making sure that the response frame reached the
                # receiver
                self.printme("receiver transitions to phase 2 - sending drain now")
                event.receiver.drain(20)
                self.recv_phase = 2
            elif self.recv_phase == 2 and self.received_count == 10:
                self.printme("receiver transitions to phase 3")
                self.recv_phase = 3
                msg = Message(body="Hello World", properties={'seq': self.sent_count})
                dlv = self.sender.send(msg)
                dlv.settle()
                self.sent_count += 1
            elif self.recv_phase == 3 and self.received_count == 20:
                self.printme("receiver transitions to phase 4 - test is completed successfully")
                self.recv_phase = 4
                self.error = None
                self.timer.cancel()
                self.receiver.close()
                self.sender.close()
                self.conn.close()
        self.printme("exit on_message:")

    def run(self):
        Container(self).run()
if __name__ == '__main__':
    # Delegate to the system-test harness runner.
    unittest.main(main_module())
| irinabov/debian-qpid-dispatch | tests/system_tests_drain_support.py | Python | apache-2.0 | 14,346 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import BitcoinTestFramework
import decimal
class RpcCreateMultiSigTest(BitcoinTestFramework):
    """Exercise createmultisig/addmultisigaddress for each n-of-m and
    address-type combination, then spend from the resulting address via
    signrawtransactionwithkey and verify final balances."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3

    def get_keys(self):
        """Collect ``self.nkeys`` fresh keys from node1 and a payout address
        on node2; results are stored on ``self``."""
        node0, node1, node2 = self.nodes
        self.add = [node1.getnewaddress() for _ in range(self.nkeys)]
        self.pub = [node1.getaddressinfo(a)["pubkey"] for a in self.add]
        self.priv = [node1.dumpprivkey(a) for a in self.add]
        self.final = node2.getnewaddress()

    def run_test(self):
        node0, node1, node2 = self.nodes

        # 50 BTC each, rest will be 25 BTC each
        node0.generate(149)
        self.sync_all()

        self.moved = 0
        # Sweep every n-of-m / output-type combination.
        for self.nkeys in [3, 5]:
            for self.nsigs in [2, 3]:
                for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
                    self.get_keys()
                    self.do_multisig()

        self.checkbalances()

    def checkbalances(self):
        """Confirm every multisig spend landed on node2 and totals add up."""
        node0, node1, node2 = self.nodes
        node0.generate(100)  # mature the remaining coinbase outputs
        self.sync_all()

        bal0 = node0.getbalance()
        bal1 = node1.getbalance()
        bal2 = node2.getbalance()

        height = node0.getblockchaininfo()["blocks"]
        assert 150 < height < 350
        total = 149 * 50 + (height - 149 - 100) * 25
        assert bal1 == 0
        assert bal2 == self.moved
        assert bal0 + bal1 + bal2 == total

    def do_multisig(self):
        """Create one multisig address, fund it, and spend from it with the
        minimum number of signatures."""
        node0, node1, node2 = self.nodes

        msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
        madd = msig["address"]
        mredeem = msig["redeemScript"]
        if self.output_type == 'bech32':
            assert madd[0:4] == "bcrt"  # actually a bech32 address

        # compare against addmultisigaddress
        msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
        maddw = msigw["address"]
        mredeemw = msigw["redeemScript"]
        # addmultisigiaddress and createmultisig work the same
        assert maddw == madd
        assert mredeemw == mredeem

        txid = node0.sendtoaddress(madd, 40)

        tx = node0.getrawtransaction(txid, True)
        # Locate the single output paying the multisig address.
        vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses", [])]
        assert len(vout) == 1
        vout = vout[0]
        scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
        value = tx["vout"][vout]["value"]
        prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]

        node0.generate(1)

        outval = value - decimal.Decimal("0.00001000")  # leave room for the fee
        rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])

        # Sign with nsigs-1 keys first, then complete with the last key.
        rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
        rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)

        self.moved += outval
        tx = node0.sendrawtransaction(rawtx3["hex"], True)
        blk = node0.generate(1)[0]
        assert tx in node0.getblock(blk)["tx"]

        txinfo = node0.getrawtransaction(tx, True, blk)
        self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
| Flowdalic/bitcoin | test/functional/rpc_createmultisig.py | Python | mit | 3,657 |
# -*- coding: utf-8 -*-
# _____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <[email protected]>
#_____________________________________________________________________________
#
##---IMPORTS
try:
import unittest2 as ut
except ImportError:
import unittest as ut
from numpy.testing import assert_equal, assert_almost_equal
import scipy as sp
from botmpy.common.mcfilter.mcfilter_cy import (
_mcfilter_cy32, _mcfilter_cy64, _mcfilter_hist_cy32, _mcfilter_hist_cy64)
from botmpy.common.mcfilter.mcfilter_py import (
_mcfilter_py, _mcfilter_hist_py)
##---TESTS
class TestMcFilter(ut.TestCase):
    """Tests for the multi-channel FIR filter implementations.

    Compares the 32/64-bit Cython implementations (`_mcfilter_hist_cy32`,
    `_mcfilter_hist_cy64`) against the pure-Python reference
    (`_mcfilter_hist_py`, `_mcfilter_py`) on identical inputs, and checks
    the history-buffer handling of the chunked (``_hist``) variants.
    """
    def testHistoryCy32(self):
        """test history item"""
        tf = 3
        nc = 2
        data = sp.randn(100, nc).astype(sp.float32)
        filt = sp.ones((tf, nc), dtype=sp.float32)
        hist = sp.zeros((tf - 1, nc), dtype=sp.float32)
        fout, hist = _mcfilter_hist_cy32(data, filt, hist)
        # returned history must hold the last tf-1 samples of the input
        assert_equal(hist, data[-(tf - 1):])
    def testHistoryCy64(self):
        """test history item"""
        tf = 3
        nc = 2
        data = sp.randn(100, nc).astype(sp.float64)
        filt = sp.ones((tf, nc), dtype=sp.float64)
        hist = sp.zeros((tf - 1, nc), dtype=sp.float64)
        fout, hist = _mcfilter_hist_cy64(data, filt, hist)
        assert_equal(hist, data[-(tf - 1):])
    def testPyVsCyOnesCy32(self):
        """test python and cython, float"""
        tf = 3
        nc = 2
        data = sp.ones((20, nc), dtype=sp.float32)
        filt = sp.ones((tf, nc), dtype=sp.float32)
        hist = sp.ones((tf - 1, nc), dtype=sp.float32)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist)
        focy, hocy = _mcfilter_hist_cy32(data, filt, hist)
        assert_almost_equal(fopy, focy)
    def testPyVsCyOnesCy64(self):
        """test python and cython, double"""
        tf = 3
        nc = 2
        data = sp.ones((20, nc), dtype=sp.float64)
        filt = sp.ones((tf, nc), dtype=sp.float64)
        hist = sp.ones((tf - 1, nc), dtype=sp.float64)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist)
        focy, hocy = _mcfilter_hist_cy64(data, filt, hist)
        assert_almost_equal(fopy, focy)
    def testPyVsCyRandnCy32(self):
        """test python and cython"""
        tf = 3
        nc = 2
        data = sp.randn(20, nc).astype(sp.float32)
        filt = sp.ones((tf, nc), dtype=sp.float32)
        hist_py = sp.ones((tf - 1, nc), dtype=sp.float32)
        hist_cy = sp.ones((tf - 1, nc), dtype=sp.float32)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
        focy, hocy = _mcfilter_hist_cy32(data, filt, hist_cy)
        # float32 accumulation differs slightly from python float64 path
        assert_almost_equal(fopy, focy, decimal=5)
    def testPyVsCyRandnCy64(self):
        """test python and cython"""
        tf = 3
        nc = 2
        data = sp.randn(20, nc).astype(sp.float64)
        filt = sp.ones((tf, nc), dtype=sp.float64)
        hist_py = sp.ones((tf - 1, nc), dtype=sp.float64)
        hist_cy = sp.ones((tf - 1, nc), dtype=sp.float64)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
        focy, hocy = _mcfilter_hist_cy64(data, filt, hist_cy)
        assert_almost_equal(fopy, focy)
    def testStepsCy32(self):
        # repeated 1,2,3 staircase input, averaging filter
        tf = 3
        nc = 2
        data = sp.vstack([sp.concatenate(
            [sp.arange(1, 4)] * 5)] * 2).T.astype(sp.float32)
        filt = sp.ones((tf, nc), dtype=sp.float32) / float(tf)
        hist_py = sp.ones((tf - 1, nc), dtype=sp.float32)
        hist_cy = sp.ones((tf - 1, nc), dtype=sp.float32)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
        focy, hocy = _mcfilter_hist_cy32(data, filt, hist_cy)
        assert_almost_equal(fopy, focy)
    def testStepsCy64(self):
        tf = 3
        nc = 2
        data = sp.vstack([sp.concatenate(
            [sp.arange(1, 4)] * 5)] * 2).T.astype(sp.float64)
        filt = sp.ones((tf, nc), dtype=sp.float64) / float(tf)
        hist_py = sp.ones((tf - 1, nc), dtype=sp.float64)
        hist_cy = sp.ones((tf - 1, nc), dtype=sp.float64)
        fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
        focy, hocy = _mcfilter_hist_cy64(data, filt, hist_cy)
        assert_almost_equal(fopy, focy)
    def testDataConcatenationCy32(self):
        # a unit-impulse filter must reproduce the input, shifted by tf//2
        data = sp.zeros((100, 1), dtype=sp.float32)
        data[sp.arange(0, 100, 10)] = 1.0
        filt = sp.zeros((5, 1), dtype=sp.float32)
        filt[2] = 1.0
        hist = sp.zeros((4, 1), dtype=sp.float32)
        fout = _mcfilter_hist_cy32(data, filt, hist)[0]
        cut = int(sp.floor(5.0 / 2))
        assert_equal(data[:-cut], sp.array([fout[cut:]]).T)
    def testDataConcatenationCy64(self):
        data = sp.zeros((100, 1), dtype=sp.float64)
        data[sp.arange(0, 100, 10)] = 1.0
        filt = sp.zeros((5, 1), dtype=sp.float64)
        filt[2] = 1.0
        hist = sp.zeros((4, 1), dtype=sp.float64)
        fout = _mcfilter_hist_cy64(data, filt, hist)[0]
        cut = int(sp.floor(5.0 / 2))
        assert_equal(data[:-cut], sp.array([fout[cut:]]).T)
    def testMcfilterRecoveryPy(self):
        # non-chunked python filter with a centered impulse recovers the input
        data = sp.zeros((100, 1), dtype=sp.float64)
        data[sp.arange(0, 100, 10)] = 1.0
        filt = sp.zeros((5, 1), dtype=sp.float64)
        filt[2] = 1.0
        fout = _mcfilter_py(data, filt)
        self.assertTupleEqual(data.shape, (fout.shape[0], 1))
        assert_equal(data, sp.array([fout]).T)
"""
def mcfilter_hist_py_test(inp=None, plot=False):
if inp is None:
# test setup
TF = 10
NC = 2
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi,
TF))] * NC).T * 5
LEN = 2000
noise = sp.randn(LEN, NC)
# build signal
signal = sp.zeros_like(noise)
NPOS = 3
POS = [int(i * LEN / (NPOS + 1)) for i in xrange(1, NPOS + 1)]
for i in xrange(NPOS):
signal[POS[i]:POS[i] + TF] += xi
x = signal + noise
else:
x, xi = inp
TF, NC = xi.shape
ns = x.shape[0]
step = 200
chunks = [x[i * step:(i + 1) * step] for i in xrange(ns / step)]
fouts = []
h = None
for chunk in chunks:
r, h = _mcfilter_hist_py(chunk, xi, h)
fouts.append(r)
if plot:
from spikeplot import mcdata
other = sp.atleast_2d(sp.concatenate(fouts)).T
other = sp.vstack([other, sp.zeros((int(TF / 2 - 1), 1))])[
int(TF / 2 - 1):, :]
mcdata(x, other=other)
def mcfilter_hist_c_test(inp=None, plot=False):
if _mcfilter_hist_cy is None:
print 'No clib loaded! returning'
return
if inp is None:
# test setup
TF = 10
NC = 2
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi,
TF))] * NC).T * 5
LEN = 2000
noise = sp.randn(LEN, NC)
# build signal
signal = sp.zeros_like(noise)
NPOS = 3
POS = [int(i * LEN / (NPOS + 1)) for i in xrange(1, NPOS + 1)]
for i in xrange(NPOS):
signal[POS[i]:POS[i] + TF] += xi
x = signal + noise
else:
x, xi = inp
ns = x.shape[0]
step = 200
chunks = [x[i * step:(i + 1) * step] for i in xrange(ns / step)]
fouts = []
h = sp.zeros((xi.shape[0], xi.shape[1]), dtype=sp.float32)
# r = sp.array([0] * ns, dtype=sp.float32)
for chunk in chunks:
r, h = _mcfilter_hist_cy(chunk, sp.ascontiguousarray(xi), h)
fouts.append(r)
if plot:
from spikeplot import mcdata
mcdata(x, other=sp.atleast_2d(sp.concatenate(fouts)).T)
def gen_data(ns=200000, nc=4, tf=65):
# test setup
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi, tf))] * nc).T * 7
signal = sp.randn(ns, nc).astype(sp.float32)
# build signal
pos = [50 + i for i in xrange(1, ns, 4 * tf - 50)]
if pos[-1] + tf > ns:
pos.pop(-1)
for i in xrange(len(pos)):
signal[pos[i]:pos[i] + tf, :] += xi
return signal, tf, nc, xi.astype(sp.float32)
if __name__ == '__main__':
# generate some data
sig, tf, nc, xi = gen_data(64000)
# python conventional test
mcfilter_hist_py_test((sig, xi), plot=True)
mcfilter_hist_c_test((sig, xi), plot=True)
# import cProfile
# cProfile.run('mcfilter_hist_py_test((sig, xi), plot=False)')
# cProfile.run('mcfilter_hist_c_test((sig, xi), plot=False)')
"""
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    ut.main()
| pmeier82/BOTMpy | botmpy/test/test_common_mcfilter.py | Python | mit | 10,365 |
from osv import osv, fields
from openerp.tools.translate import _
class spree_product(osv.osv):
    """Extend ``product.product`` with fields used for Spree synchronisation."""
    _name = "product.product"
    _inherit = "product.product"

    def _get_default_code(self, cr, uid, context=None):
        """Return the ``default_code`` of the product identified by the context.

        NOTE(review): assumes the caller places the record id in
        ``context['id']`` — verify against the callers before relying on it.
        """
        # fix: removed leftover debug ``print context`` statement, which
        # spammed the server log on every call
        return self.pool.get('product.product').read(cr, uid, context['id'], ['default_code'])['default_code']

    _columns = {
        # set while the product still has to be imported into Spree
        'waiting_spree_import': fields.boolean('Waiting spree import', required=False),
        # URL slug of the product on the Spree store
        'permalink': fields.char('Spree permalink', required=False)
    }
    _defaults = {
        'waiting_spree_import': True,
        #'permalink': lambda s,cr,uid, c: s._get_default_code(cr, uid, context=c) or ''
    }
spree_product()
class spree_stock_picking_out(osv.osv):
    """Extend ``stock.picking.out`` with a flag marking shipments already reported to Spree."""
    _name="stock.picking.out"
    _inherit="stock.picking.out"
    _columns = {
        # set once the delivery order has been pushed to the Spree store
        'shipment_reported_spree': fields.boolean('Shipment reported to Spree', required=False),
    }
    _defaults = {
        'shipment_reported_spree': False,
    }
spree_stock_picking_out()
class spree_stock_picking(osv.osv):
    """Extend ``stock.picking`` with the same Spree shipment-reporting flag."""
    _name="stock.picking"
    _inherit="stock.picking"
    _columns = {
        # set once the picking has been pushed to the Spree store
        'shipment_reported_spree': fields.boolean('Shipment reported to Spree', required=False),
    }
    _defaults = {
        'shipment_reported_spree': False,
    }
spree_stock_picking() | OpenSolutionsFinland/spree_commerce | product.py | Python | agpl-3.0 | 1,356 |
#python
import testing
# Build the standard mesh-source test document with a NurbsCurve source.
setup = testing.setup_mesh_source_test("NurbsCurve")
# The generated mesh must be structurally valid ...
testing.require_valid_mesh(setup.document, setup.source.get_property("output_mesh"))
# ... and must match the stored reference "mesh.source.NurbsCurve"
# (the final argument, 2, is passed to require_similar_mesh — presumably
# a comparison tolerance/precision; confirm against the testing module).
testing.require_similar_mesh(setup.document, setup.source.get_property("output_mesh"), "mesh.source.NurbsCurve", 2)
| barche/k3d | tests/mesh/mesh.source.NurbsCurve.py | Python | gpl-2.0 | 281 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# Copyright (C) 2006-2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. Please read the COPYING file.
#
import os
import sys
import glob
import shutil
from distutils.core import setup
from distutils.command.install import install
from scom import __version__ as version
distfiles = """
setup.py
scom/*.py
"""
def make_dist():
    """Build the source tarball ``scom-api-<version>.tar.gz``.

    Copies every file matched by the ``distfiles`` patterns into a staging
    directory, archives it with tar, then removes the staging directory.
    """
    distdir = "scom-api-%s" % version
    # fix: renamed the accumulator, which previously shadowed builtin `list`
    files = []
    for pattern in distfiles.split():
        files.extend(glob.glob(pattern))
    if os.path.exists(distdir):
        shutil.rmtree(distdir)
    os.mkdir(distdir)
    for file_ in files:
        # recreate the file's directory hierarchy inside the staging dir
        cum = distdir
        for d in os.path.dirname(file_).split('/'):
            dn = os.path.join(cum, d)
            cum = dn
            if not os.path.exists(dn):
                os.mkdir(dn)
        shutil.copy(file_, os.path.join(distdir, file_))
    # fix: os.popen() returned without waiting for tar, so the staging tree
    # could be removed below while tar was still reading it; check_call
    # blocks until the archive is complete (and raises on failure).
    import subprocess
    subprocess.check_call(["tar", "-czf", "scom-api-" + version + ".tar.gz", distdir])
    shutil.rmtree(distdir)
if "dist" in sys.argv:
make_dist()
sys.exit(0)
class Install(install):
    """Install command that redirects library paths on Pisi Linux systems."""
    def finalize_options(self):
        # NOTE: for Pardus distribution
        if os.path.exists("/etc/pisilinux-release"):
            self.install_platlib = '$base/lib/pisilinux'
            self.install_purelib = '$base/lib/pisilinux'
        install.finalize_options(self)
    def run(self):
        install.run(self)
# Register the package with distutils; the custom Install command above
# adjusts the install paths when running on Pisi Linux.
setup(
    name = 'scom',
    version = version,
    description = 'SCOM API Functions',
    url = 'http://www.pardus.org.tr/projeler/scom',
    license = 'GNU GPL2',
    package_dir = { '': '' },
    packages = [ 'scom' ],
    cmdclass = {
        'install' : Install
    }
)
| fuxprojesi/scom | api/setup.py | Python | gpl-3.0 | 1,890 |
# orm/path_registry.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Path tracking utilities, representing mapper graph traversals.
"""
from __future__ import annotations
from functools import reduce
from itertools import chain
import logging
from typing import Any
from typing import Tuple
from typing import Union
from . import base as orm_base
from .. import exc
from .. import inspection
from .. import util
from ..sql import visitors
from ..sql.cache_key import HasCacheKey
log = logging.getLogger(__name__)
def _unreduce_path(path):
    """Unpickling helper: rebuild a PathRegistry from its serialized form."""
    return PathRegistry.deserialize(path)
_WILDCARD_TOKEN = "*"
_DEFAULT_TOKEN = "_sa_default"
class PathRegistry(HasCacheKey):
    """Represent query load paths and registry functions.
    Basically represents structures like:
    (<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
    These structures are generated by things like
    query options (joinedload(), subqueryload(), etc.) and are
    used to compose keys stored in the query._attributes dictionary
    for various options.
    They are then re-composed at query compile/result row time as
    the query is formed and as rows are fetched, where they again
    serve to compose keys to look up options in the context.attributes
    dictionary, which is copied from query._attributes.
    The path structure has a limited amount of caching, where each
    "root" ultimately pulls from a fixed registry associated with
    the first mapper, that also contains elements for each of its
    property keys. However paths longer than two elements, which
    are the exception rather than the rule, are generated on an
    as-needed basis.
    """
    __slots__ = ()
    # flags describing what kind of path element a subclass represents
    is_token = False
    is_root = False
    has_entity = False
    path: Tuple
    natural_path: Tuple
    parent: Union["PathRegistry", None]
    root: "PathRegistry"
    _cache_key_traversal = [
        ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key_list)
    ]
    def __eq__(self, other):
        """Equality is based on the ``path`` tuple; unsupported types warn."""
        try:
            return other is not None and self.path == other._path_for_compare
        except AttributeError:
            util.warn(
                "Comparison of PathRegistry to %r is not supported"
                % (type(other))
            )
            return False
    def __ne__(self, other):
        try:
            return other is None or self.path != other._path_for_compare
        except AttributeError:
            util.warn(
                "Comparison of PathRegistry to %r is not supported"
                % (type(other))
            )
            return True
    @property
    def _path_for_compare(self):
        return self.path
    def set(self, attributes, key, value):
        """Store *value* under ``(key, natural_path)`` in *attributes*."""
        log.debug("set '%s' on path '%s' to '%s'", key, self, value)
        attributes[(key, self.natural_path)] = value
    def setdefault(self, attributes, key, value):
        """Store *value* under ``(key, natural_path)`` only if not already set."""
        log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
        attributes.setdefault((key, self.natural_path), value)
    def get(self, attributes, key, value=None):
        """Look up ``(key, natural_path)`` in *attributes*, defaulting to *value*."""
        key = (key, self.natural_path)
        if key in attributes:
            return attributes[key]
        else:
            return value
    def __len__(self):
        return len(self.path)
    def __hash__(self):
        # hash by object identity; equality above compares path tuples
        return id(self)
    def __getitem__(self, key: Any) -> "PathRegistry":
        # indexing semantics are provided by the concrete subclasses
        raise NotImplementedError()
    @property
    def length(self):
        return len(self.path)
    def pairs(self):
        """Yield successive (entity, key) pairs from the path tuple."""
        path = self.path
        for i in range(0, len(path), 2):
            yield path[i], path[i + 1]
    def contains_mapper(self, mapper):
        """Return True if any entity element of the path is or inherits *mapper*."""
        for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]:
            if path_mapper.is_mapper and path_mapper.isa(mapper):
                return True
        else:
            return False
    def contains(self, attributes, key):
        return (key, self.path) in attributes
    def __reduce__(self):
        # pickle support: round-trip through serialize()/_unreduce_path()
        return _unreduce_path, (self.serialize(),)
    @classmethod
    def _serialize_path(cls, path):
        """Reduce a path tuple to a list of (class, attribute-key) pairs."""
        return list(
            zip(
                [
                    m.class_ if (m.is_mapper or m.is_aliased_class) else str(m)
                    for m in [path[i] for i in range(0, len(path), 2)]
                ],
                [
                    path[i].key if (path[i].is_property) else str(path[i])
                    for i in range(1, len(path), 2)
                ]
                + [None],
            )
        )
    @classmethod
    def _deserialize_path(cls, path):
        """Rebuild a path tuple from the (class, key) pairs of _serialize_path()."""
        def _deserialize_mapper_token(mcls):
            return (
                # note: we likely dont want configure=True here however
                # this is maintained at the moment for backwards compatibility
                orm_base._inspect_mapped_class(mcls, configure=True)
                if mcls not in PathToken._intern
                else PathToken._intern[mcls]
            )
        def _deserialize_key_token(mcls, key):
            if key is None:
                return None
            elif key in PathToken._intern:
                return PathToken._intern[key]
            else:
                return orm_base._inspect_mapped_class(
                    mcls, configure=True
                ).attrs[key]
        p = tuple(
            chain(
                *[
                    (
                        _deserialize_mapper_token(mcls),
                        _deserialize_key_token(mcls, key),
                    )
                    for mcls, key in path
                ]
            )
        )
        # odd-length paths end on an entity; strip the padding None
        if p and p[-1] is None:
            p = p[0:-1]
        return p
    def serialize(self):
        path = self.path
        return self._serialize_path(path)
    @classmethod
    def deserialize(cls, path: Tuple) -> "PathRegistry":
        """Reconstruct a PathRegistry from a serialized path."""
        assert path is not None
        p = cls._deserialize_path(path)
        return cls.coerce(p)
    @classmethod
    def per_mapper(cls, mapper):
        # plain mappers get the caching (cycle-creating) registry,
        # aliased classes the lightweight slots-based one
        if mapper.is_mapper:
            return CachingEntityRegistry(cls.root, mapper)
        else:
            return SlotsEntityRegistry(cls.root, mapper)
    @classmethod
    def coerce(cls, raw):
        """Turn a raw path tuple into a PathRegistry by chained indexing from root."""
        return reduce(lambda prev, next: prev[next], raw, cls.root)
    def token(self, token):
        """Return a TokenRegistry for a ``:*`` or ``:_sa_default`` suffixed token."""
        if token.endswith(f":{_WILDCARD_TOKEN}"):
            return TokenRegistry(self, token)
        elif token.endswith(f":{_DEFAULT_TOKEN}"):
            # default tokens are always rooted at the registry root
            return TokenRegistry(self.root, token)
        else:
            raise exc.ArgumentError(f"invalid token: {token}")
    def __add__(self, other):
        # concatenate: index element-by-element along other's path
        return reduce(lambda prev, next: prev[next], other.path, self)
    def __str__(self):
        return f"ORM Path[{' -> '.join(str(elem) for elem in self.path)}]"
    def __repr__(self):
        return f"{self.__class__.__name__}({self.path!r})"
class RootRegistry(PathRegistry):
    """Root registry, defers to mappers so that
    paths are maintained per-root-mapper.
    """
    inherit_cache = True
    # the root path is empty
    path = natural_path = ()
    has_entity = False
    is_aliased_class = False
    is_root = True
    def __getitem__(self, entity):
        # interned string tokens resolve to a TokenRegistry; anything else
        # is treated as a mapped entity and delegates to its path registry
        if entity in PathToken._intern:
            return TokenRegistry(self, PathToken._intern[entity])
        else:
            return inspection.inspect(entity)._path_registry
# Install the singleton root; PathRegistry classmethods index from cls.root.
PathRegistry.root = RootRegistry()
class PathToken(orm_base.InspectionAttr, HasCacheKey, str):
    """An interned string used as a token element within a path.

    Equal token strings share a single instance via the :attr:`._intern`
    cache, so identity comparison and dict lookups stay cheap.
    """

    _intern = {}

    def _gen_cache_key(self, anon_map, bindparams):
        return (str(self),)

    @property
    def _path_for_compare(self):
        # tokens never participate in path equality comparison
        return None

    @classmethod
    def intern(cls, strvalue):
        try:
            return cls._intern[strvalue]
        except KeyError:
            token = cls._intern[strvalue] = PathToken(strvalue)
            return token
class TokenRegistry(PathRegistry):
    """A path element representing a string token rather than a mapped entity."""
    __slots__ = ("token", "parent", "path", "natural_path")
    inherit_cache = True
    def __init__(self, parent, token):
        # intern so equal token strings share one object
        token = PathToken.intern(token)
        self.token = token
        self.parent = parent
        self.path = parent.path + (token,)
        self.natural_path = parent.natural_path + (token,)
    has_entity = False
    is_token = True
    def generate_for_superclasses(self):
        """Yield this token re-rooted for each superclass / polymorphic sub-entity."""
        if not self.parent.is_aliased_class and not self.parent.is_root:
            for ent in self.parent.mapper.iterate_to_root():
                yield TokenRegistry(self.parent.parent[ent], self.token)
        elif (
            self.parent.is_aliased_class
            and self.parent.entity._is_with_polymorphic
        ):
            yield self
            for ent in self.parent.entity._with_polymorphic_entities:
                yield TokenRegistry(self.parent.parent[ent], self.token)
        else:
            yield self
    def __getitem__(self, entity):
        # only integer / slice indexing into the raw path tuple is supported
        try:
            return self.path[entity]
        except TypeError as err:
            raise IndexError(f"{entity}") from err
class PropRegistry(PathRegistry):
    """A path element representing a MapperProperty appended to an entity path."""
    # True when self.path diverges from self.natural_path (see __init__)
    is_unnatural = False
    inherit_cache = True
    def __init__(self, parent, prop):
        # restate this path in terms of the
        # given MapperProperty's parent.
        insp = inspection.inspect(parent[-1])
        natural_parent = parent
        if not insp.is_aliased_class or insp._use_mapper_path:
            parent = natural_parent = parent.parent[prop.parent]
        elif (
            insp.is_aliased_class
            and insp.with_polymorphic_mappers
            and prop.parent in insp.with_polymorphic_mappers
        ):
            subclass_entity = parent[-1]._entity_for_mapper(prop.parent)
            parent = parent.parent[subclass_entity]
            # when building a path where with_polymorphic() is in use,
            # special logic to determine the "natural path" when subclass
            # entities are used.
            #
            # here we are trying to distinguish between a path that starts
            # on a the with_polymorhpic entity vs. one that starts on a
            # normal entity that introduces a with_polymorphic() in the
            # middle using of_type():
            #
            # # as in test_polymorphic_rel->
            # #    test_subqueryload_on_subclass_uses_path_correctly
            # wp = with_polymorphic(RegularEntity, "*")
            # sess.query(wp).options(someload(wp.SomeSubEntity.foos))
            #
            # vs
            #
            # # as in test_relationship->JoinedloadWPolyOfTypeContinued
            # wp = with_polymorphic(SomeFoo, "*")
            # sess.query(RegularEntity).options(
            #       someload(RegularEntity.foos.of_type(wp))
            #       .someload(wp.SubFoo.bar)
            # )
            #
            # in the former case, the Query as it generates a path that we
            # want to match will be in terms of the with_polymorphic at the
            # beginning.  in the latter case, Query will generate simple
            # paths that don't know about this with_polymorphic, so we must
            # use a separate natural path.
            #
            #
            if parent.parent:
                natural_parent = parent.parent[subclass_entity.mapper]
                self.is_unnatural = True
            else:
                natural_parent = parent
        elif (
            natural_parent.parent
            and insp.is_aliased_class
            and prop.parent  # this should always be the case here
            is not insp.mapper
            and insp.mapper.isa(prop.parent)
        ):
            natural_parent = parent.parent[prop.parent]
        self.prop = prop
        self.parent = parent
        self.path = parent.path + (prop,)
        self.natural_path = natural_parent.natural_path + (prop,)
        # pre-computed keys used when looking up loader options
        self._wildcard_path_loader_key = (
            "loader",
            parent.path + self.prop._wildcard_token,
        )
        self._default_path_loader_key = self.prop._default_path_loader_key
        self._loader_key = ("loader", self.natural_path)
    @util.memoized_property
    def has_entity(self):
        return self.prop._links_to_entity
    @util.memoized_property
    def entity(self):
        return self.prop.entity
    @property
    def mapper(self):
        return self.prop.mapper
    @property
    def entity_path(self):
        # extend this path with the property's target entity
        return self[self.entity]
    def __getitem__(self, entity):
        if isinstance(entity, (int, slice)):
            return self.path[entity]
        else:
            return SlotsEntityRegistry(self, entity)
class AbstractEntityRegistry(PathRegistry):
    """Base for path elements that represent a mapped entity (or alias)."""
    __slots__ = ()
    has_entity = True
    def __init__(self, parent, entity):
        self.key = entity
        self.parent = parent
        self.is_aliased_class = entity.is_aliased_class
        self.entity = entity
        self.path = parent.path + (entity,)
        # the "natural path" is the path that we get when Query is traversing
        # from the lead entities into the various relationships; it corresponds
        # to the structure of mappers and relationships. when we are given a
        # path that comes from loader options, as of 1.3 it can have ac-hoc
        # with_polymorphic() and other AliasedInsp objects inside of it, which
        # are usually not present in mappings.  So here we track both the
        # "enhanced" path in self.path and the "natural" path that doesn't
        # include those objects so these two traversals can be matched up.
        # the test here for "(self.is_aliased_class or parent.is_unnatural)"
        # are to avoid the more expensive conditional logic that follows if we
        # know we don't have to do it.  This conditional can just as well be
        # "if parent.path:", it just is more function calls.
        if parent.path and (self.is_aliased_class or parent.is_unnatural):
            # this is an infrequent code path used only for loader strategies
            # that also make use of of_type().
            if entity.mapper.isa(parent.natural_path[-1].entity):
                self.natural_path = parent.natural_path + (entity.mapper,)
            else:
                self.natural_path = parent.natural_path + (
                    parent.natural_path[-1].entity,
                )
        # it seems to make sense that since these paths get mixed up
        # with statements that are cached or not, we should make
        # sure the natural path is cacheable across different occurrences
        # of equivalent AliasedClass objects.  however, so far this
        # does not seem to be needed for whatever reason.
        # elif not parent.path and self.is_aliased_class:
        #     self.natural_path = (self.entity._generate_cache_key()[0], )
        else:
            # self.natural_path = parent.natural_path + (entity, )
            self.natural_path = self.path
    @property
    def entity_path(self):
        # an entity registry already terminates on an entity
        return self
    @property
    def mapper(self):
        return inspection.inspect(self.entity).mapper
    def __bool__(self):
        return True
    __nonzero__ = __bool__
    def __getitem__(self, entity):
        if isinstance(entity, (int, slice)):
            return self.path[entity]
        elif entity in PathToken._intern:
            return TokenRegistry(self, PathToken._intern[entity])
        else:
            return PropRegistry(self, entity)
class SlotsEntityRegistry(AbstractEntityRegistry):
    """Lightweight, non-caching entity registry used for aliased classes."""
    # for aliased class, return lightweight, no-cycles created
    # version
    inherit_cache = True
    __slots__ = (
        "key",
        "parent",
        "is_aliased_class",
        "entity",
        "path",
        "natural_path",
    )
class CachingEntityRegistry(AbstractEntityRegistry, dict):
    """Dict-backed entity registry used for long-lived mappers.

    Child PropRegistry objects are cached per key; the resulting reference
    cycles are acceptable for mapper-lifetime objects.
    """

    inherit_cache = True

    def __getitem__(self, entity):
        if isinstance(entity, (int, slice)):
            # integer / slice indexing addresses the raw path tuple
            return self.path[entity]
        if isinstance(entity, PathToken):
            return TokenRegistry(self, entity)
        return dict.__getitem__(self, entity)

    def __missing__(self, key):
        # build, cache and return the child registry in one step
        item = self[key] = PropRegistry(self, key)
        return item
| sqlalchemy/sqlalchemy | lib/sqlalchemy/orm/path_registry.py | Python | mit | 16,453 |
# -*- coding: utf-8 -*-
from functools import wraps
from pollirio import commands
def old_expose(cmd):
    """Legacy variant of :func:`expose`: registers *fn* lazily, on first call.

    Deprecated — prefer :func:`expose`, which registers at import time.
    """
    def inner(fn):
        def wrapped(*args, **kwargs):
            # fix: register in the {"func", "args"} mapping shape that
            # plugin_run() and check_args() expect; the old code stored the
            # bare function, which those helpers cannot handle
            commands[cmd] = {"func": fn, "args": None}
            # fix: forward keyword arguments and propagate the return value,
            # both of which were previously dropped
            return fn(*args, **kwargs)
        return wraps(fn)(wrapped)
    return inner
def expose(cmd, args=None):
    """Register the decorated function as chat command *cmd*.

    ``args`` is the minimum number of arguments the command requires, or
    None when it has no mandatory arguments (see :func:`check_args`).
    """
    def register(fn):
        entry = {"func": fn, "args": args}
        commands[cmd] = entry
        return fn
    return register
def plugin_run(name, *args):
    """Invoke the registered command *name* with *args*; None for unknown names."""
    if name not in commands:
        return None
    return commands[name]["func"](*args)
def check_args(name, bot, ievent):
    """Return True when *ievent* carries enough arguments for command *name*.

    Unknown commands yield False.  Commands that declare no required
    argument count always pass.  When arguments are missing, the command's
    docstring is sent back to the channel as usage help and False is
    returned.
    """
    if name not in commands:
        return False
    entry = commands[name]
    required = entry["args"]
    if not required:
        return True
    # TODO: check if we have all the arguments
    if len(ievent.args) < required:
        bot.msg(ievent.channel, "%s: %s" % (ievent.nick, entry["func"].__doc__))
        return False
    return True
from lart import *
from perla import *
from polygen import *
#from bts import *
from misc import *
from channel import *
from quotes import *
from erepublik import *
from erep_ei import *
| dpaleino/pollirio | pollirio/modules/__init__.py | Python | mit | 1,212 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import random
from pymatgen.util.num import abs_cap, min_max_indexes, round_to_sigfigs
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '9/25/14'
class FuncTestCase(unittest.TestCase):
    """Unit tests for pymatgen.util.num helpers: abs_cap, min_max_indexes,
    round_to_sigfigs."""
    def test_abs_cap(self):
        # values just beyond +/-1 are clipped to the default cap of 1
        self.assertEqual(abs_cap(1.000000001), 1.0)
        self.assertEqual(abs_cap(-1.000000001), -1.0)
        # values inside the cap pass through unchanged
        v = random.uniform(-1, 1)
        self.assertEqual(abs_cap(v), v)
        # a custom cap of 2 widens the allowed range
        self.assertEqual(abs_cap(1.000000001, 2), 1.000000001)
        self.assertEqual(abs_cap(-2.000000001, 2), -2.0)
    def test_min_max_indexes(self):
        # returns the indexes of the smallest ('a') and largest ('z') items
        val = ['b', 'a', 'm', 'z', 'y']
        min_ind, max_ind = min_max_indexes(val)
        self.assertEqual(min_ind, 1)
        self.assertEqual(max_ind, 3)
    def test_round(self):
        # rounded_vals[i][j] is vals[i] rounded to sigfigs[j] significant figures
        vals = [424.2425, 2.3425356, 0.000042535636653,
                0.23, 2.468e6, 0, -1.392156]
        sigfigs = range(1, 6)
        rounded_vals = [[400.0, 420.0, 424.0, 424.2, 424.24],
                        [2.0, 2.3, 2.34, 2.343, 2.3425],
                        [4e-5, 4.3e-5, 4.25e-5, 4.254e-5, 4.2536e-5],
                        [0.2, 0.23, 0.23, 0.23, 0.23],
                        [2e6, 2.5e6, 2.47e6, 2.468e6, 2.468e6],
                        [0, 0, 0, 0, 0],
                        [-1, -1.4, -1.39, -1.392, -1.3922]]
        for v, val in enumerate(vals):
            for s, sig in enumerate(sigfigs):
                self.assertEqual(round_to_sigfigs(val, sig),
                                 rounded_vals[v][s])
        # negative or non-integer significant-figure counts are rejected
        with self.assertRaises(ValueError):
            round_to_sigfigs(3.5, -2)
        with self.assertRaises(TypeError):
            round_to_sigfigs(3.5, 3.5)
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
| czhengsci/pymatgen | pymatgen/util/tests/test_num_utils.py | Python | mit | 2,005 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
# Status codes: success comes straight from MCL; failure codes are
# sequential offsets from the framework's base error number.
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 1
ERR_LOOKUP_FAILED = mcl.status.framework.ERR_START + 2
ERR_PACKET_SETUP_FAILED = mcl.status.framework.ERR_START + 3
ERR_CREATE_SOCKET_FAILED = mcl.status.framework.ERR_START + 4
ERR_SEND_PACKET_FAILED = mcl.status.framework.ERR_START + 5
ERR_INVALID_SOURCE = mcl.status.framework.ERR_START + 6
ERR_INVALID_DEST = mcl.status.framework.ERR_START + 7
ERR_CREATE_SOCKET_ERROR = mcl.status.framework.ERR_START + 8
ERR_BIND_ERROR = mcl.status.framework.ERR_START + 9
ERR_RECEIVE_ERROR = mcl.status.framework.ERR_START + 10
ERR_RAW_RECV_ERROR = mcl.status.framework.ERR_START + 11
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 12
ERR_MAX_HOPS_EXCEEDED = mcl.status.framework.ERR_START + 13
# Human-readable message for each failure code above (ERR_SUCCESS omitted).
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
 ERR_MARSHAL_FAILED: 'Marshaling data failed',
 ERR_LOOKUP_FAILED: 'Hostname lookup failed',
 ERR_PACKET_SETUP_FAILED: 'Packet generation failed',
 ERR_CREATE_SOCKET_FAILED: 'Unable to create a socket',
 ERR_SEND_PACKET_FAILED: 'Unable to send packet',
 ERR_INVALID_SOURCE: 'Invalid source address specified',
 ERR_INVALID_DEST: 'Invalid destination address specified',
 ERR_CREATE_SOCKET_ERROR: 'Unable to create socket',
 ERR_BIND_ERROR: 'Unable to bind socket',
 ERR_RECEIVE_ERROR: 'Error receiving data on socket',
 ERR_RAW_RECV_ERROR: 'Raw receive error',
 ERR_NOT_IMPLEMENTED: 'Not implemented on this platform',
 ERR_MAX_HOPS_EXCEEDED: 'Maximum number of hops exceeded'
} | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/traceroute/errors.py | Python | unlicense | 1,801 |
from enigma import eEPGCache
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.Converter.genre import getGenreStringSub
class EventName(Converter, object):
    """
    Converter that renders EPG event information for a service: event
    names, short/extended descriptions, genre, parental ratings and the
    name/description of the next and third upcoming events.

    The converter argument string selects the rendering mode (first
    comma-separated token, e.g. "Description", "NameNext", "Rating");
    remaining tokens are option flags:

    * ``Separated`` -- join description parts with a blank line
    * ``Trimmed``   -- strip surrounding whitespace from returned text
    """

    # Rendering modes selected from the converter argument string.
    NAME = 0
    SHORT_DESCRIPTION = 1
    EXTENDED_DESCRIPTION = 2
    FULL_DESCRIPTION = 3
    ID = 4
    NAME_NOW = 5
    NAME_NEXT = 6
    NAME_NEXT2 = 7
    GENRE = 8
    RATING = 9
    SRATING = 10
    RAWRATING = 11
    RATINGCOUNTRY = 12
    # Modes >= 21 (and 6/7) require an EPG cache lookup in getText().
    NEXT_DESCRIPTION = 21
    THIRD_NAME = 22
    THIRD_DESCRIPTION = 23

    # Indexes into the AUSTEXT value tuples: (short label, long label).
    AUSSHORT = 0
    AUSLONG = 1

    # Australian classification labels keyed by classification code.
    AUSTEXT = {
        "NC" : (" ", "Not Classified"),
        "P"  : ("P", "Preschool"),
        "C"  : ("C", "Children"),
        "G"  : ("G", "General"),
        "PG" : ("PG", "Parental Guidance Recommended"),
        "M"  : ("M", "Mature Audience 15+"),
        "MA" : ("MA", "Mature Adult Audience 15+"),
        "AV" : ("AV", "Adult Audience, Strong Violence 15+"),
        "R"  : ("R", "Restricted 18+")
    }

    # Raw parental rating value (0-15) -> Australian label pair.
    AUSRATINGS = {
        0  : AUSTEXT["NC"],
        1  : AUSTEXT["NC"],
        2  : AUSTEXT["P"],
        3  : AUSTEXT["P"],
        4  : AUSTEXT["C"],
        5  : AUSTEXT["C"],
        6  : AUSTEXT["G"],
        7  : AUSTEXT["G"],
        8  : AUSTEXT["PG"],
        9  : AUSTEXT["PG"],
        10 : AUSTEXT["M"],
        11 : AUSTEXT["M"],
        12 : AUSTEXT["MA"],
        13 : AUSTEXT["MA"],
        14 : AUSTEXT["AV"],
        15 : AUSTEXT["R"]
    }

    def __init__(self, type):
        """
        Parse the converter argument: the first comma-separated token
        selects the rendering type, the remaining tokens are flags
        ("Separated", "Trimmed"). Unknown types fall back to NAME.
        """
        Converter.__init__(self, type)
        self.epgcache = eEPGCache.getInstance()
        args = type.split(',')
        args = [arg.strip() for arg in args]
        type = args.pop(0)
        # Separator used between short and extended description parts.
        if "Separated" in args:
            self.SEPARATOR = "\n\n"
        else:
            self.SEPARATOR = "\n"
        if "Trimmed" in args:
            self.TRIM = True
        else:
            self.TRIM = False
        if type == "Description":
            self.type = self.SHORT_DESCRIPTION
        elif type == "ExtendedDescription":
            self.type = self.EXTENDED_DESCRIPTION
        elif type == "FullDescription":
            self.type = self.FULL_DESCRIPTION
        elif type == "ID":
            self.type = self.ID
        elif type == "NameNow" or type == "NowName":
            self.type = self.NAME_NOW
        elif type == "NameNext" or type == "NextName":
            self.type = self.NAME_NEXT
        elif type == "NameNextOnly" or type == "NextNameOnly":
            self.type = self.NAME_NEXT2
        elif type == "Genre":
            self.type = self.GENRE
        elif type == "Rating":
            self.type = self.RATING
        elif type == "SmallRating":
            self.type = self.SRATING
        elif type == "RawRating":
            self.type = self.RAWRATING
        elif type == "RatingCountry":
            self.type = self.RATINGCOUNTRY
        elif type == "NextDescription":
            self.type = self.NEXT_DESCRIPTION
        elif type == "ThirdName":
            self.type = self.THIRD_NAME
        elif type == "ThirdDescription":
            self.type = self.THIRD_DESCRIPTION
        else:
            self.type = self.NAME

    def trimText(self, text):
        """Strip surrounding whitespace when the "Trimmed" flag is set."""
        if self.TRIM:
            return str(text).strip()
        else:
            return text

    @cached
    def getText(self):
        """
        Render the text for the configured type from the current source
        event. Returns "" when no event or matching data is available.
        """
        event = self.source.event
        if event is None:
            return ""
        if self.type == self.NAME:
            return self.trimText(event.getEventName())
        elif self.type == self.RATINGCOUNTRY:
            rating = event.getParentalData()
            if rating is None:
                return ""
            else:
                return rating.getCountryCode()
        elif self.type == self.RAWRATING:
            rating = event.getParentalData()
            if rating is None:
                return ""
            else:
                # Raw numeric rating value as carried in the EPG data.
                return "%d" % int(rating.getRating())
        elif self.type == self.SRATING:
            rating = event.getParentalData()
            if rating is None:
                return ""
            else:
                country = rating.getCountryCode()
                age = int(rating.getRating())
                if country.upper() == "AUS":
                    # Unknown AUS codes fall back to "BC<n>"
                    # (broadcaster-defined).
                    errmsg = _("BC%d") % age
                    undef = (errmsg, "")
                    return _(self.AUSRATINGS.get(age, undef)[self.AUSSHORT])
                else:
                    if age == 0:
                        return _("All ages")
                    elif age > 15:
                        return _("bc%d") % age
                    else:
                        # NOTE(review): offset of 3 looks like the DVB
                        # "recommended minimum age minus 3" encoding --
                        # confirm against the EPG source.
                        age += 3
                        return " %d+" % age
        elif self.type == self.RATING:
            rating = event.getParentalData()
            if rating is None:
                return ""
            else:
                country = rating.getCountryCode()
                age = int(rating.getRating())
                if country.upper() == "AUS":
                    errmsg = _("Defined By Broadcaster (%d)") % age
                    undef = ("", errmsg)
                    return _(self.AUSRATINGS.get(age, undef)[self.AUSLONG])
                else:
                    if age == 0:
                        return _("Rating undefined")
                    elif age > 15:
                        return _("Rating defined by broadcaster - %d") % age
                    else:
                        age += 3
                        return _("Minimum age %d years") % age
        elif self.type == self.GENRE:
            genre = event.getGenreData()
            if genre is None:
                return ""
            else:
                return self.trimText(getGenreStringSub(genre.getLevel1(), genre.getLevel2()))
        elif self.type == self.NAME_NOW:
            return pgettext("now/next: 'now' event label", "Now") + ": " + self.trimText(event.getEventName())
        elif self.type == self.SHORT_DESCRIPTION:
            return self.trimText(event.getShortDescription())
        elif self.type == self.EXTENDED_DESCRIPTION:
            # Fall back to the short description when no extended one exists.
            return self.trimText(event.getExtendedDescription() or event.getShortDescription())
        elif self.type == self.FULL_DESCRIPTION:
            description = self.trimText(event.getShortDescription())
            extended = self.trimText(event.getExtendedDescription())
            if description and extended:
                description += self.SEPARATOR
            return description + extended
        elif self.type == self.ID:
            return str(event.getEventId())
        elif int(self.type) in (6, 7) or int(self.type) >= 21:
            # Next/third event data must be fetched from the EPG cache;
            # self.list[0] is the current event, [1] next, [2] third.
            try:
                reference = self.source.service
                info = reference and self.source.info
                if info is None:
                    # NOTE(review): bare return yields None here, while
                    # every other path returns a string -- confirm callers
                    # tolerate None.
                    return
                test = ['ITSECX', (reference.toString(), 1, -1, 1440)] # search next 24 hours
                self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
                if self.list:
                    if self.type == self.NAME_NEXT and self.list[1][1]:
                        return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(self.list[1][1])
                    elif self.type == self.NAME_NEXT2 and self.list[1][1]:
                        return self.trimText(self.list[1][1])
                    elif self.type == self.NEXT_DESCRIPTION and (self.list[1][2] or self.list[1][3]):
                        description = self.trimText(self.list[1][2])
                        extended = self.trimText(self.list[1][3])
                        # Skip the short part when it merely repeats the
                        # start of the extended description.
                        if (description and extended) and (description[0:20] != extended[0:20]):
                            description += self.SEPARATOR
                        return description + extended
                    elif self.type == self.THIRD_NAME and self.list[2][1]:
                        return pgettext("third event: 'third' event label", "Later") + ": " + self.trimText(self.list[2][1])
                    elif self.type == self.THIRD_DESCRIPTION and (self.list[2][2] or self.list[2][3]):
                        description = self.trimText(self.list[2][2])
                        extended = self.trimText(self.list[2][3])
                        if (description and extended) and (description[0:20] != extended[0:20]):
                            description += self.SEPARATOR
                        return description + extended
                    else:
                        # failed to return any epg data.
                        return ""
            except:
                # EPG lookup failed (e.g. list too short); degrade to the
                # current event's name for NAME_NEXT, else empty string.
                if self.type == self.NAME_NEXT:
                    return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(event.getEventName())
                return ""

    text = property(getText)
| OpenLD/enigma2-wetek | lib/python/Components/Converter/EventName.py | Python | gpl-2.0 | 6,891 |
#! /usr/bin/python -tt
import nose
from rhuilib.util import *
from rhuilib.rhui_testcase import *
from rhuilib.rhuimanager import *
from rhuilib.rhuimanager_cds import *
from rhuilib.rhuimanager_client import *
from rhuilib.rhuimanager_repo import *
from rhuilib.rhuimanager_sync import *
class test_tcms_90682(RHUITestcase):
    """TCMS#90682: deleting a CDS must fail while its shared secret is wrong."""

    def _setup(self):
        '''[TCMS#90682 setup] Do initial rhui-manager run'''
        rhua = self.rs.Instances["RHUA"][0]
        cds = self.rs.Instances["CDS"][0]
        RHUIManager.initial_run(rhua)
        '''[TCMS#90682 setup] Add cdses '''
        RHUIManagerCds.add_cds(rhua, "Cluster1", cds.private_hostname)
        '''[TCMS#90682 setup] Create custom repo '''
        RHUIManagerRepo.add_custom_repo(rhua, "repo1")
        '''[TCMS#90682 setup] Associate custom repo with cluster '''
        RHUIManagerCds.associate_repo_cds(rhua, "Cluster1", ["repo1"])
        '''[TCMS#90682 setup] Upload content to custom repo '''
        RHUIManagerRepo.upload_content(rhua, ["repo1"], "/etc/rhui/confrpm")
        '''[TCMS#90682 setup] Sync cdses '''
        self._sync_cds([cds.private_hostname])

    def _test(self):
        '''[TCMS#90682 test] Backup cds password '''
        rhua = self.rs.Instances["RHUA"][0]
        cds = self.rs.Instances["CDS"][0]
        Expect.ping_pong(cds, "cat /var/lib/pulp-cds/.gofer/secret > /var/lib/pulp-cds/.gofer/secret.old && echo SUCCESS", "[^ ]SUCCESS")
        '''[TCMS#90682 test] Set wrong cds password '''
        Expect.ping_pong(cds, "echo d4648caf-af85-43db-858d-743c840ae928 > /var/lib/pulp-cds/.gofer/secret && echo SUCCESS", "[^ ]SUCCESS")
        '''[TCMS#90682 test] Trying to remove cds with wrong password '''
        try:
            RHUIManagerCds.delete_cds(rhua, "Cluster1", [cds.private_hostname])
            # Reaching this line means the deletion unexpectedly succeeded.
            assert False
        except ExpectFailed:
            pass

    def _cleanup(self):
        '''[TCMS#90682 cleanup] Restore cds passeord '''
        rhua = self.rs.Instances["RHUA"][0]
        cds = self.rs.Instances["CDS"][0]
        Expect.ping_pong(cds, "cat /var/lib/pulp-cds/.gofer/secret.old > /var/lib/pulp-cds/.gofer/secret && echo SUCCESS", "[^ ]SUCCESS")
        '''[TCMS#90682 cleanup] Remove cds '''
        RHUIManagerCds.delete_cds(rhua, "Cluster1", [cds.private_hostname])
        '''[TCMS#90682 cleanup] Delete custom repo '''
        RHUIManagerRepo.delete_repo(rhua, ["repo1"])
if __name__ == "__main__":
    # Run this module's tests verbosely under nose when executed directly.
    _argv = [__file__, '-v']
    nose.run(defaultTest=__name__, argv=_argv)
| RedHatQE/rhui-testing-tools | rhui-tests/test_rhui_tcms90682.py | Python | gpl-3.0 | 2,622 |
"""Mozilla / Netscape cookie loading / saving."""
import re, time, logging
from cookielib import (reraise_unmasked_exceptions, FileCookieJar, Cookie,
MISSING_FILENAME_TEXT)
class MozillaCookieJar(FileCookieJar):
    """
    WARNING: you may want to backup your browser's cookies file if you use
    this class to save cookies.  I *think* it works, but there have been
    bugs in the past!

    This class differs from CookieJar only in the format it uses to save and
    load cookies to and from a file.  This class uses the Mozilla/Netscape
    `cookies.txt' format.  lynx uses this file format, too.

    Don't expect cookies saved while the browser is running to be noticed by
    the browser (in fact, Mozilla on unix will overwrite your saved cookies if
    you change them on disk while it's running; on Windows, you probably can't
    save at all while the browser is running).

    Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
    Netscape cookies on saving.

    In particular, the cookie version and port number information is lost,
    together with information about whether or not Path, Port and Discard were
    specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
    domain as set in the HTTP header started with a dot (yes, I'm aware some
    domains in Netscape files start with a dot and some don't -- trust me, you
    really don't want to know any more about this).

    Note that though Mozilla and Netscape use the same format, they use
    slightly different headers.  The class saves cookies using the Netscape
    header by default (Mozilla can cope with that).

    """
    # The first line of a valid cookies.txt file must match this pattern.
    magic_re = "#( Netscape)? HTTP Cookie File"
    header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file!  Do not edit.
"""

    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        # Parse cookies from an open Netscape-format file into this jar.
        # Each data line has 7 tab-separated fields:
        # domain, domain_specified, path, secure, expires, name, value.
        now = time.time()

        magic = f.readline()
        if not re.search(self.magic_re, magic):
            f.close()
            raise IOError(
                "%s does not look like a Netscape format cookies file" %
                filename)

        try:
            while 1:
                line = f.readline()
                if line == "": break

                # last field may be absent, so keep any trailing tab
                if line.endswith("\n"): line = line[:-1]

                # skip comments and blank lines XXX what is $ for?
                if (line.strip().startswith("#") or
                    line.strip().startswith("$") or
                    line.strip() == ""):
                    continue

                domain, domain_specified, path, secure, expires, name, value = \
                        line.split("\t")
                secure = (secure == "TRUE")
                domain_specified = (domain_specified == "TRUE")
                if name == "":
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas cookielib regards it as a
                    # cookie with no value.
                    name = value
                    value = None

                initial_dot = domain.startswith(".")
                # In this format a leading dot on the domain and the
                # domain_specified flag are expected to agree.
                assert domain_specified == initial_dot

                discard = False
                if expires == "":
                    # No expiry time means a session-only (discard) cookie.
                    expires = None
                    discard = True

                # assume path_specified is false
                c = Cookie(0, name, value,
                           None, False,
                           domain, domain_specified, initial_dot,
                           path, False,
                           secure,
                           expires,
                           discard,
                           None,
                           None,
                           {})
                if not ignore_discard and c.discard:
                    continue
                if not ignore_expires and c.is_expired(now):
                    continue
                self.set_cookie(c)

        except:
            # Let IOError (and unmasked exceptions such as
            # KeyboardInterrupt) propagate; wrap parse errors in IOError.
            reraise_unmasked_exceptions((IOError,))
            raise IOError("invalid Netscape format file %s: %s" %
                          (filename, line))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        # Write the jar's cookies to `filename` in Netscape cookies.txt
        # format, skipping discard/expired cookies unless told otherwise.
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        f = open(filename, "w")
        try:
            f.write(self.header)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure: secure = "TRUE"
                else: secure = "FALSE"
                if cookie.domain.startswith("."): initial_dot = "TRUE"
                else: initial_dot = "FALSE"
                if cookie.expires is not None:
                    expires = str(cookie.expires)
                else:
                    expires = ""
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas cookielib regards it as a
                    # cookie with no value.
                    name = ""
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    "\t".join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value])+
                    "\n")
        finally:
            f.close()
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/_MozillaCookieJar.py | Python | gpl-2.0 | 5,794 |
#
# Copyright 2014 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for managing BTRFS file systems.
"""
import itertools
import os
import re
import subprocess
import uuid
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
def __virtual__():
    """
    Only work on POSIX-like systems
    """
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get("kernel") == "Linux"
def version():
    """
    Return BTRFS version.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.version
    """
    result = __salt__["cmd.run_all"]("btrfs --version")
    err = result.get("stderr")
    if err:
        raise CommandExecutionError(err)
    return {"version": result["stdout"].split(" ", 1)[-1]}
def _parse_btrfs_info(data):
"""
Parse BTRFS device info data.
"""
ret = {}
for line in [line for line in data.split("\n") if line][:-1]:
if line.startswith("Label:"):
line = re.sub(r"Label:\s+", "", line)
label, uuid_ = (tkn.strip() for tkn in line.split("uuid:"))
ret["label"] = label != "none" and label or None
ret["uuid"] = uuid_
continue
if line.startswith("\tdevid"):
dev_data = re.split(r"\s+", line.strip())
dev_id = dev_data[-1]
ret[dev_id] = {
"device_id": dev_data[1],
"size": dev_data[3],
"used": dev_data[5],
}
return ret
def info(device):
    """
    Get BTRFS filesystem information.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.info /dev/sda1
    """
    result = __salt__["cmd.run_all"]("btrfs filesystem show {}".format(device))
    salt.utils.fsutils._verify_run(result)
    return _parse_btrfs_info(result["stdout"])
def devices():
    """
    Get known BTRFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.devices
    """
    result = __salt__["cmd.run_all"]("blkid -o export")
    salt.utils.fsutils._verify_run(result)
    return salt.utils.fsutils._blkid_output(result["stdout"], fs_type="btrfs")
def _defragment_mountpoint(mountpoint):
    """
    Defragment a single mounted BTRFS mount point and report the outcome.
    """
    result = __salt__["cmd.run_all"](
        "btrfs filesystem defragment -f {}".format(mountpoint)
    )
    err = result["stderr"]
    return {
        "mount_point": mountpoint,
        "passed": not err,
        "log": err or False,
        "range": False,
    }
def defragment(path):
    """
    Defragment mounted BTRFS filesystem.
    In order to defragment a filesystem, device should be properly mounted and writable.

    If passed a device name, the whole filesystem mounted on it is defragmented.
    If passed a mount point of the filesystem, then only this mount point is defragmented.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.defragment /dev/sda1
        salt '*' btrfs.defragment /path/on/filesystem
    """
    is_device = salt.utils.fsutils._is_device(path)
    mounts = salt.utils.fsutils._get_mounts("btrfs")
    # A raw device can only be defragmented through its mount point(s).
    if is_device and not mounts.get(path):
        raise CommandExecutionError('Device "{}" is not mounted'.format(path))
    result = []
    if is_device:
        # Defragment every mount point backed by this device.
        for mount_point in mounts[path]:
            result.append(_defragment_mountpoint(mount_point["mount_point"]))
    else:
        # Path form: figure out whether it is itself a mount point or just
        # a path inside a mounted BTRFS filesystem.
        is_mountpoint = False
        for mountpoints in mounts.values():
            for mpnt in mountpoints:
                if path == mpnt["mount_point"]:
                    is_mountpoint = True
                    break
        d_res = _defragment_mountpoint(path)
        # On a non-mountpoint path a "range ioctl" failure is reported as
        # a kernel limitation rather than a hard error.
        if (
            not is_mountpoint
            and not d_res["passed"]
            and "range ioctl not supported" in d_res["log"]
        ):
            d_res[
                "log"
            ] = "Range ioctl defragmentation is not supported in this kernel."
        if not is_mountpoint:
            d_res["mount_point"] = False
            d_res["range"] = os.path.exists(path) and path or False
        result.append(d_res)
    return result
def features():
    """
    List currently available BTRFS features.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.mkfs_features
    """
    result = __salt__["cmd.run_all"]("mkfs.btrfs -O list-all")
    salt.utils.fsutils._verify_run(result)
    ret = {}
    # mkfs.btrfs prints the feature list on stderr, one "name - text" per line.
    for line in result["stderr"].split("\n"):
        if " - " not in line:
            continue
        option, description = re.sub(r"\s+", " ", line).split(" - ", 1)
        ret[option] = description
    return ret
def _usage_overall(raw):
"""
Parse usage/overall.
"""
data = {}
for line in raw.split("\n")[1:]:
keyset = [
item.strip()
for item in re.sub(r"\s+", " ", line).split(":", 1)
if item.strip()
]
if len(keyset) == 2:
key = re.sub(r"[()]", "", keyset[0]).replace(" ", "_").lower()
if key in ["free_estimated", "global_reserve"]: # An extra field
subk = keyset[1].split("(")
data[key] = subk[0].strip()
subk = subk[1].replace(")", "").split(": ")
data["{}_{}".format(key, subk[0])] = subk[1]
else:
data[key] = keyset[1]
return data
def _usage_specific(raw):
"""
Parse usage/specific.
"""
get_key = lambda val: dict([tuple(val.split(":"))])
raw = raw.split("\n")
section, size, used = raw[0].split(" ")
section = section.replace(",", "_").replace(":", "").lower()
data = {}
data[section] = {}
for val in [size, used]:
data[section].update(get_key(val.replace(",", "")))
for devices in raw[1:]:
data[section].update(get_key(re.sub(r"\s+", ":", devices.strip())))
return data
def _usage_unallocated(raw):
"""
Parse usage/unallocated.
"""
ret = {}
for line in raw.split("\n")[1:]:
keyset = re.sub(r"\s+", " ", line.strip()).split(" ")
if len(keyset) == 2:
ret[keyset[0]] = keyset[1]
return ret
def usage(path):
    """
    Show in which disk the chunks are allocated.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.usage /your/mountpoint
    """
    result = __salt__["cmd.run_all"]("btrfs filesystem usage {}".format(path))
    salt.utils.fsutils._verify_run(result)
    ret = {}
    # Output is made of blank-line separated sections; "Overall" and
    # "Unallocated" have dedicated parsers, everything else is specific.
    for section in result["stdout"].split("\n\n"):
        if section.startswith("Overall:\n"):
            ret["overall"] = _usage_overall(section)
        elif section.startswith("Unallocated:\n"):
            ret["unallocated"] = _usage_unallocated(section)
        else:
            ret.update(_usage_specific(section))
    return ret
def mkfs(*devices, **kwargs):
    """
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **allocsize**: Specify the BTRFS offset from the start of the device.
    * **bytecount**: Specify the size of the resultant filesystem.
    * **nodesize**: Node size.
    * **leafsize**: Specify the nodesize, the tree block size in which btrfs stores data.
    * **noforce**: Prevent force overwrite when an existing filesystem is detected on the device.
    * **sectorsize**: Specify the sectorsize, the minimum data block allocation unit.
    * **nodiscard**: Do not perform whole device TRIM operation by default.
    * **uuid**: Pass UUID or pass True to generate one.

    Options:

    * **dto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
        Specify how the data must be spanned across the devices specified.
    * **mto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
        Specify how metadata must be spanned across the devices specified.
    * **fts**: Features (call ``salt <host> btrfs.features`` for full list of available features)

    See the ``mkfs.btrfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.mkfs /dev/sda1
        salt '*' btrfs.mkfs /dev/sda1 noforce=True
    """
    if not devices:
        raise CommandExecutionError("No devices specified")
    # Refuse to format devices that are currently mounted.
    mounts = salt.utils.fsutils._get_mounts("btrfs")
    for device in devices:
        if mounts.get(device):
            raise CommandExecutionError(
                'Device "{}" should not be mounted'.format(device)
            )
    cmd = ["mkfs.btrfs"]
    dto = kwargs.get("dto")
    mto = kwargs.get("mto")
    # With a single device any requested data/metadata profile is forced
    # to "single"; with several devices the requested profiles are used.
    if len(devices) == 1:
        if dto:
            cmd.append("-d single")
        if mto:
            cmd.append("-m single")
    else:
        if dto:
            cmd.append("-d {}".format(dto))
        if mto:
            cmd.append("-m {}".format(mto))
    # Map keyword arguments onto their mkfs.btrfs command-line switches.
    for key, option in [
        ("-l", "leafsize"),
        ("-L", "label"),
        ("-O", "fts"),
        ("-A", "allocsize"),
        ("-b", "bytecount"),
        ("-n", "nodesize"),
        ("-s", "sectorsize"),
    ]:
        # Labels may contain spaces, so they are single-quoted for the shell.
        if option == "label" and option in kwargs:
            kwargs["label"] = "'{}'".format(kwargs["label"])
        if kwargs.get(option):
            cmd.append("{} {}".format(key, kwargs.get(option)))
    if kwargs.get("uuid"):
        # uuid=True generates a fresh UUID; any other truthy value is
        # passed through verbatim.
        cmd.append(
            "-U {}".format(
                kwargs.get("uuid") is True and uuid.uuid1() or kwargs.get("uuid")
            )
        )
    if kwargs.get("nodiscard"):
        cmd.append("-K")
    if not kwargs.get("noforce"):
        cmd.append("-f")
    cmd.extend(devices)
    out = __salt__["cmd.run_all"](" ".join(cmd))
    salt.utils.fsutils._verify_run(out)
    ret = {"log": out["stdout"]}
    ret.update(__salt__["btrfs.info"](devices[0]))
    return ret
def resize(mountpoint, size):
    """
    Resize filesystem.

    General options:

    * **mountpoint**: Specify the BTRFS mountpoint to resize.
    * **size**: ([+/-]<newsize>[kKmMgGtTpPeE]|max) Specify the new size of the target.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.resize /mountpoint size=+1g
        salt '*' btrfs.resize /dev/sda1 size=max
    """
    if size == "max":
        # "max" is only accepted for a device that is currently mounted.
        if not salt.utils.fsutils._is_device(mountpoint):
            raise CommandExecutionError(
                'Mountpoint "{}" should be a valid device'.format(mountpoint)
            )
        if not salt.utils.fsutils._get_mounts("btrfs").get(mountpoint):
            raise CommandExecutionError(
                'Device "{}" should be mounted'.format(mountpoint)
            )
    elif (
        len(size) < 3
        or size[0] not in "-+"
        or size[-1] not in "kKmMgGtTpPeE"
        or re.sub(r"\d", "", size[1:][:-1])
    ):
        # Anything other than "max" must be [+/-]<digits><unit>; the last
        # check rejects non-digit characters between sign and unit.
        raise CommandExecutionError(
            'Unknown size: "{}". Expected: [+/-]<newsize>[kKmMgGtTpPeE]|max'.format(
                size
            )
        )
    out = __salt__["cmd.run_all"](
        "btrfs filesystem resize {} {}".format(size, mountpoint)
    )
    salt.utils.fsutils._verify_run(out)
    ret = {"log": out["stdout"]}
    ret.update(__salt__["btrfs.info"](mountpoint))
    return ret
def _fsck_ext(device):
    """
    Run a forced, read-only fsck on an ext2/3/4 filesystem and translate
    the exit code into a human-readable status message.
    """
    messages = {
        0: "No errors",
        1: "Filesystem errors corrected",
        2: "System should be rebooted",
        4: "Filesystem errors left uncorrected",
        8: "Operational error",
        16: "Usage or syntax error",
        32: "Fsck canceled by user request",
        128: "Shared-library error",
    }
    retcode = __salt__["cmd.run_all"]("fsck -f -n {}".format(device))["retcode"]
    return messages.get(retcode, "Unknown error")
def convert(device, permanent=False, keeplf=False):
    """
    Convert ext2/3/4 to BTRFS. Device should be mounted.

    Filesystem can be converted temporarily so the further processing and rollback is possible,
    or permanently, where previous extended filesystem image gets deleted. Please note, permanent
    conversion takes a while as BTRFS filesystem needs to be properly rebalanced afterwards.

    General options:

    * **permanent**: Specify if the migration should be permanent (false by default)
    * **keeplf**: Keep ``lost+found`` of the partition (removed by default,
                but still in the image, if not permanent migration)

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.convert /dev/sda1
        salt '*' btrfs.convert /dev/sda1 permanent=True
    """
    # Validate that the device exists and carries an extended filesystem.
    out = __salt__["cmd.run_all"]("blkid -o export")
    salt.utils.fsutils._verify_run(out)
    devices = salt.utils.fsutils._blkid_output(out["stdout"])
    if not devices.get(device):
        raise CommandExecutionError('The device "{}" was is not found.'.format(device))
    if not devices[device]["type"] in ["ext2", "ext3", "ext4"]:
        raise CommandExecutionError(
            'The device "{}" is a "{}" file system.'.format(
                device, devices[device]["type"]
            )
        )
    mountpoint = (
        salt.utils.fsutils._get_mounts(devices[device]["type"])
        .get(device, [{"mount_point": None}])[0]
        .get("mount_point")
    )
    # Converting the root filesystem is explicitly refused.
    if mountpoint == "/":
        raise CommandExecutionError(
            """One does not simply converts a root filesystem!

Converting an extended root filesystem to BTRFS is a careful
and lengthy process, among other steps including the following
requirements:

  1. Proper verified backup.
  2. System outage.
  3. Offline system access.

For further details, please refer to your OS vendor
documentation regarding this topic.
"""
        )
    # Unmount, record the pre-conversion fsck status, convert, remount.
    salt.utils.fsutils._verify_run(__salt__["cmd.run_all"]("umount {}".format(device)))
    ret = {
        "before": {
            "fsck_status": _fsck_ext(device),
            "mount_point": mountpoint,
            "type": devices[device]["type"],
        }
    }
    salt.utils.fsutils._verify_run(
        __salt__["cmd.run_all"]("btrfs-convert {}".format(device))
    )
    salt.utils.fsutils._verify_run(
        __salt__["cmd.run_all"]("mount {} {}".format(device, mountpoint))
    )
    # Refresh devices
    out = __salt__["cmd.run_all"]("blkid -o export")
    salt.utils.fsutils._verify_run(out)
    devices = salt.utils.fsutils._blkid_output(out["stdout"])
    ret["after"] = {
        "fsck_status": "N/A",  # ToDO
        "mount_point": mountpoint,
        "type": devices[device]["type"],
    }
    # Post-migration procedures
    image_path = "{}/ext2_saved".format(mountpoint)
    orig_fstype = ret["before"]["type"]
    if not os.path.exists(image_path):
        raise CommandExecutionError(
            'BTRFS migration went wrong: the image "{}" not found!'.format(image_path)
        )
    if not permanent:
        # Temporary conversion keeps the saved original image for rollback.
        ret["after"]["{}_image".format(orig_fstype)] = image_path
        image_info_proc = subprocess.run(
            ["file", "{}/image".format(image_path)], check=True, stdout=subprocess.PIPE
        )
        ret["after"][
            "{}_image_info".format(orig_fstype)
        ] = image_info_proc.stdout.strip()
    else:
        # Permanent conversion drops the saved image and rebalances the
        # new BTRFS filesystem.
        ret["after"]["{}_image".format(orig_fstype)] = "removed"
        ret["after"]["{}_image_info".format(orig_fstype)] = "N/A"
        salt.utils.fsutils._verify_run(
            __salt__["cmd.run_all"]("btrfs subvolume delete {}".format(image_path))
        )
        out = __salt__["cmd.run_all"]("btrfs filesystem balance {}".format(mountpoint))
        salt.utils.fsutils._verify_run(out)
        ret["after"]["balance_log"] = out["stdout"]
    # Optionally drop the inherited lost+found directory.
    lost_found = "{}/lost+found".format(mountpoint)
    if os.path.exists(lost_found) and not keeplf:
        salt.utils.fsutils._verify_run(
            __salt__["cmd.run_all"]("rm -rf {}".format(lost_found))
        )
    return ret
def _restripe(mountpoint, direction, *devices, **kwargs):
    """
    Restripe BTRFS: add or remove devices from the particular mounted filesystem.

    ``direction`` is passed straight to ``btrfs device`` and is expected
    to be "add" or "delete". After an "add", the filesystem is rebalanced,
    optionally converting data ("dc") / metadata ("mc") profiles.
    """
    fs_log = []
    if salt.utils.fsutils._is_device(mountpoint):
        raise CommandExecutionError(
            'Mountpount expected, while device "{}" specified'.format(mountpoint)
        )
    # The target must be an active BTRFS mount point.
    mounted = False
    for device, mntpoints in salt.utils.fsutils._get_mounts("btrfs").items():
        for mntdata in mntpoints:
            if mntdata["mount_point"] == mountpoint:
                mounted = True
                break
    if not mounted:
        raise CommandExecutionError(
            'No BTRFS device mounted on "{}" mountpoint'.format(mountpoint)
        )
    if not devices:
        raise CommandExecutionError("No devices specified.")
    # Every requested device has to be known to blkid.
    available_devices = __salt__["btrfs.devices"]()
    for device in devices:
        if device not in available_devices.keys():
            raise CommandExecutionError('Device "{}" is not recognized'.format(device))
    cmd = ["btrfs device {}".format(direction)]
    for device in devices:
        cmd.append(device)
    if direction == "add":
        # Options only valid when adding devices.
        if kwargs.get("nodiscard"):
            cmd.append("-K")
        if kwargs.get("force"):
            cmd.append("-f")
    cmd.append(mountpoint)
    out = __salt__["cmd.run_all"](" ".join(cmd))
    salt.utils.fsutils._verify_run(out)
    if out["stdout"]:
        fs_log.append(out["stdout"])
    if direction == "add":
        # Rebalance after adding; convert data/metadata profiles only when
        # both "dc" and "mc" are given.
        out = None
        data_conversion = kwargs.get("dc")
        meta_conversion = kwargs.get("mc")
        if data_conversion and meta_conversion:
            out = __salt__["cmd.run_all"](
                "btrfs balance start -dconvert={} -mconvert={} {}".format(
                    data_conversion, meta_conversion, mountpoint
                )
            )
        else:
            out = __salt__["cmd.run_all"](
                "btrfs filesystem balance {}".format(mountpoint)
            )
        salt.utils.fsutils._verify_run(out)
        if out["stdout"]:
            fs_log.append(out["stdout"])
    # Summarize the result
    ret = {}
    if fs_log:
        ret.update({"log": "\n".join(fs_log)})
    ret.update(__salt__["btrfs.info"](mountpoint))
    return ret
def add(mountpoint, *devices, **kwargs):
    """
    Add a devices to a BTRFS filesystem.

    General options:

    * **nodiscard**: Do not perform whole device TRIM
    * **force**: Force overwrite existing filesystem on the disk

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.add /mountpoint /dev/sda1 /dev/sda2
    """
    # Thin wrapper: validation, device addition and the follow-up
    # rebalance are all handled by _restripe with direction "add".
    return _restripe(mountpoint, "add", *devices, **kwargs)
def delete(mountpoint, *devices, **kwargs):
    """
    Remove devices from a BTRFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.delete /mountpoint /dev/sda1 /dev/sda2
    """
    # Thin wrapper around _restripe with direction "delete".
    return _restripe(mountpoint, "delete", *devices, **kwargs)
def _parse_proplist(data):
"""
Parse properties list.
"""
out = {}
for line in data.split("\n"):
line = re.split(r"\s+", line, 1)
if len(line) == 2:
out[line[0]] = line[1]
return out
def properties(obj, type=None, set=None):
    """
    List properties for given btrfs object. The object can be path of BTRFS device,
    mount point, or any directories/files inside the BTRFS filesystem.

    General options:

    * **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
    * **force**: Force overwrite existing filesystem on the disk
    * **set**: <key=value,key1=value1...> Options for a filesystem properties.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.properties /mountpoint
        salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
    """
    # NOTE(review): "type" and "set" shadow Python builtins; kept as-is
    # because they are part of this module's public CLI/API.
    if type and type not in [
        "s",
        "subvol",
        "f",
        "filesystem",
        "i",
        "inode",
        "d",
        "device",
    ]:
        raise CommandExecutionError(
            'Unknown property type: "{}" specified'.format(type)
        )
    cmd = ["btrfs"]
    cmd.append("property")
    # With "set" given the properties are assigned, otherwise listed.
    cmd.append(set and "set" or "list")
    if type:
        cmd.append("-t{}".format(type))
    cmd.append(obj)
    if set:
        # "set" is a comma-separated list of key=value assignments.
        try:
            for key, value in [
                [item.strip() for item in keyset.split("=")]
                for keyset in set.split(",")
            ]:
                cmd.append(key)
                cmd.append(value)
        except Exception as ex:  # pylint: disable=broad-except
            raise CommandExecutionError(ex)
    out = __salt__["cmd.run_all"](" ".join(cmd))
    salt.utils.fsutils._verify_run(out)
    if not set:
        # List mode: fetch the current value for every listed property.
        ret = {}
        for prop, descr in _parse_proplist(out["stdout"]).items():
            ret[prop] = {"description": descr}
            value = __salt__["cmd.run_all"](
                "btrfs property get {} {}".format(obj, prop)
            )["stdout"]
            ret[prop]["value"] = value and value.split("=")[-1] or "N/A"
        return ret
def subvolume_exists(path):
    """
    Check if a subvolume is present in the filesystem.

    path
        Mount point for the subvolume (full path)

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_exists /mnt/var
    """
    # "btrfs subvolume show" exits 0 only for an existing subvolume.
    retcode = __salt__["cmd.retcode"](
        ["btrfs", "subvolume", "show", path], ignore_retcode=True
    )
    return retcode == 0
def subvolume_create(name, dest=None, qgroupids=None):
    """
    Create subvolume `name` in `dest`.

    Return True if the subvolume is created, False is the subvolume is
    already there.

    name
        Name of the new subvolume

    dest
        If not given, the subvolume will be created in the current
        directory, if given will be in /dest/name

    qgroupids
        Add the newly created subcolume to a qgroup. This parameter
        is a list

    CLI Example:

    .. code-block:: bash

       salt '*' btrfs.subvolume_create var
       salt '*' btrfs.subvolume_create var dest=/mnt
       salt '*' btrfs.subvolume_create var qgroupids='[200]'
    """
    # isinstance instead of a `type(...) is` identity check; also reject
    # any non-list value (the old check let falsy non-lists through).
    if qgroupids is not None and not isinstance(qgroupids, list):
        raise CommandExecutionError("Qgroupids parameter must be a list")

    if dest:
        name = os.path.join(dest, name)

    # If the subvolume is there, we are done
    if subvolume_exists(name):
        return False

    cmd = ["btrfs", "subvolume", "create"]
    # Only emit "-i" when there is at least one qgroup id; a bare "-i"
    # with no argument would make btrfs consume the subvolume name.
    if qgroupids:
        cmd.append("-i")
        cmd.extend(qgroupids)
    cmd.append(name)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
def subvolume_delete(name=None, names=None, commit=None):
    """
    Delete the subvolume(s) from the filesystem

    The user can remove one single subvolume (name) or multiple of
    them at the same time (names). One of the two parameters needs to
    be specified.

    Please, refer to the documentation to understand the implication
    on the transactions, and when the subvolume is really deleted.

    Return True if the subvolume is deleted, False if the subvolume
    was already missing.

    name
        Name of the subvolume to remove

    names
        List of names of subvolumes to remove

    commit
        * 'after': Wait for transaction commit at the end
        * 'each': Wait for transaction commit after each delete

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_delete /var/volumes/tmp
        salt '*' btrfs.subvolume_delete /var/volumes/tmp commit=after

    """
    # At least one of `name` or a non-empty list `names` must be given
    if not name and not (names and isinstance(names, list)):
        raise CommandExecutionError("Provide a value for the name parameter")

    if commit and commit not in ("after", "each"):
        raise CommandExecutionError("Value for commit not recognized")

    # Filter the names and take the ones that are still there
    names = [
        n for n in itertools.chain([name], names or []) if n and subvolume_exists(n)
    ]

    # If the subvolumes are gone, we are done
    if not names:
        return False

    cmd = ["btrfs", "subvolume", "delete"]
    if commit == "after":
        cmd.append("--commit-after")
    elif commit == "each":
        cmd.append("--commit-each")
    cmd.extend(names)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
def subvolume_find_new(name, last_gen):
    """
    List the recently modified files in a subvolume

    name
        Name of the subvolume

    last_gen
        Last transid marker from where to compare

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024

    """
    # last_gen may arrive as an int from the CLI parser; the command
    # list handed to cmd.run_all must contain strings.
    cmd = ["btrfs", "subvolume", "find-new", name, str(last_gen)]

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)

    lines = res["stdout"].splitlines()
    # Filenames are at the end of each inode line
    files = [l.split()[-1] for l in lines if l.startswith("inode")]
    # The last transid is in the last line
    transid = lines[-1].split()[-1]
    return {
        "files": files,
        "transid": transid,
    }
def subvolume_get_default(path):
    """
    Get the default subvolume of the filesystem path

    path
        Mount point for the subvolume

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_get_default /var/volumes/tmp

    """
    res = __salt__["cmd.run_all"](["btrfs", "subvolume", "get-default", path])
    salt.utils.fsutils._verify_run(res)

    # Typical outputs:
    #
    #   ID 5 (FS_TREE)                          <- default not changed
    #   ID 257 gen 8 top level 5 path var       <- manually set to `var`
    #
    # The ID is always the second token and the name the last one.
    tokens = res["stdout"].strip().split()
    return {
        "id": tokens[1],
        "name": tokens[-1],
    }
def _pop(line, key, use_rest):
"""
Helper for the line parser.
If key is a prefix of line, will remove ir from the line and will
extract the value (space separation), and the rest of the line.
If use_rest is True, the value will be the rest of the line.
Return a tuple with the value and the rest of the line.
"""
value = None
if line.startswith(key):
line = line[len(key) :].strip()
if use_rest:
value = line
line = ""
else:
value, line = line.split(" ", 1)
return value, line.strip()
def subvolume_list(
    path,
    parent_id=False,
    absolute=False,
    ogeneration=False,
    generation=False,
    subvolumes=False,
    uuid=False,
    parent_uuid=False,
    sent_subvolume_uuid=False,
    snapshots=False,
    readonly=False,
    deleted=False,
    generation_cmp=None,
    ogeneration_cmp=None,
    sort=None,
):
    """
    List the subvolumes present in the filesystem.

    path
        Mount point for the subvolume

    parent_id
        Print parent ID

    absolute
        Print all the subvolumes in the filesystem and distinguish
        between absolute and relative path with respect to the given
        <path>

    ogeneration
        Print the ogeneration of the subvolume

    generation
        Print the generation of the subvolume

    subvolumes
        Print only subvolumes below specified <path>

    uuid
        Print the UUID of the subvolume

    parent_uuid
        Print the parent uuid of subvolumes (and snapshots)

    sent_subvolume_uuid
        Print the UUID of the sent subvolume, where the subvolume is
        the result of a receive operation

    snapshots
        Only snapshot subvolumes in the filesystem will be listed

    readonly
        Only readonly subvolumes in the filesystem will be listed

    deleted
        Only deleted subvolumes that are yet not cleaned

    generation_cmp
        List subvolumes in the filesystem that its generation is >=,
        <= or = value. '+' means >= value, '-' means <= value, If
        there is neither '+' nor '-', it means = value

    ogeneration_cmp
        List subvolumes in the filesystem that its ogeneration is >=,
        <= or = value

    sort
        List subvolumes in order by specified items. Possible values:
        * rootid
        * gen
        * ogen
        * path
        You can add '+' or '-' in front of each items, '+' means
        ascending, '-' means descending. The default is ascending. You
        can combine it in a list.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_list /var/volumes/tmp
        salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
        salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'

    """
    # isinstance is the idiomatic check and also accepts list subclasses
    if sort and not isinstance(sort, list):
        raise CommandExecutionError("Sort parameter must be a list")

    # Every combination of optional order prefix and sortable attribute
    valid_sorts = [
        "".join((order, attrib))
        for order, attrib in itertools.product(
            ("-", "", "+"), ("rootid", "gen", "ogen", "path")
        )
    ]
    if sort and not all(s in valid_sorts for s in sort):
        raise CommandExecutionError("Value for sort not recognized")

    cmd = ["btrfs", "subvolume", "list"]

    # Map each boolean parameter to its btrfs CLI flag
    params = (
        (parent_id, "-p"),
        (absolute, "-a"),
        (ogeneration, "-c"),
        (generation, "-g"),
        (subvolumes, "-o"),
        (uuid, "-u"),
        (parent_uuid, "-q"),
        (sent_subvolume_uuid, "-R"),
        (snapshots, "-s"),
        (readonly, "-r"),
        (deleted, "-d"),
    )
    cmd.extend(p[1] for p in params if p[0])

    # The comparison values may arrive as ints from the CLI parser
    if generation_cmp:
        cmd.extend(["-G", str(generation_cmp)])

    if ogeneration_cmp:
        cmd.extend(["-C", str(ogeneration_cmp)])

    # We already validated the content of the list
    if sort:
        cmd.append("--sort={}".format(",".join(sort)))

    cmd.append(path)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)

    # Parse the output. ID and gen are always at the beginning, and
    # path is always at the end. There is only one column that
    # contains space (top level), and the path value can also have
    # spaces. The issue is that we do not know how many spaces do we
    # have in the path name, so any classic solution based on split
    # will fail.
    #
    # This list is in order.
    columns = (
        "ID",
        "gen",
        "cgen",
        "parent",
        "top level",
        "otime",
        "parent_uuid",
        "received_uuid",
        "uuid",
        "path",
    )
    result = []
    for line in res["stdout"].splitlines():
        table = {}
        for key in columns:
            value, line = _pop(line, key, key == "path")
            if value:
                table[key.lower()] = value
        # If line is not empty here, we are not able to parse it
        if not line:
            result.append(table)

    return result
def subvolume_set_default(subvolid, path):
    """
    Set the subvolume as default

    subvolid
        ID of the new default subvolume

    path
        Mount point for the filesystem

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_set_default 257 /var/volumes/tmp

    """
    # subvolid arrives as an int from the CLI parser; the command list
    # handed to cmd.run_all must contain strings.
    cmd = ["btrfs", "subvolume", "set-default", str(subvolid), path]

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
def subvolume_show(path):
    """
    Show information of a given subvolume

    path
        Mount point for the filesystem

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_show /var/volumes/tmp

    """
    res = __salt__["cmd.run_all"](["btrfs", "subvolume", "show", path])
    salt.utils.fsutils._verify_run(res)

    # The first stdout line holds the subvolume name; every following
    # line is a "key: value" row of the details table.
    lines = res["stdout"].splitlines()
    table = {}
    result = {lines.pop(0).strip(): table}
    for row in lines:
        key, value = row.split(":", 1)
        table[key.lower().strip()] = value.strip()

    return result
def subvolume_snapshot(source, dest=None, name=None, read_only=False):
    """
    Create a snapshot of a source subvolume

    source
        Source subvolume from where to create the snapshot

    dest
        If only dest is given, the subvolume will be named as the
        basename of the source

    name
        Name of the snapshot

    read_only
        Create a read only snapshot

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup

    """
    if not dest and not name:
        raise CommandExecutionError("Provide parameter dest, name, or both")

    cmd = ["btrfs", "subvolume", "snapshot"]
    if read_only:
        cmd.append("-r")
    # BUG FIX: the source subvolume was never appended, so btrfs was
    # invoked without the snapshot source and always failed.
    cmd.append(source)
    if dest and not name:
        cmd.append(dest)
    if dest and name:
        name = os.path.join(dest, name)
    if name:
        cmd.append(name)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
def subvolume_sync(path, subvolids=None, sleep=None):
    """
    Wait until given subvolume are completely removed from the
    filesystem after deletion.

    path
        Mount point for the filesystem

    subvolids
        List of IDs of subvolumes to wait for

    sleep
        Sleep N seconds between checks (default: 1)

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_sync /var/volumes/tmp
        salt '*' btrfs.subvolume_sync /var/volumes/tmp subvolids='[257]'

    """
    # isinstance is the idiomatic check and also accepts list subclasses
    if subvolids and not isinstance(subvolids, list):
        raise CommandExecutionError("Subvolids parameter must be a list")

    cmd = ["btrfs", "subvolume", "sync"]
    if sleep:
        # sleep and the subvolume ids may arrive as ints from the CLI
        cmd.extend(["-s", str(sleep)])

    cmd.append(path)
    if subvolids:
        cmd.extend(str(subvolid) for subvolid in subvolids)

    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
| saltstack/salt | salt/modules/btrfs.py | Python | apache-2.0 | 34,445 |
#!/usr/bin/env python
# coding=utf-8

"""Distutils packaging script for the funcModule module."""

from distutils.core import setup

setup(
    name='funcModule',
    version='1.0.0',
    py_modules=['funcModule'],
    author='haibin',
    author_email='[email protected]',
    url='http://github.com/hibin2014',
    # BUG FIX: the keyword was misspelled 'descripthon', so distutils
    # ignored it and the package metadata had no description.
    description='A simple printer',
)
| xOpenLee/python | HeadFirstPython/chapter2/setup.py | Python | gpl-2.0 | 302 |
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Implementation of OnlineHelp System.
This is the default implementation of the `OnlineHelp`. It defines the global
`OnlineHelp` in which all basic Zope-core help screens are registered.
$Id: onlinehelp.py 67630 2006-04-27 00:54:03Z jim $
"""
__docformat__ = 'restructuredtext'
import os
from zope.interface import implements
from zope.configuration.exceptions import ConfigurationError
from zope.traversing.interfaces import IContainmentRoot
from zope.app import zapi
from zope.app.onlinehelp.interfaces import IOnlineHelp, IOnlineHelpTopic
from zope.app.onlinehelp.onlinehelptopic import OnlineHelpTopic
class OnlineHelp(OnlineHelpTopic):
    """Root topic of the global online help hierarchy.

    >>> import os
    >>> from zope import component
    >>> from zope.component.interfaces import IFactory
    >>> from zope.component.factory import Factory
    >>> from zope.app.onlinehelp.tests.test_onlinehelp import testdir
    >>> from zope.app.onlinehelp.tests.test_onlinehelp import I1, Dummy1
    >>> path = os.path.join(testdir(), 'help.txt')

    Create an `OnlineHelp` instance

    >>> onlinehelp = OnlineHelp('Help', path)

    First do the interface verifying tests.

    >>> from zope.interface.verify import verifyObject
    >>> from zope.traversing.interfaces import IContainmentRoot
    >>> verifyObject(IOnlineHelp, onlinehelp)
    True
    >>> verifyObject(IContainmentRoot, onlinehelp)
    True

    Register a new subtopic for interface 'I1' and view 'view.html'

    >>> from zope.app.onlinehelp.onlinehelptopic import OnlineHelpTopic
    >>> from zope.app.onlinehelp.onlinehelptopic import RESTOnlineHelpTopic
    >>> from zope.app.onlinehelp.onlinehelptopic import STXOnlineHelpTopic
    >>> from zope.app.onlinehelp.onlinehelptopic import ZPTOnlineHelpTopic
    >>> default = Factory(OnlineHelpTopic)
    >>> rest = Factory(RESTOnlineHelpTopic)
    >>> stx = Factory(STXOnlineHelpTopic)
    >>> zpt = Factory(ZPTOnlineHelpTopic)
    >>> component.provideUtility(default, IFactory, 'onlinehelp.topic.default')
    >>> component.provideUtility(rest, IFactory, 'onlinehelp.topic.rest')
    >>> component.provideUtility(stx, IFactory, 'onlinehelp.topic.stx')
    >>> component.provideUtility(zpt, IFactory, 'onlinehelp.topic.zpt')
    >>> path = os.path.join(testdir(), 'help2.txt')
    >>> onlinehelp.registerHelpTopic('', 'help2', 'Help 2',
    ...     path, I1, 'view.html')

    Test if the subtopic is set correctly

    >>> onlinehelp['help2'].title
    'Help 2'

    Additionally it should appear as a utility

    >>> from zope.app import zapi
    >>> topic = zapi.getUtility(IOnlineHelpTopic,'help2')
    >>> topic.title
    'Help 2'

    add another topic without parent

    >>> onlinehelp.registerHelpTopic('missing', 'help3', 'Help 3',
    ...     path, I1, 'view.html')

    The new topic should not be a child of the onlinehelp instance

    >>> 'help3' in onlinehelp.keys()
    False

    But it is available as a utility

    >>> topic = zapi.getUtility(IOnlineHelpTopic,'missing/help3')
    >>> topic.title
    'Help 3'

    now register the missing parent

    >>> onlinehelp.registerHelpTopic('', 'missing', 'Missing',
    ...     path, I1, 'view.html')

    This is a child on the onlinehelp

    >>> 'missing' in onlinehelp.keys()
    True
    >>> missing = onlinehelp['missing']

    This topic should now have 'help3' as a child

    >>> 'help3' in missing.keys()
    True
    """

    implements(IOnlineHelp, IContainmentRoot)

    def __init__(self, title, path):
        # The root topic has an empty id and no parent path.
        super(OnlineHelp, self).__init__('', title, path, None)

    def registerHelpTopic(self, parent_path, id, title,
                          doc_path, interface=None, view=None,
                          class_=None, resources=None):
        "See zope.app.onlineHelp.interfaces.IOnlineHelp"
        if not os.path.exists(doc_path):
            raise ConfigurationError(
                "Help Topic definition %s does not exist" % doc_path
                )

        if class_ is None:
            class_ = OnlineHelpTopic

        # Create topic base on the custom class or OnlinHelpTopic
        topic = class_(id, title, doc_path, parent_path, interface, view)

        # add resources to topic
        if resources is not None:
            topic.addResources(resources)

        # add topic to onlinehelp hierarchy
        parent = None
        try:
            parent = zapi.traverse(self, parent_path)
            parent[id] = topic
        except KeyError:
            # Parent topic not registered yet: the new topic stays
            # reachable only through the utility registry until the
            # parent shows up (see the doctest above).
            pass

        # Re-link topics registered out of order: attach this topic to
        # an already-registered parent found in the utility registry,
        # and adopt earlier-registered orphans whose parentPath points
        # at this topic.
        for t in zapi.getUtilitiesFor(IOnlineHelpTopic):
            if parent is None:
                if t[1].getTopicPath() == parent_path:
                    t[1][id] = topic
            if topic.getTopicPath() == t[1].parentPath:
                topic[t[1].id] = t[1]

        # Add topic to utilities registry
        #utils = zapi.getService(Utilities)
        #utils.provideUtility(IOnlineHelpTopic, topic, topic.getTopicPath())
        zapi.getGlobalSiteManager().registerUtility(
            topic, IOnlineHelpTopic, topic.getTopicPath())
| Donkyhotay/MoonPy | zope/app/onlinehelp/onlinehelp.py | Python | gpl-3.0 | 5,638 |
"""Test the helper objects in letsencrypt.client.plugins.apache.obj."""
import unittest
class AddrTest(unittest.TestCase):
    """Test the Addr class."""

    def setUp(self):
        from letsencrypt.client.plugins.apache.obj import Addr
        # Same host with no port, a wildcard port, and an explicit port.
        self.addr1 = Addr.fromstring("192.168.1.1")
        self.addr2 = Addr.fromstring("192.168.1.1:*")
        self.addr3 = Addr.fromstring("192.168.1.1:80")

    def test_fromstring(self):
        cases = ((self.addr1, ""), (self.addr2, "*"), (self.addr3, "80"))
        for addr, port in cases:
            self.assertEqual(addr.get_addr(), "192.168.1.1")
            self.assertEqual(addr.get_port(), port)

    def test_str(self):
        texts = ("192.168.1.1", "192.168.1.1:*", "192.168.1.1:80")
        for addr, text in zip((self.addr1, self.addr2, self.addr3), texts):
            self.assertEqual(str(addr), text)

    def test_get_addr_obj(self):
        self.assertEqual(str(self.addr1.get_addr_obj("443")), "192.168.1.1:443")
        self.assertEqual(str(self.addr2.get_addr_obj("")), "192.168.1.1")
        self.assertEqual(str(self.addr1.get_addr_obj("*")), "192.168.1.1:*")

    def test_eq(self):
        # Dropping the wildcard port yields an address equal to the bare one.
        self.assertEqual(self.addr1, self.addr2.get_addr_obj(""))
        self.assertNotEqual(self.addr1, self.addr2)
        self.assertFalse(self.addr1 == 3333)

    def test_set_inclusion(self):
        from letsencrypt.client.plugins.apache.obj import Addr
        set_a = {self.addr1, self.addr2}
        set_b = {Addr.fromstring("192.168.1.1"), Addr.fromstring("192.168.1.1:*")}
        self.assertEqual(set_a, set_b)
class VirtualHostTest(unittest.TestCase):
    """Test the VirtualHost class."""

    def setUp(self):
        from letsencrypt.client.plugins.apache.obj import Addr
        from letsencrypt.client.plugins.apache.obj import VirtualHost
        self.vhost1 = VirtualHost(
            "filep", "vh_path", {Addr.fromstring("localhost")}, False, False)

    def test_eq(self):
        from letsencrypt.client.plugins.apache.obj import Addr
        from letsencrypt.client.plugins.apache.obj import VirtualHost
        # An identically-constructed vhost compares (and prints) equal;
        # comparison against a non-VirtualHost is simply False.
        twin = VirtualHost(
            "filep", "vh_path", {Addr.fromstring("localhost")}, False, False)
        self.assertEqual(twin, self.vhost1)
        self.assertEqual(str(twin), str(self.vhost1))
        self.assertFalse(twin == 1234)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| diracdeltas/lets-encrypt-preview | letsencrypt/client/plugins/apache/tests/obj_test.py | Python | apache-2.0 | 2,620 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
    """Run the Blink expectations linter and record its failures."""
    with common.temporary_file() as tempfile_path:
        lint_script = os.path.join(common.SRC_DIR, 'third_party', 'blink',
                                   'tools', 'lint_test_expectations.py')
        # The linter writes its machine-readable failures to --json.
        rc = common.run_command(
            [sys.executable, lint_script, '--json', tempfile_path])
        with open(tempfile_path) as f:
            failures = json.load(f)

    common.record_local_script_results(
        'blink_lint_expectations', args.output, failures, True)

    return rc
def main_compile_targets(args):
    # Linting needs no build artifacts, so report an empty target list.
    json.dump([], args.output)
if __name__ == '__main__':
    # Dispatch table consumed by common.run_script: maps the sub-command
    # given on the command line to its handler.
    funcs = {
        'run': main_run,
        'compile_targets': main_compile_targets,
    }
    sys.exit(common.run_script(sys.argv[1:], funcs))
| nwjs/chromium.src | testing/scripts/blink_lint_expectations.py | Python | bsd-3-clause | 924 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from cinder.openstack.common import eventlet_backdoor
from cinder.openstack.common._i18n import _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import systemd
from cinder.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
    """Return True when the process appears to run as a background daemon."""
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        is_daemon = True
    return is_daemon
def _is_sighup_and_daemon(signo):
    """Return True only for a SIGHUP received while running as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        # Only daemons react to SIGHUP by restarting.
        return _is_daemon()
    # Not a SIGHUP (or the platform has no SIGHUP): nothing to check.
    return False
def _signo_to_signame(signo):
    """Map a termination signal number to its human-readable name."""
    names = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        names[signal.SIGHUP] = 'SIGHUP'
    return names[signo]
def _set_signals_handler(handler):
    """Install handler for SIGTERM, SIGINT and (when available) SIGHUP."""
    signos = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        signos.append(signal.SIGHUP)
    for signo in signos:
        signal.signal(signo, handler)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self.services = Services()
        # Port of the eventlet backdoor (debug REPL) if it was enabled
        # via configuration; propagated to every launched service.
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.

        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None

        """
        cfg.CONF.reload_config_files()
        self.services.restart()
class SignalExit(SystemExit):
    """SystemExit variant that remembers which signal triggered it.

    Lets callers distinguish, e.g., SIGHUP (restart) from
    SIGTERM/SIGINT (shut down) while unwinding.
    """

    def __init__(self, signo, exccode=1):
        # SystemExit stores the exit code in self.code.
        super(SignalExit, self).__init__(exccode)
        self.signo = signo
class ServiceLauncher(Launcher):
    """Launcher that runs services inside the current process."""

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        """Install this launcher's TERM/INT (and HUP) signal handlers."""
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Wait for services to finish; return (exit status, signal number)."""
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
        return status, signo

    def wait(self, ready_callback=None):
        """Run the services; on SIGHUP (daemon only) restart them in place."""
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()
class ServiceWrapper(object):
    """Book-keeping record pairing a service with its worker processes."""

    def __init__(self, service, workers):
        self.service = service    # the Service instance being run
        self.workers = workers    # desired number of child processes
        self.children = set()     # pids of currently-live children
        self.forktimes = []       # timestamps of recent forks (rate limit)
class ProcessLauncher(object):
    """Forks worker processes for services and restarts dead children."""

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        # Pipe used by children to detect an unexpected parent death:
        # the read end blocks until the parent's write end closes.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        """Install the parent's TERM/INT (and HUP) signal handlers."""
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Remember which signal arrived and drop out of the wait loop.
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently

        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run the child's launcher; return (exit status, signal number)."""
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        """Set up the freshly-forked child; return a Launcher running service."""
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one worker for wrap.service; return the child's pid."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # Child: run the service until told to stop, then exit
            # without running parent cleanup handlers (os._exit).
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Fork `workers` child processes, each running `service`."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child; return its ServiceWrapper or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        # Keep every wrapper at its configured worker count until a
        # signal flips self.running.
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # SIGHUP as a daemon: forward it to the children and
                # resume supervising (graceful reload).
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: child already gone; anything else is a real error.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Green-thread pool used by subclasses for background work.
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        """Re-arm the shutdown event so the service can be restarted."""
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        """Hook for subclasses; invoked when the service is started."""
        pass

    def stop(self):
        """Stop the thread group and signal that cleanup is complete."""
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done:
        if not self._done.ready():
            self._done.send()

    def wait(self):
        """Block until the service has finished shutting down."""
        self._done.wait()
class Services(object):
    """Collection of services, each run on its own green thread."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Sent once to release every run_service wrapper thread.
        self.done = event.Event()

    def add(self, service):
        """Track `service` and start it on a new green thread."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Shut down all services, then reap their wrapper threads."""
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        """Block until all service threads have finished."""
        self.tg.wait()

    def restart(self):
        """Stop every service, then start them again on fresh threads."""
        self.stop()
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()
def launch(service, workers=1):
    """Launch a service, in-process or across worker processes.

    :param service: the Service to run
    :param workers: number of worker processes; ``None`` or 1 runs the
                    service in the current process
    :returns: the launcher responsible for the service
    """
    if workers is None or workers == 1:
        # Single worker: run the service inside this process.
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        # Fork `workers` child processes, each running the service.
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher
| hguemar/cinder | cinder/openstack/common/service.py | Python | apache-2.0 | 15,241 |
# Database connection (enable and point at a real database if needed):
# SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/test.db'

# HipChat add-on descriptor values.
HIPCHAT_ADDON_KEY = 'io.close.hipchat-addon'
HIPCHAT_ADDON_NAME = 'Close.io'
HIPCHAT_ADDON_DESCRIPTION = (
    'A HipChat add-on to give details about a Close.io lead when its URL '
    'is mentioned in HipChat'
)
HIPCHAT_ADDON_VENDOR_URL = 'http://close.io'
HIPCHAT_ADDON_VENDOR_NAME = 'Close.io'
# HIPCHAT_ADDON_BASE_URL = 'https://closeio-hipchat.herokuapp.com'  # no trailing slash

# Flask-Cache backend.
CACHE_TYPE = 'simple'

DEBUG = False
| elasticsales/closeio-hipchat-addon | settings.py | Python | mit | 458 |
#Combine Join Field for MUNAME column and Add acres and Sort by MUNAME scripts together
#Join mapunit table to soils
#A. Stephens
#11/19/2014
# ArcGIS geoprocessing script: joins the soil map-unit name (muname) onto a
# soils feature class, dissolves by map unit, computes acreage, sorts by
# muname, and exports the result to Excel plus a summary statistics table.
import arcpy
arcpy.env.overwriteOutput = True
# Script-tool parameters supplied from the ArcGIS tool dialog.
inFC = arcpy.GetParameterAsText (0) #Input Feature Class
intable = arcpy.GetParameterAsText (1) #Input Table
out_xls = arcpy.GetParameterAsText (2) # Output Excel Name (add xls extension)
#Join muname Field
arcpy.JoinField_management(inFC, "mukey", intable, "mukey", ["muname"])
#Dissolve Soils feature class
dissolveFields = ["AREASYMBOL", "MUSYM", "MUKEY", "MUNAME"]
#Dissolve Features
arcpy.Dissolve_management (inFC, "outFCDISSOLVE", dissolveFields)
#Add Acres Field
arcpy.AddField_management("outFCDISSOLVE", "ACRES", "DOUBLE", )
#Calculate Acres Field
# @ACRES converts from the feature class's native areal units automatically.
arcpy.CalculateField_management("outFCDISSOLVE", "ACRES", '!Shape.area@ACRES!', "PYTHON_9.3", )
#Sort MUNAME
arcpy.Sort_management ("outFCDISSOLVE", "outFCDISSOLVE_SORT", [["muname", "ASCENDING"]])
#outa_xls = "MLRA_INTERSECT.xls"
#Table to Excel
arcpy.TableToExcel_conversion("outFCDISSOLVE_SORT", out_xls)
arcpy.Statistics_analysis("outFCDISSOLVE_SORT", "STATISTICS", [["ACRES", "SUM"]])
#Delete Feature Classes
# NOTE(review): cleanup of the intermediate feature classes is commented out,
# so outFCDISSOLVE/outFCDISSOLVE_SORT remain in the workspace after each run.
#arcpy.Delete_management ("outFCDISSOLVE")
#arcpy.Delete_management ("outFCDISSOLVE_SORT")
#print "Script Completed"
print ("Script Completed")
import pytest
from pandas.util._validators import validate_args_and_kwargs
_fname = "func"
def test_invalid_total_length_max_length_one():
    """Passing more total arguments than one compat arg allows raises TypeError."""
    fname_args = 0
    allowed = ("foo",)
    keyword_args = {"foo": "FOO"}
    positional_args = ("FoO", "BaZ")
    max_length = len(allowed) + fname_args
    actual_length = len(keyword_args) + len(positional_args) + fname_args
    msg = (
        fr"{_fname}\(\) takes at most {max_length} "
        fr"argument \({actual_length} given\)"
    )
    with pytest.raises(TypeError, match=msg):
        validate_args_and_kwargs(
            _fname, positional_args, keyword_args, fname_args, allowed
        )
def test_invalid_total_length_max_length_multiple():
    """Too many total arguments against several compat args raises TypeError."""
    fname_args = 2
    allowed = ("foo", "bar", "baz")
    keyword_args = {"foo": "FOO", "bar": "BAR"}
    positional_args = ("FoO", "BaZ")
    max_length = len(allowed) + fname_args
    actual_length = len(keyword_args) + len(positional_args) + fname_args
    msg = (
        fr"{_fname}\(\) takes at most {max_length} "
        fr"arguments \({actual_length} given\)"
    )
    with pytest.raises(TypeError, match=msg):
        validate_args_and_kwargs(
            _fname, positional_args, keyword_args, fname_args, allowed
        )
@pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])
def test_missing_args_or_kwargs(args, kwargs):
    """A compat arg whose value differs from its default raises ValueError,
    whether the values arrive positionally or as keywords."""
    bad_arg = "bar"
    min_fname_arg_count = 2
    compat_args = {"foo": -5, bad_arg: 1}
    msg = (
        fr"the '{bad_arg}' parameter is not supported "
        fr"in the pandas implementation of {_fname}\(\)"
    )
    with pytest.raises(ValueError, match=msg):
        validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_duplicate_argument():
    """Supplying 'foo' both positionally and as a keyword raises TypeError."""
    fname_args = 2
    allowed = {"foo": None, "bar": None, "baz": None}
    keyword_args = {"foo": None, "bar": None}
    positional_args = (None,)  # positionally this is also a value for "foo"
    msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'"
    with pytest.raises(TypeError, match=msg):
        validate_args_and_kwargs(
            _fname, positional_args, keyword_args, fname_args, allowed
        )
def test_validation():
    """Args/kwargs matching the compat defaults must validate silently."""
    allowed = {"foo": 1, "bar": None, "baz": -2}
    validate_args_and_kwargs(_fname, (1, None), {"baz": -2}, 2, allowed)
| rs2/pandas | pandas/tests/util/test_validate_args_and_kwargs.py | Python | bsd-3-clause | 2,391 |
# coding=utf-8
"""point.py - Represents a generic point on a sphere as a Python object.
See documentation of class Point for details.
Ole Nielsen, ANU 2002
"""
from math import cos, sin, pi
from math import acos as unsafe_acos # this may cause a domain error
import numpy
def acos(c):
    """acos - Safe inverse cosine

    The argument is clamped to the admissible interval [-1, 1] before
    calling math.acos, so a small floating point rounding error cannot
    trigger a math domain error.

    :param c: cosine value, possibly slightly outside [-1, 1]
    :type c: float
    :returns: Arcos of the parameter c.
    :rtype: float
    """
    return unsafe_acos(min(1, max(-1, c)))
class Point(object):
    """Definition of a generic point on the sphere.

    Defines a point in terms of latitude and longitude and computes
    distances and bearings to other points on the sphere.  The sines and
    cosines of both angles are precomputed in the constructor so repeated
    distance/bearing calculations are cheap.

    Initialise as Point(lat, lon), where lat and lon are in decimal
    degrees (dd.dddd).

    Public Methods:
        distance_to(P)  -- great-circle distance in metres
        bearing_to(P)   -- compass bearing in whole degrees
        approximate_distance_to(P) -- cheap rough bound in degrees

    NOTE(review): an earlier docstring also advertised dist(P), which is
    not implemented in this class.

    Author: Ole Nielsen, ANU 2002
    """
    # class constants
    R = 6372000 # Approximate radius of Earth (m)
    degrees2radians = pi / 180.0
    def __init__(self, latitude=None, longitude=None):
        """ Point constructor.
        :param latitude: The latitudinal position of the point
            in decimal degrees, within [-90, 90]
        :type latitude: float
        :param longitude: The longitudinal position of the point
            in decimal degrees, within [-180, 180]
        :type longitude: float
        :raises: Exception, AssertionError
        :returns: a point instance
        :rtype: Point
        """
        if latitude is None:
            msg = 'Argument latitude must be specified to Point constructor'
            raise Exception(msg)
        if longitude is None:
            msg = 'Argument longitude must be specified to Point constructor'
            raise Exception(msg)
        msg = 'Specified latitude %f was out of bounds' % latitude
        assert(-90 <= latitude <= 90.0), msg
        msg = 'Specified longitude %f was out of bounds' % longitude
        assert(-180 <= longitude <= 180.0), msg
        self.latitude = float(latitude)
        self.longitude = float(longitude)
        lat = latitude * self.degrees2radians # Converted to radians
        lon = longitude * self.degrees2radians # Converted to radians
        # Cache trigonometric values; GCA/AZ below are written in terms of
        # these so no sin/cos is recomputed per distance query.
        self.coslat = cos(lat)
        self.coslon = cos(lon)
        self.sinlat = sin(lat)
        self.sinlon = sin(lon)
    #---------------
    # Public methods
    #---------------
    def bearing_to(self, P):
        """Bearing (in degrees) to point P, rounded to the nearest degree.
        :param P: A relative point
        :type P: Point
        :returns: bearing degrees
        :rtype: int
        """
        AZ = self.AZ(P)
        return int(round(AZ / self.degrees2radians))
    def distance_to(self, P):
        """Great-circle distance (in metres) to point P.
        :param P: A relative point
        :type P: Point
        :returns: distance
        :rtype: float
        """
        GCA = self.GCA(P)
        return self.R * GCA
    def approximate_distance_to(self, P):
        """Very cheap and rough approximation to distance.

        Returns the Chebyshev distance in degrees, not metres; useful
        only for relative comparisons such as nearest-neighbour culling.
        :param P: A relative point
        :type P: Point
        :returns: distance
        :rtype: float
        """
        return max(abs(self.latitude - P.latitude),
                   abs(self.longitude - P.longitude))
    #-----------------
    # Internal methods
    #-----------------
    def __repr__(self):
        """Readable representation of point with two decimal places.
        :returns: point in human readable format
        :rtype: str
        """
        d = 2
        lat = round(self.latitude, d)
        lon = round(self.longitude, d)
        return ' (' + str(lat) + ', ' + str(lon) + ')'
    def GCA(self, P):
        """Compute the Great Circle Angle (GCA) between current point and P.
        :param P: A relative point
        :type P: Point
        :returns: angle in radians
        :rtype: float
        """
        alpha = P.coslon * self.coslon + P.sinlon * self.sinlon
        # The original formula is alpha = cos(self.lon - P.lon)
        # but rewriting lets us make us of precomputed trigonometric values.
        x = alpha * self.coslat * P.coslat + self.sinlat * P.sinlat
        return acos(x)
    def AZ(self, P):
        """Compute Azimuth bearing (AZ) from current point to P.
        :param P: A relative point
        :type P: Point
        :returns: bearing in radians
        :rtype: float
        """
        # Compute cosine(AZ), where AZ is the azimuth angle
        GCA = self.GCA(P)
        c = P.sinlat - self.sinlat * cos(GCA)
        c = c / self.coslat / sin(GCA)
        # acos (the safe module-level wrapper) clamps rounding drift in c.
        AZ = acos(c)
        # Reverse direction if bearing is westward,
        # i.e. sin(self.lon - P.lon) > 0
        # Without this correction the bearing due west, say, will be 90 degrees
        # because the formulas work in the positive direction which is east.
        #
        # Precomputed trigonometric values are used to rewrite the formula:
        if self.sinlon * P.coslon - self.coslon * P.sinlon > 0:
            AZ = 2 * pi - AZ
        return AZ
    def generate_circle(self, radius, resolution=1):
        """Make a circle about this point.
        :param radius: The desired cirle radius [m]
        :type radius: float, int
        :param resolution: Radial distance (degrees) between
            points on circle. Default is 1 making the circle consist
            of 360 points. (optional)
        :type resolution: int, float
        :returns: list of lon, lat coordinates defining the circle
        :rtype: list
        ..note::
            The circle is defined in geographic coordinates so
            the distance in meters will be greater than the specified radius
            in the north south direction.
        """
        # Find first point in circle to the east of the center by bisecting
        # until desired radius is achieved
        # Go north until distance is greater than desired
        step = 0.001
        d = 0
        while d < radius:
            stepmin = step
            step *= 2
            p = Point(self.latitude + step, self.longitude)
            d = self.distance_to(p)
        # Then bisect until the correct distance is found in degrees
        stepmax = step
        while not numpy.allclose(d, radius, rtol=1.0e-6):
            step = (stepmax + stepmin) / 2
            p = Point(self.latitude + step, self.longitude)
            d = self.distance_to(p)
            if d > radius:
                stepmax = step
            else:
                stepmin = step
        #print
        #print ('Found geographical distance %f degrees yielding radius %f m'
        #       % (step, d))
        r = step
        # Generate circle with geographic radius = step (degrees)
        P = [[p.longitude, p.latitude]]
        angle = 0
        while angle < 360:
            theta = angle * self.degrees2radians
            #print angle, theta, self.bearing_to(p), self.distance_to(p)
            # Generate new point on circle
            lat = self.latitude + r * cos(theta)
            lon = self.longitude + r * sin(theta)
            p = Point(latitude=lat, longitude=lon)
            angle += resolution
            P.append([p.longitude, p.latitude])
        # Close polygon
        P.append(P[0])
        return numpy.array(P)
| lptorres/noah-inasafe | web_api/safe/common/geodesy.py | Python | gpl-3.0 | 7,635 |
import os
# Locate the SBML model file shipped next to this module, independent of
# the process working directory.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000397.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
# Parse the SBML string into a libsbml document, but only when libsbml is
# importable in this environment; otherwise only sbmlString is exposed.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
#!/usr/bin/env python
# Convert old config files using logM_gas to a new value of Sigma_c
import argparse
import yaml
from gas_mass_conversions import logM_to_logsigma

parser = argparse.ArgumentParser(description="Convert the value of logM_gas to Sigma_c in config.yaml files.")
parser.add_argument("--config", help="name of the config file used for the run.", default="config.yaml")
args = parser.parse_args()

# safe_load avoids executing arbitrary YAML tags from the config file;
# yaml.load without an explicit Loader is deprecated and unsafe.
# `with` guarantees the handle is closed even if parsing fails.
with open(args.config) as f:
    config = yaml.safe_load(f)

model = config["model"]
p = config["parameters"]

# Stays None for models that have no conversion implemented; previously the
# "truncated" branch fell through and the final print raised NameError.
logSigma_c = None
if model == "standard":
    logSigma_c = logM_to_logsigma[model](p["r_c"], p["gamma"], p["logM_gas"])
elif model == "vertical":
    logSigma_c = logM_to_logsigma[model](p["r_c"], p["gamma"], p["logM_gas"])
elif model == "truncated":
    # No conversion defined for the truncated model yet.
    print("Conversion for the 'truncated' model is not implemented.")
elif model == "cavity":
    logSigma_c = logM_to_logsigma[model](p["r_c"], p["r_cav"], p["gamma"], p["gamma_cav"], p["logM_gas"])
else:
    print("Model type not found.")

# Only report a result when a conversion was actually computed.
if logSigma_c is not None:
    print("logSigma_c", logSigma_c, "[g/cm^2]")
# ExpenseTracker - a simple, Django based expense tracker.
# Copyright (C) 2013 Massimo Barbieri - http://www.massimobarbieri.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.forms import ModelForm
from .models import Expense
class ExpenseForm(ModelForm):
    """Model-backed form for creating and editing Expense records."""
    class Meta:
        model = Expense
        # Django >= 1.8 raises ImproperlyConfigured when a ModelForm declares
        # neither `fields` nor `exclude`; "__all__" preserves the legacy
        # implicit behaviour of exposing every model field.
        fields = "__all__"
| barmassimo/Expense-Tracker | src/expenses/forms.py | Python | gpl-3.0 | 901 |
from __future__ import annotations
from io import (
BytesIO,
StringIO,
)
import os
from urllib.error import HTTPError
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.xml import read_xml
"""
CHECK LIST
[x] - ValueError: "Values for parser can only be lxml or etree."
etree
[X] - ImportError: "lxml not found, please install or use the etree parser."
[X] - TypeError: "expected str, bytes or os.PathLike object, not NoneType"
[X] - ValueError: "Either element or attributes can be parsed not both."
[X] - ValueError: "xpath does not return any nodes..."
[X] - SyntaxError: "You have used an incorrect or unsupported XPath"
[X] - ValueError: "names does not match length of child elements in xpath."
[X] - TypeError: "...is not a valid type for names"
[X] - ValueError: "To use stylesheet, you need lxml installed..."
[] - URLError: (GENERAL ERROR WITH HTTPError AS SUBCLASS)
[X] - HTTPError: "HTTP Error 404: Not Found"
[] - OSError: (GENERAL ERROR WITH FileNotFoundError AS SUBCLASS)
[X] - FileNotFoundError: "No such file or directory"
[] - ParseError (FAILSAFE CATCH ALL FOR VERY COMPLEX XML)
[X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..."
[X] - UnicodeError: "UTF-16 stream does not start with BOM"
[X] - BadZipFile: "File is not a zip file"
[X] - OSError: "Invalid data stream"
[X] - LZMAError: "Input format not supported by decoder"
[X] - ValueError: "Unrecognized compression type"
[X] - PermissionError: "Forbidden"
lxml
[X] - ValueError: "Either element or attributes can be parsed not both."
[X] - AttributeError: "__enter__"
[X] - XSLTApplyError: "Cannot resolve URI"
[X] - XSLTParseError: "document is not a stylesheet"
[X] - ValueError: "xpath does not return any nodes."
[X] - XPathEvalError: "Invalid expression"
[] - XPathSyntaxError: (OLD VERSION IN lxml FOR XPATH ERRORS)
[X] - TypeError: "empty namespace prefix is not supported in XPath"
[X] - ValueError: "names does not match length of child elements in xpath."
[X] - TypeError: "...is not a valid type for names"
[X] - LookupError: "unknown encoding"
[] - URLError: (USUALLY DUE TO NETWORKING)
[X - HTTPError: "HTTP Error 404: Not Found"
[X] - OSError: "failed to load external entity"
[X] - XMLSyntaxError: "Start tag expected, '<' not found"
[] - ParserError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML
[X] - ValueError: "Values for parser can only be lxml or etree."
[X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..."
[X] - UnicodeError: "UTF-16 stream does not start with BOM"
[X] - BadZipFile: "File is not a zip file"
[X] - OSError: "Invalid data stream"
[X] - LZMAError: "Input format not supported by decoder"
[X] - ValueError: "Unrecognized compression type"
[X] - PermissionError: "Forbidden"
"""
# Shared fixture frame: three shapes, with a missing "sides" value (circle)
# to exercise NaN handling in round-trips.
geom_df = DataFrame(
    {
        "shape": ["square", "circle", "triangle"],
        "degrees": [360, 360, 180],
        "sides": [4, np.nan, 3],
    }
)
xml_default_nmsp = """\
<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
<row>
<shape>square</shape>
<degrees>360</degrees>
<sides>4</sides>
</row>
<row>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3</sides>
</row>
</data>"""
xml_prefix_nmsp = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.com">
<doc:row>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
df_kml = DataFrame(
{
"id": {
0: "ID_00001",
1: "ID_00002",
2: "ID_00003",
3: "ID_00004",
4: "ID_00005",
},
"name": {
0: "Blue Line (Forest Park)",
1: "Red, Purple Line",
2: "Red, Purple Line",
3: "Red, Purple Line",
4: "Red, Purple Line",
},
"styleUrl": {
0: "#LineStyle01",
1: "#LineStyle01",
2: "#LineStyle01",
3: "#LineStyle01",
4: "#LineStyle01",
},
"extrude": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"altitudeMode": {
0: "clampedToGround",
1: "clampedToGround",
2: "clampedToGround",
3: "clampedToGround",
4: "clampedToGround",
},
"coordinates": {
0: (
"-87.77678526964958,41.8708863930319,0 "
"-87.77826234150609,41.87097820122218,0 "
"-87.78251583439344,41.87130129991005,0 "
"-87.78418294588424,41.87145055520308,0 "
"-87.7872369165933,41.8717239119163,0 "
"-87.79160214925886,41.87210797280065,0"
),
1: (
"-87.65758750947528,41.96427269188822,0 "
"-87.65802133507393,41.96581929055245,0 "
"-87.65819033925305,41.96621846093642,0 "
"-87.6583189819129,41.96650362897086,0 "
"-87.65835858701473,41.96669002089185,0 "
"-87.65838428411853,41.96688150295095,0 "
"-87.65842208882658,41.96745896091846,0 "
"-87.65846556843937,41.9683761425439,0 "
"-87.65849296214573,41.96913893870342,0"
),
2: (
"-87.65492939166126,41.95377494531437,0 "
"-87.65557043199591,41.95376544118533,0 "
"-87.65606302030132,41.95376391658746,0 "
"-87.65623502146268,41.95377379126367,0 "
"-87.65634748981634,41.95380103566435,0 "
"-87.65646537904269,41.95387703994676,0 "
"-87.65656532461145,41.95396622645799,0 "
"-87.65664760856414,41.95404201996044,0 "
"-87.65671750555913,41.95416647054043,0 "
"-87.65673983607117,41.95429949810849,0 "
"-87.65673866475777,41.95441024240925,0 "
"-87.6567690255541,41.95490657227902,0 "
"-87.65683672482363,41.95692259283837,0 "
"-87.6568900886376,41.95861070983142,0 "
"-87.65699865558875,41.96181418669004,0 "
"-87.65756347177603,41.96397045777844,0 "
"-87.65758750947528,41.96427269188822,0"
),
3: (
"-87.65362593118043,41.94742799535678,0 "
"-87.65363554415794,41.94819886386848,0 "
"-87.6536456393239,41.95059994675451,0 "
"-87.65365831235026,41.95108288489359,0 "
"-87.6536604873874,41.9519954657554,0 "
"-87.65362592053201,41.95245597302328,0 "
"-87.65367158496069,41.95311153649393,0 "
"-87.65368468595476,41.9533202828916,0 "
"-87.65369271253692,41.95343095587119,0 "
"-87.65373335834569,41.95351536301472,0 "
"-87.65378605844126,41.95358212680591,0 "
"-87.65385067928185,41.95364452823767,0 "
"-87.6539390793817,41.95370263886964,0 "
"-87.6540786298351,41.95373403675265,0 "
"-87.65430648647626,41.9537535411832,0 "
"-87.65492939166126,41.95377494531437,0"
),
4: (
"-87.65345391792157,41.94217681262115,0 "
"-87.65342448305786,41.94237224420864,0 "
"-87.65339745703922,41.94268217746244,0 "
"-87.65337753982941,41.94288140770284,0 "
"-87.65336256753105,41.94317369618263,0 "
"-87.65338799707138,41.94357253961736,0 "
"-87.65340240886648,41.94389158188269,0 "
"-87.65341837392448,41.94406444407721,0 "
"-87.65342275247338,41.94421065714904,0 "
"-87.65347469646018,41.94434829382345,0 "
"-87.65351486483024,41.94447699917548,0 "
"-87.65353483605053,41.9453896864472,0 "
"-87.65361975532807,41.94689193720703,0 "
"-87.65362593118043,41.94742799535678,0"
),
},
}
)
@pytest.fixture(params=["rb", "r"])
def mode(request):
    """File open mode under test: binary ("rb") or text ("r")."""
    return request.param
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
    """XML parser backend under test: lxml (skipped if absent) or etree."""
    return request.param
# FILE / URL
@td.skip_if_no("lxml")
def test_parser_consistency_file(datapath):
    """lxml and etree parsers produce identical frames for a local file."""
    filename = datapath("io", "data", "xml", "books.xml")
    df_file_lxml = read_xml(filename, parser="lxml")
    df_file_etree = read_xml(filename, parser="etree")
    tm.assert_frame_equal(df_file_lxml, df_file_etree)
@tm.network
@pytest.mark.slow
@td.skip_if_no("lxml")
def test_parser_consistency_url(datapath):
    """lxml and etree parsers produce identical frames for a remote URL."""
    url = (
        "https://data.cityofchicago.org/api/views/"
        "8pix-ypme/rows.xml?accessType=DOWNLOAD"
    )
    df_url_lxml = read_xml(url, xpath=".//row/row", parser="lxml")
    df_url_etree = read_xml(url, xpath=".//row/row", parser="etree")
    tm.assert_frame_equal(df_url_lxml, df_url_etree)
def test_file_like(datapath, parser, mode):
    """books.xml parsed from an open file handle matches the expected frame."""
    filename = datapath("io", "data", "xml", "books.xml")
    with open(filename, mode) as f:
        df_file = read_xml(f, parser=parser)
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_file, df_expected)
def test_file_io(datapath, parser, mode):
    """books.xml parsed from BytesIO/StringIO matches the expected frame."""
    filename = datapath("io", "data", "xml", "books.xml")
    with open(filename, mode) as f:
        xml_obj = f.read()
    # Wrap in the buffer type matching the open mode (bytes vs text).
    df_io = read_xml(
        (BytesIO(xml_obj) if isinstance(xml_obj, bytes) else StringIO(xml_obj)),
        parser=parser,
    )
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_io, df_expected)
def test_file_buffered_reader_string(datapath, parser, mode):
    """books.xml content passed directly as str/bytes parses correctly."""
    filename = datapath("io", "data", "xml", "books.xml")
    with open(filename, mode) as f:
        xml_obj = f.read()
    df_str = read_xml(xml_obj, parser=parser)
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_str, df_expected)
def test_file_buffered_reader_no_xml_declaration(datapath, parser, mode):
    """Parsing succeeds even when the leading <?xml ...?> declaration is absent."""
    filename = datapath("io", "data", "xml", "books.xml")
    with open(filename, mode) as f:
        next(f)  # skip the XML declaration line
        xml_obj = f.read()
    df_str = read_xml(xml_obj, parser=parser)
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_str, df_expected)
def test_file_handle_close(datapath, parser):
    """read_xml must not close a caller-owned file handle."""
    xml_file = datapath("io", "data", "xml", "books.xml")
    with open(xml_file, "rb") as f:
        read_xml(BytesIO(f.read()), parser=parser)
        assert not f.closed
@td.skip_if_no("lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_lxml(val):
    """Empty str/bytes input raises lxml's XMLSyntaxError."""
    from lxml.etree import XMLSyntaxError
    with pytest.raises(XMLSyntaxError, match="Document is empty"):
        read_xml(val, parser="lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_etree(val):
    """Empty str/bytes input raises etree's ParseError."""
    from xml.etree.ElementTree import ParseError
    with pytest.raises(ParseError, match="no element found"):
        read_xml(val, parser="etree")
@td.skip_if_no("lxml")
def test_wrong_file_path_lxml():
    """A nonexistent path is treated as literal XML and fails lxml parsing."""
    from lxml.etree import XMLSyntaxError
    filename = os.path.join("data", "html", "books.xml")
    with pytest.raises(
        XMLSyntaxError,
        match=("Start tag expected, '<' not found"),
    ):
        read_xml(filename, parser="lxml")
def test_wrong_file_path_etree():
    """A nonexistent path is treated as literal XML and fails etree parsing."""
    from xml.etree.ElementTree import ParseError
    filename = os.path.join("data", "html", "books.xml")
    with pytest.raises(
        ParseError,
        match=("not well-formed"),
    ):
        read_xml(filename, parser="etree")
@tm.network
@td.skip_if_no("lxml")
def test_url():
    """read_xml fetches a remote URL and applies an XPath count() predicate."""
    url = "https://www.w3schools.com/xml/books.xml"
    df_url = read_xml(url, xpath=".//book[count(*)=4]")
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
            "cover": [None, None, "paperback"],
        }
    )
    tm.assert_frame_equal(df_url, df_expected)
@tm.network
def test_wrong_url(parser):
    """A URL that does not exist surfaces HTTPError 404."""
    with pytest.raises(HTTPError, match=("HTTP Error 404: Not Found")):
        url = "https://www.w3schools.com/xml/python.xml"
        read_xml(url, xpath=".//book[count(*)=4]", parser=parser)
# XPATH
@td.skip_if_no("lxml")
def test_empty_xpath_lxml(datapath):
    """An XPath that matches no nodes raises ValueError under lxml."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(ValueError, match=("xpath does not return any nodes")):
        read_xml(filename, xpath=".//python", parser="lxml")
def test_bad_xpath_etree(datapath):
    """Malformed XPath syntax raises SyntaxError under etree."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(
        SyntaxError, match=("You have used an incorrect or unsupported XPath")
    ):
        read_xml(filename, xpath=".//[book]", parser="etree")
@td.skip_if_no("lxml")
def test_bad_xpath_lxml(datapath):
    """Malformed XPath syntax raises XPathEvalError under lxml."""
    from lxml.etree import XPathEvalError
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(XPathEvalError, match=("Invalid expression")):
        read_xml(filename, xpath=".//[book]", parser="lxml")
# NAMESPACE
def test_default_namespace(parser):
    """A default (unprefixed) namespace is resolvable via a mapped prefix."""
    df_nmsp = read_xml(
        xml_default_nmsp,
        xpath=".//ns:row",
        namespaces={"ns": "http://example.com"},
        parser=parser,
    )
    df_expected = DataFrame(
        {
            "shape": ["square", "circle", "triangle"],
            "degrees": [360, 360, 180],
            "sides": [4.0, float("nan"), 3.0],
        }
    )
    tm.assert_frame_equal(df_nmsp, df_expected)
def test_prefix_namespace(parser):
    """An explicitly prefixed namespace parses under both backends."""
    df_nmsp = read_xml(
        xml_prefix_nmsp,
        xpath=".//doc:row",
        namespaces={"doc": "http://example.com"},
        parser=parser,
    )
    df_expected = DataFrame(
        {
            "shape": ["square", "circle", "triangle"],
            "degrees": [360, 360, 180],
            "sides": [4.0, float("nan"), 3.0],
        }
    )
    tm.assert_frame_equal(df_nmsp, df_expected)
@td.skip_if_no("lxml")
def test_consistency_default_namespace():
    """Both parsers agree on a default-namespace document (prefix name is
    arbitrary; both map to the same URI)."""
    df_lxml = read_xml(
        xml_default_nmsp,
        xpath=".//ns:row",
        namespaces={"ns": "http://example.com"},
        parser="lxml",
    )
    df_etree = read_xml(
        xml_default_nmsp,
        xpath=".//doc:row",
        namespaces={"doc": "http://example.com"},
        parser="etree",
    )
    tm.assert_frame_equal(df_lxml, df_etree)
@td.skip_if_no("lxml")
def test_consistency_prefix_namespace():
    """Both parsers agree on a prefixed-namespace document."""
    df_lxml = read_xml(
        xml_prefix_nmsp,
        xpath=".//doc:row",
        namespaces={"doc": "http://example.com"},
        parser="lxml",
    )
    df_etree = read_xml(
        xml_prefix_nmsp,
        xpath=".//doc:row",
        namespaces={"doc": "http://example.com"},
        parser="etree",
    )
    tm.assert_frame_equal(df_lxml, df_etree)
# PREFIX
def test_missing_prefix_with_default_namespace(datapath, parser):
    """Omitting the namespace prefix against a namespaced doc finds no nodes."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(ValueError, match=("xpath does not return any nodes")):
        read_xml(filename, xpath=".//Placemark", parser=parser)
def test_missing_prefix_definition_etree(datapath):
    """Using an undeclared prefix raises SyntaxError under etree."""
    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
    with pytest.raises(SyntaxError, match=("you used an undeclared namespace prefix")):
        read_xml(filename, xpath=".//kml:Placemark", parser="etree")
@td.skip_if_no("lxml")
def test_missing_prefix_definition_lxml(datapath):
    """Using an undeclared prefix raises XPathEvalError under lxml."""
    from lxml.etree import XPathEvalError
    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
    with pytest.raises(XPathEvalError, match=("Undefined namespace prefix")):
        read_xml(filename, xpath=".//kml:Placemark", parser="lxml")
@td.skip_if_no("lxml")
@pytest.mark.parametrize("key", ["", None])
def test_none_namespace_prefix(key):
    """An empty or None namespace prefix raises TypeError under lxml."""
    with pytest.raises(
        TypeError, match=("empty namespace prefix is not supported in XPath")
    ):
        read_xml(
            xml_default_nmsp,
            xpath=".//kml:Placemark",
            namespaces={key: "http://www.opengis.net/kml/2.2"},
            parser="lxml",
        )
# ELEMS AND ATTRS
def test_file_elems_and_attrs(datapath, parser):
    """Default parse includes both element text and attribute values."""
    filename = datapath("io", "data", "xml", "books.xml")
    df_file = read_xml(filename, parser=parser)
    df_expected = DataFrame(
        {
            "category": ["cooking", "children", "web"],
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_file, df_expected)
def test_file_only_attrs(datapath, parser):
    """attrs_only=True restricts the parse to attribute columns."""
    filename = datapath("io", "data", "xml", "books.xml")
    df_file = read_xml(filename, attrs_only=True, parser=parser)
    df_expected = DataFrame({"category": ["cooking", "children", "web"]})
    tm.assert_frame_equal(df_file, df_expected)
def test_file_only_elems(datapath, parser):
    """elems_only=True restricts the parse to child-element columns."""
    filename = datapath("io", "data", "xml", "books.xml")
    df_file = read_xml(filename, elems_only=True, parser=parser)
    df_expected = DataFrame(
        {
            "title": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "year": [2005, 2005, 2003],
            "price": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_file, df_expected)
def test_elem_and_attrs_only(datapath, parser):
    """elems_only and attrs_only are mutually exclusive options."""
    filename = datapath("io", "data", "xml", "cta_rail_lines.kml")
    with pytest.raises(
        ValueError,
        match=("Either element or attributes can be parsed not both"),
    ):
        read_xml(filename, elems_only=True, attrs_only=True, parser=parser)
@td.skip_if_no("lxml")
def test_attribute_centric_xml():
    """A document storing data purely in attributes parses identically
    under lxml and etree."""
    xml = """\
<?xml version="1.0" encoding="UTF-8"?>
<TrainSchedule>
<Stations>
<station Name="Manhattan" coords="31,460,195,498"/>
<station Name="Laraway Road" coords="63,409,194,455"/>
<station Name="179th St (Orland Park)" coords="0,364,110,395"/>
<station Name="153rd St (Orland Park)" coords="7,333,113,362"/>
<station Name="143rd St (Orland Park)" coords="17,297,115,330"/>
<station Name="Palos Park" coords="128,281,239,303"/>
<station Name="Palos Heights" coords="148,257,283,279"/>
<station Name="Worth" coords="170,230,248,255"/>
<station Name="Chicago Ridge" coords="70,187,208,214"/>
<station Name="Oak Lawn" coords="166,159,266,185"/>
<station Name="Ashburn" coords="197,133,336,157"/>
<station Name="Wrightwood" coords="219,106,340,133"/>
<station Name="Chicago Union Sta" coords="220,0,360,43"/>
</Stations>
</TrainSchedule>"""
    df_lxml = read_xml(xml, xpath=".//station")
    df_etree = read_xml(xml, xpath=".//station", parser="etree")
    tm.assert_frame_equal(df_lxml, df_etree)
# NAMES
def test_names_option_output(datapath, parser):
    """`names` renames the parsed columns positionally."""
    filename = datapath("io", "data", "xml", "books.xml")
    df_file = read_xml(
        filename, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser
    )
    df_expected = DataFrame(
        {
            "Col1": ["cooking", "children", "web"],
            "Col2": ["Everyday Italian", "Harry Potter", "Learning XML"],
            "Col3": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
            "Col4": [2005, 2005, 2003],
            "Col5": [30.00, 29.99, 39.95],
        }
    )
    tm.assert_frame_equal(df_file, df_expected)
def test_names_option_wrong_length(datapath, parser):
    """`names` shorter than the parsed columns raises ValueError."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(ValueError, match=("names does not match length")):
        read_xml(filename, names=["Col1", "Col2", "Col3"], parser=parser)
def test_names_option_wrong_type(datapath, parser):
    """A non-list `names` value raises TypeError."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(TypeError, match=("is not a valid type for names")):
        read_xml(
            filename, names="Col1, Col2, Col3", parser=parser  # type: ignore[arg-type]
        )
# ENCODING
def test_wrong_encoding(datapath, parser):
    """Decoding a Latin-1 file as the default UTF-8 fails."""
    filename = datapath("io", "data", "xml", "baby_names.xml")
    with pytest.raises(UnicodeDecodeError, match=("'utf-8' codec can't decode")):
        read_xml(filename, parser=parser)
def test_utf16_encoding(datapath, parser):
    """Forcing UTF-16 on a non-UTF-16 file raises UnicodeError."""
    filename = datapath("io", "data", "xml", "baby_names.xml")
    with pytest.raises(
        UnicodeError,
        match=(
            "UTF-16 stream does not start with BOM|"
            "'utf-16-le' codec can't decode byte"
        ),
    ):
        read_xml(filename, encoding="UTF-16", parser=parser)
def test_unknown_encoding(datapath, parser):
    """A misspelled codec name raises LookupError."""
    filename = datapath("io", "data", "xml", "baby_names.xml")
    with pytest.raises(LookupError, match=("unknown encoding: uft-8")):
        read_xml(filename, encoding="UFT-8", parser=parser)
def test_ascii_encoding(datapath, parser):
    """Decoding non-ASCII content as ASCII fails."""
    filename = datapath("io", "data", "xml", "baby_names.xml")
    with pytest.raises(UnicodeDecodeError, match=("'ascii' codec can't decode byte")):
        read_xml(filename, encoding="ascii", parser=parser)
@td.skip_if_no("lxml")
def test_parser_consistency_with_encoding(datapath):
    """Both parsers agree when an explicit (case-insensitive) encoding is given."""
    filename = datapath("io", "data", "xml", "baby_names.xml")
    df_lxml = read_xml(filename, parser="lxml", encoding="ISO-8859-1")
    df_etree = read_xml(filename, parser="etree", encoding="iso-8859-1")
    tm.assert_frame_equal(df_lxml, df_etree)
# PARSER
@td.skip_if_installed("lxml")
def test_default_parser_no_lxml(datapath):
    """Without lxml installed, the default parser raises ImportError."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(
        ImportError, match=("lxml not found, please install or use the etree parser.")
    ):
        read_xml(filename)
def test_wrong_parser(datapath):
    """Any parser value other than lxml/etree raises ValueError."""
    filename = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(
        ValueError, match=("Values for parser can only be lxml or etree.")
    ):
        read_xml(filename, parser="bs4")
# STYLESHEET
@td.skip_if_no("lxml")
def test_stylesheet_file(datapath):
    """An XSLT stylesheet given as a path flattens the KML as expected."""
    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
    df_style = read_xml(
        kml,
        xpath=".//k:Placemark",
        namespaces={"k": "http://www.opengis.net/kml/2.2"},
        stylesheet=xsl,
    )
    tm.assert_frame_equal(df_kml, df_style)
@td.skip_if_no("lxml")
def test_stylesheet_file_like(datapath, mode):
    """An XSLT stylesheet given as an open file handle works in both modes."""
    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
    with open(xsl, mode) as f:
        df_style = read_xml(
            kml,
            xpath=".//k:Placemark",
            namespaces={"k": "http://www.opengis.net/kml/2.2"},
            stylesheet=f,
        )
    tm.assert_frame_equal(df_kml, df_style)
@td.skip_if_no("lxml")
def test_stylesheet_io(datapath, mode):
    """An XSLT stylesheet given as BytesIO/StringIO flattens the KML."""
    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
    xsl = datapath("io", "data", "xml", "flatten_doc.xsl")
    xsl_obj: BytesIO | StringIO
    with open(xsl, mode) as f:
        if mode == "rb":
            xsl_obj = BytesIO(f.read())
        else:
            xsl_obj = StringIO(f.read())
    df_style = read_xml(
        kml,
        xpath=".//k:Placemark",
        namespaces={"k": "http://www.opengis.net/kml/2.2"},
        stylesheet=xsl_obj,
    )
    tm.assert_frame_equal(df_kml, df_style)
@td.skip_if_no("lxml")
def test_stylesheet_buffered_reader(datapath, mode):
    # Passing the already-read stylesheet contents (str or bytes depending
    # on mode) is supported as well.
    kml_path = datapath("io", "data", "xml", "cta_rail_lines.kml")
    xsl_path = datapath("io", "data", "xml", "flatten_doc.xsl")
    with open(xsl_path, mode) as handle:
        contents = handle.read()

    result = read_xml(
        kml_path,
        xpath=".//k:Placemark",
        namespaces={"k": "http://www.opengis.net/kml/2.2"},
        stylesheet=contents,
    )
    tm.assert_frame_equal(df_kml, result)
@td.skip_if_no("lxml")
def test_not_stylesheet(datapath):
    from lxml.etree import XSLTParseError

    # A plain XML document handed in as a stylesheet should surface lxml's
    # XSLTParseError unchanged.
    kml_path = datapath("io", "data", "xml", "cta_rail_lines.kml")
    not_xsl = datapath("io", "data", "xml", "books.xml")
    with pytest.raises(XSLTParseError, match="document is not a stylesheet"):
        read_xml(kml_path, stylesheet=not_xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_syntax(datapath):
    from lxml.etree import XMLSyntaxError

    # Deliberately malformed stylesheet: the opening <xsl:stylesheet ...>
    # element is self-closed ("/>"), so every element after it is trailing
    # content and lxml fails to parse the document at all.
    xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:k="http://www.opengis.net/kml/2.2"/>
    <xsl:output method="xml" omit-xml-declaration="yes"
                cdata-section-elements="k:description" indent="yes"/>
    <xsl:strip-space elements="*"/>
    <xsl:template match="node()|@*">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
        </xsl:copy>
    </xsl:template>
    <xsl:template match="k:MultiGeometry|k:LineString">
        <xsl:apply-templates select='*'/>
    </xsl:template>
    <xsl:template match="k:description|k:Snippet|k:Style"/>
</xsl:stylesheet>"""

    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")

    # Parse failure, not an XSLT-specific error: the string is not even XML.
    with pytest.raises(
        XMLSyntaxError, match=("Extra content at the end of the document")
    ):
        read_xml(kml, stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_eval(datapath):
    from lxml.etree import XSLTParseError

    # Well-formed XML but an invalid XPath in a template match:
    # "node(*)" is not a legal node test, so compiling the stylesheet fails.
    xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:k="http://www.opengis.net/kml/2.2">
    <xsl:output method="xml" omit-xml-declaration="yes"
                cdata-section-elements="k:description" indent="yes"/>
    <xsl:strip-space elements="*"/>
    <xsl:template match="node(*)|@*">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
        </xsl:copy>
    </xsl:template>
    <xsl:template match="k:MultiGeometry|k:LineString">
        <xsl:apply-templates select='*'/>
    </xsl:template>
    <xsl:template match="k:description|k:Snippet|k:Style"/>
</xsl:stylesheet>"""

    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")

    with pytest.raises(XSLTParseError, match=("failed to compile")):
        read_xml(kml, stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_apply(datapath):
    from lxml.etree import XSLTApplyError

    # Stylesheet compiles fine but fails at apply time: document() points
    # at a file that does not exist, so the transform raises when run.
    xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:output method="xml" encoding="utf-8" indent="yes" />
    <xsl:strip-space elements="*"/>
    <xsl:template match="@*|node()">
        <xsl:copy>
            <xsl:copy-of select="document('non_existent.xml')/*"/>
        </xsl:copy>
    </xsl:template>
</xsl:stylesheet>"""

    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")

    with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")):
        read_xml(kml, stylesheet=xsl)
@td.skip_if_no("lxml")
def test_wrong_stylesheet():
    from lxml.etree import XMLSyntaxError

    # NOTE(review): unlike the other tests this builds paths with
    # os.path.join relative to the working directory rather than via the
    # datapath fixture, and "flatten.xsl" does not exist -- presumably the
    # unreadable path is what triggers the syntax error; verify intent.
    kml = os.path.join("data", "xml", "cta_rail_lines.kml")
    xsl = os.path.join("data", "xml", "flatten.xsl")

    with pytest.raises(
        XMLSyntaxError,
        match=("Start tag expected, '<' not found"),
    ):
        read_xml(kml, stylesheet=xsl)
@td.skip_if_no("lxml")
def test_stylesheet_file_close(datapath, mode):
    # read_xml must not close a caller-supplied file handle; the handle's
    # closed flag is checked after the read completes.
    kml_path = datapath("io", "data", "xml", "cta_rail_lines.kml")
    xsl_path = datapath("io", "data", "xml", "flatten_doc.xsl")

    xsl_obj: BytesIO | StringIO
    with open(xsl_path, mode) as handle:
        xsl_obj = (
            BytesIO(handle.read()) if mode == "rb" else StringIO(handle.read())
        )

        read_xml(kml_path, stylesheet=xsl_obj)

        assert not handle.closed
@td.skip_if_no("lxml")
def test_stylesheet_with_etree(datapath):
    # Stylesheet support requires lxml; combining it with the etree parser
    # should raise a clear ValueError.
    kml_path = os.path.join("data", "xml", "cta_rail_lines.kml")
    xsl_path = os.path.join("data", "xml", "flatten_doc.xsl")
    expected = "To use stylesheet, you need lxml installed"
    with pytest.raises(ValueError, match=expected):
        read_xml(kml_path, parser="etree", stylesheet=xsl_path)
@td.skip_if_no("lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_stylesheet(val):
    from lxml.etree import XMLSyntaxError

    # An empty str or bytes stylesheet is a syntax error in lxml.
    kml_path = os.path.join("data", "xml", "cta_rail_lines.kml")
    err_msg = "Document is empty|Start tag expected, '<' not found"
    with pytest.raises(XMLSyntaxError, match=err_msg):
        read_xml(kml_path, stylesheet=val)
@tm.network
@td.skip_if_no("lxml")
def test_online_stylesheet():
    # Live-network smoke test: apply a remote XSL to a remote XML document
    # (W3Schools CD-catalog example) and compare the first five rows.
    xml = "https://www.w3schools.com/xml/cdcatalog_with_xsl.xml"
    xsl = "https://www.w3schools.com/xml/cdcatalog.xsl"

    df_xsl = read_xml(
        xml,
        xpath=".//tr[td and position() <= 6]",
        names=["title", "artist"],
        stylesheet=xsl,
    )

    df_expected = DataFrame(
        {
            "title": {
                0: "Empire Burlesque",
                1: "Hide your heart",
                2: "Greatest Hits",
                3: "Still got the blues",
                4: "Eros",
            },
            "artist": {
                0: "Bob Dylan",
                1: "Bonnie Tyler",
                2: "Dolly Parton",
                3: "Gary Moore",
                4: "Eros Ramazzotti",
            },
        }
    )

    tm.assert_frame_equal(df_expected, df_xsl)
# COMPRESSION
@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"])
def test_compression_read(parser, comp):
    # Round-trip: write compressed XML, read it back with the matching
    # codec, and compare against the source frame.
    with tm.ensure_clean() as tmp_path:
        geom_df.to_xml(tmp_path, index=False, parser=parser, compression=comp)

        round_tripped = read_xml(tmp_path, parser=parser, compression=comp)

        tm.assert_frame_equal(round_tripped, geom_df)
@pytest.mark.parametrize("comp", ["gzip", "xz", "zip"])
def test_wrong_compression_bz2(parser, comp):
    # Declaring bz2 for a file written with any other codec must fail.
    with tm.ensure_clean() as tmp_path:
        geom_df.to_xml(tmp_path, parser=parser, compression=comp)

        with pytest.raises(OSError, match="Invalid data stream"):
            read_xml(tmp_path, parser=parser, compression="bz2")
@pytest.mark.parametrize("comp", ["bz2", "xz", "zip"])
def test_wrong_compression_gz(parser, comp):
    # The gzip reader rejects data written with any other codec.
    with tm.ensure_clean() as tmp_path:
        geom_df.to_xml(tmp_path, parser=parser, compression=comp)

        with pytest.raises(OSError, match="Not a gzipped file"):
            read_xml(tmp_path, parser=parser, compression="gzip")
@pytest.mark.parametrize("comp", ["bz2", "gzip", "zip"])
def test_wrong_compression_xz(parser, comp):
    from lzma import LZMAError

    # The xz reader rejects data written with any other codec.
    with tm.ensure_clean() as tmp_path:
        geom_df.to_xml(tmp_path, parser=parser, compression=comp)

        with pytest.raises(LZMAError, match="Input format not supported by decoder"):
            read_xml(tmp_path, parser=parser, compression="xz")
@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz"])
def test_wrong_compression_zip(parser, comp):
    from zipfile import BadZipFile

    # The zip reader rejects data written with any other codec.
    with tm.ensure_clean() as tmp_path:
        geom_df.to_xml(tmp_path, parser=parser, compression=comp)

        with pytest.raises(BadZipFile, match="File is not a zip file"):
            read_xml(tmp_path, parser=parser, compression="zip")
def test_unsuported_compression(datapath, parser):
with pytest.raises(ValueError, match="Unrecognized compression type"):
with tm.ensure_clean() as path:
read_xml(path, parser=parser, compression="7z")
# STORAGE OPTIONS
@tm.network
@td.skip_if_no("s3fs")
@td.skip_if_no("lxml")
def test_s3_parser_consistency():
    # Live-network test: both parsers must produce identical frames from a
    # public S3-hosted XML document (anonymous access).
    # Python Software Foundation (2019 IRS-990 RETURN)
    s3 = "s3://irs-form-990/201923199349319487_public.xml"

    df_lxml = read_xml(
        s3,
        xpath=".//irs:Form990PartVIISectionAGrp",
        namespaces={"irs": "http://www.irs.gov/efile"},
        parser="lxml",
        storage_options={"anon": True},
    )

    df_etree = read_xml(
        s3,
        xpath=".//irs:Form990PartVIISectionAGrp",
        namespaces={"irs": "http://www.irs.gov/efile"},
        parser="etree",
        storage_options={"anon": True},
    )

    tm.assert_frame_equal(df_lxml, df_etree)
| dsm054/pandas | pandas/tests/io/xml/test_xml.py | Python | bsd-3-clause | 33,896 |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import json
import logging
import os
from opencensus.common.schedule import QueueExitEvent
from opencensus.ext.azure.common import Options, utils
from opencensus.ext.azure.common.exporter import BaseExporter
from opencensus.ext.azure.common.processor import ProcessorMixin
from opencensus.ext.azure.common.protocol import (
Data,
Envelope,
ExceptionData,
RemoteDependency,
Request,
)
from opencensus.ext.azure.common.storage import LocalFileStorage
from opencensus.ext.azure.common.transport import TransportMixin
from opencensus.ext.azure.metrics_exporter import statsbeat_metrics
from opencensus.trace import attributes_helper
from opencensus.trace.span import SpanKind
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
__all__ = ['AzureExporter']
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_PATH = attributes_helper.COMMON_ATTRIBUTES['HTTP_PATH']
HTTP_ROUTE = attributes_helper.COMMON_ATTRIBUTES['HTTP_ROUTE']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
ERROR_MESSAGE = attributes_helper.COMMON_ATTRIBUTES['ERROR_MESSAGE']
ERROR_NAME = attributes_helper.COMMON_ATTRIBUTES['ERROR_NAME']
STACKTRACE = attributes_helper.COMMON_ATTRIBUTES['STACKTRACE']
class AzureExporter(BaseExporter, ProcessorMixin, TransportMixin):
    """An exporter that sends traces to Microsoft Azure Monitor.

    :param options: Options for the exporter.
    """

    def __init__(self, **options):
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        # Local file storage buffers envelopes that could not be sent; only
        # created when enabled via options.
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        super(AzureExporter, self).__init__(**options)
        # Flush pending telemetry on interpreter shutdown.
        atexit.register(self._stop, self.options.grace_period)
        # start statsbeat on exporter instantiation
        if not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"):
            statsbeat_metrics.collect_statsbeat_metrics(self.options)
        # For redirects
        self._consecutive_redirects = 0  # To prevent circular redirects

    def span_data_to_envelope(self, sd):
        """Generate Azure Monitor envelope(s) for one span.

        Server spans map to Request telemetry, all other spans to
        RemoteDependency telemetry. A server span carrying exception
        attributes first yields an ExceptionData envelope (note: the same
        envelope object is then re-used and mutated for the Request item).
        """
        envelope = Envelope(
            iKey=self.options.instrumentation_key,
            tags=dict(utils.azure_monitor_context),
            time=sd.start_time,
        )
        envelope.tags['ai.operation.id'] = sd.context.trace_id
        if sd.parent_span_id:
            envelope.tags['ai.operation.parentId'] = '{}'.format(
                sd.parent_span_id,
            )
        if sd.span_kind == SpanKind.SERVER:
            if ERROR_MESSAGE in sd.attributes:
                envelope.name = 'Microsoft.ApplicationInsights.Exception'
                data = ExceptionData(
                    exceptions=[{
                        'id': 1,
                        'outerId': '{}'.format(sd.span_id),
                        'typeName': sd.attributes.get(ERROR_NAME, ''),
                        'message': sd.attributes[ERROR_MESSAGE],
                        'hasFullStack': STACKTRACE in sd.attributes,
                        'parsedStack': sd.attributes.get(STACKTRACE, None)
                    }],
                )
                envelope.data = Data(baseData=data, baseType='ExceptionData')
                yield envelope
            envelope.name = 'Microsoft.ApplicationInsights.Request'
            data = Request(
                id='{}'.format(sd.span_id),
                duration=utils.timestamp_to_duration(
                    sd.start_time,
                    sd.end_time,
                ),
                responseCode=str(sd.status.code),
                success=False,  # Modify based off attributes or status
                properties={},
            )
            envelope.data = Data(baseData=data, baseType='RequestData')
            data.name = ''
            if HTTP_METHOD in sd.attributes:
                data.name = sd.attributes[HTTP_METHOD]
            if HTTP_ROUTE in sd.attributes:
                data.name = data.name + ' ' + sd.attributes[HTTP_ROUTE]
                envelope.tags['ai.operation.name'] = data.name
                data.properties['request.name'] = data.name
            elif HTTP_PATH in sd.attributes:
                data.properties['request.name'] = data.name + \
                    ' ' + sd.attributes[HTTP_PATH]
            if HTTP_URL in sd.attributes:
                data.url = sd.attributes[HTTP_URL]
                data.properties['request.url'] = sd.attributes[HTTP_URL]
            if HTTP_STATUS_CODE in sd.attributes:
                status_code = sd.attributes[HTTP_STATUS_CODE]
                data.responseCode = str(status_code)
                # 2xx/3xx responses count as success.
                data.success = (
                    status_code >= 200 and status_code <= 399
                )
            elif sd.status.code == 0:
                data.success = True
        else:
            envelope.name = \
                'Microsoft.ApplicationInsights.RemoteDependency'
            data = RemoteDependency(
                name=sd.name,  # TODO
                id='{}'.format(sd.span_id),
                resultCode=str(sd.status.code),
                duration=utils.timestamp_to_duration(
                    sd.start_time,
                    sd.end_time,
                ),
                success=False,  # Modify based off attributes or status
                properties={},
            )
            envelope.data = Data(
                baseData=data,
                baseType='RemoteDependencyData',
            )
            if sd.span_kind == SpanKind.CLIENT:
                data.type = sd.attributes.get('component')
                if HTTP_URL in sd.attributes:
                    url = sd.attributes[HTTP_URL]
                    # TODO: error handling, probably put scheme as well
                    data.data = url
                    parse_url = urlparse(url)
                    # target matches authority (host:port)
                    data.target = parse_url.netloc
                    if HTTP_METHOD in sd.attributes:
                        # name is METHOD/path
                        data.name = sd.attributes[HTTP_METHOD] \
                            + ' ' + parse_url.path
                if HTTP_STATUS_CODE in sd.attributes:
                    status_code = sd.attributes[HTTP_STATUS_CODE]
                    data.resultCode = str(status_code)
                    data.success = 200 <= status_code < 400
                elif sd.status.code == 0:
                    data.success = True
            else:
                # Non-client, non-server spans are in-process dependencies.
                data.type = 'INPROC'
                data.success = True
        if sd.links:
            links = []
            for link in sd.links:
                links.append(
                    {"operation_Id": link.trace_id, "id": link.span_id})
            data.properties["_MS.links"] = json.dumps(links)
        # TODO: tracestate, tags
        for key in sd.attributes:
            # This removes redundant data from ApplicationInsights
            if key.startswith('http.'):
                continue
            data.properties[key] = sd.attributes[key]
        yield envelope

    def emit(self, batch, event=None):
        """Convert a batch of spans to envelopes and transmit them.

        Failed envelopes are persisted to local storage (when enabled) for
        later retry; on a QueueExitEvent pending stored envelopes are
        flushed before signalling the event.
        """
        try:
            if batch:
                envelopes = [envelope
                             for sd in batch
                             for envelope in self.span_data_to_envelope(sd)]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                event.set()
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        except Exception:
            logger.exception('Exception occurred while exporting the data.')

    def _stop(self, timeout=None):
        # Close local storage and stop the background worker.
        # NOTE(review): self._worker is presumably set by BaseExporter's
        # __init__ -- not visible in this file; confirm.
        if self.storage:
            self.storage.close()
        if self._worker:
            self._worker.stop(timeout)
| census-instrumentation/opencensus-python | contrib/opencensus-ext-azure/opencensus/ext/azure/trace_exporter/__init__.py | Python | apache-2.0 | 9,315 |
import pytest
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
NodeFactory,
)
from osf.utils import permissions as osf_permissions
@pytest.mark.django_db
class LogsTestCase:
    """Shared fixtures for log API tests.

    Provides one private and one public project, each with ``user_one``
    added as a read-only contributor, the ordered log lists for both, and
    convenience URLs for the log endpoints.
    """

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        # Not a contributor on either project.
        return AuthUserFactory()

    @pytest.fixture()
    def node_private(self, user_one):
        node_private = ProjectFactory(is_public=False)
        node_private.add_contributor(
            user_one,
            permissions=[osf_permissions.READ],
            auth=Auth(node_private.creator),
            log=True, save=True
        )
        return node_private

    @pytest.fixture()
    def node_public(self, user_one):
        node_public = ProjectFactory(is_public=True)
        node_public.add_contributor(
            user_one,
            permissions=[osf_permissions.READ],
            auth=Auth(node_public.creator),
            log=True, save=True
        )
        return node_public

    @pytest.fixture()
    def logs_public(self, node_public):
        # Oldest first: index 0 is project creation, 1 the contributor add.
        return list(node_public.logs.order_by('date'))

    @pytest.fixture()
    def log_public(self, logs_public):
        return logs_public[0]

    @pytest.fixture()
    def contributor_log_public(self, logs_public):
        return logs_public[1]

    @pytest.fixture()
    def logs_private(self, node_private):
        return list(node_private.logs.order_by('date'))

    @pytest.fixture()
    def log_private(self, logs_private):
        return logs_private[0]

    @pytest.fixture()
    def contributor_log_private(self, logs_private):
        return logs_private[1]

    @pytest.fixture()
    def url_node_private_log(self, node_private):
        return '/{}nodes/{}/logs/'.format(API_BASE, node_private._id)

    @pytest.fixture()
    def url_logs(self):
        return '/{}logs/'.format(API_BASE)

    @pytest.fixture()
    def url_log_private_nodes(self, log_private, url_logs):
        return '{}{}/nodes/'.format(url_logs, log_private._id)

    @pytest.fixture()
    def url_log_public_nodes(self, log_public, url_logs):
        return '{}{}/nodes/'.format(url_logs, log_public._id)

    @pytest.fixture()
    def url_log_detail_private(self, log_private, url_logs):
        return '{}{}/'.format(url_logs, log_private._id)

    @pytest.fixture()
    def url_log_detail_public(self, log_public, url_logs):
        return '{}{}/'.format(url_logs, log_public._id)
@pytest.mark.django_db
class TestLogDetail(LogsTestCase):
    """Permission checks for the log detail endpoint."""

    def test_log_detail_private(
            self, app, url_log_detail_private,
            user_one, user_two, log_private):
        # test_log_detail_returns_data
        res = app.get(url_log_detail_private, auth=user_one.auth)
        assert res.status_code == 200
        json_data = res.json['data']
        assert json_data['id'] == log_private._id

        # test_log_detail_private_not_logged_in_cannot_access_logs
        res = app.get(url_log_detail_private, expect_errors=True)
        assert res.status_code == 401

        # test_log_detail_private_non_contributor_cannot_access_logs
        res = app.get(
            url_log_detail_private,
            auth=user_two.auth, expect_errors=True
        )
        assert res.status_code == 403

    def test_log_detail_public(
            self, app, url_log_detail_public,
            log_public, user_two, user_one):
        # test_log_detail_public_not_logged_in_can_access_logs
        res = app.get(url_log_detail_public, expect_errors=True)
        assert res.status_code == 200
        data = res.json['data']
        assert data['id'] == log_public._id

        # test_log_detail_public_non_contributor_can_access_logs
        res = app.get(
            url_log_detail_public,
            auth=user_two.auth, expect_errors=True)
        assert res.status_code == 200
        data = res.json['data']
        assert data['id'] == log_public._id

        # test_log_detail_data_format_api
        res = app.get(
            '{}?format=api'.format(url_log_detail_public),
            auth=user_one.auth)
        assert res.status_code == 200
        # NOTE(review): ``unicode`` is a Python-2-only builtin; this test
        # module will raise NameError here under Python 3 -- confirm the
        # project's targeted interpreter.
        assert log_public._id in unicode(res.body, 'utf-8')
@pytest.mark.django_db
class TestNodeFileLogDetail:
    """Serialization of file-related log params (titles and URLs).

    Fix: ``test_folder_log_url_removal`` previously never requested the
    ``node_with_folder_log`` fixture it was written to exercise, so no
    folder-created log existed and the assertion passed vacuously against
    the project-creation log. It now requests the fixture.
    """

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def node(self, user_one, user_two):
        # user_two is a contributor on the parent node but NOT on the
        # component, which is what the title-hiding tests rely on.
        node = ProjectFactory(creator=user_one)
        node.add_contributor(user_two)
        return node

    @pytest.fixture()
    def component(self, user_one, node):
        return NodeFactory(parent=node, creator=user_one)

    @pytest.fixture()
    def file_component(self, user_one, component):
        return api_utils.create_test_file(target=component, user=user_one)

    @pytest.fixture()
    def url_node_logs(self, node):
        return '/{}nodes/{}/logs/'.format(API_BASE, node._id)

    @pytest.fixture()
    def url_component_logs(self, component):
        return '/{}nodes/{}/logs/'.format(API_BASE, component._id)

    @pytest.fixture()
    def node_with_log(self, node, user_one, file_component, component):
        # A file-moved log from the component into the parent node.
        node.add_log(
            'osf_storage_file_moved',
            auth=Auth(user_one),
            params={
                'node': node._id,
                'project': node.parent_id,
                'path': file_component.materialized_path,
                'urls': {'url1': 'www.fake.org', 'url2': 'www.fake.com'},
                'source': {
                    'materialized': file_component.materialized_path,
                    'addon': 'osfstorage',
                    'node': {
                        '_id': component._id,
                        'url': component.url,
                        'title': component.title,
                    }
                },
                'destination': {
                    'materialized': file_component.materialized_path,
                    'addon': 'osfstorage',
                    'node': {
                        '_id': node._id,
                        'url': node.url,
                        'title': node.title,
                    }
                }
            },
        )
        node.save()
        return node

    @pytest.fixture()
    def node_with_folder_log(self, node, user_one, file_component, component):
        # Node log is added directly to prove that URLs are removed in
        # serialization
        node.add_log(
            'osf_storage_folder_created',
            auth=Auth(user_one),
            params={
                'node': node._id,
                'project': node.parent_id,
                'path': file_component.materialized_path,
                'urls': {'url1': 'www.fake.org', 'url2': 'www.fake.com'},
                'source': {
                    'materialized': file_component.materialized_path,
                    'addon': 'osfstorage',
                    'node': {
                        '_id': component._id,
                        'url': component.url,
                        'title': component.title,
                    }
                }
            },
        )
        node.save()
        return node

    def test_title_visibility_in_file_move(
            self, app, url_node_logs,
            user_two, component, node_with_log):
        # test_title_not_hidden_from_contributor_in_file_move
        res = app.get(url_node_logs, auth=user_two.auth)
        assert res.status_code == 200
        assert res.json['data'][0]['attributes']['params']['destination']['node_title'] == node_with_log.title

        # test_title_hidden_from_non_contributor_in_file_move
        # (user_two is a contributor on the node but not on the component,
        # so the component's title must be masked in the source params)
        res = app.get(url_node_logs, auth=user_two.auth)
        assert res.status_code == 200
        assert component.title not in res.json['data']
        assert res.json['data'][0]['attributes']['params']['source']['node_title'] == 'Private Component'

    def test_file_log_keeps_url(
        self, app, url_node_logs, user_two, node_with_log
    ):
        # File logs retain their params URLs in the serialized payload.
        res = app.get(url_node_logs, auth=user_two.auth)
        assert res.status_code == 200
        assert res.json['data'][0]['attributes']['params'].get('urls')

    def test_folder_log_url_removal(
        self, app, url_node_logs, user_two, node_with_folder_log
    ):
        # Folder logs must have their params URLs stripped in serialization.
        res = app.get(url_node_logs, auth=user_two.auth)
        assert res.status_code == 200
        assert not res.json['data'][0]['attributes']['params'].get('urls')
| pattisdr/osf.io | api_tests/logs/views/test_log_detail.py | Python | apache-2.0 | 8,613 |
#
# Copyright 2009 Benjamin Mellor
#
# This file is part of Fundy.
#
# Fundy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from utils import dot_node, dot_link, rset
class NodePtr(object):
    """
    A NodePtr is a reference to a node.

    It can be dynamically reassigned. All other objects that want to refer to a
    node should do it through a NodePtr, allowing the reference to be
    over-written in place to point to a new node. Otherwise, this would be
    possible in Python, but not in RPython as it is not possible to change the
    __class__ to a different subclass of Node.

    The primary reason for this explicit indirection is that reduction of a node
    should be able to replace the original node with its reduction in-place, or
    other references to the same node would have to reduce it again.
    """
    def __init__(self, node):
        # The single mutable slot: rebinding self.node redirects every
        # holder of this pointer at once.
        self.node = node

    # XXX: The eq and hash methods need to be defined for passing to r_dict
    # in the construction of each node's rset to hold NodePtrs of the node's
    # types. They are defined so that two NodePtrs pointing to the same node
    # will be considered the "same" NodePtr. The reason for not using the
    # special names __eq__ and __hash__ is that RPython does not understand
    # these special methods, which would introduce a subtle behavioural
    # difference between the translated and untranslated interpreters.
    def eq(self, other):
        # Identity of the pointed-at node, not structural equality.
        return self.node is other.node

    # FIXME: Cannot use ``hash()`` in RPython.
    def hash(self):
        # Constant hash: correct (all pointers may collide) but degrades
        # r_dict lookup to linear scan.
        return 0

    def add_type(self, typeptr):
        self.node.add_type(typeptr)

    def nodeid(self):
        """
        Return a unique identifier for the node pointed at.
        """
        return self.node.nodeid()

    def reduce_WHNF_inplace(self):
        """
        Replace the pointed at node with the result of reducing that node
        to weak head normal form.
        """
        self.node = self.node.reduce_WHNF()

    def get_applied_node(self, argument_ptr):
        """
        Apply the pointed at node to argument, returning a new node.
        """
        return self.node.apply(argument_ptr)

    def get_instantiated_node(self, replace_this_ptr, with_this_ptr):
        """
        Instantiate the node inside this ptr, returning a new node. The graph
        under the new node is the result of copying the graph under the original
        ptr's node, but replacing all references to replace_this with
        references to with_this.

        replace_this and with_this are both node pointers, not nodes.
        """
        return self.node.instantiate(replace_this_ptr, with_this_ptr)

    def get_instantiated_node_ptr(self, replace_this_ptr, with_this_ptr):
        """
        Like get_instantiated_node, but return a new NodePtr pointing to the
        node instead of returning the node directly. Convenience function for
        the deeper levels of instantiation that are creating new pointers to new
        nodes, as opposed to the top level of instantiation that is returning a
        node for a pointer being reduced to overwrite its node with.
        """
        new_node = self.node.instantiate(replace_this_ptr, with_this_ptr)
        if new_node is self.node:
            # shouldn't make a new pointer to the same node
            return self
        else:
            return NodePtr(new_node)

    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        # toplevel is just to make sure that the repr for a NodePtr says that
        # it's a NodePtr, whereas the repr for a node doesn't, but only at
        # the top level, so the graph is easier to read
        if toplevel:
            return '*(%s)' % self.node.__repr__(False)
        else:
            return self.node.__repr__(toplevel)

    def dot(self, already_seen=None):
        """
        NOT_RPYTHON: Yield dot format description of the graph under this node.
        already_seen should be a set of nodes (not node pointers!) already seen,
        that will be ignored. Defaults to nothing, so is mostly only needed when
        implementing a dot method.

        Forwards to the node object itself. Will yield each graph element
        separately.
        """
        for dot in self.node.dot(already_seen):
            yield dot
class Node(object):
    """
    Base class for the different kinds of node.

    The methods here do not modify the nodes, they return new nodes.

    Nodes should have NodePtr data members, not refer directly to other Nodes.
    """
    def __init__(self):
        # Set of NodePtrs to this node's types; uses the explicit eq/hash
        # pair because RPython's r_dict cannot use __eq__/__hash__.
        self.types = rset(NodePtr.eq, NodePtr.hash)

    def nodeid(self):
        """
        Return a unique identifier for this node.
        """
        return id(self)

    def reduce_WHNF(self):
        """
        Return a Node that is the result of reducing this Node to weak head
        normal form. Either returns a new Node, or self.
        """
        return self     # by default reduction doesn't change nodes

    def instantiate(self, replace_this_ptr, with_this_ptr):
        """
        Instantiate a node, returning a node that is the result of replacing
        one ptr with another in the subgraph under this node. Returns self
        only in the case where it is absolutely known that replace_this_ptr
        cannot occur in the subgraph under this node (basically at leaf nodes).
        """
        raise NotImplementedError

    def apply(self, argument_ptr):
        """
        Apply a node to an argument, returning a node that is the result of
        the application.
        """
        raise TypeError     # only lambdas and builtins can be applied

    def to_string(self):
        # Subclasses render themselves for user-facing output.
        raise NotImplementedError

    def add_type(self, typeptr):
        self.types.add(typeptr)

    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        return "NODE_WITHOUT_REPR"

    def dot(self, already_seen=None):
        """
        NOT_RPYTHON: Yield a description of the graph under this node.
        """
        # Fallback renderer for subclasses without a generated dot method:
        # shows a clearly-marked placeholder node.
        if already_seen is None:
            already_seen = set()
        if self not in already_seen:
            already_seen.add(self)
            yield dot_node(self.nodeid(), shape='tripleoctagon',
                           label='UNRENDERABLE', color='red')
            for dot in self.dot_types(already_seen):
                yield dot

    @classmethod
    def add_instantiate_fn(cls, *attr_names):
        """
        NOT_RPYTHON: Add an instantiation function to the class.

        This is to define the common instantiation pattern in one place:

            for each attr:
                if attr is the thing to replace, replace it
                else replace attr with its own instantiation
            if all replacement attrs are the same as the original, return self
            else return new node created with the replacement attrs

        This function is not RPython, but the function it returns must be,
        which is why it is defined by eval()ing a string instead of using the
        perfectly adequate capabilities of Python.

        Manually defining an instantiation function that does this logic can
        be replaced by:

            class FOO:
                ...
            FOO.add_instantiate_fn(attr1, attr2, ..., attrN)

        attr1, attr2, ..., attrN must all be the names of data members of FOO
        nodes, and must all be of type NodePtr. Constructing a valid FOO must
        also be able to be acomplished by FOO(attr1, attr2, ..., attrN) (i.e.
        in the same order as the attributes appeared in the call to
        add_instantiate_fn).
        """
        # Build the source of the generated instantiate function piecewise,
        # one if/else fragment per attribute, then exec it and attach it.
        conj_fragments = []
        arg_fragments = []
        func_fragments = ["def instantiate(self, replace_this_ptr, "
                          "with_this_ptr):\n"]

        for name in attr_names:
            s = ("    if self.%(name)s is replace_this_ptr:\n"
                 "        new_%(name)s = with_this_ptr\n"
                 "    else:\n"
                 "        new_%(name)s = self.%(name)s."
                 "get_instantiated_node_ptr("
                 "replace_this_ptr, with_this_ptr)\n"
                 ) % {'name': name}
            func_fragments.append(s)

            conj_fragments.append("new_%(name)s is self.%(name)s" %
                                  {'name': name})
            arg_fragments.append("new_%(name)s" % {'name': name})

        conj_fragments.append('replace_this_ptr is not None')

        func_fragments += ["    if ", ' and '.join(conj_fragments), ":\n"
                           "        return self\n"
                           "    else:\n"
                           "        return ", cls.__name__, "(",
                           ', '.join(arg_fragments), ")\n"]

        func_str = ''.join(func_fragments)
        exec func_str
        cls.instantiate = instantiate
    # end def add_instantiate_fn

    def dot_types(self, already_seen=None):
        """
        NOT_RPYTHON:
        """
        # Render dashed cyan edges from this node to each of its types.
        if already_seen is None:
            already_seen = set()
        for typeptr in self.types:
            yield dot_link(self.nodeid(), typeptr.nodeid(),
                           color='cyan', style='dashed')
            for dot in typeptr.dot(already_seen):
                yield dot

    @classmethod
    def add_dot_fn(cls, self_spec, **attrs):
        """
        NOT_RPYTHON: Add a dot method to the class.

        This is to define the common dot render pattern in one place:

            if haven't already seen self:
                record that have seen self
                yield render of self as a node (with various parameters)
                for each attr:
                    yield render of link to attr (with various parameters)
                    yield whatever attr.dot() yields
                for each type:
                    yield render of link to type
                    yield whaever type.dot() yields

        self_spec should be a dictionary of parameters for the graph node to be
        rendered for nodes of this class: color (note US spelling!), shape, etc.

        Each extra keyword argument should be the name of an attribute that
        holds a NodePtr. Its value should be a dictionary of parameters for the
        link to be rendered.

        Manually defining a dot function that does this logic can be replaced
        by:

            class FOO:
                ...
            FOO.add_dot_fn(dict(...), attr1=dict(...), ..., attrN=dict(...))

        This function is not RPython, and the methods it creates do not have to
        be RPython at the moment either, as actually viewing the dot files that
        can be generated by these methods depends on PyGame, which is obviously
        not RPython, and so is only available when running on top of CPython.
        """
        # Compare with the hackery in add_instantiate_fn above;
        # this sort of thing is so much easier in Python than RPython.
        # Will be a pain to convert this if graph viewing is ever supported at
        # runtime in the translated interpreter.
        def dot(self, already_seen=None):
            """
            NOT_RPYTHON: autogenerated dot method for class %s
            """ % cls.__name__
            if already_seen is None:
                already_seen = set()
            if self not in already_seen:
                already_seen.add(self)
                # Spec values may be callables taking the node, allowing
                # per-instance labels/colors.
                local_self_spec = self_spec.copy()
                for k in local_self_spec:
                    if callable(local_self_spec[k]):
                        local_self_spec[k] = local_self_spec[k](self)
                yield dot_node(self.nodeid(), **local_self_spec)
                for attr_name, link_spec in attrs.items():
                    local_link_spec = link_spec.copy()
                    for k in local_link_spec:
                        if callable(local_link_spec[k]):
                            local_link_spec[k] = local_link_spec[k](self)
                    attr_val = getattr(self, attr_name)
                    if attr_val is not None:
                        yield dot_link(self.nodeid(), attr_val.nodeid(),
                                       **local_link_spec)
                        for thing in attr_val.dot(already_seen):
                            yield thing
                for dot in self.dot_types(already_seen):
                    yield dot
        # end def dot
        cls.dot = dot
    # end def add_dot_fn
# end class Node
class ApplicationNode(Node):
    """Graph node representing the application of a functor to an argument.

    Both ``functor`` and ``argument`` are NodePtrs.
    """
    def __init__(self, functor, argument):
        Node.__init__(self)
        self.functor = functor
        self.argument = argument

    def reduce_WHNF(self):
        # Reduce the functor in place first so it becomes something
        # applicable before we apply it to the argument.
        self.functor.reduce_WHNF_inplace()
        # self.functor should now be a lambda node or a builtin node
        new_node = self.functor.get_applied_node(self.argument)
        # now try to reduce the result, in case it returned another application
        return new_node.reduce_WHNF()

    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        return 'Application(%s to %s)' % (self.functor.__repr__(False),
                                          self.argument.__repr__(False))

# Attach the autogenerated instantiate and dot methods (see Node class).
ApplicationNode.add_instantiate_fn('functor', 'argument')
ApplicationNode.add_dot_fn(dict(shape='ellipse', label='apply'),
                           functor=dict(color='red', label='f'),
                           argument=dict(color='purple', label='a'))
class LambdaNode(Node):
    # A lambda abstraction: binds `parameter` (NodePtr to a ParameterNode)
    # inside `body` (NodePtr to the function body).
    def __init__(self, parameter, body):
        Node.__init__(self)
        self.parameter = parameter
        self.body = body
    def apply(self, argument):
        # Beta-reduction: substitute `argument` for the bound parameter.
        if self.body is self.parameter: # if the body is just the param
            return argument.node # just return the arg node now
        return self.body.get_instantiated_node(self.parameter, argument)
    def instantiate(self, replace_this_ptr, with_this_ptr):
        # TODO: this assertion was causing test failures, and removing it
        # doesn't cause any; maybe my reasoning is wrong and it's not actually
        # necessary; investigate!
        #assert replace_this_ptr is not self.parameter, ("Don't instantiate "
        #    "a lambda replacing its parameter, apply it to something")
        if self.body is replace_this_ptr:
            new_body = with_this_ptr
        else:
            new_body = self.body.get_instantiated_node_ptr(replace_this_ptr,
                                                           with_this_ptr)
        # If nothing changed (and this wasn't a pure-copy request with a
        # None target) we can share ourselves instead of allocating.
        if new_body is self.body and replace_this_ptr is not None:
            return self
        else:
            # Make a new lambda node with a new parameter node, but then have
            # to instantiate the body *again* to replace references to the old
            # lambda's parameter node with the new parameter node. The old
            # lambda's parameter cannot just be reused, as the original lambda
            # might still be referenced from somewhere, and if one lambda ends
            # up inside the other, then we have two lambdas both trying to bind
            # the same free variable. If they could be guaranteed to remain
            # disjoint, this actually wouldn't be a problem, as it is only
            # *references* to a parameter that get replaced.
            new_param = Param()
            new_body = new_body.get_instantiated_node_ptr(self.parameter,
                                                          new_param)
            return LambdaNode(new_param, new_body)
    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        return 'LAMBDA %s --> %s' % (self.parameter.__repr__(False),
                                     self.body.__repr__(False))
# dot rendering: octagon node, blue link to the parameter, green to the body.
LambdaNode.add_dot_fn(dict(shape='octagon', label='lambda'),
                      parameter=dict(color='blue', label='p'),
                      body=dict(color='green'))
class ParameterNode(Node):
    # Cache of display names keyed by node identity; a debugging aid for
    # __repr__ only, never needed by translation.
    _param_dict = {}
    # A parameter carries no data beyond its identity, hence no __init__.
    def instantiate(self, replace_this_ptr, with_this_ptr):
        # Nothing to copy: a parameter has no children, so the instantiated
        # node is always identical to the original.  (Lambda instantiation
        # relies on being able to reuse parameter nodes like this.)
        return self
    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        # Lazily hand out stable short names v0, v1, ... per parameter.
        return self._param_dict.setdefault(self,
                                           'v%d' % len(self._param_dict))
ParameterNode.add_dot_fn(dict(shape='octagon', label='param', color='blue'))
class FixfindNode(Node):
    """
    This implements the Y combinator, which finds the fixpoint of lambda terms.
    """
    def apply(self, argument):
        # Unfold one step of the fixpoint:  Y f  ==>  f (Y f).
        # The functor side is built from the argument via
        # get_instantiated_node_ptr(None, None), which appears to act as a
        # plain copy so the two occurrences of f are not shared
        # -- TODO confirm against NodePtr.get_instantiated_node_ptr.
        return ApplicationNode(argument.get_instantiated_node_ptr(None, None),
                               Application(Y, argument))
    def instantiate(self, replace_this_ptr, with_this_ptr):
        # Y node has no substructure, so don't make a copy
        return self
    def __repr__(self, toplevel=True):
        # NOTE(review): unlike sibling classes this __repr__ carries no
        # NOT_RPYTHON marker; left as-is deliberately.
        return "Y"
FixfindNode.add_dot_fn(dict(shape='ellipse', label='Y', color='green'))
class BuiltinNode(Node):
    # Base class for nodes wrapping built-in functions.  Concrete subclasses
    # are expected to provide a `func` attribute (see __repr__/dot below).
    def __init__(self):
        Node.__init__(self)
    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        return 'BUILTIN %s' % self.func.func_name
    def dot(self, already_seen=None):
        # Hand-written dot method (not via add_dot_fn): a builtin renders as
        # a single green octagon labelled with the wrapped function's name.
        """
        NOT_RPYTHON:
        """
        if already_seen is None:
            already_seen = set()
        # NOTE: here we depend on all descendent classes of BuiltinNode
        # having a func member (which we could not do in RPython, unless they
        # all had the same type). But this is Python, so we can just override
        # this method anywhere the assumption doesn't hold.
        if self not in already_seen:
            already_seen.add(self)
            yield dot_node(self.nodeid(), shape='octagon', color='green',
                           label=self.func.func_name)
            for dot in self.dot_types(already_seen):
                yield dot
class TypeswitchNode(Node):
    """
    Dispatch on the type of an argument.

    `cases` is a list of NodePtrs, each pointing at a ConsNode whose `a`
    is a type expression and whose `b` is the result for that case.
    Applying the typeswitch returns the result node of the first case
    whose type matches the (WHNF-reduced) argument.
    """
    def __init__(self, cases):
        # Bug fix: base-class initialisation was missing here, unlike every
        # other Node subclass in this module.
        Node.__init__(self)
        self.cases = cases
    def apply(self, argument):
        # Force the argument far enough to know its type, then scan the
        # cases in order and return the first matching result.
        argument.reduce_WHNF_inplace()
        for c in self.cases:
            assert isinstance(c.node, ConsNode)
            case_type = c.node.a
            case_ret = c.node.b
            case_type.reduce_WHNF_inplace()
            if argument.node.types.contains(case_type):
                return case_ret.node
        raise TypeError("typeswitch found no match")
    def instantiate(self, replace_this_ptr, with_this_ptr):
        # Rebuild the case list, substituting or recursively instantiating
        # each case pointer.
        new_cases = []
        for c in self.cases:
            if c is replace_this_ptr:
                new_cases.append(with_this_ptr)
            else:
                new_cases.append(c.get_instantiated_node_ptr(replace_this_ptr,
                                                             with_this_ptr))
        return TypeswitchNode(new_cases)
    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        cases = ["case %s return %s" % (c.node.a.__repr__(toplevel),
                                        c.node.b.__repr__(toplevel))
                 for c in self.cases]
        return "typeswitch: [%s]" % ', '.join(cases)
    def dot(self, already_seen=None):
        """
        NOT_RPYTHON:
        """
        if already_seen is None:
            already_seen = set()
        if self not in already_seen:
            # Bug fix: record this node so shared references don't emit its
            # dot statements repeatedly (every sibling dot() does this).
            already_seen.add(self)
            yield dot_node(self.nodeid(), shape='octagon', label='typeswitch',
                           color='cyan')
            for case in self.cases:
                yield dot_link(self.nodeid(), case.nodeid(), color='cyan')
                for thing in case.dot(already_seen):
                    yield thing
            # Consistency fix: also emit type links, as all other node
            # classes do.
            for thing in self.dot_types(already_seen):
                yield thing
class ValueNode(Node):
    """
    Base class for nodes containing values.
    """
    # Pure marker class: adds no behaviour, only classifies subclasses
    # (ConsNode, PrimitiveNode) as value-carrying nodes.
    pass
class ConsNode(ValueNode):
    """
    Cons node contains two other nodes. (pointers!)
    """
    def __init__(self, a, b):
        Node.__init__(self)
        self.a = a
        self.b = b
    def to_string(self):
        # Render as "car . cdr", delegating to the pointed-at nodes.
        return self.a.node.to_string() + " . " + self.b.node.to_string()
    def __repr__(self, toplevel=True):
        """
        NOT_RPYTHON:
        """
        # Bug fix: the format string was '% . %', which is not a valid
        # %-format and raised "unsupported format character" at runtime.
        return '%s . %s' % (self.a.__repr__(toplevel),
                            self.b.__repr__(toplevel))
    @staticmethod
    def make_tree(leaves):
        """
        Return a (pointer to) a tree of Cons nodes with the given leaves.
        The elements of leaves must be NodePtrs. This function will always
        generate trees with the same "shape" for the same number of inputs.
        i.e. make_tree(a1, a2, ... aN) and make_tree(b1, b2, ... bN) will put
        a1 and b1 in analogous positions, etc.
        """
        # Guard: an empty leaf list previously recursed forever; fail fast
        # with a clear error instead.
        if len(leaves) == 0:
            raise ValueError("make_tree requires at least one leaf")
        if len(leaves) == 1:
            return leaves[0]
        else:
            # Explicit floor division (identical in Python 2, safe under 3).
            pivot = len(leaves) // 2
            left = leaves[:pivot]
            right = leaves[pivot:]
            return Cons(ConsNode.make_tree(left), ConsNode.make_tree(right))
# instantiate() is autogenerated over the two child pointers; dot rendering
# is a maroon box with labelled links to both children.
ConsNode.add_instantiate_fn('a', 'b')
ConsNode.add_dot_fn(dict(shape='box', color='maroon', label='cons'),
                    a=dict(color='maroon', label='a'),
                    b=dict(color='maroon', label='b'))
class PrimitiveNode(ValueNode):
    # Base class for leaf values (no child pointers); subclasses must
    # provide to_repr()/to_string() and a meaningful eq().
    def __init__(self):
        Node.__init__(self)
    def instantiate(self, replace_this_ptr, with_this_ptr):
        # Leaf node: nothing to substitute into, so no copy is ever needed.
        return self
    def eq(self, other):
        # Subclass responsibility.
        raise NotImplementedError
    def __repr__(self, toplevel=True):
        return 'VALUE %s' % self.to_repr()
# dot rendering: purple box labelled with the value's repr (computed lazily
# per node via the callable label).
PrimitiveNode.add_dot_fn(dict(shape='box', color='purple',
                              label=lambda self:self.to_repr()))
class LabelledValueNode(PrimitiveNode):
    """
    A value carrying no information beyond its identity and optional label.
    """
    def __init__(self, name=None):
        PrimitiveNode.__init__(self)
        self.name = name
    def to_string(self):
        # The anonymous (unnamed) value renders as "<void>".
        return self.name if self.name else "<void>"
    # repr and string forms coincide for labelled values.
    to_repr = to_string
    def eq(self, other):
        # Labelled values compare by label alone.
        return self.name == other.name
def Application(functor, argument):
    """
    Helper function to make pointers to new application nodes
    """
    return NodePtr(ApplicationNode(functor, argument))
def Lambda(param, body):
    """
    Helper function to make pointers to new lambda nodes
    """
    return NodePtr(LambdaNode(param, body))
def Param():
    """
    Helper function to make pointers to new parameter nodes
    """
    return NodePtr(ParameterNode())
def Typeswitch(cases):
    """
    Helper function to make pointers to new typeswitch nodes.
    """
    return NodePtr(TypeswitchNode(cases))
def Cons(a, b):
    """
    Helper function to make pointers to new cons nodes.
    """
    return NodePtr(ConsNode(a, b))
def LabelledValue(name=None):
    """
    Helper function to make pointers to new empty value nodes.
    """
    return NodePtr(LabelledValueNode(name))
# define the Y combinator; don't really need a function to make new ones!
# (Y is a shared singleton pointer, referenced by FixfindNode.apply above.)
Y = NodePtr(FixfindNode())
| cumber/fundy | graph.py | Python | gpl-3.0 | 23,972 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """
    Auto-generated South schema migration: adds the ``width`` and ``height``
    CharFields to the ``TopStory`` plugin model.  Do not hand-edit the frozen
    ORM state below.
    """
    def forwards(self, orm):
        # Adding field 'TopStory.width'
        db.add_column(u'djangocms_topstory_topstory', 'width',
                      self.gf('django.db.models.fields.CharField')(default='100%', max_length=10),
                      keep_default=False)
        # Adding field 'TopStory.height'
        db.add_column(u'djangocms_topstory_topstory', 'height',
                      self.gf('django.db.models.fields.CharField')(default='434px', max_length=10),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'TopStory.width'
        db.delete_column(u'djangocms_topstory_topstory', 'width')
        # Deleting field 'TopStory.height'
        db.delete_column(u'djangocms_topstory_topstory', 'height')
    # Frozen ORM snapshot used by South to reconstruct the models as they
    # existed at the time of this migration (auto-generated; keep verbatim).
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'djangocms_topstory.topstory': {
            'Meta': {'object_name': 'TopStory', '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'height': ('django.db.models.fields.CharField', [], {'default': "'434px'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '10'})
        },
        u'djangocms_topstory.topstoryitem': {
            'Meta': {'ordering': "['ordering']", 'object_name': 'TopStoryItem'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
            'focal_point_x': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
            'focal_point_y': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'image_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'}),
            'image_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'size': ('django.db.models.fields.CharField', [], {'default': "'masonry'", 'max_length': '50'}),
            'teaser_layout': ('django.db.models.fields.CharField', [], {'default': "'white'", 'max_length': '100'}),
            'teaser_position': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
            'topstory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topstory_items'", 'to': u"orm['djangocms_topstory.TopStory']"})
        }
    }
complete_apps = ['djangocms_topstory'] | kohout/djangocms-getaweb-topstory | djangocms_topstory/south_migrations/0006_auto__add_field_topstory_width__add_field_topstory_height.py | Python | unlicense | 5,968 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network layers, regularizers, summaries, etc.
## Higher level ops for building neural network layers.
This package provides several ops that take care of creating variables that are
used internally in a consistent way and provide the building blocks for many
common machine learning algorithms.
@@avg_pool2d
@@batch_norm
@@convolution2d
@@convolution2d_in_plane
@@convolution2d_transpose
@@flatten
@@fully_connected
@@layer_norm
@@max_pool2d
@@one_hot_encoding
@@repeat
@@safe_embedding_lookup_sparse
@@separable_convolution2d
@@stack
@@unit_norm
Aliases for fully_connected which set a default activation function are
available: `relu`, `relu6` and `linear`.
## Regularizers
Regularization can help prevent overfitting. These have the signature
`fn(weights)`. The loss is typically added to
`tf.GraphKeys.REGULARIZATION_LOSSES`.
@@apply_regularization
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
## Initializers
Initializers are used to initialize variables with sensible values given their
size, data type, and purpose.
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
## Optimization
Optimize weights given a loss.
@@optimize_loss
## Summaries
Helper functions to summarize specific variables or ops.
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
The layers module defines convenience functions `summarize_variables`,
`summarize_weights` and `summarize_biases`, which set the `collection` argument
of `summarize_collection` to `VARIABLES`, `WEIGHTS` and `BIASES`, respectively.
@@summarize_activations
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
from tensorflow.python.util.all_util import make_all
# Build this package's public API list from the names pulled in by the
# wildcard import above.
__all__ = make_all(__name__)
| cg31/tensorflow | tensorflow/contrib/layers/__init__.py | Python | apache-2.0 | 2,629 |
import datetime
import pdb
from django.test import TestCase
from finance.models import *
from efinance.models import *
from students.models import *
class UtilsTestCase(TestCase):
    # Test suite skeleton for the finance utilities.
    # NOTE(review): every test below asserts only the fixture's primary key
    # (alumno_1.id == 1), so they appear to be placeholders awaiting real
    # assertions -- confirm before treating this as coverage.
    fixtures = ['students_testdata.json']
    def setUp(self):
        super(UtilsTestCase, self).setUp()
        #self.cuota_1 = Cuota.objects.get(pk=1)
        #self.cuota_2 = Cuota.objects.get(pk=2)
        #self.cuota_3 = Cuota.objects.get(pk=3)
        #self.unico_pago_1 = UnicoPago.objects.get(pk=1)
        self.alumno_1 = Alumno.objects.get(pk=1)
        self.alumno_2 = Alumno.objects.get(pk=2)
        #self.empleado_1 = Empleado.objects.get(pk=1)
        #self.empleado_2 = Empleado.objects.get(pk=2)
    def test_pago_cuota(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_pago_sueldo(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_gen_cuotas(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_gastos(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_cashflow(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_pago_unico(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_deuda_alumno(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_deuda_empleado(self):
        self.assertEqual(self.alumno_1.id, 1)
    def test_monto_cuota(self):
        self.assertEqual(self.alumno_1.id, 1)
'''
def test_realizar_pago(self):
self.assertEqual(realizar_pago(385,1), 0)
self.assertEqual(realizar_pago(350,2), 0)
def test_pagar_cuota(self):
self.assertEqual(pagar_cuota(self.cuota_1.id, 385), 0)
self.assertEqual(pagar_cuota(self.cuota_2.id, 700), 200)
#self.assertEqual(pagar_cuota(self.cuota_3.id, 100), 0)
self.setUp()
self.assertEqual(self.cuota_1.deuda, 0)
self.assertEqual(self.cuota_2.deuda, 0)
#self.assertEqual(self.cuota_3.deuda, 700)
def test_pagar_pu(self):
self.assertEqual(pagar_pu(self.unico_pago_1.id, 500), 0)
pu = UnicoPago.objects.get(pk=1)
self.assertEqual(pu.deuda, 0)
def test_pago_sueldo(self):
#la que se usa en views
#veronica monini cobra 6543 mensuales
pago_sueldo(700, self.empleado_1.id)
pago_sueldo(3200, self.empleado_2.id)
self.setUp()
self.assertEqual(self.empleado_1.get_deuda(), 1000)
self.assertEqual(self.empleado_2.get_deuda(), 0)
def test_pagar_sueldo(self):
#un sueldo en particular
pass
'''
| mfalcon/edujango | students/tests/utils.py | Python | apache-2.0 | 2,679 |
"""
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
#
# Abstract base classes
#
@python_2_unicode_compatible
class CommonInfo(models.Model):
    # Abstract base: shared name/age fields with default ordering by name.
    name = models.CharField(max_length=50)
    age = models.PositiveIntegerField()
    class Meta:
        abstract = True
        ordering = ['name']
    def __str__(self):
        # e.g. "Worker Fred" -- the class name tells subclasses apart.
        return '%s %s' % (self.__class__.__name__, self.name)
# Concrete subclasses of the abstract CommonInfo base; each gets its own
# table containing the inherited fields plus its own.
class Worker(CommonInfo):
    job = models.CharField(max_length=50)
class Student(CommonInfo):
    school_class = models.CharField(max_length=10)
    class Meta:
        # Deliberately empty: overrides (and thus drops) CommonInfo's
        # Meta.ordering for this subclass.
        pass
#
# Abstract base classes with related models
#
class Post(models.Model):
    title = models.CharField(max_length=50)
@python_2_unicode_compatible
class Attachment(models.Model):
    # Abstract base demonstrating %(class)s / %(app_label)s placeholders in
    # related names: each concrete subclass gets its own reverse accessors
    # (e.g. attached_comment_set, attached_link_set) on Post.
    post = models.ForeignKey(
        Post,
        models.CASCADE,
        related_name='attached_%(class)s_set',
        related_query_name='attached_%(app_label)s_%(class)ss',
    )
    content = models.TextField()
    class Meta:
        abstract = True
    def __str__(self):
        return self.content
class Comment(Attachment):
    is_spam = models.BooleanField(default=False)
class Link(Attachment):
    url = models.URLField()
#
# Multi-table inheritance
#
@python_2_unicode_compatible
class Chef(models.Model):
    name = models.CharField(max_length=50)
    def __str__(self):
        return "%s the chef" % self.name
@python_2_unicode_compatible
class Place(models.Model):
    # Concrete base for multi-table inheritance: subclasses link back to
    # this table via an implicit (or, for ParkingLot, explicit) OneToOne.
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)
    def __str__(self):
        return "%s the place" % self.name
class Rating(models.Model):
    # Abstract mixin contributing a nullable rating and ordering by it.
    rating = models.IntegerField(null=True, blank=True)
    class Meta:
        abstract = True
        ordering = ['-rating']
@python_2_unicode_compatible
class Restaurant(Place, Rating):
    # Mixed inheritance: multi-table from Place, abstract from Rating.
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)
    chef = models.ForeignKey(Chef, models.SET_NULL, null=True, blank=True)
    class Meta(Rating.Meta):
        # Inherits Rating's ordering while pinning a custom table name.
        db_table = 'my_restaurant'
    def __str__(self):
        return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
    # Second level of multi-table inheritance.
    serves_gnocchi = models.BooleanField(default=False)
    def __str__(self):
        return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class Supplier(Place):
    customers = models.ManyToManyField(Restaurant, related_name='provider')
    def __str__(self):
        return "%s the supplier" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
    # An explicit link to the parent (we can control the attribute name).
    parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
    main_site = models.ForeignKey(Place, models.CASCADE, related_name='lot')
    def __str__(self):
        return "%s the parking lot" % self.name
#
# Abstract base classes with related models where the sub-class has the
# same name in a different app and inherits from the same abstract base
# class.
# NOTE: The actual API tests for the following classes are in
# model_inheritance_same_model_name/models.py - They are defined
# here in order to have the name conflict between apps
#
class Title(models.Model):
    title = models.CharField(max_length=50)
class NamedURL(models.Model):
    # Abstract base whose related_name embeds %(app_label)s/%(class)s so the
    # same-named subclass in another app gets a distinct reverse accessor.
    title = models.ForeignKey(Title, models.CASCADE, related_name='attached_%(app_label)s_%(class)s_set')
    url = models.URLField()
    class Meta:
        abstract = True
class Mixin(object):
    # Plain-object mixin cooperating with Model.__init__ via super().
    def __init__(self):
        self.other_attr = 1
        super(Mixin, self).__init__()
class MixinModel(models.Model, Mixin):
    pass
class Base(models.Model):
    titles = models.ManyToManyField(Title)
class SubBase(Base):
    # Multi-table child with its own explicit primary key.
    sub_id = models.IntegerField(primary_key=True)
class GrandParent(models.Model):
    first_name = models.CharField(max_length=80)
    last_name = models.CharField(max_length=80)
    email = models.EmailField(unique=True)
    class Meta:
        unique_together = ('first_name', 'last_name')
# Three-deep multi-table inheritance chain off GrandParent.
class Parent(GrandParent):
    pass
class Child(Parent):
    pass
class GrandChild(Child):
    pass
| kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/model_inheritance/models.py | Python | apache-2.0 | 4,766 |
#MenuTitle: Check glyph names
# encoding: utf-8
__doc__="""
Goes through all glyph names and looks for illegal characters.
"""
# Characters allowed as the first character of a glyph name.
firstChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
# Characters additionally allowed after the first position.
otherChars = "0123456789._-"
legalChars = firstChars + otherChars
# Glyph names exempt from the first-character rule.
exceptionList = [".notdef", ".null"]
import GlyphsApp
allGlyphs = Glyphs.font.glyphs
def process( thisName ):
	# Validate a single glyph name and print a report line for every
	# problem found.  Single-argument print(...) behaves identically
	# under Python 2 (print statement) and Python 3.
	if not thisName:
		# Guard: an empty name previously crashed on thisName[0].
		print("'': empty glyph name")
		return
	thisFirstChar = thisName[0]
	if thisFirstChar not in firstChars and thisName not in exceptionList:
		if thisFirstChar in otherChars:
			# Legal later in a name, but suspicious as the first character.
			print("'%s': potentially problematic first character" % thisName)
		else:
			print("'%s': illegal first character" % thisName)
	# Remaining characters may come from the wider legal set.
	for thisChar in thisName[1:]:
		if thisChar not in legalChars:
			print("'%s': illegal character '%s'" % ( thisName, thisChar ))
# Check every glyph of the current font.  The replace()/decode dance turns
# backslash-escape sequences in the glyph name into real unicode characters
# (\U is normalised to \u first) -- presumably to handle escaped names
# returned by the Glyphs API; TODO confirm.
for thisGlyph in allGlyphs:
	process( thisGlyph.name.replace("\U","\u").decode('unicode-escape') )
| weiweihuanghuang/Glyphs-Scripts | Glyph Names/Check glyph names.py | Python | apache-2.0 | 871 |
# %load "/Users/Achilles/Documents/Tech/Scala_Spark/HackOnData/Final Project/Build a WebInterface/screen.py"
#!/usr/bin/env python
from lxml import html
import json
import requests
import json,re
from dateutil import parser as dateparser
from time import sleep
def ParseReviews(asin):
    """
    Scrape the Amazon product page for `asin` and return a dict with the
    product name and the aggregate ratings histogram.

    Retries up to 5 times (with a short pause between attempts) when the
    page yields no parsable reviews; on persistent failure returns a dict
    with an 'error' key instead of raising.
    """
    # Added Retrying
    for i in range(5):
        try:
            #This script has only been tested with Amazon.com
            amazon_url  = 'http://www.amazon.com/dp/'+asin
            # Add some recent user agent to prevent amazon from blocking the request
            # Find some chrome user agent strings here https://udger.com/resources/ua-list/browser-detail?browser=Chrome
            headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}
            page = requests.get(amazon_url,headers = headers)
            page_response = page.text
            parser = html.fromstring(page_response)
            # XPath selectors for the various page sections; Amazon ships
            # (at least) two review-section layouts, hence the fallbacks.
            XPATH_AGGREGATE = '//span[@id="acrCustomerReviewText"]'
            XPATH_REVIEW_SECTION_1 = '//div[contains(@id,"reviews-summary")]'
            XPATH_REVIEW_SECTION_2 = '//div[@data-hook="review"]'
            XPATH_AGGREGATE_RATING = '//table[@id="histogramTable"]//tr'
            XPATH_PRODUCT_NAME = '//h1//span[@id="productTitle"]//text()'
            XPATH_PRODUCT_PRICE  = '//span[@id="priceblock_ourprice"]/text()'
            raw_product_price = parser.xpath(XPATH_PRODUCT_PRICE)
            product_price = ''.join(raw_product_price).replace(',','')
            raw_product_name = parser.xpath(XPATH_PRODUCT_NAME)
            product_name = ''.join(raw_product_name).strip()
            total_ratings  = parser.xpath(XPATH_AGGREGATE_RATING)
            reviews = parser.xpath(XPATH_REVIEW_SECTION_1)
            if not reviews:
                reviews = parser.xpath(XPATH_REVIEW_SECTION_2)
            ratings_dict = {}
            reviews_list = []
            if not reviews:
                # No reviews found: treat as a transient parse failure and
                # let the retry loop take another shot.
                raise ValueError('unable to find reviews in page')
            #grabing the rating section in product page
            for ratings in total_ratings:
                extracted_rating = ratings.xpath('./td//a//text()')
                if extracted_rating:
                    rating_key = extracted_rating[0]
                    raw_raing_value = extracted_rating[1]
                    rating_value = raw_raing_value
                    if rating_key:
                        ratings_dict.update({rating_key:rating_value})
            #Parsing individual reviews
            for review in reviews:
                XPATH_RATING  = './/i[@data-hook="review-star-rating"]//text()'
                XPATH_REVIEW_HEADER = './/a[@data-hook="review-title"]//text()'
                XPATH_REVIEW_POSTED_DATE = './/a[contains(@href,"/profile/")]/parent::span/following-sibling::span/text()'
                XPATH_REVIEW_TEXT_1 = './/div[@data-hook="review-collapsed"]//text()'
                XPATH_REVIEW_TEXT_2 = './/div//span[@data-action="columnbalancing-showfullreview"]/@data-columnbalancing-showfullreview'
                XPATH_REVIEW_COMMENTS = './/span[@data-hook="review-comment"]//text()'
                XPATH_AUTHOR  = './/a[contains(@href,"/profile/")]/parent::span//text()'
                XPATH_REVIEW_TEXT_3  = './/div[contains(@id,"dpReviews")]/div/text()'
                raw_review_author = review.xpath(XPATH_AUTHOR)
                raw_review_rating = review.xpath(XPATH_RATING)
                raw_review_header = review.xpath(XPATH_REVIEW_HEADER)
                raw_review_posted_date = review.xpath(XPATH_REVIEW_POSTED_DATE)
                raw_review_text1 = review.xpath(XPATH_REVIEW_TEXT_1)
                raw_review_text2 = review.xpath(XPATH_REVIEW_TEXT_2)
                raw_review_text3 = review.xpath(XPATH_REVIEW_TEXT_3)
                # Bug fix: the old code used .strip('By'), which strips the
                # *characters* 'B' and 'y' from both ends and mangled names
                # like "Bobby"; drop a literal leading "By" prefix instead.
                author = ' '.join(' '.join(raw_review_author).split())
                if author.startswith('By'):
                    author = author[2:].strip()
                #cleaning data
                review_rating = ''.join(raw_review_rating).replace('out of 5 stars','')
                review_header = ' '.join(' '.join(raw_review_header).split())
                review_posted_date = dateparser.parse(''.join(raw_review_posted_date)).strftime('%d %b %Y')
                review_text = ' '.join(' '.join(raw_review_text1).split())
                #grabbing hidden comments if present
                if raw_review_text2:
                    json_loaded_review_data = json.loads(raw_review_text2[0])
                    json_loaded_review_data_text = json_loaded_review_data['rest']
                    cleaned_json_loaded_review_data_text = re.sub('<.*?>','',json_loaded_review_data_text)
                    full_review_text = review_text+cleaned_json_loaded_review_data_text
                else:
                    full_review_text = review_text
                if not raw_review_text1:
                    full_review_text = ' '.join(' '.join(raw_review_text3).split())
                raw_review_comments = review.xpath(XPATH_REVIEW_COMMENTS)
                review_comments = ''.join(raw_review_comments)
                review_comments = re.sub('[A-Za-z]','',review_comments).strip()
                review_dict = {
                                    'review_comment_count':review_comments,
                                    'review_text':full_review_text,
                                    'review_posted_date':review_posted_date,
                                    'review_header':review_header,
                                    'review_rating':review_rating,
                                    'review_author':author
                                }
                reviews_list.append(review_dict)
            data = {
                        'ratings':ratings_dict,
                        # 'reviews':reviews_list,
                        # 'url':amazon_url,
                        # 'price':product_price,
                        'name':product_name
                    }
            return data
        except ValueError:
            print ("Retrying to get the correct response")
            # Pause before retrying so we do not hammer the server
            # (`sleep` was imported but never used before).
            sleep(2)
    return {"error":"failed to process the page","asin":asin}
def ReadAsin(AsinList):
    """
    Download and parse every ASIN in `AsinList` via ParseReviews, print the
    collected data, and return it as a list of per-product dicts.
    """
    #Add your own ASINs here
    #AsinList = ['B01ETPUQ6E','B017HW9DEW']
    extracted_data = []
    for asin in AsinList:
        print ("Downloading and processing page http://www.amazon.com/dp/"+asin)
        extracted_data.append(ParseReviews(asin))
    #f=open('data.json','w')
    #json.dump(extracted_data,f,indent=4)
    print(extracted_data)
    # Also return the data so callers can use it programmatically
    # (previously the function only printed it and returned None).
    return extracted_data
from IPython.core.display import display, HTML
def displayProducts(prodlist):
    # Render clickable product thumbnails inside a Jupyter notebook.
    # `display` and `HTML` come from the IPython import above; clicking a
    # thumbnail shows a larger image and pushes the chosen ASIN back into
    # the kernel as the Python variable `selected_product`.
    html_code = """
    <table class="image">
    """
    # prodlist = ['B000068NW5','B0002CZV82','B0002E1NQ4','B0002GW3Y8','B0002M6B2M','B0002M72JS','B000KIRT74','B000L7MNUM','B000LFCXL8','B000WS1QC6']
    for prod in prodlist:
        # One cell per ASIN: Amazon's catalogue-image URL scheme embeds the
        # ASIN directly in the image path.
        html_code = html_code+ """
        <td><img src = "http://images.amazon.com/images/P/%s.01._PI_SCMZZZZZZZ_.jpg" style="float: left" id=%s onclick="itemselect(this)"</td>
        %s""" % (prod,prod,prod)
    html_code = html_code + """</table>
    <img id="myFinalImg" src="">"""
    # Click handler: swap in the larger image and record the selection in
    # the notebook kernel via IPython.notebook.kernel.execute.
    javascriptcode = """
    <script type="text/javascript">
    function itemselect(selectedprod){
        srcFile='http://images.amazon.com/images/P/'+selectedprod.id+'.01._PI_SCTZZZZZZZ_.jpg';
        document.getElementById("myFinalImg").src = srcFile;
        IPython.notebook.kernel.execute("selected_product = '" + selectedprod.id + "'")
    }
    </script>"""
    display(HTML(html_code + javascriptcode))
#spark.read.json("data.json").show()
#======================================================
# Render thumbnails for the top-reviewed products.  NOTE(review):
# `top_reviewed_products` is not defined in this file -- presumably a Spark
# DataFrame created earlier in the notebook; confirm before running
# standalone.
displayProducts(
    [ row[0] for row in top_reviewed_products.select('asin').collect() ])
| koulakis/amazon-review-qa-analysis | modules/scripts/WebDashboard.py | Python | mit | 7,846 |
from . import base
import numpy as np
class BinaryAccuracy(base.Metric):
    """Accuracy metric for binary targets: both sides are rounded to 0/1."""

    def compute(self, output, target, model=None):
        # Round predictions and targets to hard labels, compare elementwise.
        matches = target.round().eq(output.round())
        n_items = matches.numel()
        n_correct = matches.sum()
        return n_items, n_correct, n_correct / n_items

    def cumulate(self, metric_values=[]):
        # Each entry is one (total, correct, accuracy) triple from compute();
        # aggregate totals and corrects column-wise, then recompute accuracy.
        table = np.stack(metric_values)
        n_items = table[:, 0].sum()
        n_correct = table[:, 1].sum()
        return n_items, n_correct, n_correct / n_items

    @classmethod
    def plotable_columns(cls):
        return ["accuracy"]

    @classmethod
    def columns(cls):
        return ["total", "correct", "accuracy"]
class CategoricalAccuracy(base.Metric):
    # Accuracy for multi-class outputs: a prediction counts as correct when
    # its argmax matches the target's argmax.
    def compute(self, output, target, model=None):
        # topk(1, 1)[1] yields the index of the maximum along dim 1, i.e.
        # the predicted / true class per sample.  Assumes output and target
        # are (batch, classes) score tensors -- TODO confirm with callers.
        output_maxes = output.topk(1, 1)[1]
        target_maxes = target.topk(1, 1)[1]
        cmp = target_maxes.eq(output_maxes)
        total = cmp.size(0)
        correct = cmp.sum()
        return total, correct, correct / total
    def cumulate(self, metric_values=[]):
        # metric_values: list of (total, correct, accuracy) triples from
        # compute(); sum the first two columns and recompute the ratio.
        data = np.stack(metric_values).T
        total = data[0].sum()
        correct = data[1].sum()
        accuracy = correct / total
        return total, correct, accuracy
    @classmethod
    def plotable_columns(cls):
        return ["accuracy"]
    @classmethod
    def columns(cls):
        return ["total", "correct", "accuracy"]
| ynop/candle | candle/metrics/accuracy.py | Python | mit | 1,468 |
#!/usr/bin/env python
import os
# Mozharness configuration for B2G FOTA "eng"-variant builds in Mozilla
# release automation.  Consumed by the b2g build scripts; keys mirror the
# options those scripts read.
config = {
    # Build/upload actions executed by default, in order.
    "default_actions": [
        'clobber',
        'checkout-sources',
        'get-blobs',
        'update-source-manifest',
        'build',
        'build-symbols',
        'make-updates',
        'prep-upload',
        'upload',
        'make-socorro-json',
        'upload-source-manifest',
    ],
    # Upload targets: "default" goes to the private builds host, "public"
    # to the public stage server.
    "upload": {
        "default": {
            "ssh_key": os.path.expanduser("~/.ssh/b2gbld_dsa"),
            "ssh_user": "b2gbld",
            "upload_remote_host": "pvtbuilds.pvt.build.mozilla.org",
            "upload_remote_path": "/pvt/mozilla.org/b2gotoro/tinderbox-builds/%(branch)s-%(target)s/%(buildid)s",
            "upload_remote_symlink": "/pvt/mozilla.org/b2gotoro/tinderbox-builds/%(branch)s-%(target)s/latest",
            "upload_remote_nightly_path": "/pvt/mozilla.org/b2gotoro/nightly/%(branch)s-%(target)s/%(year)04i/%(month)02i/%(year)04i-%(month)02i-%(day)02i-%(hour)02i-%(minute)02i-%(second)02i",
            "upload_remote_nightly_symlink": "/pvt/mozilla.org/b2gotoro/nightly/%(branch)s-%(target)s/latest",
            "upload_dep_target_exclusions": [],
        },
        "public": {
            "ssh_key": os.path.expanduser("~/.ssh/ffxbld_dsa"),
            "ssh_user": "ffxbld",
            "upload_remote_host": "stage.mozilla.org",
            "post_upload_cmd": "post_upload.py --tinderbox-builds-dir %(branch)s-%(target)s -p b2g -i %(buildid)s --revision %(revision)s --release-to-tinderbox-dated-builds",
            "post_upload_nightly_cmd": "post_upload.py --tinderbox-builds-dir %(branch)s-%(target)s -b %(branch)s-%(target)s -p b2g -i %(buildid)s --revision %(revision)s --release-to-tinderbox-dated-builds --release-to-latest --release-to-dated",
        },
    },
    "tooltool_servers": ["http://runtime-binaries.pvt.build.mozilla.org/tooltool/"],
    # Shared VCS checkouts/bundles to speed up clones.
    "gittool_share_base": "/builds/git-shared/git",
    "gittool_base_mirror_urls": [],
    "hgtool_share_base": "/builds/hg-shared",
    "hgtool_base_bundle_urls": ["https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/bundles"],
    # Paths to external executables used by the scripts.
    "exes": {
        "tooltool.py": "/tools/tooltool.py",
        "python": "/tools/python27/bin/python2.7",
    },
    # Source-manifest upload settings and branch -> version mapping.
    "manifest": {
        "upload_remote_host": "stage.mozilla.org",
        "upload_remote_basepath": "/pub/mozilla.org/b2g/manifests/nightly/%(version)s",
        "depend_upload_remote_basepath": "/pub/mozilla.org/b2g/manifests/depend/%(branch)s/%(platform)s/%(buildid)s",
        "ssh_key": os.path.expanduser("~/.ssh/b2gbld_dsa"),
        "ssh_user": "b2gbld",
        "branches": {
            'mozilla-b2g28_v1_3t': '1.3.0t',
            'mozilla-b2g30_v1_4': '1.4.0',
            'mozilla-b2g32_v2_0': '2.0.0',
            'mozilla-aurora': '2.1.0',
            'mozilla-central': '2.2.0',
        },
        "translate_hg_to_git": True,
        "translate_base_url": "http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}",
        "target_suffix": "-eng",
    },
    # Environment exported to the build; includes ccache, symbol-server
    # upload credentials and Gaia/B2G build flags.
    "env": {
        "CCACHE_DIR": "/builds/ccache",
        "CCACHE_COMPRESS": "1",
        "CCACHE_UMASK": "002",
        "GAIA_OPTIMIZE": "1",
        "SYMBOL_SERVER_HOST": "symbolpush.mozilla.org",
        "SYMBOL_SERVER_USER": "b2gbld",
        "SYMBOL_SERVER_SSH_KEY": "/home/mock_mozilla/.ssh/b2gbld_dsa",
        "SYMBOL_SERVER_PATH": "/mnt/netapp/breakpad/symbols_b2g/",
        "POST_SYMBOL_UPLOAD_CMD": "/usr/local/bin/post-symbol-upload.py",
        "B2G_UPDATER": "1",
        "B2G_SYSTEM_APPS": "1",
        "WGET_OPTS": "-c -q",
        "PATH": "/tools/python27/bin:%(PATH)s",
        "B2G_UPDATE_CHANNEL": "default",
    },
    # Minimum free disk space (GB) before a purge is triggered.
    "purge_minsize": 15,
    "clobberer_url": "http://clobberer.pvt.build.mozilla.org/index.php",
    "is_automation": True,
    "variant": "eng",
    "target_suffix": "-eng",
    # FOTA = full over-the-air update images.
    "update_type": "fota",
    # Mapping of upstream git remotes to the mozilla.org mirrors used by repo.
    "repo_mirror_dir": "/builds/git-shared/repo",
    "repo_remote_mappings": {
        'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
        'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
        'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
        'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
        'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
    },
}
| kartikgupta0909/gittest | configs/b2g/releng-fota-eng.py | Python | mpl-2.0 | 4,525 |
import hashlib
import hmac

from Crypto.Hash import SHA384 as libsha384

from crypto.hashes.hashinterface import HashInterface
class SHA384(HashInterface):
    """SHA-384 implementation of HashInterface.

    Backed by the standard library's ``hashlib`` (digests are identical to
    the previously used ``Crypto.Hash.SHA384``), so the third-party
    dependency is no longer required by this class.
    """

    def hashString(self, stringMessage):
        """Return the SHA-384 digest of *stringMessage* as raw bytes.

        The message is encoded with the platform default (UTF-8) before
        hashing, matching the original behavior.
        """
        return hashlib.sha384(stringMessage.encode()).digest()

    def getDigestSize(self):
        """Return the digest size in bytes (SHA-384 -> 384 bits -> 48)."""
        return 48

    def isValidHash(self, stringMessage, hashBytes):
        """Check whether *hashBytes* is the SHA-384 digest of *stringMessage*.

        Uses ``hmac.compare_digest`` for a constant-time comparison, which
        avoids leaking information through timing when the expected digest
        is attacker-controlled.
        """
        return hmac.compare_digest(self.hashString(stringMessage), hashBytes)
| bensoer/pychat | crypto/hashes/sha384.py | Python | mit | 443 |
# (c) Copyright 2016 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for brcd fc zone client http(s)."""
import time
from oslo_utils import encodeutils
import mock
from mock import patch
import six
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_http_fc_zone_client
as client)
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
# ---------------------------------------------------------------------------
# Zoning fixtures: zone configs ("cfgs"), zones, and the expected state
# after add/delete operations.  WWN strings use the switch's ';'-separated
# member format.
# ---------------------------------------------------------------------------
cfgs = {'openstack_cfg': 'zone1;zone2'}
cfgs_to_delete = {
    'openstack_cfg': 'zone1;zone2;openstack50060b0000c26604201900051ee8e329'}
zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
         'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
zones_to_delete = {
    'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
    'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
    'openstack50060b0000c26604201900051ee8e329':
        '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'}
# Default client state (aliases, quickloop zones, intra-fabric aliases, etc.)
# seeded into the test instance.
alias = {}
qlps = {}
ifas = {}
parsed_raw_zoneinfo = ""
random_no = ''
auth_version = ''
session = None
active_cfg = 'openstack_cfg'
activate = True
no_activate = False
vf_enable = True
ns_info = ['10:00:00:05:1e:7c:64:96']
# Canned switch HTTP response: name-server (device port) page.
nameserver_info = """
<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>NSInfo Page</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN DEVICEPORT 10:00:00:05:1e:7c:64:96
node.wwn=20:00:00:05:1e:7c:64:96
deviceport.portnum=9
deviceport.portid=300900
deviceport.portIndex=9
deviceport.porttype=N
deviceport.portwwn=10:00:00:05:1e:7c:64:96
--END DEVICEPORT 10:00:00:05:1e:7c:64:96
</PRE>
</BODY>
</HTML>
"""
# Expected POST bodies for the zone-data page (activate vs. save-only).
mocked_zone_string = 'zonecfginfo=openstack_cfg zone1;zone2 '\
    'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\
    'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\
    'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\
    'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
    'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
    'openstack_cfg null &saveonly=false'
mocked_zone_string_no_activate = 'zonecfginfo=openstack_cfg zone1;zone2 '\
    'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\
    'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\
    'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\
    'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
    'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c &saveonly=true'
zone_string_to_post = "zonecfginfo=openstack_cfg "\
    "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\
    "openstack50060b0000c26604201900051ee8e329 "\
    "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\
    "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
    "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
    "openstack_cfg null &saveonly=false"
zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\
    "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\
    "openstack50060b0000c26604201900051ee8e329 "\
    "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 " \
    "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
    "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
    "&saveonly=true"
# Over-long zone name used to exercise the switch's name-length error.
zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\
    "openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\
    "zone1;zone2 openstack50060b0000c26604201900051ee8e329000000000000000000000"\
    "00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\
    "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
    "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true"
zone_string_del_to_post = "zonecfginfo=openstack_cfg zone1;zone2"\
    " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
    "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
    "openstack_cfg null &saveonly=false"
zone_string_del_to_post_no_active = "zonecfginfo=openstack_cfg zone1;zone2"\
    " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 " \
    "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
    "&saveonly=true"
# Zone-transaction result pages: one reporting an error, one clean.
zone_post_page = """
<BODY>
<PRE>
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=-1
errorMessage=Name too long
--END ZONE_TXN_INFO
</PRE>
</BODY>"""
zone_post_page_no_error = """
<BODY>
<PRE>
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=0
errorMessage=
--END ZONE_TXN_INFO
</PRE>
</BODY>"""
# Security-info page (provides the RANDOM value used to build auth tokens).
secinfo_resp = """
<BODY>
<PRE>
--BEGIN SECINFO
SECURITY = OFF
RANDOM = 6281590
DefaultPasswdBitmap = 0
primaryFCS = no
switchType = 66
resource = 10.24.48.210
REALM = FC Switch Administration
AUTHMETHOD = Custom_Basic
hasUpfrontLogin=yes
AUTHVERSION = 1
vfEnabled=false
vfSupported=true
--END SECINFO
</PRE>
</BODY>
"""
# Authentication pages: success and failure.
authenticate_resp = """<HTML>
<PRE>
--BEGIN AUTHENTICATE
authenticated = yes
username=admin
userrole=admin
adCapable=1
currentAD=AD0
trueADEnvironment=0
adId=0
adList=ALL
contextType=0
--END AUTHENTICATE
</PRE>
</BODY>
"""
un_authenticate_resp = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Authentication</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN AUTHENTICATE
authenticated = no
errCode = -3
authType = Custom_Basic
realm = FC Switch Administration
--END AUTHENTICATE
</PRE>
</BODY>
</HTML>"""
# Switch-info pages: supported and unsupported firmware versions.
switch_page_resp = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
</HEAD>
<BODY>
<PRE>
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
--END SWITCH INFORMATION
</PRE>
</BODY>
</HTML>
"""
switch_page_invalid_firm = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
</HEAD>
<BODY>
<PRE>
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v6.1.1
swDomain=2
--END SWITCH INFORMATION
</PRE>
</BODY>
</HTML>
"""
# Expected payload extracted from switch_page_resp by get_parsed_data().
parsed_value = """
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
"""
# Session pages for virtual-fabric (VF) handling and their parsed form.
parsed_session_info_vf = """
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
"""
session_info_vf = """
<BODY>
<PRE>
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
</PRE>
</BODY>
"""
# Session page where sessionLFId stayed at 128 (VF context change failed).
session_info_vf_not_changed = """
<BODY>
<PRE>
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
User-Agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=128
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
</PRE>
</BODY>
"""
# Session page from an Admin-Domain (non-VF) switch; lacks VF fields.
session_info_AD = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Webtools Session Info</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN SESSION
sessionId=-2096740776
user=
userRole=root
isAdminRole=No
authSource=0
sessionIp=
User-Agent=
valid=no
adName=
adId=0
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=1
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=-2
err=Could not obtain session data from store
contextType=0
--END SESSION
</PRE>
</BODY>
</HTML>
"""
# Zone-configuration page containing cfgs, zones, aliases, quickloops and
# intra-fabric aliases, plus the active config name.
zone_info = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Zone Configuration Information</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN ZONE CHANGE
LastZoneChangeTime=1421926251
--END ZONE CHANGE
isZoneTxnSupported=true
ZoneLicense=true
QuickLoopLicense=true
DefZoneStatus=noaccess
McDataDefaultZone=false
McDataSafeZone=false
AvailableZoneSize=1043890
--BEGIN ZONE INFO
openstack_cfg zone1;zone2 """\
    """zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 """\
    """zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 """\
    """alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 """\
    """qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """openstack_cfg null 1045274"""\
    """--END ZONE INFO
</PRE>
</BODY>
</HTML>
"""
# Expected parsed active zone set corresponding to zone_info above.
active_zone_set = {
    'zones':
        {'zone1':
            ['20:01:00:05:33:0e:96:15', '20:00:00:05:33:0e:93:11'],
         'zone2':
            ['20:01:00:05:33:0e:96:14', '20:00:00:05:33:0e:93:11']},
    'active_zone_config': 'openstack_cfg'}
updated_zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
                 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
                 'test_updated_zone':
                     '20:01:00:05:33:0e:96:10;20:00:00:05:33:0e:93:11'}
updated_cfgs = {'openstack_cfg': 'test_updated_zone;zone1;zone2'}
# Longest zone name the switch accepts in these tests.
valid_zone_name = "openstack50060b0000c26604201900051ee8e329"
class TestBrcdHttpFCZoneClient(client.BrcdHTTPFCZoneClient, test.TestCase):
    """Unit tests for BrcdHTTPFCZoneClient.

    Inherits from the client under test so its methods can be called
    directly on the test instance, with ``connect()`` and other HTTP
    entry points replaced by mocks returning the canned pages above.
    """

    def setUp(self):
        # Seed the client state that would normally be established by the
        # client's __init__/connect sequence (which is bypassed below).
        self.auth_header = "YWRtaW46cGFzc3dvcmQ6NDM4ODEyNTIw"
        self.switch_user = "admin"
        self.switch_pwd = "password"
        self.protocol = "HTTPS"
        self.conn = None
        self.alias = {}
        self.qlps = {}
        self.ifas = {}
        self.parsed_raw_zoneinfo = ""
        self.random_no = ''
        self.auth_version = ''
        self.session = None
        super(TestBrcdHttpFCZoneClient, self).setUp()

    # override some of the functions
    def __init__(self, *args, **kwargs):
        # Initialize only the TestCase side; the client's __init__ would
        # try to contact a real switch.
        test.TestCase.__init__(self, *args, **kwargs)

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_create_auth_token(self, connect_mock):
        connect_mock.return_value = secinfo_resp
        self.assertEqual("Custom_Basic YWRtaW46cGFzc3dvcmQ6NjI4MTU5MA==",
                         self.create_auth_token())

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_authenticate(self, connect_mock):
        connect_mock.return_value = authenticate_resp
        self.assertEqual(
            (True, "Custom_Basic YWRtaW46eHh4Og=="), self.authenticate())

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_authenticate_failed(self, connect_mock):
        connect_mock.return_value = un_authenticate_resp
        self.assertRaises(
            exception.BrocadeZoningHttpException, self.authenticate)

    def test_get_parsed_data(self):
        # Extraction succeeds only when both delimiters exist in the page.
        valid_delimiter1 = zone_constant.SWITCHINFO_BEGIN
        valid_delimiter2 = zone_constant.SWITCHINFO_END
        invalid_delimiter = "--END SWITCH INFORMATION1"
        self.assertEqual(parsed_value, self.get_parsed_data(
            switch_page_resp, valid_delimiter1, valid_delimiter2))
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.get_parsed_data,
                          switch_page_resp,
                          valid_delimiter1,
                          invalid_delimiter)
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.get_parsed_data,
                          switch_page_resp,
                          invalid_delimiter,
                          valid_delimiter2)

    def test_get_nvp_value(self):
        valid_keyname = zone_constant.FIRMWARE_VERSION
        invalid_keyname = "swFWVersion1"
        self.assertEqual(
            "v7.3.0b_rc1_bld06", self.get_nvp_value(parsed_value,
                                                    valid_keyname))
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.get_nvp_value,
                          parsed_value,
                          invalid_keyname)

    def test_get_managable_vf_list(self):
        manageable_list = ['2', '128']
        self.assertEqual(
            manageable_list, self.get_managable_vf_list(session_info_vf))
        # An AD (non-VF) session page has no manageable-LF list.
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.get_managable_vf_list, session_info_AD)

    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled')
    def test_check_change_vf_context_vf_enabled(self, is_vf_enabled_mock):
        is_vf_enabled_mock.return_value = (True, session_info_vf)
        # VF enabled but no vfid configured -> error.
        self.vfid = None
        self.assertRaises(
            exception.BrocadeZoningHttpException,
            self.check_change_vf_context)
        self.vfid = "2"
        with mock.patch.object(self, 'change_vf_context') \
                as change_vf_context_mock:
            self.check_change_vf_context()
            change_vf_context_mock.assert_called_once_with(
                self.vfid, session_info_vf)

    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled')
    def test_check_change_vf_context_vf_disabled(self, is_vf_enabled_mock):
        is_vf_enabled_mock.return_value = (False, session_info_AD)
        # vfid set while the switch has VF disabled -> error.
        self.vfid = "128"
        self.assertRaises(
            exception.BrocadeZoningHttpException,
            self.check_change_vf_context)

    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_change_vf_context_valid(self, connect_mock,
                                     get_managable_vf_list_mock):
        get_managable_vf_list_mock.return_value = ['2', '128']
        connect_mock.return_value = session_info_vf
        self.assertIsNone(self.change_vf_context("2", session_info_vf))
        data = zone_constant.CHANGE_VF.format(vfid="2")
        headers = {zone_constant.AUTH_HEADER: self.auth_header}
        connect_mock.assert_called_once_with(
            zone_constant.POST_METHOD, zone_constant.SESSION_PAGE,
            data, headers)

    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_change_vf_context_vf_not_changed(self,
                                              connect_mock,
                                              get_managable_vf_list_mock):
        get_managable_vf_list_mock.return_value = ['2', '128']
        # Switch reports sessionLFId unchanged -> the context switch failed.
        connect_mock.return_value = session_info_vf_not_changed
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.change_vf_context, "2", session_info_vf)
        data = zone_constant.CHANGE_VF.format(vfid="2")
        headers = {zone_constant.AUTH_HEADER: self.auth_header}
        connect_mock.assert_called_once_with(
            zone_constant.POST_METHOD, zone_constant.SESSION_PAGE,
            data, headers)

    @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
    def test_change_vf_context_vfid_not_managaed(self,
                                                 get_managable_vf_list_mock):
        get_managable_vf_list_mock.return_value = ['2', '128']
        # Requested vfid is not in the manageable list.
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.change_vf_context, "12", session_info_vf)

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_is_supported_firmware(self, connect_mock):
        connect_mock.return_value = switch_page_resp
        self.assertTrue(self.is_supported_firmware())

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_is_supported_firmware_invalid(self, connect_mock):
        connect_mock.return_value = switch_page_invalid_firm
        self.assertFalse(self.is_supported_firmware())

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_get_active_zone_set(self, connect_mock):
        connect_mock.return_value = zone_info
        returned_zone_map = self.get_active_zone_set()
        self.assertDictEqual(active_zone_set, returned_zone_map)

    def test_form_zone_string(self):
        new_alias = {
            'alia1': u'10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'}
        new_qlps = {'qlp': u'10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}
        new_ifas = {'fa1': u'20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}
        # The zone string must be bytes, and differ for activate/save-only.
        self.assertEqual(type(self.form_zone_string(
            cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)),
            six.binary_type)
        self.assertEqual(
            encodeutils.safe_encode(mocked_zone_string),
            self.form_zone_string(
                cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True))
        self.assertEqual(
            encodeutils.safe_encode(mocked_zone_string_no_activate),
            self.form_zone_string(
                cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, False))

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_add_zones_activate(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("0", "")
        self.cfgs = cfgs.copy()
        self.zones = zones.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        add_zones_info = {valid_zone_name:
                          ['50:06:0b:00:00:c2:66:04',
                           '20:19:00:05:1e:e8:e3:29']
                          }
        self.add_zones(add_zones_info, True)
        post_zone_data_mock.assert_called_once_with(
            encodeutils.safe_encode(zone_string_to_post))

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_add_zones_invalid_zone_name(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("-1", "Name Too Long")
        self.cfgs = cfgs.copy()
        self.zones = zones.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        invalid_zone_name = valid_zone_name + "00000000000000000000000000"
        add_zones_info = {invalid_zone_name:
                          ['50:06:0b:00:00:c2:66:04',
                           '20:19:00:05:1e:e8:e3:29']
                          }
        self.assertRaises(
            exception.BrocadeZoningHttpException,
            self.add_zones, add_zones_info, False)

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_add_zones_no_activate(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("0", "")
        self.cfgs = cfgs.copy()
        self.zones = zones.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        add_zones_info = {valid_zone_name:
                          ['50:06:0b:00:00:c2:66:04',
                           '20:19:00:05:1e:e8:e3:29']
                          }
        self.add_zones(add_zones_info, False)
        post_zone_data_mock.assert_called_once_with(
            encodeutils.safe_encode(zone_string_to_post_no_activate))

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_delete_zones_activate(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("0", "")
        self.cfgs = cfgs_to_delete.copy()
        self.zones = zones_to_delete.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        delete_zones_info = valid_zone_name
        self.delete_zones(delete_zones_info, True)
        post_zone_data_mock.assert_called_once_with(
            encodeutils.safe_encode(zone_string_del_to_post))

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_delete_zones_no_activate(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("0", "")
        self.cfgs = cfgs_to_delete.copy()
        self.zones = zones_to_delete.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        delete_zones_info = valid_zone_name
        self.delete_zones(delete_zones_info, False)
        post_zone_data_mock.assert_called_once_with(
            encodeutils.safe_encode(zone_string_del_to_post_no_active))

    @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
    def test_delete_zones_invalid_zone_name(self, post_zone_data_mock):
        post_zone_data_mock.return_value = ("0", "")
        self.cfgs = cfgs_to_delete.copy()
        self.zones = zones_to_delete.copy()
        self.alias = alias.copy()
        self.qlps = qlps.copy()
        self.ifas = ifas.copy()
        self.active_cfg = active_cfg
        # Name does not exist in the zone DB (one char short) -> error.
        delete_zones_info = 'openstack50060b0000c26604201900051ee8e32'
        self.assertRaises(exception.BrocadeZoningHttpException,
                          self.delete_zones, delete_zones_info, False)

    @patch.object(time, 'sleep')
    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_post_zone_data(self, connect_mock, sleep_mock):
        # Error code/message are parsed from the transaction result page.
        connect_mock.return_value = zone_post_page
        self.assertEqual(
            ("-1", "Name too long"), self.post_zone_data(zone_string_to_post))
        connect_mock.return_value = zone_post_page_no_error
        self.assertEqual(("0", ""), self.post_zone_data(zone_string_to_post))

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_get_nameserver_info(self, connect_mock):
        connect_mock.return_value = nameserver_info
        self.assertEqual(ns_info, self.get_nameserver_info())

    @patch.object(client.BrcdHTTPFCZoneClient, 'get_session_info')
    def test_is_vf_enabled(self, get_session_info_mock):
        get_session_info_mock.return_value = session_info_vf
        self.assertEqual((True, parsed_session_info_vf), self.is_vf_enabled())

    def test_delete_zones_cfgs(self):
        # Deleting one zone from the cfg.
        cfgs = {'openstack_cfg': 'zone1;zone2'}
        zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
                 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
        delete_zones_info = valid_zone_name
        self.assertEqual(
            (zones, cfgs, active_cfg),
            self.delete_zones_cfgs(
                cfgs_to_delete.copy(),
                zones_to_delete.copy(),
                delete_zones_info,
                active_cfg))
        # Deleting multiple zones (';'-separated names).
        cfgs = {'openstack_cfg': 'zone2'}
        zones = {'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
        delete_zones_info = valid_zone_name + ";zone1"
        self.assertEqual(
            (zones, cfgs, active_cfg),
            self.delete_zones_cfgs(
                cfgs_to_delete.copy(),
                zones_to_delete.copy(),
                delete_zones_info,
                active_cfg))

    def test_add_zones_cfgs(self):
        # Adding a single zone.
        add_zones_info = {valid_zone_name:
                          ['50:06:0b:00:00:c2:66:04',
                           '20:19:00:05:1e:e8:e3:29']
                          }
        updated_cfgs = {
            'openstack_cfg':
                valid_zone_name + ';zone1;zone2'}
        updated_zones = {
            'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
            'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
            valid_zone_name:
                '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'}
        self.assertEqual((updated_zones, updated_cfgs, active_cfg),
                         self.add_zones_cfgs(
                             cfgs.copy(),
                             zones.copy(),
                             add_zones_info,
                             active_cfg,
                             "openstack_cfg"))
        # Adding two zones; cfg member ordering is not guaranteed, so the
        # cfg string is checked by membership rather than exact equality.
        add_zones_info = {valid_zone_name:
                          ['50:06:0b:00:00:c2:66:04',
                           '20:19:00:05:1e:e8:e3:29'],
                          'test4':
                          ['20:06:0b:00:00:b2:66:07',
                           '20:10:00:05:1e:b8:c3:19']
                          }
        updated_cfgs = {
            'openstack_cfg':
                'test4;openstack50060b0000c26604201900051ee8e329;zone1;zone2'}
        updated_zones = {
            'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
            'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
            valid_zone_name:
                '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29',
            'test4': '20:06:0b:00:00:b2:66:07;20:10:00:05:1e:b8:c3:19'}
        result = self.add_zones_cfgs(cfgs.copy(), zones.copy(), add_zones_info,
                                     active_cfg, "openstack_cfg")
        self.assertEqual(updated_zones, result[0])
        self.assertEqual(active_cfg, result[2])
        result_cfg = result[1]['openstack_cfg']
        self.assertIn('test4', result_cfg)
        self.assertIn('openstack50060b0000c26604201900051ee8e329', result_cfg)
        self.assertIn('zone1', result_cfg)
        self.assertIn('zone2', result_cfg)

    @patch.object(client.BrcdHTTPFCZoneClient, 'connect')
    def test_get_zone_info(self, connect_mock):
        # Parsing the zone page should populate all client-side caches.
        connect_mock.return_value = zone_info
        self.get_zone_info()
        self.assertEqual({'openstack_cfg': 'zone1;zone2'}, self.cfgs)
        self.assertEqual(
            {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
             'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'},
            self.zones)
        self.assertEqual('openstack_cfg', self.active_cfg)
        self.assertEqual(
            {'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'},
            self.alias)
        self.assertEqual(
            {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'},
            self.ifas)
        self.assertEqual(
            {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'},
            self.qlps)
| Datera/cinder | cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py | Python | apache-2.0 | 27,655 |
import unittest
from unittest.mock import patch
from taiga.models import Webhook, Webhooks
from taiga.requestmaker import RequestMaker
class TestWebhooks(unittest.TestCase):
    """Unit tests for the Webhooks list resource."""

    @patch("taiga.models.base.ListResource._new_resource")
    def test_create_webhook(self, mock_new_resource):
        """create() must forward every field in a single payload dict."""
        requester = RequestMaker("/api/v1", "fakehost", "faketoken")
        mock_new_resource.return_value = Webhook(requester)
        webhooks = Webhooks(requester)
        webhooks.create(1, "Webhook-Name", "Webhook-Url", "Webhook-Key")
        expected_payload = {
            "project": 1,
            "name": "Webhook-Name",
            "url": "Webhook-Url",
            "key": "Webhook-Key",
        }
        mock_new_resource.assert_called_with(payload=expected_payload)
| nephila/python-taiga | tests/test_webhooks.py | Python | mit | 641 |
import os
import datetime as dt
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
from numpy.testing import assert_almost_equal
import pandas as pd
import unittest
import pytest
from pvlib.location import Location
# Detect an installed numba and collapse its version to an integer (e.g.
# "0.43.1" -> 43) so numba-accelerated SPA tests can be gated on it;
# 0 means numba is unavailable.
try:
    from numba import __version__ as numba_version
    numba_version_int = int(numba_version.split('.')[0] +
                            numba_version.split('.')[1])
except ImportError:
    numba_version_int = 0

# Test fixture: a single timestamp (2003-10-17 12:30:30 MST) and an observer
# location/atmosphere, followed by the expected intermediate solar-position
# quantities (Julian dates, nutation, obliquity, right ascension, etc.)
# asserted by the SPA test methods below.
times = (pd.date_range('2003-10-17 12:30:30', periods=1, freq='D')
         .tz_localize('MST'))
unixtimes = np.array(times.tz_convert('UTC').astype(np.int64)*1.0/10**9)
lat = 39.742476
lon = -105.1786
elev = 1830.14
pressure = 820
temp = 11
delta_t = 67.0
atmos_refract = 0.5667
JD = 2452930.312847
JC = 0.0379277986858
JDE = 2452930.313623
JCE = 0.037927819916852
JME = 0.003792781991685
L = 24.0182616917
B = -0.0001011219
R = 0.9965422974
Theta = 204.0182616917
beta = 0.0001011219
X0 = 17185.861179
X1 = 1722.893218
X2 = 18234.075703
X3 = 18420.071012
X4 = 51.686951
dPsi = -0.00399840
dEpsilon = 0.00166657
epsilon0 = 84379.672625
epsilon = 23.440465
dTau = -0.005711
lamd = 204.0085519281
v0 = 318.515579
v = 318.511910
alpha = 202.227408
delta = -9.31434
H = 11.10590
xi = 0.002451
dAlpha = -0.000369
alpha_prime = 202.22704
delta_prime = -9.316179
H_prime = 11.10627
e0 = 39.872046
de = 0.016332
e = 39.888378
theta = 50.11162
theta0 = 90 - e0
Gamma = 14.340241
Phi = 194.340241
# Inputs and expected results for the delta-T (terrestrial minus universal
# time) estimation tests, covering scalar, array and mixed cases.
year = 1985
month = 2
year_array = np.array([-499, 500, 1000, 1500, 1800, 1900, 1950, 1970, 1985, 1990, 2000, 2005])
month_array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
dt_actual = 54.413442486
dt_actual_array = np.array([1.7184831e+04, 5.7088051e+03, 1.5730419e+03,
                            1.9801820e+02, 1.3596506e+01, -2.1171894e+00,
                            2.9289261e+01, 4.0824887e+01, 5.4724581e+01,
                            5.7426651e+01, 6.4108015e+01, 6.5038015e+01])
mix_year_array = np.full((10), year)
mix_month_array = np.full((10), month)
mix_year_actual = np.full((10), dt_actual)
mix_month_actual = mix_year_actual
class SpaBase(object):
    """Test functions common to numpy and numba spa"""
    # Each test checks one intermediate quantity of the NREL Solar Position
    # Algorithm against the module-level constant of the same name
    # (JD, JDE, JC, ... defined earlier in this module), to the number of
    # decimal places given as the final argument of assert_almost_equal.
    # ``self.spa`` is bound by the concrete subclasses' setUpClass.

    def test_julian_day_dt(self):
        # Split the reference timestamp into the calendar fields that
        # julian_day_dt() takes individually.
        dt = times.tz_convert('UTC')[0]
        year = dt.year
        month = dt.month
        day = dt.day
        hour = dt.hour
        minute = dt.minute
        second = dt.second
        microsecond = dt.microsecond
        assert_almost_equal(JD,
                            self.spa.julian_day_dt(year, month, day, hour,
                                                   minute, second,
                                                   microsecond), 6)

    def test_julian_ephemeris_day(self):
        assert_almost_equal(JDE, self.spa.julian_ephemeris_day(JD, delta_t), 5)

    def test_julian_century(self):
        assert_almost_equal(JC, self.spa.julian_century(JD), 6)

    def test_julian_ephemeris_century(self):
        assert_almost_equal(JCE, self.spa.julian_ephemeris_century(JDE), 10)

    def test_julian_ephemeris_millenium(self):
        # NOTE: the test name spells 'millenium'; the function under test is
        # julian_ephemeris_millennium.
        assert_almost_equal(JME, self.spa.julian_ephemeris_millennium(JCE), 10)

    def test_heliocentric_longitude(self):
        assert_almost_equal(L, self.spa.heliocentric_longitude(JME), 6)

    def test_heliocentric_latitude(self):
        assert_almost_equal(B, self.spa.heliocentric_latitude(JME), 6)

    def test_heliocentric_radius_vector(self):
        assert_almost_equal(R, self.spa.heliocentric_radius_vector(JME), 6)

    def test_geocentric_longitude(self):
        assert_almost_equal(Theta, self.spa.geocentric_longitude(L), 6)

    def test_geocentric_latitude(self):
        assert_almost_equal(beta, self.spa.geocentric_latitude(B), 6)

    def test_mean_elongation(self):
        assert_almost_equal(X0, self.spa.mean_elongation(JCE), 5)

    def test_mean_anomaly_sun(self):
        assert_almost_equal(X1, self.spa.mean_anomaly_sun(JCE), 5)

    def test_mean_anomaly_moon(self):
        assert_almost_equal(X2, self.spa.mean_anomaly_moon(JCE), 5)

    def test_moon_argument_latitude(self):
        assert_almost_equal(X3, self.spa.moon_argument_latitude(JCE), 5)

    def test_moon_ascending_longitude(self):
        assert_almost_equal(X4, self.spa.moon_ascending_longitude(JCE), 6)

    def test_longitude_nutation(self):
        assert_almost_equal(dPsi, self.spa.longitude_nutation(JCE, X0, X1, X2,
                                                              X3, X4), 6)

    def test_obliquity_nutation(self):
        assert_almost_equal(dEpsilon, self.spa.obliquity_nutation(JCE, X0, X1,
                                                                  X2, X3, X4),
                            6)

    def test_mean_ecliptic_obliquity(self):
        assert_almost_equal(epsilon0, self.spa.mean_ecliptic_obliquity(JME), 6)

    def test_true_ecliptic_obliquity(self):
        assert_almost_equal(epsilon, self.spa.true_ecliptic_obliquity(
            epsilon0, dEpsilon), 6)

    def test_aberration_correction(self):
        assert_almost_equal(dTau, self.spa.aberration_correction(R), 6)

    def test_apparent_sun_longitude(self):
        assert_almost_equal(lamd, self.spa.apparent_sun_longitude(
            Theta, dPsi, dTau), 6)

    def test_mean_sidereal_time(self):
        assert_almost_equal(v0, self.spa.mean_sidereal_time(JD, JC), 3)

    def test_apparent_sidereal_time(self):
        assert_almost_equal(v, self.spa.apparent_sidereal_time(
            v0, dPsi, epsilon), 5)

    def test_geocentric_sun_right_ascension(self):
        assert_almost_equal(alpha, self.spa.geocentric_sun_right_ascension(
            lamd, epsilon, beta), 6)

    def test_geocentric_sun_declination(self):
        assert_almost_equal(delta, self.spa.geocentric_sun_declination(
            lamd, epsilon, beta), 6)

    def test_local_hour_angle(self):
        assert_almost_equal(H, self.spa.local_hour_angle(v, lon, alpha), 4)

    def test_equatorial_horizontal_parallax(self):
        assert_almost_equal(xi, self.spa.equatorial_horizontal_parallax(R), 6)

    def test_parallax_sun_right_ascension(self):
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)  # computed but not used by this step
        assert_almost_equal(dAlpha, self.spa.parallax_sun_right_ascension(
            x, xi, H, delta), 4)

    def test_topocentric_sun_right_ascension(self):
        assert_almost_equal(alpha_prime,
                            self.spa.topocentric_sun_right_ascension(
                                alpha, dAlpha), 5)

    def test_topocentric_sun_declination(self):
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)
        assert_almost_equal(delta_prime, self.spa.topocentric_sun_declination(
            delta, x, y, xi, dAlpha, H), 5)

    def test_topocentric_local_hour_angle(self):
        assert_almost_equal(H_prime, self.spa.topocentric_local_hour_angle(
            H, dAlpha), 5)

    def test_topocentric_elevation_angle_without_atmosphere(self):
        assert_almost_equal(
            e0, self.spa.topocentric_elevation_angle_without_atmosphere(
                lat, delta_prime, H_prime), 6)

    def test_atmospheric_refraction_correction(self):
        assert_almost_equal(de, self.spa.atmospheric_refraction_correction(
            pressure, temp, e0, atmos_refract), 6)

    def test_topocentric_elevation_angle(self):
        assert_almost_equal(e, self.spa.topocentric_elevation_angle(e0, de), 6)

    def test_topocentric_zenith_angle(self):
        assert_almost_equal(theta, self.spa.topocentric_zenith_angle(e), 5)

    def test_topocentric_astronomers_azimuth(self):
        assert_almost_equal(Gamma, self.spa.topocentric_astronomers_azimuth(
            H_prime, delta_prime, lat), 5)

    def test_topocentric_azimuth_angle(self):
        assert_almost_equal(Phi, self.spa.topocentric_azimuth_angle(Gamma), 5)

    def test_solar_position(self):
        # Full pipeline: zenith/elevation/azimuth outputs ...
        assert_almost_equal(
            np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract)[:-1], 5)
        # ... and the sidereal-time variant (sst=True) returns v/alpha/delta.
        assert_almost_equal(
            np.array([[v, alpha, delta]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, sst=True)[:3], 5)

    def test_equation_of_time(self):
        eot = 14.64  # expected equation of time in minutes
        M = self.spa.sun_mean_longitude(JME)
        assert_almost_equal(eot, self.spa.equation_of_time(
            M, alpha, dPsi, epsilon), 2)

    def test_transit_sunrise_sunset(self):
        # tests at greenwich
        times = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 0),
                                  dt.datetime(2004, 12, 4, 0)]
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 7, 8, 15),
                                    dt.datetime(2004, 12, 4, 4, 38, 57)]
                                   ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 17, 1, 4),
                                   dt.datetime(2004, 12, 4, 19, 2, 2)]
                                  ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        times = np.array(times)
        sunrise = np.array(sunrise)
        sunset = np.array(sunset)
        result = self.spa.transit_sunrise_sunset(times, -35.0, 0.0, 64.0, 1)
        # Compare in kiloseconds so the decimal-place tolerance maps to a
        # few seconds of wall-clock time.
        assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
        assert_almost_equal(sunset/1e3, result[2]/1e3, 3)
        times = pd.DatetimeIndex([dt.datetime(1994, 1, 2),]
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 16, 59, 55),]
                                  ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 7, 8, 12),]
                                   ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        times = np.array(times)
        sunrise = np.array(sunrise)
        sunset = np.array(sunset)
        result = self.spa.transit_sunrise_sunset(times, 35.0, 0.0, 64.0, 1)
        assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
        assert_almost_equal(sunset/1e3, result[2]/1e3, 3)
        # tests from USNO
        # Golden
        times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
                                  dt.datetime(2015, 4, 2),
                                  dt.datetime(2015, 8, 2),
                                  dt.datetime(2015, 12, 2),],
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 19),
                                    dt.datetime(2015, 4, 2, 5, 43),
                                    dt.datetime(2015, 8, 2, 5, 1),
                                    dt.datetime(2015, 12, 2, 7, 1),],
                                   ).tz_localize('MST').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 16, 49),
                                   dt.datetime(2015, 4, 2, 18, 24),
                                   dt.datetime(2015, 8, 2, 19, 10),
                                   dt.datetime(2015, 12, 2, 16, 38),],
                                  ).tz_localize('MST').astype(np.int64)*1.0/10**9
        times = np.array(times)
        sunrise = np.array(sunrise)
        sunset = np.array(sunset)
        result = self.spa.transit_sunrise_sunset(times, 39.0, -105.0, 64.0, 1)
        # USNO tables are only minute-resolution, hence the looser tolerance.
        assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
        assert_almost_equal(sunset/1e3, result[2]/1e3, 1)
        # Beijing
        times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
                                  dt.datetime(2015, 4, 2),
                                  dt.datetime(2015, 8, 2),
                                  dt.datetime(2015, 12, 2),],
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 36),
                                    dt.datetime(2015, 4, 2, 5, 58),
                                    dt.datetime(2015, 8, 2, 5, 13),
                                    dt.datetime(2015, 12, 2, 7, 17),],
                                   ).tz_localize('Asia/Shanghai'
                                                 ).astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 17, 0),
                                   dt.datetime(2015, 4, 2, 18, 39),
                                   dt.datetime(2015, 8, 2, 19, 28),
                                   dt.datetime(2015, 12, 2, 16, 50),],
                                  ).tz_localize('Asia/Shanghai'
                                                ).astype(np.int64)*1.0/10**9
        times = np.array(times)
        sunrise = np.array(sunrise)
        sunset = np.array(sunset)
        result = self.spa.transit_sunrise_sunset(times, 39.917, 116.383, 64.0, 1)
        assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
        assert_almost_equal(sunset/1e3, result[2]/1e3, 1)

    def test_earthsun_distance(self):
        times = (pd.date_range('2003-10-17 12:30:30', periods=1, freq='D')
                 .tz_localize('MST'))
        unixtimes = times.tz_convert('UTC').astype(np.int64)*1.0/10**9
        unixtimes = np.array(unixtimes)
        result = self.spa.earthsun_distance(unixtimes, 64.0, 1)
        # Distance should equal the heliocentric radius vector R (AU).
        assert_almost_equal(R, result, 6)

    def test_calculate_deltat(self):
        # Broadcasting: array of (identical) years with a scalar month ...
        result_mix_year = self.spa.calculate_deltat(mix_year_array, month)
        assert_almost_equal(mix_year_actual, result_mix_year)
        # ... scalar year with an array of (identical) months ...
        result_mix_month = self.spa.calculate_deltat(year, mix_month_array)
        assert_almost_equal(mix_month_actual, result_mix_month)
        # ... elementwise arrays spanning -499 to 2005 ...
        result_array = self.spa.calculate_deltat(year_array, month_array)
        assert_almost_equal(dt_actual_array, result_array, 3)
        # ... and plain scalars.
        result_scalar = self.spa.calculate_deltat(year, month)
        assert_almost_equal(dt_actual, result_scalar)
class NumpySpaTest(unittest.TestCase, SpaBase):
    """Import spa without compiling to numba then run tests"""

    @classmethod
    def setUpClass(cls):
        # pvlib.spa reads PVLIB_USE_NUMBA at import time, so set it before
        # (re)importing the module. First parameter renamed self -> cls:
        # unittest passes the class object to setUpClass, not an instance.
        os.environ['PVLIB_USE_NUMBA'] = '0'
        import pvlib.spa as spa
        spa = reload(spa)
        cls.spa = spa

    @classmethod
    def tearDownClass(cls):
        # Clean up so other test modules see an unmodified environment.
        del os.environ['PVLIB_USE_NUMBA']

    def test_julian_day(self):
        # The numpy implementation is vectorized: pass the full array and
        # check the first element.
        assert_almost_equal(JD, self.spa.julian_day(unixtimes)[0], 6)
@pytest.mark.skipif(numba_version_int < 17,
                    reason='Numba not installed or version not >= 0.17.0')
class NumbaSpaTest(unittest.TestCase, SpaBase):
    """Import spa, compiling to numba, and run tests"""

    @classmethod
    def setUpClass(cls):
        # pvlib.spa reads PVLIB_USE_NUMBA at import time, so set it before
        # (re)importing the module. First parameter renamed self -> cls:
        # unittest passes the class object to setUpClass, not an instance.
        os.environ['PVLIB_USE_NUMBA'] = '1'
        if numba_version_int >= 17:
            import pvlib.spa as spa
            spa = reload(spa)
            cls.spa = spa

    @classmethod
    def tearDownClass(cls):
        # Clean up so other test modules see an unmodified environment.
        del os.environ['PVLIB_USE_NUMBA']

    def test_julian_day(self):
        # The numba implementation is scalar: pass a single unixtime.
        assert_almost_equal(JD, self.spa.julian_day(unixtimes[0]), 6)

    def test_solar_position_singlethreaded(self):
        assert_almost_equal(
            np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=1)[:-1], 5)
        assert_almost_equal(
            np.array([[v, alpha, delta]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=1, sst=True)[:3], 5)

    def test_solar_position_multithreaded(self):
        # Replicate the single reference time three times so the work can be
        # split across threads; every column must match the same expected
        # result.
        result = np.array([theta, theta0, e, e0, Phi])
        nresult = np.array([result, result, result]).T
        times = np.array([unixtimes[0], unixtimes[0], unixtimes[0]])
        assert_almost_equal(
            nresult,
            self.spa.solar_position(
                times, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=8)[:-1], 5)
        result = np.array([v, alpha, delta])
        nresult = np.array([result, result, result]).T
        assert_almost_equal(
            nresult,
            self.spa.solar_position(
                times, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=8, sst=True)[:3], 5)
| uvchik/pvlib-python | pvlib/test/test_spa.py | Python | bsd-3-clause | 16,534 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolUpgradeOsOptions(Model):
    """Additional parameters for upgrade_os operation.

    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly if
     you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: An ETag value associated with the version of the resource
     known to the client. The operation will be performed only if the
     resource's current ETag on the service exactly matches the value specified
     by the client.
    :type if_match: str
    :param if_none_match: An ETag value associated with the version of the
     resource known to the client. The operation will be performed only if the
     resource's current ETag on the service does not match the value specified
     by the client.
    :type if_none_match: str
    :param if_modified_since: A timestamp indicating the last modified time of
     the resource known to the client. The operation will be performed only if
     the resource on the service has been modified since the specified time.
    :type if_modified_since: datetime
    :param if_unmodified_since: A timestamp indicating the last modified time
     of the resource known to the client. The operation will be performed only
     if the resource on the service has not been modified since the specified
     time.
    :type if_unmodified_since: datetime
    """

    def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
        # Request-processing limit and caller identification.
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Optimistic-concurrency preconditions (ETag based).
        self.if_match = if_match
        self.if_none_match = if_none_match
        # Optimistic-concurrency preconditions (timestamp based).
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
| AutorestCI/azure-sdk-for-python | azure-batch/azure/batch/models/pool_upgrade_os_options.py | Python | mit | 3,076 |
# NOTE(review): b"<PASSWORD>" is a scrubbed placeholder, not a usable secret.
# The real shared secret (presumably used to authenticate callers of this
# auth API -- verify against usage) must come from deployment configuration
# and never be committed to the repository.
SHARED_SECRET = b"<PASSWORD>"
TIMEOUT = 3600 # one hour
| Virako/authapi | authapi/auth_settings.py | Python | agpl-3.0 | 56 |
#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .compute_handler import ComputeHandler
from .project_handler import ProjectHandler
from .node_handler import NodeHandler
from .link_handler import LinkHandler
from .server_handler import ServerHandler
from .drawing_handler import DrawingHandler
from .symbol_handler import SymbolHandler
from .snapshot_handler import SnapshotHandler
from .appliance_handler import ApplianceHandler
from .template_handler import TemplateHandler
from .gns3_vm_handler import GNS3VMHandler
from .notification_handler import NotificationHandler
| GNS3/gns3-server | gns3server/handlers/api/controller/__init__.py | Python | gpl-3.0 | 1,234 |
from math import ceil
from django.conf import settings
from django.core.paginator import (
EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)
class ESPaginator(Paginator):
    """Paginator tailored to Elasticsearch result sets.

    The stock Django ``Paginator`` runs a ``.count()`` query and then slices.
    ES responses already carry the total number of hits, so this class slices
    optimistically and back-fills the count from the result afterwards.

    :param use_elasticsearch_dsl:
        When True (the default) the object list is an elasticsearch-dsl
        search, as used by the v3 API; when False it is our own wrapper
        implementation from :mod:`olympia.amo.search`.
    """
    # Upper bound on result positions; should mirror the
    # 'index.max_result_window' ES setting when present. ES defaults to
    # 10000, but ours is raised so paginating through every add-on without a
    # query still reaches the end.
    max_result_window = settings.ES_MAX_RESULT_WINDOW

    def __init__(self, *args, **kwargs):
        self.use_elasticsearch_dsl = kwargs.pop('use_elasticsearch_dsl', True)
        super(ESPaginator, self).__init__(*args, **kwargs)

    def _get_num_pages(self):
        """Return the total number of pages, capped by max_result_window."""
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Clamp the hit count to [1, max_result_window] before
                # dividing, so we never report pages we cannot serve.
                hits = max(1, self.count - self.orphans)
                hits = min(self.max_result_window, hits)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages

    num_pages = property(_get_num_pages)

    def validate_number(self, number):
        """Validate the given 1-based page number.

        Unlike the default implementation, no upper bound is enforced here;
        that check happens in page() once the real count is known.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number

    def page(self, number):
        """Return a Page, ignoring "orphans" and taking the count from ES."""
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        if top > self.max_result_window:
            raise InvalidPage(
                'That page number is too high for the current page size')
        sliced = self.object_list[bottom:top]
        # Force the search to evaluate and then attach the count. We want to
        # avoid an extra useless query even if there are no results, so the
        # total comes straight from the hits.
        if self.use_elasticsearch_dsl:
            result = sliced.execute()
            # The page's object_list is the list of ES hits.
            page = Page(result.hits, number, self)
            self._count = page.object_list.total
        else:
            page = Page(sliced, number, self)
            list(page.object_list)
            self._count = page.object_list.count()
        # With the real count known, reject page numbers beyond the end --
        # except an empty first page when that is allowed.
        if number > self.num_pages and not (
                number == 1 and self.allow_empty_first_page):
            raise EmptyPage('That page contains no results')
        return page
| tsl143/addons-server | src/olympia/amo/pagination.py | Python | bsd-3-clause | 3,855 |
import command_system
def hello():
    """Return the greeting reply text and an empty attachment string."""
    return 'Привет, друг!\nЯ новый чат-бот.', ''
# Register the greeting command: `keys` are the trigger words that invoke it,
# `description` is shown in help listings, and `process` is the handler that
# produces the (message, attachment) reply.
hello_command = command_system.Command()
hello_command.keys = ['привет', 'hello', 'дратути', 'здравствуй', 'здравствуйте']
hello_command.description = 'Поприветствую тебя'
hello_command.process = hello
| omax83/strorinWind | commands/hello.py | Python | apache-2.0 | 384 |
#!/usr/bin/env python3
import os
import sys
import time
import pickle
import argparse
import numpy as np
import h5py
# matplotlib's Qt compatibility shim has moved between releases; fall back to
# the Qt4 backend module when qt_compat is unavailable.
# Bare `except:` narrowed to ImportError so SystemExit/KeyboardInterrupt and
# unrelated errors are no longer silently swallowed.
try:
    from matplotlib.backends.qt_compat import QtCore, QtWidgets, QtGui, is_pyqt5
except ImportError:
    from matplotlib.backends.backend_qt4agg import QtCore, QtWidgets, QtGui, is_pyqt5
# Pick the canvas/toolbar implementation matching the active Qt binding.
if is_pyqt5():
    from matplotlib.backends.backend_qt5agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
    from matplotlib.backends.backend_qt4agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
class CustomNavToolbar(NavigationToolbar):
    """Matplotlib navigation toolbar with extra Signals/Autoscale/Legend buttons."""
    # Toolbar layout: each tuple is (label, tooltip, icon name, method name);
    # a tuple of Nones inserts a separator. The first three entries are the
    # custom buttons handled by the methods below.
    # NOTE(review): this assigns to `toolitems` on the NavigationToolbar
    # *base* class, so every NavigationToolbar in the process is affected,
    # not just this subclass -- confirm that is intentional before changing.
    NavigationToolbar.toolitems = (
        ('Signals','Choose signal traces to show', 'choose', 'choose'),
        ('Autoscale', 'Autoscale axes for each new event', 'autoscale','autoscale'),
        ('Legend', 'Toggle legend', 'legend','legend'),
        ('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
        ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        (None, None, None, None),
        ('Save', 'Save the figure', 'filesave', 'save_figure')
    )

    def __init__(self, *args, **kwargs):
        '''parent is expected to be a SignalView object'''
        super().__init__(*args, **kwargs)

    def choose(self):
        # 'Signals' button: open the channel-selection dialog on the owning
        # SignalView.
        self.parent.select_signals()

    def legend(self):
        # 'Legend' button: toggle the legend flag and redraw.
        self.parent.legend = not self.parent.legend
        self.parent.plot_signals()

    def autoscale(self):
        # 'Autoscale' button: re-enable per-event autoscaling and redraw.
        self.parent.fig_ax.set_autoscale_on(True)
        self.parent.plot_signals()
from matplotlib.figure import Figure
class SignalSelector(QtWidgets.QDialog):
    """Modal dialog for choosing which HDF5 channels to plot, and how.

    Shows a checkable tree of digitizer/group/channel datasets from a
    WbLSdaq HDF5 file, plus options for raw-ADC plotting, a sample-index
    x axis, baseline (pedestal) subtraction, and vertical redistribution
    of traces. The keyword arguments seed the widgets with the caller's
    current settings.
    """

    def __init__(self, fname, parent=None, selected=None, raw_adc=False, raw_time=False, pedestal=None, distribute=None):
        super().__init__(parent)
        self._layout = QtWidgets.QVBoxLayout(self)
        # Build the channel tree from the file, restoring the prior selection.
        self.set_file(fname, selected)
        self.raw_checkbox = QtWidgets.QCheckBox('Plot raw ADC counts')
        self.raw_checkbox.setCheckState(QtCore.Qt.Checked if raw_adc else QtCore.Qt.Unchecked)
        self._layout.addWidget(self.raw_checkbox)
        self.raw_time_checkbox = QtWidgets.QCheckBox('Plot sample index')
        self.raw_time_checkbox.setCheckState(QtCore.Qt.Checked if raw_time else QtCore.Qt.Unchecked)
        self._layout.addWidget(self.raw_time_checkbox)
        # Optional constant vertical offset between successive traces.
        redist_layout = QtWidgets.QHBoxLayout()
        self.redist_checkbox = QtWidgets.QCheckBox('Redistribute signals')
        self.redist_checkbox.setCheckState(QtCore.Qt.Checked if distribute else QtCore.Qt.Unchecked)
        redist_layout.addWidget(self.redist_checkbox)
        self.redist_amount = QtWidgets.QLineEdit('0' if distribute is None else str(distribute))
        redist_layout.addWidget(self.redist_amount)
        self._layout.addLayout(redist_layout)
        # Optional pedestal window (min, max) used for baseline subtraction.
        ped_layout = QtWidgets.QHBoxLayout()
        self.baseline_checkbox = QtWidgets.QCheckBox('Correct baselines')
        self.baseline_checkbox.setCheckState(QtCore.Qt.Checked if pedestal else QtCore.Qt.Unchecked)
        ped_layout.addWidget(self.baseline_checkbox)
        self.ped_min = QtWidgets.QLineEdit('0' if pedestal is None else str(pedestal[0]))
        self.ped_min.setFixedWidth(100)
        ped_layout.addWidget(self.ped_min)
        self.ped_max = QtWidgets.QLineEdit('50' if pedestal is None else str(pedestal[1]))
        self.ped_max.setFixedWidth(100)
        ped_layout.addWidget(self.ped_max)
        self._layout.addLayout(ped_layout)
        buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        self._layout.addWidget(buttons)

    def get_distribute(self):
        # Trace-to-trace vertical offset, or None when disabled.
        if self.redist_checkbox.checkState() == QtCore.Qt.Checked:
            return float(self.redist_amount.text())
        else:
            return None

    def get_pedestal(self):
        # (min, max) pedestal window, or None when disabled.
        if self.baseline_checkbox.checkState() == QtCore.Qt.Checked:
            return float(self.ped_min.text()), float(self.ped_max.text())
        else:
            return None

    def get_raw_adc(self):
        return self.raw_checkbox.checkState() == QtCore.Qt.Checked

    def get_raw_time(self):
        return self.raw_time_checkbox.checkState() == QtCore.Qt.Checked

    def get_selected(self):
        """Return the checked leaves as HDF5 path tuples, e.g. ('dgtz', 'gr0', 'ch1')."""
        selected = []
        root = self._tree.invisibleRootItem()
        it_stack = [((), root)]
        # Iterative depth-first walk: descend only into groups that are at
        # least partially checked, collect fully-checked leaves.
        while len(it_stack) > 0:
            path, it = it_stack.pop()
            for i in range(it.childCount()):
                item = it.child(i)
                name = str(item.text(0))
                fullpath = path+(name,)
                if item.childCount() > 0 and item.checkState(0) != QtCore.Qt.Unchecked:
                    it_stack.append((fullpath, item))
                elif item.checkState(0) == QtCore.Qt.Checked:
                    selected.append(fullpath)
        return selected

    def add_element(self, *label, parent=None, is_leaf=False, checked=True):
        """Add a tree item named by joining *label* with spaces.

        Leaves get a real checkbox; inner nodes are tristate so they reflect
        the state of their children.
        """
        label = ' '.join(map(str, label))
        elem = QtWidgets.QTreeWidgetItem(parent)
        elem.setText(0, label)
        if is_leaf:
            elem.setFlags(elem.flags() | QtCore.Qt.ItemIsUserCheckable)
            elem.setCheckState(0, QtCore.Qt.Checked if checked else QtCore.Qt.Unchecked)
        else:
            elem.setFlags(elem.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
        return elem

    def set_file(self, path, previous_selection=None):
        """(Re)build the channel tree from the HDF5 file at *path*.

        Expected layout: /digitizer[/group]/channel, where group names
        contain 'gr' and channel datasets are named 'ch*' or exactly 'tr'.
        Items present in *previous_selection* (tuples of path components)
        start out checked.
        """
        self._tree = QtWidgets.QTreeWidget()
        self._tree.setHeaderLabel('Channels Shown')
        with h5py.File(path, 'r') as hf:
            for dname in hf.keys():
                digitizer = hf[dname]
                dgelem = self.add_element(dname, parent=self._tree)
                for gcname in digitizer.keys():
                    grch = digitizer[gcname]
                    if 'gr' in gcname:
                        # Grouped channels: digitizer/grN/chM (or /tr).
                        gelem = self.add_element(gcname, parent=dgelem)
                        for grdat in grch:
                            if 'ch' in grdat or 'tr' == grdat:
                                ch = grch[grdat]
                                if previous_selection:
                                    checked = (str(dname), str(gcname), str(grdat)) in previous_selection
                                else:
                                    checked = False
                                self.add_element(grdat, parent=gelem, is_leaf=True, checked=checked)
                            else:
                                #self.add_element(grch[grdat])
                                pass
                    elif 'ch' in gcname:
                        # Ungrouped channels: digitizer/chN.
                        if previous_selection:
                            checked = (str(dname), str(gcname)) in previous_selection
                        else:
                            checked = False
                        self.add_element(gcname, parent=dgelem, is_leaf=True, checked=checked)
        self._layout.addWidget(self._tree)
class SignalView(QtWidgets.QWidget):
    """One plot pane: draws the selected HDF5 traces for the current event."""

    def __init__(self, parent=None, figure=None):
        super().__init__(parent=parent)
        if figure is None:
            figure = Figure(tight_layout=True)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.figure = figure
        self.figure.tight_layout()
        self.fig_ax = self.figure.subplots()
        self.fig_canvas = FigureCanvas(self.figure)
        self.fig_toolbar = CustomNavToolbar(self.fig_canvas, self)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.addWidget(self.fig_toolbar)
        self.layout.addWidget(self.fig_canvas)
        # The toolbar is only visible while this pane has keyboard focus.
        self.toolbar_shown(False)
        self.fname = None        # currently loaded HDF5 file path
        self.legend = False      # draw a legend on each replot
        self.idx = 0             # event index to display
        self.raw_adc = False     # plot raw ADC counts instead of mV
        self.raw_time = False    # x axis in sample index instead of ns
        self.pedestal = None     # (min, max) baseline window, or None
        self.distribute = None   # vertical offset between traces, or None
        self.selected = None     # list of HDF5 path tuples to plot
        # Attributes persisted by get_state()/set_state() for saved layouts.
        self.save_props = ['legend', 'selected', 'raw_adc', 'raw_time', 'pedestal', 'distribute']
        self.times, self.data, self.raw_data = None, None, None

    def focusInEvent(self, *args, **kwargs):
        super().focusInEvent(*args, **kwargs)
        self.toolbar_shown(True)

    def focusOutEvent(self, *args, **kwargs):
        super().focusOutEvent(*args, **kwargs)
        self.toolbar_shown(False)

    def toolbar_shown(self, shown):
        # Show/hide the navigation toolbar (tracks keyboard focus).
        if shown:
            self.fig_toolbar.show()
        else:
            self.fig_toolbar.hide()

    def get_state(self):
        # Snapshot the persistable attributes as a dict (for layout saving).
        all_props = self.__dict__
        return {prop: getattr(self, prop) for prop in self.save_props if prop in all_props}

    def set_state(self, state):
        # Restore attributes saved by get_state(); unknown keys are ignored.
        all_props = self.__dict__
        for prop, val in state.items():
            if prop in all_props:
                setattr(self, prop, val)

    def _load_data(self):
        """Read the selected channels from the HDF5 file into memory.

        Fills self.times (per-channel x axes), self.raw_data (raw ADC
        samples) and self.data (calibrated mV, optionally
        pedestal-subtracted and vertically offset).
        """
        self.times = []
        self.data = []
        self.raw_data = []
        with h5py.File(self.fname, 'r') as hf:
            for sig_idx, (dgzt, *grch) in enumerate(self.selected):
                # Walk the stored path: digitizer group, then group/channel.
                dgzt = hf[dgzt]
                channel = dgzt
                for part in grch:
                    channel = channel[part]
                ns_per_sample = dgzt.attrs['ns_sample']
                # Sample count may live on the digitizer or the channel.
                if 'samples' in dgzt.attrs:
                    num_samples = dgzt.attrs['samples']
                else:
                    num_samples = channel.attrs['samples']
                if self.raw_time:
                    time = np.arange(0, num_samples)
                else:
                    time = np.arange(0, num_samples*ns_per_sample, ns_per_sample)
                self.times.append(time)
                bits = dgzt.attrs['bits'] #digitizer bit depth
                if bits == 12: #V1472
                    Vpp = 1.0
                    zero_is_zero = True
                elif bits == 14: #V1730
                    Vpp = 2.0
                    zero_is_zero = False
                else:
                    raise Exception('Not sure how to do offset correction!')
                offset = channel.attrs['offset'] #16bit DAC offset
                if not zero_is_zero: #the different models treat offset of 0 DAC differently
                    offset = 2**16 - offset
                offset = Vpp*(offset/2.0**16.0) #now in Volts
                samples = channel['samples'][:] #raw ADC values
                self.raw_data.append(samples)
                samples = 1000*Vpp*(samples/2.0**bits)-offset #now in mV
                if self.pedestal is not None:
                    # Subtract each event's mean over the pedestal window.
                    ped_min, ped_max = self.pedestal
                    i = np.argwhere(ped_min >= time)[0, 0]
                    j = np.argwhere(ped_max <= time)[0, 0]
                    pedestals = np.mean(samples[:, i:j], axis=1)
                    samples = (samples.T - pedestals).T
                if self.distribute is not None:
                    # Stack successive traces vertically so they don't overlap.
                    samples = samples + sig_idx*self.distribute
                self.data.append(samples)

    def select_signals(self):
        """Open the SignalSelector dialog and apply its settings, then redraw."""
        if self.fname is None:
            self.times, self.data, self.raw_data, self.selected = None, None, None, None
            return
        selector = SignalSelector(self.fname, parent=self, selected=self.selected, raw_adc=self.raw_adc, raw_time=self.raw_time, pedestal=self.pedestal, distribute=self.distribute)
        result = selector.exec_()
        # NOTE(review): `result` (accept/reject) is ignored -- the dialog's
        # settings are applied even on Cancel; confirm that is intended.
        self.selected = selector.get_selected()
        self.raw_adc = selector.get_raw_adc()
        self.raw_time = selector.get_raw_time()
        self.pedestal = selector.get_pedestal()
        self.distribute = selector.get_distribute()
        self._load_data()
        self.plot_signals()

    def plot_signals(self):
        """Redraw the current event; preserves zoom unless autoscale is on."""
        ax = self.fig_ax
        autoscale = ax.get_autoscale_on()
        if not autoscale:
            # Remember the view limits so clearing does not reset the zoom.
            xlim = ax.get_xlim()
            ylim = ax.get_ylim()
        ax.clear()
        if self.selected:
            # Lazily (re)load if a file is set but data is missing.
            if self.fname and (not self.times or not self.data):
                self._load_data()
            if not self.times or not self.data:
                return
            for t, v, n in zip(self.times, self.raw_data if self.raw_adc else self.data, self.selected):
                if self.idx < 0 or self.idx >= len(v):
                    continue  # event index out of range for this channel
                label = os.path.join('/', *n)
                ax.plot(t, v[self.idx], linestyle='steps', label=label)
        ax.set_xlabel('Sample' if self.raw_time else 'Time (ns)')
        ax.set_ylabel('ADC Counts' if self.raw_adc else ('Voltage (mV)' if not self.distribute else 'Arb. Shifted Voltage (mV)'))
        if not autoscale:
            ax.set_autoscale_on(False)
            ax.set_xlim(*xlim)
            ax.set_ylim(*ylim)
        if self.legend:
            ax.legend()
        ax.figure.canvas.draw()
class EvDisp(QtWidgets.QMainWindow):
    """Main window: a rows x cols grid of SignalView panes with event navigation.

    Fixes relative to the previous revision:
    * the 'Load Layout' / 'Save Layout' tooltips were swapped;
    * bare `except:` clauses narrowed (ValueError / Exception);
    * a non-integer event index typed into the navigation box no longer
      raises an unhandled ValueError.
    """

    def __init__(self, rows=1, cols=1, fname=None, evidx=0, layout=None):
        super().__init__()
        # `layout` (a saved-layout path) is renamed immediately because the
        # name is reused below for Qt layout objects.
        plot_layout = layout
        self.fname, self.idx = fname, evidx
        self._main = QtWidgets.QWidget()
        self._main.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setCentralWidget(self._main)
        layout = QtWidgets.QVBoxLayout(self._main)
        # Top row of action buttons.
        button_layout = QtWidgets.QHBoxLayout()
        button = QtWidgets.QPushButton('Load Data')
        button_layout.addWidget(button)
        button.setToolTip('Load a WbLSdaq data file')
        button.clicked.connect(self.load_file)
        button = QtWidgets.QPushButton('Reshape')
        button_layout.addWidget(button)
        button.setToolTip('Change the plot grid shape')
        button.clicked.connect(self.reshape_prompt)
        button = QtWidgets.QPushButton('Load Layout')
        button_layout.addWidget(button)
        # BUG FIX: this tooltip previously said 'Save plot layout ...'.
        button.setToolTip('Load plot layout and selected signals')
        button.clicked.connect(self.load_layout)
        button = QtWidgets.QPushButton('Save Layout')
        button_layout.addWidget(button)
        # BUG FIX: this tooltip previously said 'Load plot layout ...'.
        button.setToolTip('Save plot layout and selected signals')
        button.clicked.connect(self.save_layout)
        layout.addLayout(button_layout)
        # Central grid of SignalView panes.
        self.grid = QtWidgets.QGridLayout()
        self.views = []
        self.reshape(rows, cols)
        layout.addLayout(self.grid)
        # Bottom row: previous / index box / next event navigation.
        nav_layout = QtWidgets.QHBoxLayout()
        button = QtWidgets.QPushButton('<<')
        nav_layout.addWidget(button)
        button.setToolTip('Previous Event')
        button.clicked.connect(self.backward)
        self.txtidx = QtWidgets.QLineEdit(self)
        self.txtidx.setFixedWidth(100)
        nav_layout.addWidget(self.txtidx)
        self.txtidx.returnPressed.connect(self.set_idx)
        button = QtWidgets.QPushButton('>>')
        nav_layout.addWidget(button)
        button.setToolTip('Next Event')
        button.clicked.connect(self.forward)
        layout.addLayout(nav_layout)
        self.txtidx.setText(str(self.idx))
        if plot_layout:
            self.load_layout(plot_layout)
        self._load_file(fname)
        self.plot_selected()

    @QtCore.pyqtSlot()
    def reshape_prompt(self):
        """Ask the user for new grid dimensions and apply them."""
        dialog = QtWidgets.QDialog()
        layout = QtWidgets.QFormLayout()
        layout.addRow(QtWidgets.QLabel("Choose Plot Grid Shape"))
        rowbox, colbox = QtWidgets.QLineEdit(str(self.rows)), QtWidgets.QLineEdit(str(self.cols))
        layout.addRow(QtWidgets.QLabel("Rows"), rowbox)
        layout.addRow(QtWidgets.QLabel("Cols"), colbox)
        buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, dialog)
        buttons.accepted.connect(dialog.accept)
        buttons.rejected.connect(dialog.reject)
        layout.addWidget(buttons)
        dialog.setLayout(layout)
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            # Only the int() parses can legitimately fail here; reshape()
            # errors now propagate instead of being mislabeled as bad input.
            try:
                r = int(rowbox.text())
                c = int(colbox.text())
            except ValueError:
                print('Invalid input to reshape dialog')
            else:
                self.reshape(r, c)

    def reshape(self, rows, cols):
        """Resize the grid to rows x cols, reusing existing views where possible."""
        self.rows = rows
        self.cols = cols
        # Detach every widget from the grid layout before re-adding in the
        # new arrangement.
        for i in reversed(range(self.grid.count())):
            self.grid.itemAt(i).widget().setParent(None)
        for r in range(self.rows):
            for c in range(self.cols):
                i = c + self.cols*r
                if i < len(self.views):
                    view = self.views[i]
                else:
                    # Grid grew: create a fresh pane bound to the current
                    # file and event index.
                    view = SignalView()
                    view.fname = self.fname
                    view.idx = self.idx
                    self.views.append(view)
                self.grid.addWidget(view, r, c)
        # Grid shrank: schedule surplus panes for deletion on the Qt side.
        for widget in self.views[self.rows*self.cols:]:
            widget.deleteLater()
        self.views = self.views[:self.rows*self.cols]
        self.plot_selected()

    @QtCore.pyqtSlot()
    def backward(self):
        """Show the previous event."""
        self.idx = self.idx - 1
        self.txtidx.setText(str(self.idx))
        self.plot_selected()

    @QtCore.pyqtSlot()
    def forward(self):
        """Show the next event."""
        self.idx = self.idx + 1
        self.txtidx.setText(str(self.idx))
        self.plot_selected()

    @QtCore.pyqtSlot()
    def set_idx(self):
        """Jump to the event index typed into the navigation box."""
        try:
            self.idx = int(self.txtidx.text())
        except ValueError:
            # Non-integer input: keep the current index instead of crashing.
            pass
        self.txtidx.setText(str(self.idx))
        self.plot_selected()

    @QtCore.pyqtSlot()
    def load_layout(self, fname=None):
        """Restore a saved grid shape and per-view settings from a .ply file.

        When *fname* is None a file dialog is shown first.
        """
        if fname is None:
            fname, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open settings', '.', 'WbLSdaq Plot Layouts (*.ply);;All files (*.*)')
        if fname:
            try:
                # NOTE: pickle.load can execute arbitrary code -- only open
                # trusted layout files.
                with open(fname, 'rb') as f:
                    settings = pickle.load(f)
                rows = settings['rows']
                cols = settings['cols']
                views = [SignalView() for i in range(rows*cols)]
                for view, state in zip(views, settings['views']):
                    view.fname = self.fname
                    view.idx = self.idx
                    if type(state) == tuple:
                        # Legacy layout format: a bare 4-tuple of settings.
                        raw_adc, selected, pedestal, distribute = state
                        view.raw_adc = raw_adc
                        view.selected = selected
                        view.pedestal = pedestal
                        view.distribute = distribute
                    else:
                        view.set_state(state)
                self.views = views
                self.reshape(rows, cols)
            except Exception:
                # Narrowed from a bare except: no longer swallows
                # SystemExit / KeyboardInterrupt.
                print('Could not load settings')

    @QtCore.pyqtSlot()
    def save_layout(self):
        """Save the grid shape and per-view settings to a .ply file."""
        fname, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save settings', '.', 'WbLSdaq Plot Layouts (*.ply);;All files (*.*)')
        if fname:
            if not fname.endswith('.ply'):
                fname = fname + '.ply'
            settings = {'rows': self.rows, 'cols': self.cols}
            settings['views'] = [v.get_state() for v in self.views]
            with open(fname, 'wb') as f:
                pickle.dump(settings, f)

    @QtCore.pyqtSlot()
    def load_file(self):
        """Prompt for a WbLSdaq HDF5 file and load it into every view."""
        fname, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open data', '.', 'WbLSdaq HDF5 Files (*.h5);;All files (*.*)')
        self._load_file(fname)

    def _load_file(self, fname):
        # Shared implementation for the button slot and the constructor path.
        if fname:
            self.fname = fname
            for view in self.views:
                view.fname = self.fname
                if view.selected is None:
                    # First load for this pane: ask which channels to show.
                    view.select_signals()
            self.plot_selected()

    @QtCore.pyqtSlot()
    def plot_selected(self):
        """Propagate the current event index to every view and redraw."""
        for view in self.views:
            view.idx = self.idx
            view.plot_signals()
if __name__ == "__main__":
    # command line: optional data file + event index, plus grid shape / saved layout
    argp = argparse.ArgumentParser(description='Visually display data from WbLSdaq')
    argp.add_argument('fname', default=None, nargs='?', help='A data file to open initially')
    argp.add_argument('evidx', default=0, type=int, nargs='?', help='The index of the event to display first')
    argp.add_argument('--rows', '-r', default=1, type=int, help='Rows of plots [1]')
    argp.add_argument('--cols', '-c', default=1, type=int, help='Columns of plots [1]')
    argp.add_argument('--layout', '-l', default=None, help='Load a saved layout')
    opts = argp.parse_args()
    qapp = QtWidgets.QApplication([])
    window = EvDisp(**vars(opts))
    window.show()
    qapp.exec_()
| BenLand100/WbLSdaq | evdisp.py | Python | gpl-3.0 | 21,079 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class IotHubDescription(Resource):
    """The description of the IoT hub.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: The resource identifier.
    :vartype id: str
    :ivar name: The resource name.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    :param location: The resource location.
    :type location: str
    :param tags: The resource tags.
    :type tags: dict
    :param subscriptionid: The subscription identifier.
    :type subscriptionid: str
    :param resourcegroup: The name of the resource group that contains the IoT
    hub. A resource group name uniquely identifies the resource group within
    the subscription.
    :type resourcegroup: str
    :param etag: The Etag field is *not* required. If it is provided in the
    response body, it must also be provided as a header per the normal ETag
    convention.
    :type etag: str
    :param properties:
    :type properties: :class:`IotHubProperties
    <azure.mgmt.iothub.models.IotHubProperties>`
    :param sku:
    :type sku: :class:`IotHubSkuInfo <azure.mgmt.iothub.models.IotHubSkuInfo>`
    """
    # Per-attribute constraints; 'readonly' fields are server-populated
    # (see docstring) and 'required' ones must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
        'type': {'readonly': True},
        'location': {'required': True},
        'tags': {'key': 'tags', 'type': '{str}'},
        'subscriptionid': {'required': True},
        'resourcegroup': {'required': True},
        'sku': {'required': True},
    }
    # Maps attribute names to wire (JSON) keys and type strings — presumably
    # consumed by the msrest (de)serializer; generated code, do not hand-edit.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'subscriptionid': {'key': 'subscriptionid', 'type': 'str'},
        'resourcegroup': {'key': 'resourcegroup', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'IotHubProperties'},
        'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
    }
    def __init__(self, location, subscriptionid, resourcegroup, sku, tags=None, etag=None, properties=None):
        # Resource.__init__ handles the shared 'location'/'tags' fields.
        super(IotHubDescription, self).__init__(location=location, tags=tags)
        self.subscriptionid = subscriptionid
        self.resourcegroup = resourcegroup
        self.etag = etag
        self.properties = properties
        self.sku = sku
| lmazuel/azure-sdk-for-python | azure-mgmt-iothub/azure/mgmt/iothub/models/iot_hub_description.py | Python | mit | 2,998 |
#generic python modules
import argparse
import operator
from operator import itemgetter
import sys, os, shutil
import os.path
################################################################################################################################################
# RETRIEVE USER INPUTS
################################################################################################################################################
#=========================================================================================
# create parser
#=========================================================================================
version_nb = "0.0.3"
parser = argparse.ArgumentParser(prog='ff_times', usage='', add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter, description=\
'''
**********************************************
v''' + version_nb + '''
author: Jean Helie ([email protected])
git: https://github.com/jhelie/ff_times
**********************************************
[ DESCRITPION ]
This script estimates when the flipflopping lipids identified by ff_detect actually
flip-flops. Three times are calculated for each flip-flopping lipid:
- an estimated starting time of the flip-flop
- an estimated ending time of the flip-flop
- an estimated mid-point time
A file listing the flip-flopping lipids must be supplied with the --flipflops option.
Each line of this file should follow the format (time in ns):
-> 'resname,resid,starting_leaflet,z_bead'
where starting_leaflet is either 'upper' or 'lower' - e.g. 'POPC,145,lower,PO4'. The
'z_bead' particle is used to track the position of the lipid. The script will then
produce an updated file with the following format:
-> 'resname,resid,starting_leaflet,z_bead,t_start,t_end'
The script also outputs the estimated time at which each flip-flop occurs (mid-point time)
and, based on these, the evolution with time of the number of lipids having flip-flopped.
[ REQUIREMENTS ]
The following python modules are needed:
- MDAnalysis
- matplotlib
[ NOTES ]
1. It's a good idea to trjconv the xtc first and only outputs the relevant bead of each
phospholipid as the script will run MUCH faster.
2. Identification of the bilayer leaflets is controlled via 3 options:
(a) selection of particles
By default, the PO4 group is used to detect lipids and assess their flip-flop status.
This bead name can be changed via the --bead flag, the selection string being:
-> "name " + beadname
Note that only lipids which contain the bead mentioned in the selection string
will be taken into account to identify leaflets.
(b) leaflet finding method: reference file
By default leaflets are identified using the MDAnalysis LeafletFinder routine and the
the optimum cutoff to identify 2 lipids groups is determined using the optimize_cutoff
routine.
This optimisation process can take time in large systems and you can specify your own
cutoff value to skip this step. For instance to use the default 15 Angstrom cutoff
directly (without optimising):
-> '--leaflets 15'
In very large systems (more than ~50,000 phospholipids) LeafletFinder (or rather the
networkX module that it relies on) can fail. To avoid this you can choose not to use
this routine by specifying:
-> '--leaflets large'
In this case lipids whose headgroups z value is above the average lipids z value will
be considered to make up the upper leaflet and those whose headgroups z value is below
the average will be considered to be in the lower leaflet.
This means that the bilayer should be as flat as possible in the gro file supplied in
order to get a meaningful outcome.
3.
(a) The same approach as in ff_detect is used. Flip-flopping lipids are considered to have
flip-flopped when more of their neighbours within the distance --ngh_dist belong to
the opposite leaflet than to their starting leaflet. The first time at which this happens
is the mid-point time.
(b) To detect when flip-flops start/end the distance of the flip-flopping lipids --bead
particle to the center of geometry (COG) of the --ngh_nb nearest neighbours in each
leaflet is calculated and a local core membrane layer is defined relative to this distance
using a fraction of the distance, --ngh_frac, between those inter-leaflets COGs: t_start
corresponds to when the flip-flopping lipid enters this core layer (false starts, ie if it
comes out again, are taken into account) and t_end corresponds to when it leaves it.
NB: --ngh_dist and --ngh_nb are unrelated.
[ USAGE ]
Option Default Description
-----------------------------------------------------
-f : reference structure [.gro] (required)
-x : trajectory file [.xtc] (required, must be in current folder unless -o is used)
-o : name of output folder
-b : beginning time (ns)
-e : ending time (ns)
-t [10] : process every t-frames
Leaflets identification (see note 2)
-----------------------------------------------------
--bead [PO4] : lipids bead name
--leaflets : leaflet identification
Flip-flops identification
-----------------------------------------------------
--flipflops : input file with flipflopping lipids (output of ff_detect)
--ngh_dist [15] : distance (Angstrom) within which to consider neighbours for ff time, see note 3(a)
--ngh_nb [5] : nb of closest neighbours in each leaflet, see note 3(b)
--ngh_frac [0.1] : fraction of distance between interleaflet neighbours COGs, see note 3(b)
--reverse : browse xtc backwards [TO DO]
Other options
-----------------------------------------------------
--version : show version number and exit
-h, --help : show this menu and exit
''')
#data options
parser.add_argument('-f', nargs=1, dest='grofilename', help=argparse.SUPPRESS, required=True)
parser.add_argument('-x', nargs=1, dest='xtcfilename', help=argparse.SUPPRESS, required=True)
parser.add_argument('-o', nargs=1, dest='output_folder', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-b', nargs=1, dest='t_start', default=[-1], type=int, help=argparse.SUPPRESS)
parser.add_argument('-e', nargs=1, dest='t_end', default=[-1], type=int, help=argparse.SUPPRESS)
parser.add_argument('-t', nargs=1, dest='frames_dt', default=[10], type=int, help=argparse.SUPPRESS)
#leaflets identification
parser.add_argument('--bead', nargs=1, dest='beadname', default=['PO4'], help=argparse.SUPPRESS)
parser.add_argument('--leaflets', nargs=1, dest='cutoff_leaflet', default=['optimise'], help=argparse.SUPPRESS)
#flip-flops identification
parser.add_argument('--flipflops', nargs=1, dest='selection_file_ff', help=argparse.SUPPRESS, required=True)
parser.add_argument('--ngh_dist', nargs=1, dest='ngh_dist', default=[15], type=int, help=argparse.SUPPRESS)
parser.add_argument('--ngh_nb', nargs=1, dest='ngh_nb', default=[5], type=int, help=argparse.SUPPRESS)
parser.add_argument('--ngh_frac', nargs=1, dest='ngh_frac', default=[0.1], type=float, help=argparse.SUPPRESS)
parser.add_argument('--reverse', dest='reverse', action='store_true', help=argparse.SUPPRESS)
#other options
parser.add_argument('--version', action='version', version='%(prog)s v' + version_nb, help=argparse.SUPPRESS)
parser.add_argument('-h','--help', action='help', help=argparse.SUPPRESS)
#=========================================================================================
# store inputs
#=========================================================================================
#parse user inputs
#-----------------
args = parser.parse_args()
#every value-taking option was declared with nargs=1, so argparse wraps each value
#in a single-element list: unwrap them all in one pass (--reverse is a plain flag
#and is left untouched)
for _opt in ("grofilename", "xtcfilename", "output_folder", "t_start", "t_end", "frames_dt",
             "beadname", "cutoff_leaflet", "selection_file_ff", "ngh_dist", "ngh_nb", "ngh_frac"):
    setattr(args, _opt, getattr(args, _opt)[0])
#=========================================================================================
# import modules (doing it now otherwise might crash before we can display the help menu!)
#=========================================================================================
#generic science modules
try:
import math
except:
print "Error: you need to install the maths module."
sys.exit(1)
try:
import numpy as np
except:
print "Error: you need to install the np module."
sys.exit(1)
try:
import scipy
except:
print "Error: you need to install the scipy module."
sys.exit(1)
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.colors as mcolors
mcolorconv = mcolors.ColorConverter()
import matplotlib.cm as cm #colours library
import matplotlib.ticker
from matplotlib.ticker import MaxNLocator
from matplotlib.font_manager import FontProperties
fontP=FontProperties()
except:
print "Error: you need to install the matplotlib module."
sys.exit(1)
try:
import pylab as plt
except:
print "Error: you need to install the pylab module."
sys.exit(1)
#MDAnalysis module
try:
import MDAnalysis
from MDAnalysis import *
import MDAnalysis.selections
import MDAnalysis.analysis
import MDAnalysis.analysis.leaflet
import MDAnalysis.analysis.distances
#set MDAnalysis to use periodic boundary conditions
MDAnalysis.core.flags['use_periodic_selections'] = True
MDAnalysis.core.flags['use_KDTree_routines'] = False
except:
print "Error: you need to install the MDAnalysis module first. See http://mdanalysis.googlecode.com"
sys.exit(1)
#=========================================================================================
# sanity check
#=========================================================================================
if not os.path.isfile(args.grofilename):
print "Error: file " + str(args.grofilename) + " not found."
sys.exit(1)
if not os.path.isfile(args.xtcfilename):
print "Error: file " + str(args.xtcfilename) + " not found."
sys.exit(1)
if not os.path.isfile(args.selection_file_ff):
print "Error: file " + str(args.selection_file_ff) + " not found."
sys.exit(1)
if args.ngh_dist < 0:
print "Error: --ngh_dist should be greater than 0, see note 3(a)."
sys.exit(1)
if args.ngh_nb < 0:
print "Error: --ngh_nb should be greater than 0, see note 3(b)."
sys.exit(1)
if args.ngh_frac < 0 or args.ngh_frac > 0.5:
print "Error: --ngh_dist should be between 0 and 0.5, see note 3(b)."
sys.exit(1)
if args.cutoff_leaflet != "large" and args.cutoff_leaflet != "optimise":
try:
args.cutoff_leaflet = float(args.cutoff_leaflet)
except:
print "Error: the argument of the --leaflets option should be a number or 'large', see note 2"
sys.exit(1)
#=========================================================================================
# create folders and log file
#=========================================================================================
#default output folder name is derived from the xtc filename (extension stripped)
if args.output_folder=="no":
 args.output_folder="ff_times_" + args.xtcfilename[:-4]
if os.path.isdir(args.output_folder):
 print "Error: folder " + str(args.output_folder) + " already exists, choose a different output name via -o."
 sys.exit(1)
else:
 #create folders
 os.mkdir(args.output_folder)
 #create log
 #NOTE(review): the log and the reconstructed command use 'ff_time' while the script
 #is named ff_times — presumably a leftover; confirm intended
 filename_log=os.getcwd() + '/' + str(args.output_folder) + '/ff_time.log'
 output_log=open(filename_log, 'w')
 output_log.write("[ff_time v" + str(version_nb) + "]\n")
 output_log.write("\nThis folder and its content were created using the following command:\n\n")
 #record the full command line so the run can be reproduced
 tmp_log="python ff_time.py"
 for c in sys.argv[1:]:
  tmp_log+=" " + c
 output_log.write(tmp_log + "\n")
 output_log.close()
##########################################################################################
# FUNCTIONS DEFINITIONS
##########################################################################################
#=========================================================================================
# data loading
#=========================================================================================
def set_lipids_beads():
    """Initialise the global lipid selection string from the --bead option."""
    global leaflet_sele_string
    # select every particle whose bead name matches the chosen headgroup bead
    leaflet_sele_string = "name %s" % str(args.beadname)
    return
def load_MDA_universe():
 """Load the gro/xtc into an MDAnalysis Universe and compute the frame range.

 Sets the globals U, U_lip, all_atoms, nb_atoms, nb_frames_xtc, f_start, f_end,
 frames_to_process and nb_frames_to_process from the -b/-e/-t options.
 """
 global U, U_lip
 global all_atoms
 global nb_atoms
 global nb_frames_xtc
 global frames_to_process
 global frames_to_write
 global nb_frames_to_process
 global f_start
 global f_end
 f_start = 0
 print "\nLoading trajectory..."
 U = Universe(args.grofilename, args.xtcfilename)
 U_timestep = U.trajectory.dt
 all_atoms = U.selectAtoms("all")
 nb_atoms = all_atoms.numberOfAtoms()
 nb_frames_xtc = U.trajectory.numframes
 #sanity check
 #NOTE(review): indexing U.trajectory[...] seeks the reader to the last frame, so the
 #U.trajectory.time used in the message reports that frame's time; "shorted"/"stime"
 #in the message are typos left untouched here
 if U.trajectory[nb_frames_xtc-1].time/float(1000) < args.t_start:
  print "Error: the trajectory duration (" + str(U.trajectory.time/float(1000)) + "ns) is shorted than the starting stime specified (" + str(args.t_start) + "ns)."
  sys.exit(1)
 if U.trajectory.numframes < args.frames_dt:
  print "Warning: the trajectory contains fewer frames (" + str(nb_frames_xtc) + ") than the frame step specified (" + str(args.frames_dt) + ")."
 #rewind traj (very important to make sure that later the 1st frame of the xtc will be used for leaflet identification)
 U.trajectory.rewind()
 #create list of index of frames to process
 #convert the requested end time (ns -> ps) into a frame index relative to the xtc start
 if args.t_end != -1:
  f_end = int((args.t_end*1000 - U.trajectory[0].time) / float(U_timestep))
  if f_end < 0:
   print "Error: the starting time specified is before the beginning of the xtc."
   sys.exit(1)
 else:
  f_end = nb_frames_xtc - 1
 if args.t_start != -1:
  f_start = int((args.t_start*1000 - U.trajectory[0].time) / float(U_timestep))
  if f_start > f_end:
   print "Error: the starting time specified is after the end of the xtc."
   sys.exit(1)
 #include the trailing partial step when the range does not divide evenly by -t
 if (f_end - f_start)%args.frames_dt == 0:
  tmp_offset = 0
 else:
  tmp_offset = 1
 frames_to_process = map(lambda f:f_start + args.frames_dt*f, range(0,(f_end - f_start)//args.frames_dt+tmp_offset))
 nb_frames_to_process = len(frames_to_process)
 #check the leaflet selection string is valid
 U_lip = U.selectAtoms(leaflet_sele_string)
 if U_lip.numberOfAtoms() == 0:
  print "Error: invalid selection string '" + str(leaflet_sele_string) + "'"
  print "-> no particles selected."
  sys.exit(1)
 return
def identify_ff():
 """Parse the --flipflops selection file (format: resname,resid,leaflet,bead).

 For each flip-flopping lipid this records its metadata and atom selections in
 module-level dictionaries, sorts it into the upper->lower or lower->upper index
 list, and extends leaflet_sele_string with an exclusion clause so these lipids
 are ignored during leaflet identification.
 """
 print "\nReading selection file for flipflopping lipids..."
 #declare variables
 global lipids_ff_nb
 global lipids_ff_info
 global lipids_ff_resnames
 global lipids_ff_leaflet
 global lipids_ff_u2l_index
 global lipids_ff_l2u_index
 global lipids_sele_ff
 global lipids_sele_ff_bead
 global lipids_sele_ff_bonds
 global lipids_sele_ff_VMD_string
 global leaflet_sele_string
 lipids_ff_nb = 0
 lipids_ff_info = {}
 lipids_ff_resnames = []
 lipids_ff_leaflet = []
 lipids_ff_u2l_index = []
 lipids_ff_l2u_index = []
 lipids_sele_ff = {}
 lipids_sele_ff_bead = {}
 lipids_sele_ff_bonds = {}
 lipids_sele_ff_VMD_string={}
 #read flip-flopping lipids info and create selection string to exclude them from leaflets selection
 with open(args.selection_file_ff) as f:
  lines = f.readlines()
  lipids_ff_nb = len(lines)
  print " -found " + str(lipids_ff_nb) + " flipflopping lipids"
 leaflet_sele_string = leaflet_sele_string + " and not ("
 for l_index in range(0,lipids_ff_nb):
  line = lines[l_index]
  #strip the trailing newline (last line of the file may not carry one)
  if line[-1] == "\n":
   line = line[:-1]
  try:
   line_content = line.split(',')
   if len(line_content) != 4:
    print "Error: wrong format for line " + str(l_index+1) + " in " + str(args.selection_file_ff) + ", see note 4 in bilayer_perturbations --help."
    print " ->", line
    sys.exit(1)
   #read current lipid details
   lip_resname = line_content[0]
   lip_resnum = int(line_content[1])
   lip_leaflet = line_content[2]
   lip_bead = line_content[3]
   lipids_ff_info[l_index] = [lip_resname,lip_resnum,lip_leaflet,lip_bead]
   #update: starting leaflets
   if lip_leaflet not in lipids_ff_leaflet:
    lipids_ff_leaflet.append(lip_leaflet)
   #update: index in directional lists
   if lip_leaflet == "upper":
    lipids_ff_u2l_index.append(l_index)
   elif lip_leaflet == "lower":
    lipids_ff_l2u_index.append(l_index)
   else:
    print "->unknown starting leaflet '" + str(lip_leaflet) + "'."
    sys.exit(1)
   #update: resnames
   if lip_resname not in lipids_ff_resnames:
    lipids_ff_resnames.append(lip_resname)
   #update: leaflet selection string
   if l_index==0:
    leaflet_sele_string+="(resname " + str(lip_resname) + " and resnum " + str(lip_resnum) + ")"
   else:
    leaflet_sele_string+=" or (resname " + str(lip_resname) + " and resnum " + str(lip_resnum) + ")"
   #create selections
   lipids_sele_ff[l_index] = U.selectAtoms("resname " + str(lip_resname) + " and resnum " + str(lip_resnum))
   lipids_sele_ff_bead[l_index] = lipids_sele_ff[l_index].selectAtoms("name " + str(lip_bead))
   lipids_sele_ff_VMD_string[l_index]="resname " + str(lipids_ff_info[l_index][0]) + " and resid " + str(lipids_ff_info[l_index][1])
   if lipids_sele_ff[l_index].numberOfAtoms() == 0:
    print "Error:"
    print line
    print "-> no such lipid found."
    sys.exit(1)
  #NOTE(review): this bare except also intercepts the SystemExit raised by the
  #sys.exit(1) calls above, so specific errors get reported with this generic
  #message before exiting — confirm intended
  except:
   print "Error: invalid flipflopping lipid selection string on line " + str(l_index+1) + ": '" + line + "'"
   sys.exit(1)
 leaflet_sele_string+=")"
 return
def identify_leaflets():
 """Assign lipids to the upper/lower leaflets (see note 2 of the script help).

 Uses MDAnalysis LeafletFinder (optionally optimising its cutoff) or, with
 '--leaflets large', a plain z-coordinate split about the average bead height.
 Fills the global leaflet_sele dict and caches each leaflet's resnums.
 """
 print "\nIdentifying leaflets..."
 #declare variables
 global leaflet_sele
 global leaflet_sele_atoms
 global upper_resnums
 global lower_resnums
 leaflet_sele = {}
 leaflet_sele_atoms = {}
 for l in ["lower","upper","both"]:
  leaflet_sele[l] = {}
  leaflet_sele_atoms[l] = {}
 #make sure lipids not involved in ff remain in the Universe!
 test_beads = U.selectAtoms(leaflet_sele_string)
 if test_beads.numberOfAtoms() == 0:
  print "Error: invalid selection string '" + str(leaflet_sele_string) + "'"
  print "-> no particles selected."
  sys.exit(1)
 #use LeafletFinder:
 if args.cutoff_leaflet != 'large':
  if args.cutoff_leaflet == 'optimise':
   print " -optimising cutoff..."
   cutoff_value = MDAnalysis.analysis.leaflet.optimize_cutoff(U, leaflet_sele_string)
   L = MDAnalysis.analysis.leaflet.LeafletFinder(U, leaflet_sele_string, cutoff_value[0])
  else:
   L = MDAnalysis.analysis.leaflet.LeafletFinder(U, leaflet_sele_string, args.cutoff_leaflet)
  if np.shape(L.groups())[0]<2:
   print "Error: imposssible to identify 2 leaflets."
   sys.exit(1)
  #the group with the higher centre-of-geometry z is the upper leaflet
  if L.group(0).centerOfGeometry()[2] > L.group(1).centerOfGeometry()[2]:
   leaflet_sele["upper"] = L.group(0)
   leaflet_sele["lower"] = L.group(1)
  else:
   leaflet_sele["upper"] = L.group(1)
   leaflet_sele["lower"] = L.group(0)
  leaflet_sele["both"] = leaflet_sele["lower"] + leaflet_sele["upper"]
  if np.shape(L.groups())[0] == 2:
   print " -found 2 leaflets: ", leaflet_sele["upper"].numberOfResidues(), '(upper) and ', leaflet_sele["lower"].numberOfResidues(), '(lower) lipids'
  else:
   #more than 2 groups found: the extra groups are only counted for reporting
   other_lipids=0
   for g in range(2, np.shape(L.groups())[0]):
    other_lipids += L.group(g).numberOfResidues()
   print " -found " + str(np.shape(L.groups())[0]) + " groups: " + str(leaflet_sele["upper"].numberOfResidues()) + "(upper), " + str(leaflet_sele["lower"].numberOfResidues()) + "(lower) and " + str(other_lipids) + " (others) lipids respectively"
 #use cog:
 else:
  #'large' mode: split about the average z of all beads (bilayer must be flat, see note 2)
  leaflet_sele["both"] = U.selectAtoms(leaflet_sele_string)
  tmp_lipids_avg_z = leaflet_sele["both"].centerOfGeometry()[2]
  leaflet_sele["upper"] = leaflet_sele["both"].selectAtoms("prop z > " + str(tmp_lipids_avg_z))
  leaflet_sele["lower"] = leaflet_sele["both"].selectAtoms("prop z < " + str(tmp_lipids_avg_z))
  print " -found 2 leaflets: ", leaflet_sele["upper"].numberOfResidues(), '(upper) and ', leaflet_sele["lower"].numberOfResidues(), '(lower) lipids'
 #store resnums (avoid repetitive access)
 upper_resnums = leaflet_sele["upper"].resnums()
 lower_resnums = leaflet_sele["lower"].resnums()
 return
#=========================================================================================
# core functions
#=========================================================================================
def calculate_cog(tmp_coords, box_dim):
    """Return the PBC-aware centre of geometry of a set of coordinates.

    Each Cartesian axis is mapped onto a circle so that a group of atoms split
    across a periodic boundary still averages to the correct position, see
    http://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions
    Returns a (1,3) float32 array.
    """
    centre = np.zeros((1, 3))
    for axis in range(3):
        # map coordinates along this axis onto angles on the unit circle
        theta = tmp_coords[:, axis] * 2 * math.pi / float(box_dim[axis])
        # circular mean of the angles...
        mean_angle = math.atan2(-np.average(np.sin(theta)), -np.average(np.cos(theta))) + math.pi
        # ...mapped back to a coordinate inside the box
        centre[0, axis] = mean_angle * box_dim[axis] / float(2 * math.pi)
    return np.float32(centre)
def _display_ff_progress(f_nb, ff_counter):
    #refresh the single-line terminal progress display for the current frame/lipid
    progress = '\r -processing frame ' + str(f_nb+1) + '/' + str(nb_frames_to_process) + ' (every ' + str(args.frames_dt) + ' from ' + str(f_start) + ' to ' + str(f_end) + ' out of ' + str(nb_frames_xtc) + ') and flip-flopping lipid ' + str(ff_counter) + '/' + str(lipids_ff_nb) + '   '
    sys.stdout.flush()
    sys.stdout.write(progress)
    return

def _check_ff_lipid(l_index, t, box_dim, start_leaflet, dest_resnums):
    """Update flip-flop times for one lipid; return True if its mid-point was reached.

    start_leaflet is the leaflet the lipid started in ("upper" or "lower") and
    dest_resnums holds the resnums of the destination leaflet, used for the
    neighbourhood-majority mid-point criterion (note 3(a) of the help).
    Relies on the module globals tmp_lower_coord/tmp_upper_coord, which are
    presumably refreshed every frame by the main loop (defined outside this view).
    """
    mid_reached = False
    #nothing left to do once the flip-flop has completed
    if ff_t_end[l_index] == 0:
        tmp_ff_coord = lipids_sele_ff_bead[l_index].coordinates()
        #distances from this lipid's bead to every bead of each leaflet
        d_lower = MDAnalysis.analysis.distances.distance_array(tmp_ff_coord, tmp_lower_coord, box_dim)
        d_upper = MDAnalysis.analysis.distances.distance_array(tmp_ff_coord, tmp_upper_coord, box_dim)
        #cutoff capturing exactly the args.ngh_nb nearest beads in each leaflet
        d_lower.sort(axis=1)
        d_upper.sort(axis=1)
        cutoff_lower = d_lower[0,args.ngh_nb-1] + 0.001
        cutoff_upper = d_upper[0,args.ngh_nb-1] + 0.001
        #'around' requires the reference lipid to belong to the searched selection
        lower_tmp = leaflet_sele["lower"] + lipids_sele_ff_bead[l_index]
        upper_tmp = leaflet_sele["upper"] + lipids_sele_ff_bead[l_index]
        tmp_ref_str = " (resname " + str(lipids_ff_info[l_index][0]) + " and resid " + str(lipids_ff_info[l_index][1]) + ")"
        lower_neighbours = lower_tmp.selectAtoms("around " + str(cutoff_lower) + tmp_ref_str)
        upper_neighbours = upper_tmp.selectAtoms("around " + str(cutoff_upper) + tmp_ref_str)
        #pbc-aware centre of geometry of the nearest neighbours in each leaflet
        lower_cog = calculate_cog(lower_neighbours.coordinates(), box_dim)
        upper_cog = calculate_cog(upper_neighbours.coordinates(), box_dim)
        d_lower_geo = MDAnalysis.analysis.distances.distance_array(tmp_ff_coord, lower_cog, box_dim)
        d_upper_geo = MDAnalysis.analysis.distances.distance_array(tmp_ff_coord, upper_cog, box_dim)
        d_inter_geo = MDAnalysis.analysis.distances.distance_array(lower_cog, upper_cog, box_dim)
        if start_leaflet == "upper":
            d_src_geo, d_dest_geo = d_upper_geo, d_lower_geo
        else:
            d_src_geo, d_dest_geo = d_lower_geo, d_upper_geo
        #the flip-flop starts when the lipid leaves its starting leaflet's local layer...
        if ff_t_start[l_index] == 0 and d_src_geo > args.ngh_frac * d_inter_geo:
            ff_t_start[l_index] = t
        #...and a false start is cancelled if it comes back (note 3(b))
        elif d_src_geo < args.ngh_frac * d_inter_geo:
            ff_t_start[l_index] = 0
        #the flip-flop ends when the lipid reaches the destination leaflet's local layer
        if d_dest_geo < args.ngh_frac * d_inter_geo:
            ff_t_end[l_index] = t
        #mid-point: majority of neighbours within args.ngh_dist belong to the destination leaflet
        if ff_t_mid[l_index] == 0:
            tmp_sele = leaflet_sele["both"] + lipids_sele_ff_bead[l_index]
            tmp_neighbours = tmp_sele.selectAtoms("around " + str(args.ngh_dist) + " (resid " + str(lipids_ff_info[l_index][1]) + " and resname " + str(lipids_ff_info[l_index][0]) + ")").resnums()
            tmp_neighbours = np.in1d(tmp_neighbours, dest_resnums)
            if len(tmp_neighbours) > 0:
                #BUGFIX: use float division - the original Python 2 integer division
                #made the majority test (> 0.5) succeed only when ALL neighbours had
                #switched leaflet rather than more than half of them
                tmp_ratio = len(tmp_neighbours[tmp_neighbours == True]) / float(len(tmp_neighbours))
                if tmp_ratio > 0.5:
                    ff_t_mid[l_index] = t
                    mid_reached = True
    return mid_reached

def check_ff(f_nb, t, box_dim):
    """Update start/mid/end flip-flop times for every flip-flopping lipid.

    f_nb is the index of the processed frame, t its time (ns) and box_dim the
    periodic box dimensions. Also maintains the cumulative per-frame counts
    ff_nb_u2l / ff_nb_l2u of lipids having passed their mid-point.
    """
    #BUGFIX: the original declared ff_t_mid twice and omitted ff_t_end
    global ff_t_start, ff_t_end, ff_t_mid
    global ff_nb_u2l
    global ff_nb_l2u
    #carry over the cumulative flip-flop counts from the previous frame
    if f_nb > 0:
        ff_nb_u2l[f_nb] = ff_nb_u2l[f_nb-1]
        ff_nb_l2u[f_nb] = ff_nb_l2u[f_nb-1]
    ff_counter = 0
    #upper to lower
    for l_index in lipids_ff_u2l_index:
        ff_counter += 1
        _display_ff_progress(f_nb, ff_counter)
        if _check_ff_lipid(l_index, t, box_dim, "upper", lower_resnums):
            ff_nb_u2l[f_nb] += 1
    #lower to upper
    for l_index in lipids_ff_l2u_index:
        ff_counter += 1
        _display_ff_progress(f_nb, ff_counter)
        if _check_ff_lipid(l_index, t, box_dim, "lower", upper_resnums):
            ff_nb_l2u[f_nb] += 1
    return
#=========================================================================================
# outputs
#=========================================================================================
def write_sele_update():
print " -updating selection file '" + str(args.selection_file_ff) + "'..."
filename_txt = os.getcwd() + '/' + str(args.output_folder) + '/' + str(args.selection_file_ff[:-5].split('/')[-1]) + '_update.sele'
output_txt = open(filename_txt, 'w')
#upper to lower
for l_index in lipids_ff_u2l_index:
output_txt.write(str(lipids_ff_info[l_index][0]) + "," + str(lipids_ff_info[l_index][1]) + "," + str(lipids_ff_info[l_index][2]) + "," + str(lipids_ff_info[l_index][3]) + "," + str(ff_t_start[l_index]) + "," + str(ff_t_end[l_index]) + "\n")
#lower to upper
for l_index in lipids_ff_l2u_index:
output_txt.write(str(lipids_ff_info[l_index][0]) + "," + str(lipids_ff_info[l_index][1]) + "," + str(lipids_ff_info[l_index][2]) + "," + str(lipids_ff_info[l_index][3]) + "," + str(ff_t_start[l_index]) + "," + str(ff_t_end[l_index]) + "\n")
output_txt.close()
return
def write_txt_times():
	"""Write ff_times.txt: per lipid, its identity, the mid-point time and the
	total flip-flop duration (t_end - t_start, in ns)."""
	print " -writing flip-flopping times..."
	filename_txt = os.getcwd() + '/' + str(args.output_folder) + '/ff_times.txt'
	output_txt = open(filename_txt, 'w')
	output_txt.write("[times of lipid flip-flops - written by ff_times v" + str(version_nb) + "]\n")
	#upper to lower
	for l_index in lipids_ff_u2l_index:
		output_txt.write(str(lipids_ff_info[l_index][0]) + "," + str(lipids_ff_info[l_index][1]) + "," + str(lipids_ff_info[l_index][2]) + "," + str(ff_t_mid[l_index]) + "," + str(ff_t_end[l_index] - ff_t_start[l_index]) + "\n")
	#lower to upper
	for l_index in lipids_ff_l2u_index:
		output_txt.write(str(lipids_ff_info[l_index][0]) + "," + str(lipids_ff_info[l_index][1]) + "," + str(lipids_ff_info[l_index][2]) + "," + str(ff_t_mid[l_index]) + "," + str(ff_t_end[l_index] - ff_t_start[l_index]) + "\n")
	output_txt.close()
	return
def write_xvg_evolution():
	"""Write ff_evolution.xvg (Grace format): cumulative number of upper->lower
	and lower->upper flip-flops per processed frame."""
	print " -writing evolution of number of flip-flops..."
	filename_xvg = os.getcwd() + '/' + str(args.output_folder) + '/ff_evolution.xvg'
	output_xvg = open(filename_xvg, 'w')
	output_xvg.write("@ title \"Evolution of number of flip-flops\"\n")
	output_xvg.write("@ xaxis label \"time (ns)\"\n")
	output_xvg.write("@ yaxis label \"number of flip-flops\"\n")
	output_xvg.write("@ autoscale ONREAD xaxes\n")
	output_xvg.write("@ TYPE XY\n")
	output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
	output_xvg.write("@ legend on\n")
	output_xvg.write("@ legend box on\n")
	output_xvg.write("@ legend loctype view\n")
	output_xvg.write("@ legend 0.98, 0.8\n")
	output_xvg.write("@ legend length 2\n")
	output_xvg.write("@ s0 legend \"upper to lower\"\n")
	output_xvg.write("@ s1 legend \"lower to upper\"\n")
	#one data row per processed frame: time, u2l count, l2u count
	for f_index in range(0,nb_frames_to_process):
		results = str(frames_time[f_index]) + " " + str(ff_nb_u2l[f_index]) + " " + str(ff_nb_l2u[f_index])
		output_xvg.write(results + "\n")
	output_xvg.close()
	return
##########################################################################################
# ALGORITHM
##########################################################################################
#load ata
set_lipids_beads()
load_MDA_universe()
identify_ff()
identify_leaflets()
#create data structures
global ff_t_mid, ff_t_start, ff_t_end #to store mid-point time, t_start and t_end (in ns)
global ff_nb_u2l
global ff_nb_l2u
global frames_time
global tmp_upper_coord, tmp_lower_coord
ff_t_mid = np.zeros(lipids_ff_nb)
ff_t_start = np.zeros(lipids_ff_nb)
ff_t_end = np.zeros(lipids_ff_nb)
ff_nb_u2l = np.zeros(nb_frames_to_process)
ff_nb_l2u = np.zeros(nb_frames_to_process)
frames_time = np.zeros(nb_frames_to_process)
#browse trajectory
print "\nChecking for flip-flopping status..."
for f_index in range(0,nb_frames_to_process):
ts = U.trajectory[frames_to_process[f_index]]
frames_time[f_index] = ts.time/float(1000)
tmp_upper_coord = leaflet_sele["upper"].coordinates()
tmp_lower_coord = leaflet_sele["lower"].coordinates()
check_ff(f_index, ts.time/float(1000), ts.dimensions)
#create outputs
print "\n\nWriting results files..."
write_sele_update()
write_txt_times()
write_xvg_evolution()
#exit
print "\nFinished successfully!" "Check results in", str(args.output_folder)
sys.exit(0)
| jhelie/ff_times | ff_times.py | Python | gpl-2.0 | 32,211 |
import os
import json
from time import time
from bottle import run, get, post, template, static_file
from bottle import response, request, redirect, error
from bottle.ext.websocket import GeventWebSocketServer
from bottle.ext.websocket import websocket
# Name of the signed account cookie, shared by all cookie helpers below.
COOKIE_ID = 'omm-account'
# NOTE(review): signing secret is hard-coded in source; move to config for
# any real deployment.
COOKIE_SECRET = '49fz0348lQk5q110hRTt2An0'
COOKIE_EXPIRE = (3600 * 24 * 10) # 10 Days
# File extensions the static route is allowed to serve.
allowed_static = ('.css', '.js', '.png')
# Live websocket connections and the latest value reported per user.
connections = set()
chart_data = dict()
# User config loaded once at import time (file handle left to the GC).
users_data = json.load(open("config/users.json")).get("users")
# Cookies
def set_cookie(username):
    """Store *username* in a signed cookie valid for COOKIE_EXPIRE seconds."""
    # fix: use the shared COOKIE_ID constant instead of a duplicated
    # 'omm-account' literal, so the name stays in sync with
    # get_cookie()/clear_cookie()
    response.set_cookie(
        COOKIE_ID,
        username,
        secret=COOKIE_SECRET,
        expires=time() + COOKIE_EXPIRE
    )
def get_cookie():
    """Return the signed account cookie value, or None when absent/invalid."""
    cookie_value = request.get_cookie(COOKIE_ID, secret=COOKIE_SECRET)
    return cookie_value
def clear_cookie():
    """Expire the account cookie by re-setting it empty with a past expiry."""
    expired_at = time() - COOKIE_EXPIRE
    response.set_cookie(COOKIE_ID, '', secret=COOKIE_SECRET, expires=expired_at)
# Methods
def broadcast(data):
    """Send *data* to every currently connected websocket client."""
    for client in connections:
        client.send(data)
def user_login(user, password):
    """Return True when *password* matches the one configured for *user*."""
    account = users_data.get(user, dict())
    return str(account.get("password")) == password
def user_image(user):
    """Return the configured avatar image path for *user* ('None' if absent)."""
    account = users_data.get(user, dict())
    return str(account.get("image"))
# Routes
@get('/')
def index():
    """Render the main page with the logged-in user's name and avatar."""
    user = {"name": get_cookie(), "img": user_image(get_cookie())}
    return template('www/html/index', user=user)
@get('/<path:path>')
def static(path):
    """Serve whitelisted static assets from the www/ directory.

    NOTE(review): non-whitelisted extensions fall through and return None —
    confirm that is the intended response for those paths.
    """
    if os.path.splitext(path)[1] in allowed_static:
        return static_file(path, root='www')
@get('/login')
def login():
    """Show the login form, or redirect home when already authenticated."""
    return template('www/html/login') if not get_cookie() else redirect("/")
# NOTE(review): this second `login` def shadows the GET handler above at
# module level; both routes are still registered via the decorators, but a
# distinct function name would be clearer.
@post('/login')
def login():
    """Authenticate the posted credentials and set the account cookie."""
    username = request.forms.get('username')
    password = request.forms.get('password')
    if user_login(username, password):
        set_cookie(username)
        redirect("/")
    else:
        return {"error": "Bad login"}
@post('/logout')
def logout():
    """Clear the account cookie and return to the main page."""
    clear_cookie()
    redirect("/")
@post('/update')
def update():
    """Record the posted value for the logged-in user and broadcast the
    updated chart data to all websocket clients."""
    value = request.POST.get("value")
    username = get_cookie()
    if value and username:
        chart_data.update({username: value})
        broadcast(json.dumps(chart_data))
@error(404)
def error404(error):
    """Plain-text 404 handler."""
    return 'Nothing here, sorry'
@get('/socket', apply=[websocket])
def router(sock):
    """Websocket endpoint: register the client, send it the current chart
    data, then relay every received message to all clients.

    The socket is always removed from `connections` on exit — the original
    skipped the removal when an exception escaped the receive loop, leaving
    a dead socket that would break subsequent broadcast() calls.
    """
    connections.add(sock)
    sock.send(json.dumps(chart_data))
    try:
        while True:
            data = sock.receive()
            if data is None:
                # client disconnected
                break
            broadcast(data)
    finally:
        # discard() (vs remove()) cannot raise if the socket is already gone
        connections.discard(sock)
# Main
# Serve on all interfaces; GeventWebSocketServer is required for the
# websocket-decorated /socket route to work.
run(host='0.0.0.0', port=8080, server=GeventWebSocketServer)
| Alexander-0x80/office-mood-meter | omm.py | Python | mit | 2,567 |
#$#HEADER-START
# vim:set expandtab ts=4 sw=4 ai ft=python:
#
# Reflex Configuration Event Engine
#
# Copyright (C) 2016 Brandon Gillespie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#$#HEADER-END
"""
General Webhooks and simple API routing
"""
import os # for trace
import time
import sys
import traceback
import base64
import re
from rfx import json2data
###############################################################################
# Module-global handle to the running server; expected to be assigned by the
# application at startup (assignment not visible in this module).  All the
# logging helpers below pivot off it being set.
SERVER = None
###############################################################################
# pylint: disable=no-self-use
def trace(arg):
    """Append *arg* to the local ``trace`` file, tagged with pid and time."""
    line = "[{}] {} {}\n".format(os.getpid(), time.time(), arg)
    with open("trace", "at") as handle:
        handle.write(line)
###############################################################################
def set_DEBUG(module, value): # pylint: disable=invalid-name
    """do_DEBUG() wrapper from rfx.Base object, pivoting off SERVER global.

    A falsy *value* clears the module's debug entry; a truthy one installs it
    only when not already set.  Returns True when SERVER exists and a change
    was applied (or the entry was cleared), False otherwise.
    """
    if SERVER:
        if not value:
            # fix: pop() instead of `del` — clearing a module that was never
            # enabled must not raise KeyError
            SERVER.debug.pop(module, None)
            return True
        elif SERVER.debug.get(module, None) is None:
            SERVER.debug[module] = value
            return True
    return False
###############################################################################
def do_DEBUG(*module): # pylint: disable=invalid-name
    """Proxy to SERVER.do_DEBUG(); False when no SERVER is registered."""
    if not SERVER:
        return False
    return SERVER.do_DEBUG(*module)
def debug(*args, **kwargs):
    """
    debug wrapper for logging

    Routes through SERVER.DEBUG() when a server is registered, otherwise
    prints to stdout.  Any failure is appended to a local 'log_failed' file
    so logging problems never propagate to the caller.
    """
    try:
        if SERVER:
            SERVER.DEBUG(*args, **kwargs)
        else:
            print("{} {}".format(args, kwargs))
    except Exception: # pylint: disable=broad-except
        # last-resort sink: logging must never raise into the caller
        with open("log_failed", "ta") as out:
            out.write("\n\n--------------------------------------------------\n\n")
            traceback.print_exc(file=out)
            out.write(str(args))
            out.write(str(kwargs))
###############################################################################
def log(*args, **kwargs):
    """
    Log key=value pairs for easier splunk processing.

    With a registered SERVER the message is routed through SERVER.NOTIFY(),
    optionally tagging a request id; otherwise it is written to stdout as
    space-separated key=value pairs.  Failures fall back to 'log_failed'.
    """
    try:
        if SERVER:
            try:
                if SERVER.conf.get('requestid'):
                    # note: danger: this should be injected by traffic management,
                    # enable it with config requestid=true
                    reqid = SERVER.cherry.request.headers.get('X-Request-Id')
                    if reqid:
                        kwargs['reqid'] = reqid
                    # fix: the original had this exact elif duplicated twice
                    # back to back; the second copy was unreachable dead code
                    elif SERVER.cherry.serving.request.__dict__.get('reqid'):
                        kwargs['reqid'] = SERVER.cherry.serving.request.reqid
            except: # pylint: disable=bare-except
                SERVER.NOTIFY("Logging Error: " + traceback.format_exc())
            SERVER.NOTIFY(*args, **kwargs)
        else:
            sys.stdout.write(" ".join(args) + " ")
            for key, value in kwargs.items():
                sys.stdout.write("{}={} ".format(key, value))
            sys.stdout.write("\n")
            sys.stdout.flush()
    except Exception: # pylint: disable=broad-except
        with open("log_failed", "ta") as out:
            out.write("\n\n--------------------------------------------------\n\n")
            traceback.print_exc(file=out)
            out.write(str(args))
            out.write(str(kwargs))
###############################################################################
RX_TOK = re.compile(r'[^a-z0-9-]')
def get_jti(in_jwt):
    """
    Pull the JTI from the payload of the jwt without verifying signature.
    Dangerous, not good unless secondary verification matches.
    """
    payload_raw = in_jwt.split(".")[1]
    # JWT payloads are unpadded base64; -len % 4 is 0 for an already aligned
    # payload.  (The original `4 - len % 4` evaluated to 4 in that case and
    # appended four spurious '=' characters.)
    missing_padding = -len(payload_raw) % 4
    if missing_padding:
        payload_raw += '=' * missing_padding
    try:
        data = json2data(base64.b64decode(payload_raw))
    except:
        raise ValueError("Error decoding JWT: {}".format(in_jwt))
    token_id = str(data.get('jti', ''))
    # restrict to lowercase alphanumerics and '-'
    if RX_TOK.search(token_id):
        raise ValueError("Invalid User ID: {}".format(token_id))
    return token_id
| reflexsc/reflex | src/rfxengine/__init__.py | Python | agpl-3.0 | 5,071 |
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
    """Aggregates every Pulp client API binding behind a single server
    connection, exposing one attribute per API area."""
    def __init__(self, pulp_connection):
        """
        @type: pulp_connection: pulp.bindings.server.PulpConnection
        """
        # Please keep the following in alphabetical order to ease reading
        self.actions = ActionsAPI(pulp_connection)
        self.bind = consumer.BindingsAPI(pulp_connection)
        self.bindings = consumer.BindingSearchAPI(pulp_connection)
        self.profile = consumer.ProfilesAPI(pulp_connection)
        self.consumer = consumer.ConsumerAPI(pulp_connection)
        self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
        self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
        self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
        self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
        self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
        self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
        self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
        self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
        self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
        self.content_orphan = OrphanContentAPI(pulp_connection)
        self.content_source = ContentSourceAPI(pulp_connection)
        self.content_catalog = ContentCatalogAPI(pulp_connection)
        self.event_listener = EventListenerAPI(pulp_connection)
        self.permission = auth.PermissionAPI(pulp_connection)
        self.repo = repository.RepositoryAPI(pulp_connection)
        self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
        self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
        self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
        self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
        self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
        # NOTE(review): this is wired to RepoGroupSearchAPI, identical to
        # repo_group_search two lines below — looks like a possible
        # copy/paste; confirm whether a distributor-specific search API
        # was intended here.
        self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
        self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
        self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
        self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
        self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
        self.repo_search = repository.RepositorySearchAPI(pulp_connection)
        self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
        self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
        self.role = auth.RoleAPI(pulp_connection)
        self.server_info = ServerInfoAPI(pulp_connection)
        self.server_status = ServerStatusAPI(pulp_connection)
        self.tasks = TasksAPI(pulp_connection)
        self.tasks_search = TaskSearchAPI(pulp_connection)
        self.uploads = UploadAPI(pulp_connection)
        self.user = auth.UserAPI(pulp_connection)
        self.user_search = auth.UserSearchAPI(pulp_connection)
| rbramwell/pulp | bindings/pulp/bindings/bindings.py | Python | gpl-2.0 | 3,641 |
#
# Copyright (C) 2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import unittest
from mock import Mock, patch, call
from pyanaconda.modules.boss.install_manager import InstallManager
from pyanaconda.modules.boss.install_manager.installation import SystemInstallationTask
class InstallManagerTestCase(unittest.TestCase):
    """Test the install manager."""
    # NOTE: methods use the nose-style `*_test` suffix (not pytest/unittest's
    # `test_*` prefix), matching the project's nosetests runner.
    def install_with_no_modules_test(self):
        """Install with no modules."""
        install_manager = InstallManager()
        install_manager.module_observers = []
        main_task = install_manager.install_system_with_task()
        self.assertIsInstance(main_task, SystemInstallationTask)
        self.assertEqual(main_task._subtasks, [])
    def install_with_no_tasks_test(self):
        """Install with no tasks."""
        observer = Mock()
        observer.is_service_available = True
        observer.service_name = "A"
        # the module advertises no installation task paths
        observer.proxy.InstallWithTasks.return_value = []
        install_manager = InstallManager()
        install_manager.module_observers = [observer]
        main_task = install_manager.install_system_with_task()
        self.assertIsInstance(main_task, SystemInstallationTask)
        self.assertEqual(main_task._subtasks, [])
    @patch('pyanaconda.dbus.DBus.get_proxy')
    def install_one_task_test(self, proxy_getter):
        """Install with one task."""
        observer = Mock()
        observer.is_service_available = True
        observer.service_name = "A"
        observer.proxy.InstallWithTasks.return_value = ["/A/1"]
        task_proxy = Mock()
        task_proxy.Steps = 1
        proxy_getter.return_value = task_proxy
        install_manager = InstallManager()
        install_manager.module_observers = [observer]
        main_task = install_manager.install_system_with_task()
        # a proxy is created for the single advertised task path
        proxy_getter.assert_called_once_with("A", "/A/1")
        self.assertIsInstance(main_task, SystemInstallationTask)
        self.assertEqual(main_task._subtasks, [task_proxy])
    @patch('pyanaconda.dbus.DBus.get_proxy')
    def install_three_tasks_test(self, proxy_getter):
        """Install with three tasks."""
        observers = []
        observer = Mock()
        observer.is_service_available = True
        observer.service_name = "A"
        observer.proxy.InstallWithTasks.return_value = ["/A/1"]
        observers.append(observer)
        observer = Mock()
        observer.is_service_available = True
        observer.service_name = "B"
        # module B advertises two task paths
        observer.proxy.InstallWithTasks.return_value = ["/B/1", "/B/2"]
        observers.append(observer)
        task_proxy = Mock()
        task_proxy.Steps = 1
        proxy_getter.return_value = task_proxy
        install_manager = InstallManager()
        install_manager.module_observers = observers
        main_task = install_manager.install_system_with_task()
        # proxies are requested in module order, preserving task order
        proxy_getter.assert_has_calls([
            call("A", "/A/1"),
            call("B", "/B/1"),
            call("B", "/B/2")
        ])
        self.assertIsInstance(main_task, SystemInstallationTask)
        self.assertEqual(main_task._subtasks, [task_proxy, task_proxy, task_proxy])
| vathpela/anaconda | tests/nosetests/pyanaconda_tests/install_manager_test.py | Python | gpl-2.0 | 4,025 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields
class UUID(fields.UUID):
    """A UUID field that serializes to the 32-character hex form (no dashes)."""
    def _serialize(self, value, attr, obj, **kwargs):
        # Validate, then emit `.hex` (already a str; str() kept for safety).
        # The super(fields.String, ...) call deliberately skips both the UUID
        # and String overrides in the MRO so the pre-formatted hex string is
        # passed through unchanged by the base Field serializer.
        validated = str(self._validated(value).hex) if value is not None else None
        return super(fields.String, self)._serialize(validated, attr, obj)  # noqa
| polyaxon/polyaxon | core/polyaxon/schemas/fields/uuids.py | Python | apache-2.0 | 908 |
from __future__ import unicode_literals
import re
from django.db import models
# Notification class
from django.db.models import Q
from django.utils import timezone
class Notification(models.Model):
    """In-app notification delivered to a board member about activity on a
    board, list, card or comment."""
    # All object references are nullable with on_delete=SET_NULL so that a
    # notification survives deletion of the objects it refers to.
    board = models.ForeignKey("boards.Board", verbose_name=u"Board this notification belongs to", related_name="notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    list = models.ForeignKey("boards.List", verbose_name=u"List this notification belongs to", related_name="notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    card = models.ForeignKey("boards.Card", verbose_name=u"Card this notification belongs to", related_name="notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    card_comment = models.ForeignKey("boards.CardComment", verbose_name=u"Card comment this notification belongs to", related_name="notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    sender = models.ForeignKey("members.Member", verbose_name=u"Sender of this notification", related_name="sent_notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    receiver = models.ForeignKey("members.Member", verbose_name=u"Receiver of this notification", related_name="received_notifications", null=True, default=None, blank=True, on_delete=models.SET_NULL)
    description = models.TextField(verbose_name=u"Notification description", default="", blank=True)
    # Read state plus timestamps; creation_datetime is filled in by save().
    is_read = models.BooleanField(verbose_name=u"Is this notification read?", default=False)
    reading_datetime = models.DateTimeField(verbose_name=u"When this notification was read", default=None, null=True, blank=True)
    creation_datetime = models.DateTimeField(verbose_name=u"Creation datetime")
def read(self):
self.reading_datetime = timezone.now()
self.is_read = True
self.save()
    def save(self, *args, **kwargs):
        # Stamp the creation time on first save; the creation_datetime field
        # declares no default of its own.
        if self.creation_datetime is None:
            self.creation_datetime = timezone.now()
        return super(Notification, self).save(*args, **kwargs)
# Add new card comment notifications
@staticmethod
def add_card_comment(card_comment, card):
board = card.board
card_comment_content = card_comment.content
# Adding blocking card
if card_comment.blocking_card and card_comment.blocking_card.list.type != "done":
for member in card.members.all():
Notification(
board=board, card=card, card_comment=card_comment,
sender=card_comment.author, receiver=member,
description="{0}: card {0} is blocked by {1}".format(board.name, card.name, card_comment.blocking_card.name)
).save()
# Adding reviews
if card_comment.review:
for member in card.members.all():
Notification(
board=board, card=card, card_comment=card_comment,
sender=card_comment.author, receiver=member,
description="{0}: review of card {1} by {2}".format(board.name, card.name, card_comment.author)
).save()
# Adding mentions
mentions = re.findall(r"@[\w\d]+", card_comment_content)
usernames = [mention.replace("@", "") for mention in mentions if mention != "@board"]
members = board.members.filter(Q(user__username__in=usernames)|Q(trello_member_profile__username__in=usernames))
for member in members:
Notification(
board=board, card=card, card_comment=card_comment,
sender=card_comment.author, receiver=member,
description="{0}: Mention of {1} in comment {2}".format(board.name, member.external_username, card.name)
).save()
    # Add card movement notifications
    @staticmethod
    def move_card(mover, card, board=None):
        """Notify card members that *mover* moved *card*; also emit
        notifications for cards this one was blocking."""
        if board is None:
            board = card.board
        # Notify a movement to the members of this card
        for member in card.members.all():
            Notification(
                board=board, card=card,
                sender=mover, receiver=member,
                description="{0}: card {1} moved to {2}".format(board.name, card.name, card.list.name)
            ).save()
        # Unblocking
        blocked_cards = card.blocked_cards.all()
        if blocked_cards.exists():
            for blocked_card in blocked_cards:
                # Send the notification to all card members
                for member in card.members.all():
                    Notification(
                        board=board, card=card,
                        sender=mover, receiver=member,
                        description="{0}: card {1} is no longer blocked by {2}".format(board.name, blocked_card.name, card.name)
                    ).save()
                    # If card is no longer blocked by any card, it can be moved. It is free.
                    if not blocked_card.blocking_cards.exclude(list__type="done").exists():
                        Notification(
                            board=board, card=card,
                            sender=mover, receiver=member,
                            description="{0}: card {1} can be started".format(board.name, blocked_card.name)
).save() | diegojromerolopez/djanban | src/djanban/apps/notifications/models.py | Python | mit | 5,341 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-02-07 22:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Task model (auto-generated by Django;
    do not hand-edit the operations — create a new migration instead)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('updated', models.DateTimeField(auto_now_add=True)),
                ('status', models.PositiveIntegerField(choices=[(0, 'New task'), (1, 'In progress'), (2, 'Done'), (255, 'Deleted')], default=0)),
                ('assigned', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_assigned', to=settings.AUTH_USER_MODEL)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_owner', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| moonsly/simple_task_manager | task_list/task_list/migrations/0001_initial.py | Python | gpl-3.0 | 1,284 |
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
import re, types
pattern_color = re.compile(r'#[0-9a-fA-F]{6}')
pattern_vector3D = re.compile(r'\([ ]*-?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ ]+-?([0-9]+(\.[0-9]*)?|\.[0-9]+)){2}[ ]*\)')
def make_NCName(arg):
    """Escape characters not allowed in an XML NCName.

    Colons and spaces are replaced by an ``_xx_`` escape where ``xx`` is the
    character's lowercase hex code (':' -> '_3a_', ' ' -> '_20_').
    """
    result = arg
    for ch in (':', ' '):
        result = result.replace(ch, "_%x_" % ord(ch))
    return result
def cnv_anyURI(attribute, arg, element):
    """Convert an anyURI attribute value to text (Python 2 unicode)."""
    return unicode(arg)
def cnv_boolean(attribute, arg, element):
    """Convert a value to the ODF boolean strings "true"/"false".

    The strings "false"/"no" (any case) map to "false"; any other truthy
    value maps to "true", falsy values to "false".  Coercing through str()
    preserves the old behaviour for strings and also accepts real booleans,
    which previously crashed on `arg.lower()`.
    """
    if str(arg).lower() in ("false", "no"):
        return "false"
    if arg:
        return "true"
    return "false"
# Potentially accept color values
def cnv_color(attribute, arg, element):
""" A RGB color in conformance with §5.9.11 of [XSL], that is a RGB color in notation “#rrggbb”, where
rr, gg and bb are 8-bit hexadecimal digits.
"""
return str(arg)
def cnv_configtype(attribute, arg, element):
    """Validate a config:type value against the set allowed by ODF."""
    if str(arg) not in ("boolean", "short", "int", "long",
            "double", "string", "datetime", "base64Binary"):
        raise ValueError, "'%s' not allowed" % str(arg)
    return str(arg)
def cnv_data_source_has_labels(attribute, arg, element):
    """Validate a chart:data-source-has-labels value."""
    if str(arg) not in ("none","row","column","both"):
        raise ValueError, "'%s' not allowed" % str(arg)
    return str(arg)
# Understand different date formats
def cnv_date(attribute, arg, element):
    """A dateOrDateTime value: an [xmlschema-2] date or dateTime string.
    Passed through as-is after string coercion."""
    return str(arg)
def cnv_dateTime(attribute, arg, element):
    """A dateOrDateTime value: an [xmlschema-2] date or dateTime string.
    Passed through as-is after string coercion."""
    return str(arg)
def cnv_double(attribute, arg, element):
    """Convert a double attribute value to its string form."""
    return str(arg)
def cnv_duration(attribute, arg, element):
    """Convert a duration attribute value to its string form."""
    return str(arg)
""" A style family """
if str(arg) not in ("text", "paragraph", "section", "ruby", "table", "table-column", "table-row", "table-cell",
"graphic", "presentation", "drawing-page", "chart"):
raise ValueError, "'%s' not allowed" % str(arg)
return str(arg)
def __save_prefix(attribute, arg, element):
    """Return *arg* as unicode, resolving its namespace prefix against the
    element's known namespaces when one is present.

    NOTE(review): the looked-up prefix ``p`` is computed but never used; the
    get_nsprefix() call may be kept for a side effect on the element's
    prefix table — confirm before removing.
    """
    prefix = arg.split(':',1)[0]
    if prefix == arg:
        # no ':' separator, nothing to resolve
        return unicode(arg)
    namespace = element.get_knownns(prefix)
    if namespace is None:
        #raise ValueError, "'%s' is an unknown prefix" % str(prefix)
        return unicode(arg)
    p = element.get_nsprefix(namespace)
    return unicode(arg)
def cnv_formula(attribute, arg, element):
    """ A string containing a formula. Formulas do not have a predefined syntax, but the string should
    begin with a namespace prefix, followed by a “:” (COLON, U+003A) separator, followed by the text
    of the formula. The namespace bound to the prefix determines the syntax and semantics of the
    formula.
    """
    # resolve/normalise the namespace prefix via the shared helper
    return __save_prefix(attribute, arg, element)
def cnv_ID(attribute, arg, element):
    """Convert an ID attribute value to its string form."""
    return str(arg)
def cnv_IDREF(attribute, arg, element):
    """Convert an IDREF attribute value to its string form."""
    return str(arg)
def cnv_integer(attribute, arg, element):
    """Convert an integer attribute value to its string form."""
    return str(arg)
def cnv_legend_position(attribute, arg, element):
    """Validate a chart:legend-position value."""
    if str(arg) not in ("start", "end", "top", "bottom", "top-start", "bottom-start", "top-end", "bottom-end"):
        raise ValueError, "'%s' not allowed" % str(arg)
    return str(arg)
# magnitude (int or decimal) followed by one of the XSL units of measure
pattern_length = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)((cm)|(mm)|(in)|(pt)|(pc)|(px))')
def cnv_length(attribute, arg, element):
    """ A (positive or negative) physical length, consisting of magnitude and unit, in conformance with the
    Units of Measure defined in §5.9.13 of [XSL].
    """
    global pattern_length
    if not pattern_length.match(arg):
        raise ValueError, "'%s' is not a valid length" % arg
    return arg
def cnv_lengthorpercent(attribute, arg, element):
    """Accept either a length value (e.g. '2cm') or a percentage ('10%')."""
    failed = False
    try: return cnv_length(attribute, arg, element)
    except: failed = True
    try: return cnv_percent(attribute, arg, element)
    except: failed = True
    if failed:
        raise ValueError, "'%s' is not a valid length or percent" % arg
    # unreachable: reaching this point implies both conversions failed, in
    # which case `failed` is True and the ValueError above was raised
    return arg
def cnv_metavaluetype(attribute, arg, element):
    """Validate a meta:value-type value."""
    if str(arg) not in ("float", "date", "time", "boolean", "string"):
        raise ValueError, "'%s' not allowed" % str(arg)
    return str(arg)
def cnv_major_minor(attribute, arg, element):
    """Validate a chart grid class: only 'major' or 'minor' are allowed.

    Returns the validated value; the original fell through and returned
    None even for valid input, unlike every sibling converter.
    """
    if arg not in ('major', 'minor'):
        # parenthesized raise is valid on both Python 2 and 3
        raise ValueError("'%s' is not either 'minor' or 'major'" % arg)
    return arg
# prefix ':' localname, e.g. "chart:bar"
pattern_namespacedToken = re.compile(r'[0-9a-zA-Z_]+:[0-9a-zA-Z._\-]+')
def cnv_namespacedToken(attribute, arg, element):
    """Validate a namespaced token and resolve its prefix."""
    global pattern_namespacedToken
    if not pattern_namespacedToken.match(arg):
        raise ValueError, "'%s' is not a valid namespaced token" % arg
    return __save_prefix(attribute, arg, element)
def cnv_NCName(attribute, arg, element):
    """ NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
        Essentially an XML name minus ':'
    """
    if type(arg) in types.StringTypes:
        # plain (byte or unicode) string: escape disallowed characters
        # (types.StringTypes is Python 2 only)
        return make_NCName(arg)
    else:
        # otherwise assume a style object and use its style:name attribute
        return arg.getAttrNS(STYLENS, 'name')
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_StyleNameRef(attribute, arg, element):
    """Return the style:name of *arg* when it is a style object, otherwise
    return *arg* itself (assumed to already be an NCName string)."""
    try:
        return arg.getAttrNS(STYLENS, 'name')
    except:
        # NOTE(review): bare except also hides unexpected errors, not just
        # "arg is a plain string"; consider narrowing to AttributeError
        return arg
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_DrawNameRef(attribute, arg, element):
    """Return the draw:name of *arg* when it is a drawing object, otherwise
    return *arg* itself (assumed to already be an NCName string)."""
    try:
        return arg.getAttrNS(DRAWNS, 'name')
    except:
        # NOTE(review): same bare-except caveat as in cnv_StyleNameRef
        return arg
# Must accept list of Style objects
def cnv_NCNames(attribute, arg, element):
    """Join a sequence of NCName strings into one space-separated string."""
    return ' '.join(arg)
def cnv_nonNegativeInteger(attribute, arg, element):
    """Convert a non-negative integer attribute value to its string form."""
    return str(arg)
# optional sign, int or decimal magnitude, '%' suffix
pattern_percent = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)%')
def cnv_percent(attribute, arg, element):
    """Validate a percentage string such as '42%' or '-3.5%'."""
    global pattern_percent
    if not pattern_percent.match(arg):
        # fix: error message said "length" for a percent value; parenthesized
        # raise is valid on both Python 2 and 3
        raise ValueError("'%s' is not a valid percent" % arg)
    return arg
# Real one doesn't allow floating point values
pattern_points = re.compile(r'-?[0-9]+,-?[0-9]+([ ]+-?[0-9]+,-?[0-9]+)*')
#pattern_points = re.compile(r'-?[0-9.]+,-?[0-9.]+([ ]+-?[0-9.]+,-?[0-9.]+)*')
def cnv_points(attribute, arg, element):
    """Validate or build a draw:points value: 'x1,y1 x2,y2 ...'.

    Accepts either a pre-formatted string or a sequence of (x, y) integer
    pairs (string detection via types.StringTypes is Python 2 only).
    """
    global pattern_points
    if type(arg) in types.StringTypes:
        if not pattern_points.match(arg):
            raise ValueError, "x,y are separated by a comma and the points are separated by white spaces"
        return arg
    else:
        try:
            strarg = ' '.join([ "%d,%d" % p for p in arg])
        except:
            raise ValueError, "Points must be string or [(0,0),(1,1)] - not %s" % arg
        return strarg
def cnv_positiveInteger(attribute, arg, element):
    """Convert a positive integer attribute value to its string form."""
    return str(arg)
def cnv_string(attribute, arg, element):
    """Convert a string attribute value to text (Python 2 unicode)."""
    return unicode(arg)
def cnv_textnoteclass(attribute, arg, element):
    """Validate a text:note-class value: 'footnote' or 'endnote'."""
    if str(arg) not in ("footnote", "endnote"):
        raise ValueError, "'%s' not allowed" % str(arg)
    return str(arg)
# Understand different time formats
def cnv_time(attribute, arg, element):
    """Convert a time attribute value to its string form."""
    return str(arg)
def cnv_token(attribute, arg, element):
    """Convert a token attribute value to its string form."""
    return str(arg)
# exactly four whitespace-separated (optionally signed) integers
pattern_viewbox = re.compile(r'-?[0-9]+([ ]+-?[0-9]+){3}$')
def cnv_viewbox(attribute, arg, element):
    """Validate an svg:viewBox value."""
    global pattern_viewbox
    if not pattern_viewbox.match(arg):
        raise ValueError, "viewBox must be four integers separated by whitespaces"
    return arg
def cnv_xlinkshow(attribute, arg, element):
    """Validate an xlink:show value: "new", "replace" or "embed".

    Returns the string form of *arg*; raises ValueError otherwise.
    str(arg) is computed once instead of three times, and the raise uses
    the parenthesized form (valid in both Python 2 and 3).
    """
    value = str(arg)
    if value not in ("new", "replace", "embed"):
        raise ValueError("'%s' not allowed" % value)
    return value
attrconverters = {
((ANIMNS,u'audio-level'), None): cnv_double,
((ANIMNS,u'color-interpolation'), None): cnv_string,
((ANIMNS,u'color-interpolation-direction'), None): cnv_string,
((ANIMNS,u'command'), None): cnv_string,
((ANIMNS,u'formula'), None): cnv_string,
((ANIMNS,u'id'), None): cnv_ID,
((ANIMNS,u'iterate-interval'), None): cnv_duration,
((ANIMNS,u'iterate-type'), None): cnv_string,
((ANIMNS,u'name'), None): cnv_string,
((ANIMNS,u'sub-item'), None): cnv_string,
((ANIMNS,u'value'), None): cnv_string,
# ((DBNS,u'type'), None): cnv_namespacedToken,
((CHARTNS,u'attached-axis'), None): cnv_string,
((CHARTNS,u'class'), (CHARTNS,u'grid')): cnv_major_minor,
((CHARTNS,u'class'), None): cnv_namespacedToken,
((CHARTNS,u'column-mapping'), None): cnv_string,
((CHARTNS,u'connect-bars'), None): cnv_boolean,
((CHARTNS,u'data-label-number'), None): cnv_string,
((CHARTNS,u'data-label-symbol'), None): cnv_boolean,
((CHARTNS,u'data-label-text'), None): cnv_boolean,
((CHARTNS,u'data-source-has-labels'), None): cnv_data_source_has_labels,
((CHARTNS,u'deep'), None): cnv_boolean,
((CHARTNS,u'dimension'), None): cnv_string,
((CHARTNS,u'display-label'), None): cnv_boolean,
((CHARTNS,u'error-category'), None): cnv_string,
((CHARTNS,u'error-lower-indicator'), None): cnv_boolean,
((CHARTNS,u'error-lower-limit'), None): cnv_string,
((CHARTNS,u'error-margin'), None): cnv_string,
((CHARTNS,u'error-percentage'), None): cnv_string,
((CHARTNS,u'error-upper-indicator'), None): cnv_boolean,
((CHARTNS,u'error-upper-limit'), None): cnv_string,
((CHARTNS,u'gap-width'), None): cnv_string,
((CHARTNS,u'interpolation'), None): cnv_string,
((CHARTNS,u'interval-major'), None): cnv_string,
((CHARTNS,u'interval-minor-divisor'), None): cnv_string,
((CHARTNS,u'japanese-candle-stick'), None): cnv_boolean,
((CHARTNS,u'label-arrangement'), None): cnv_string,
((CHARTNS,u'label-cell-address'), None): cnv_string,
((CHARTNS,u'legend-align'), None): cnv_string,
((CHARTNS,u'legend-position'), None): cnv_legend_position,
((CHARTNS,u'lines'), None): cnv_boolean,
((CHARTNS,u'link-data-style-to-source'), None): cnv_boolean,
((CHARTNS,u'logarithmic'), None): cnv_boolean,
((CHARTNS,u'maximum'), None): cnv_string,
((CHARTNS,u'mean-value'), None): cnv_boolean,
((CHARTNS,u'minimum'), None): cnv_string,
((CHARTNS,u'name'), None): cnv_string,
((CHARTNS,u'origin'), None): cnv_string,
((CHARTNS,u'overlap'), None): cnv_string,
((CHARTNS,u'percentage'), None): cnv_boolean,
((CHARTNS,u'pie-offset'), None): cnv_string,
((CHARTNS,u'regression-type'), None): cnv_string,
((CHARTNS,u'repeated'), None): cnv_nonNegativeInteger,
((CHARTNS,u'row-mapping'), None): cnv_string,
((CHARTNS,u'scale-text'), None): cnv_boolean,
((CHARTNS,u'series-source'), None): cnv_string,
((CHARTNS,u'solid-type'), None): cnv_string,
((CHARTNS,u'spline-order'), None): cnv_string,
((CHARTNS,u'spline-resolution'), None): cnv_string,
((CHARTNS,u'stacked'), None): cnv_boolean,
((CHARTNS,u'style-name'), None): cnv_StyleNameRef,
((CHARTNS,u'symbol-height'), None): cnv_string,
((CHARTNS,u'symbol-name'), None): cnv_string,
((CHARTNS,u'symbol-type'), None): cnv_string,
((CHARTNS,u'symbol-width'), None): cnv_string,
((CHARTNS,u'text-overlap'), None): cnv_boolean,
((CHARTNS,u'three-dimensional'), None): cnv_boolean,
((CHARTNS,u'tick-marks-major-inner'), None): cnv_boolean,
((CHARTNS,u'tick-marks-major-outer'), None): cnv_boolean,
((CHARTNS,u'tick-marks-minor-inner'), None): cnv_boolean,
((CHARTNS,u'tick-marks-minor-outer'), None): cnv_boolean,
((CHARTNS,u'values-cell-range-address'), None): cnv_string,
((CHARTNS,u'vertical'), None): cnv_boolean,
((CHARTNS,u'visible'), None): cnv_boolean,
((CONFIGNS,u'name'), None): cnv_formula,
((CONFIGNS,u'type'), None): cnv_configtype,
((DR3DNS,u'ambient-color'), None): cnv_string,
((DR3DNS,u'back-scale'), None): cnv_string,
((DR3DNS,u'backface-culling'), None): cnv_string,
((DR3DNS,u'center'), None): cnv_string,
((DR3DNS,u'close-back'), None): cnv_boolean,
((DR3DNS,u'close-front'), None): cnv_boolean,
((DR3DNS,u'depth'), None): cnv_length,
((DR3DNS,u'diffuse-color'), None): cnv_string,
((DR3DNS,u'direction'), None): cnv_string,
((DR3DNS,u'distance'), None): cnv_length,
((DR3DNS,u'edge-rounding'), None): cnv_string,
((DR3DNS,u'edge-rounding-mode'), None): cnv_string,
((DR3DNS,u'emissive-color'), None): cnv_string,
((DR3DNS,u'enabled'), None): cnv_boolean,
((DR3DNS,u'end-angle'), None): cnv_string,
((DR3DNS,u'focal-length'), None): cnv_length,
((DR3DNS,u'horizontal-segments'), None): cnv_string,
((DR3DNS,u'lighting-mode'), None): cnv_boolean,
((DR3DNS,u'max-edge'), None): cnv_string,
((DR3DNS,u'min-edge'), None): cnv_string,
((DR3DNS,u'normals-direction'), None): cnv_string,
((DR3DNS,u'normals-kind'), None): cnv_string,
((DR3DNS,u'projection'), None): cnv_string,
((DR3DNS,u'shade-mode'), None): cnv_string,
((DR3DNS,u'shadow'), None): cnv_string,
((DR3DNS,u'shadow-slant'), None): cnv_nonNegativeInteger,
((DR3DNS,u'shininess'), None): cnv_string,
((DR3DNS,u'size'), None): cnv_string,
((DR3DNS,u'specular'), None): cnv_boolean,
((DR3DNS,u'specular-color'), None): cnv_string,
((DR3DNS,u'texture-filter'), None): cnv_string,
((DR3DNS,u'texture-generation-mode-x'), None): cnv_string,
((DR3DNS,u'texture-generation-mode-y'), None): cnv_string,
((DR3DNS,u'texture-kind'), None): cnv_string,
((DR3DNS,u'texture-mode'), None): cnv_string,
((DR3DNS,u'transform'), None): cnv_string,
((DR3DNS,u'vertical-segments'), None): cnv_string,
((DR3DNS,u'vpn'), None): cnv_string,
((DR3DNS,u'vrp'), None): cnv_string,
((DR3DNS,u'vup'), None): cnv_string,
((DRAWNS,u'align'), None): cnv_string,
((DRAWNS,u'angle'), None): cnv_integer,
((DRAWNS,u'archive'), None): cnv_string,
((DRAWNS,u'auto-grow-height'), None): cnv_boolean,
((DRAWNS,u'auto-grow-width'), None): cnv_boolean,
((DRAWNS,u'background-size'), None): cnv_string,
((DRAWNS,u'blue'), None): cnv_string,
((DRAWNS,u'border'), None): cnv_string,
((DRAWNS,u'caption-angle'), None): cnv_string,
((DRAWNS,u'caption-angle-type'), None): cnv_string,
((DRAWNS,u'caption-escape'), None): cnv_string,
((DRAWNS,u'caption-escape-direction'), None): cnv_string,
((DRAWNS,u'caption-fit-line-length'), None): cnv_boolean,
((DRAWNS,u'caption-gap'), None): cnv_string,
((DRAWNS,u'caption-line-length'), None): cnv_length,
((DRAWNS,u'caption-point-x'), None): cnv_string,
((DRAWNS,u'caption-point-y'), None): cnv_string,
((DRAWNS,u'caption-id'), None): cnv_IDREF,
((DRAWNS,u'caption-type'), None): cnv_string,
((DRAWNS,u'chain-next-name'), None): cnv_string,
((DRAWNS,u'class-id'), None): cnv_string,
((DRAWNS,u'class-names'), None): cnv_NCNames,
((DRAWNS,u'code'), None): cnv_string,
((DRAWNS,u'color'), None): cnv_string,
((DRAWNS,u'color-inversion'), None): cnv_boolean,
((DRAWNS,u'color-mode'), None): cnv_string,
((DRAWNS,u'concave'), None): cnv_string,
((DRAWNS,u'concentric-gradient-fill-allowed'), None): cnv_boolean,
((DRAWNS,u'contrast'), None): cnv_string,
((DRAWNS,u'control'), None): cnv_IDREF,
((DRAWNS,u'copy-of'), None): cnv_string,
((DRAWNS,u'corner-radius'), None): cnv_length,
((DRAWNS,u'corners'), None): cnv_positiveInteger,
((DRAWNS,u'cx'), None): cnv_string,
((DRAWNS,u'cy'), None): cnv_string,
((DRAWNS,u'data'), None): cnv_string,
((DRAWNS,u'decimal-places'), None): cnv_string,
((DRAWNS,u'display'), None): cnv_string,
((DRAWNS,u'display-name'), None): cnv_string,
((DRAWNS,u'distance'), None): cnv_lengthorpercent,
((DRAWNS,u'dots1'), None): cnv_integer,
((DRAWNS,u'dots1-length'), None): cnv_lengthorpercent,
((DRAWNS,u'dots2'), None): cnv_integer,
((DRAWNS,u'dots2-length'), None): cnv_lengthorpercent,
((DRAWNS,u'end-angle'), None): cnv_double,
((DRAWNS,u'end'), None): cnv_string,
((DRAWNS,u'end-color'), None): cnv_string,
((DRAWNS,u'end-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS,u'end-guide'), None): cnv_length,
((DRAWNS,u'end-intensity'), None): cnv_string,
((DRAWNS,u'end-line-spacing-horizontal'), None): cnv_string,
((DRAWNS,u'end-line-spacing-vertical'), None): cnv_string,
((DRAWNS,u'end-shape'), None): cnv_IDREF,
((DRAWNS,u'engine'), None): cnv_namespacedToken,
((DRAWNS,u'enhanced-path'), None): cnv_string,
((DRAWNS,u'escape-direction'), None): cnv_string,
((DRAWNS,u'extrusion-allowed'), None): cnv_boolean,
((DRAWNS,u'extrusion-brightness'), None): cnv_string,
((DRAWNS,u'extrusion'), None): cnv_boolean,
((DRAWNS,u'extrusion-color'), None): cnv_boolean,
((DRAWNS,u'extrusion-depth'), None): cnv_double,
((DRAWNS,u'extrusion-diffusion'), None): cnv_string,
((DRAWNS,u'extrusion-first-light-direction'), None): cnv_string,
((DRAWNS,u'extrusion-first-light-harsh'), None): cnv_boolean,
((DRAWNS,u'extrusion-first-light-level'), None): cnv_string,
((DRAWNS,u'extrusion-light-face'), None): cnv_boolean,
((DRAWNS,u'extrusion-metal'), None): cnv_boolean,
((DRAWNS,u'extrusion-number-of-line-segments'), None): cnv_integer,
((DRAWNS,u'extrusion-origin'), None): cnv_double,
((DRAWNS,u'extrusion-rotation-angle'), None): cnv_double,
((DRAWNS,u'extrusion-rotation-center'), None): cnv_string,
((DRAWNS,u'extrusion-second-light-direction'), None): cnv_string,
((DRAWNS,u'extrusion-second-light-harsh'), None): cnv_boolean,
((DRAWNS,u'extrusion-second-light-level'), None): cnv_string,
((DRAWNS,u'extrusion-shininess'), None): cnv_string,
((DRAWNS,u'extrusion-skew'), None): cnv_double,
((DRAWNS,u'extrusion-specularity'), None): cnv_string,
((DRAWNS,u'extrusion-viewpoint'), None): cnv_string,
((DRAWNS,u'fill'), None): cnv_string,
((DRAWNS,u'fill-color'), None): cnv_string,
((DRAWNS,u'fill-gradient-name'), None): cnv_string,
((DRAWNS,u'fill-hatch-name'), None): cnv_string,
((DRAWNS,u'fill-hatch-solid'), None): cnv_boolean,
((DRAWNS,u'fill-image-height'), None): cnv_lengthorpercent,
((DRAWNS,u'fill-image-name'), None): cnv_DrawNameRef,
((DRAWNS,u'fill-image-ref-point'), None): cnv_string,
((DRAWNS,u'fill-image-ref-point-x'), None): cnv_string,
((DRAWNS,u'fill-image-ref-point-y'), None): cnv_string,
((DRAWNS,u'fill-image-width'), None): cnv_lengthorpercent,
((DRAWNS,u'filter-name'), None): cnv_string,
((DRAWNS,u'fit-to-contour'), None): cnv_boolean,
((DRAWNS,u'fit-to-size'), None): cnv_boolean,
((DRAWNS,u'formula'), None): cnv_string,
((DRAWNS,u'frame-display-border'), None): cnv_boolean,
((DRAWNS,u'frame-display-scrollbar'), None): cnv_boolean,
((DRAWNS,u'frame-margin-horizontal'), None): cnv_string,
((DRAWNS,u'frame-margin-vertical'), None): cnv_string,
((DRAWNS,u'frame-name'), None): cnv_string,
((DRAWNS,u'gamma'), None): cnv_string,
((DRAWNS,u'glue-point-leaving-directions'), None): cnv_string,
((DRAWNS,u'glue-point-type'), None): cnv_string,
((DRAWNS,u'glue-points'), None): cnv_string,
((DRAWNS,u'gradient-step-count'), None): cnv_string,
((DRAWNS,u'green'), None): cnv_string,
((DRAWNS,u'guide-distance'), None): cnv_string,
((DRAWNS,u'guide-overhang'), None): cnv_length,
((DRAWNS,u'handle-mirror-horizontal'), None): cnv_boolean,
((DRAWNS,u'handle-mirror-vertical'), None): cnv_boolean,
((DRAWNS,u'handle-polar'), None): cnv_string,
((DRAWNS,u'handle-position'), None): cnv_string,
((DRAWNS,u'handle-radius-range-maximum'), None): cnv_string,
((DRAWNS,u'handle-radius-range-minimum'), None): cnv_string,
((DRAWNS,u'handle-range-x-maximum'), None): cnv_string,
((DRAWNS,u'handle-range-x-minimum'), None): cnv_string,
((DRAWNS,u'handle-range-y-maximum'), None): cnv_string,
((DRAWNS,u'handle-range-y-minimum'), None): cnv_string,
((DRAWNS,u'handle-switched'), None): cnv_boolean,
# ((DRAWNS,u'id'), None): cnv_ID,
# ((DRAWNS,u'id'), None): cnv_nonNegativeInteger, # ?? line 6581 in RNG
((DRAWNS,u'id'), None): cnv_string,
((DRAWNS,u'image-opacity'), None): cnv_string,
((DRAWNS,u'kind'), None): cnv_string,
((DRAWNS,u'layer'), None): cnv_string,
((DRAWNS,u'line-distance'), None): cnv_string,
((DRAWNS,u'line-skew'), None): cnv_string,
((DRAWNS,u'luminance'), None): cnv_string,
((DRAWNS,u'marker-end-center'), None): cnv_boolean,
((DRAWNS,u'marker-end'), None): cnv_string,
((DRAWNS,u'marker-end-width'), None): cnv_length,
((DRAWNS,u'marker-start-center'), None): cnv_boolean,
((DRAWNS,u'marker-start'), None): cnv_string,
((DRAWNS,u'marker-start-width'), None): cnv_length,
((DRAWNS,u'master-page-name'), None): cnv_StyleNameRef,
((DRAWNS,u'may-script'), None): cnv_boolean,
((DRAWNS,u'measure-align'), None): cnv_string,
((DRAWNS,u'measure-vertical-align'), None): cnv_string,
((DRAWNS,u'mime-type'), None): cnv_string,
((DRAWNS,u'mirror-horizontal'), None): cnv_boolean,
((DRAWNS,u'mirror-vertical'), None): cnv_boolean,
((DRAWNS,u'modifiers'), None): cnv_string,
((DRAWNS,u'name'), None): cnv_NCName,
# ((DRAWNS,u'name'), None): cnv_string,
((DRAWNS,u'nav-order'), None): cnv_IDREF,
((DRAWNS,u'nohref'), None): cnv_string,
((DRAWNS,u'notify-on-update-of-ranges'), None): cnv_string,
((DRAWNS,u'object'), None): cnv_string,
((DRAWNS,u'ole-draw-aspect'), None): cnv_string,
((DRAWNS,u'opacity'), None): cnv_string,
((DRAWNS,u'opacity-name'), None): cnv_string,
((DRAWNS,u'page-number'), None): cnv_positiveInteger,
((DRAWNS,u'parallel'), None): cnv_boolean,
((DRAWNS,u'path-stretchpoint-x'), None): cnv_double,
((DRAWNS,u'path-stretchpoint-y'), None): cnv_double,
((DRAWNS,u'placing'), None): cnv_string,
((DRAWNS,u'points'), None): cnv_points,
((DRAWNS,u'protected'), None): cnv_boolean,
((DRAWNS,u'recreate-on-edit'), None): cnv_boolean,
((DRAWNS,u'red'), None): cnv_string,
((DRAWNS,u'rotation'), None): cnv_integer,
((DRAWNS,u'secondary-fill-color'), None): cnv_string,
((DRAWNS,u'shadow'), None): cnv_string,
((DRAWNS,u'shadow-color'), None): cnv_string,
((DRAWNS,u'shadow-offset-x'), None): cnv_length,
((DRAWNS,u'shadow-offset-y'), None): cnv_length,
((DRAWNS,u'shadow-opacity'), None): cnv_string,
((DRAWNS,u'shape-id'), None): cnv_IDREF,
((DRAWNS,u'sharpness'), None): cnv_string,
((DRAWNS,u'show-unit'), None): cnv_boolean,
((DRAWNS,u'start-angle'), None): cnv_double,
((DRAWNS,u'start'), None): cnv_string,
((DRAWNS,u'start-color'), None): cnv_string,
((DRAWNS,u'start-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS,u'start-guide'), None): cnv_length,
((DRAWNS,u'start-intensity'), None): cnv_string,
((DRAWNS,u'start-line-spacing-horizontal'), None): cnv_string,
((DRAWNS,u'start-line-spacing-vertical'), None): cnv_string,
((DRAWNS,u'start-shape'), None): cnv_IDREF,
((DRAWNS,u'stroke'), None): cnv_string,
((DRAWNS,u'stroke-dash'), None): cnv_string,
((DRAWNS,u'stroke-dash-names'), None): cnv_string,
((DRAWNS,u'stroke-linejoin'), None): cnv_string,
((DRAWNS,u'style'), None): cnv_string,
((DRAWNS,u'style-name'), None): cnv_StyleNameRef,
((DRAWNS,u'symbol-color'), None): cnv_string,
((DRAWNS,u'text-areas'), None): cnv_string,
((DRAWNS,u'text-path-allowed'), None): cnv_boolean,
((DRAWNS,u'text-path'), None): cnv_boolean,
((DRAWNS,u'text-path-mode'), None): cnv_string,
((DRAWNS,u'text-path-same-letter-heights'), None): cnv_boolean,
((DRAWNS,u'text-path-scale'), None): cnv_string,
((DRAWNS,u'text-rotate-angle'), None): cnv_double,
((DRAWNS,u'text-style-name'), None): cnv_StyleNameRef,
((DRAWNS,u'textarea-horizontal-align'), None): cnv_string,
((DRAWNS,u'textarea-vertical-align'), None): cnv_string,
((DRAWNS,u'tile-repeat-offset'), None): cnv_string,
((DRAWNS,u'transform'), None): cnv_string,
((DRAWNS,u'type'), None): cnv_string,
((DRAWNS,u'unit'), None): cnv_string,
((DRAWNS,u'value'), None): cnv_string,
((DRAWNS,u'visible-area-height'), None): cnv_string,
((DRAWNS,u'visible-area-left'), None): cnv_string,
((DRAWNS,u'visible-area-top'), None): cnv_string,
((DRAWNS,u'visible-area-width'), None): cnv_string,
((DRAWNS,u'wrap-influence-on-position'), None): cnv_string,
((DRAWNS,u'z-index'), None): cnv_nonNegativeInteger,
((FONS,u'background-color'), None): cnv_string,
((FONS,u'border-bottom'), None): cnv_string,
((FONS,u'border'), None): cnv_string,
((FONS,u'border-left'), None): cnv_string,
((FONS,u'border-right'), None): cnv_string,
((FONS,u'border-top'), None): cnv_string,
((FONS,u'break-after'), None): cnv_string,
((FONS,u'break-before'), None): cnv_string,
((FONS,u'clip'), None): cnv_string,
((FONS,u'color'), None): cnv_string,
((FONS,u'column-count'), None): cnv_positiveInteger,
((FONS,u'column-gap'), None): cnv_length,
((FONS,u'country'), None): cnv_token,
((FONS,u'end-indent'), None): cnv_length,
((FONS,u'font-family'), None): cnv_string,
((FONS,u'font-size'), None): cnv_string,
((FONS,u'font-style'), None): cnv_string,
((FONS,u'font-variant'), None): cnv_string,
((FONS,u'font-weight'), None): cnv_string,
((FONS,u'height'), None): cnv_string,
((FONS,u'hyphenate'), None): cnv_boolean,
((FONS,u'hyphenation-keep'), None): cnv_string,
((FONS,u'hyphenation-ladder-count'), None): cnv_string,
((FONS,u'hyphenation-push-char-count'), None): cnv_string,
((FONS,u'hyphenation-remain-char-count'), None): cnv_string,
((FONS,u'keep-together'), None): cnv_string,
((FONS,u'keep-with-next'), None): cnv_string,
((FONS,u'language'), None): cnv_token,
((FONS,u'letter-spacing'), None): cnv_string,
((FONS,u'line-height'), None): cnv_string,
((FONS,u'margin-bottom'), None): cnv_string,
((FONS,u'margin'), None): cnv_string,
((FONS,u'margin-left'), None): cnv_string,
((FONS,u'margin-right'), None): cnv_string,
((FONS,u'margin-top'), None): cnv_string,
((FONS,u'max-height'), None): cnv_string,
((FONS,u'max-width'), None): cnv_string,
((FONS,u'min-height'), None): cnv_lengthorpercent,
((FONS,u'min-width'), None): cnv_string,
((FONS,u'orphans'), None): cnv_string,
((FONS,u'padding-bottom'), None): cnv_string,
((FONS,u'padding'), None): cnv_string,
((FONS,u'padding-left'), None): cnv_string,
((FONS,u'padding-right'), None): cnv_string,
((FONS,u'padding-top'), None): cnv_string,
((FONS,u'page-height'), None): cnv_length,
((FONS,u'page-width'), None): cnv_length,
((FONS,u'space-after'), None): cnv_length,
((FONS,u'space-before'), None): cnv_length,
((FONS,u'start-indent'), None): cnv_length,
((FONS,u'text-align'), None): cnv_string,
((FONS,u'text-align-last'), None): cnv_string,
((FONS,u'text-indent'), None): cnv_string,
((FONS,u'text-shadow'), None): cnv_string,
((FONS,u'text-transform'), None): cnv_string,
((FONS,u'widows'), None): cnv_string,
((FONS,u'width'), None): cnv_string,
((FONS,u'wrap-option'), None): cnv_string,
((FORMNS,u'allow-deletes'), None): cnv_boolean,
((FORMNS,u'allow-inserts'), None): cnv_boolean,
((FORMNS,u'allow-updates'), None): cnv_boolean,
((FORMNS,u'apply-design-mode'), None): cnv_boolean,
((FORMNS,u'apply-filter'), None): cnv_boolean,
((FORMNS,u'auto-complete'), None): cnv_boolean,
((FORMNS,u'automatic-focus'), None): cnv_boolean,
((FORMNS,u'bound-column'), None): cnv_string,
((FORMNS,u'button-type'), None): cnv_string,
((FORMNS,u'command'), None): cnv_string,
((FORMNS,u'command-type'), None): cnv_string,
((FORMNS,u'control-implementation'), None): cnv_namespacedToken,
((FORMNS,u'convert-empty-to-null'), None): cnv_boolean,
((FORMNS,u'current-selected'), None): cnv_boolean,
((FORMNS,u'current-state'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_date,
# ((FORMNS,u'current-value'), None): cnv_double,
((FORMNS,u'current-value'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_time,
((FORMNS,u'data-field'), None): cnv_string,
((FORMNS,u'datasource'), None): cnv_string,
((FORMNS,u'default-button'), None): cnv_boolean,
((FORMNS,u'delay-for-repeat'), None): cnv_duration,
((FORMNS,u'detail-fields'), None): cnv_string,
((FORMNS,u'disabled'), None): cnv_boolean,
((FORMNS,u'dropdown'), None): cnv_boolean,
((FORMNS,u'echo-char'), None): cnv_string,
((FORMNS,u'enctype'), None): cnv_string,
((FORMNS,u'escape-processing'), None): cnv_boolean,
((FORMNS,u'filter'), None): cnv_string,
((FORMNS,u'focus-on-click'), None): cnv_boolean,
((FORMNS,u'for'), None): cnv_string,
((FORMNS,u'id'), None): cnv_ID,
((FORMNS,u'ignore-result'), None): cnv_boolean,
((FORMNS,u'image-align'), None): cnv_string,
((FORMNS,u'image-data'), None): cnv_anyURI,
((FORMNS,u'image-position'), None): cnv_string,
((FORMNS,u'is-tristate'), None): cnv_boolean,
((FORMNS,u'label'), None): cnv_string,
((FORMNS,u'list-source'), None): cnv_string,
((FORMNS,u'list-source-type'), None): cnv_string,
((FORMNS,u'master-fields'), None): cnv_string,
((FORMNS,u'max-length'), None): cnv_nonNegativeInteger,
# ((FORMNS,u'max-value'), None): cnv_date,
# ((FORMNS,u'max-value'), None): cnv_double,
((FORMNS,u'max-value'), None): cnv_string,
# ((FORMNS,u'max-value'), None): cnv_time,
((FORMNS,u'method'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_date,
# ((FORMNS,u'min-value'), None): cnv_double,
((FORMNS,u'min-value'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_time,
((FORMNS,u'multi-line'), None): cnv_boolean,
((FORMNS,u'multiple'), None): cnv_boolean,
((FORMNS,u'name'), None): cnv_string,
((FORMNS,u'navigation-mode'), None): cnv_string,
((FORMNS,u'order'), None): cnv_string,
((FORMNS,u'orientation'), None): cnv_string,
((FORMNS,u'page-step-size'), None): cnv_positiveInteger,
((FORMNS,u'printable'), None): cnv_boolean,
((FORMNS,u'property-name'), None): cnv_string,
((FORMNS,u'readonly'), None): cnv_boolean,
((FORMNS,u'selected'), None): cnv_boolean,
((FORMNS,u'size'), None): cnv_nonNegativeInteger,
((FORMNS,u'state'), None): cnv_string,
((FORMNS,u'step-size'), None): cnv_positiveInteger,
((FORMNS,u'tab-cycle'), None): cnv_string,
((FORMNS,u'tab-index'), None): cnv_nonNegativeInteger,
((FORMNS,u'tab-stop'), None): cnv_boolean,
((FORMNS,u'text-style-name'), None): cnv_StyleNameRef,
((FORMNS,u'title'), None): cnv_string,
((FORMNS,u'toggle'), None): cnv_boolean,
((FORMNS,u'validation'), None): cnv_boolean,
# ((FORMNS,u'value'), None): cnv_date,
# ((FORMNS,u'value'), None): cnv_double,
((FORMNS,u'value'), None): cnv_string,
# ((FORMNS,u'value'), None): cnv_time,
((FORMNS,u'visual-effect'), None): cnv_string,
((FORMNS,u'xforms-list-source'), None): cnv_string,
((FORMNS,u'xforms-submission'), None): cnv_string,
((MANIFESTNS,'algorithm-name'), None): cnv_string,
((MANIFESTNS,'checksum'), None): cnv_string,
((MANIFESTNS,'checksum-type'), None): cnv_string,
((MANIFESTNS,'full-path'), None): cnv_string,
((MANIFESTNS,'initialisation-vector'), None): cnv_string,
((MANIFESTNS,'iteration-count'), None): cnv_nonNegativeInteger,
((MANIFESTNS,'key-derivation-name'), None): cnv_string,
((MANIFESTNS,'media-type'), None): cnv_string,
((MANIFESTNS,'salt'), None): cnv_string,
((MANIFESTNS,'size'), None): cnv_nonNegativeInteger,
((METANS,u'cell-count'), None): cnv_nonNegativeInteger,
((METANS,u'character-count'), None): cnv_nonNegativeInteger,
((METANS,u'date'), None): cnv_dateTime,
((METANS,u'delay'), None): cnv_duration,
((METANS,u'draw-count'), None): cnv_nonNegativeInteger,
((METANS,u'frame-count'), None): cnv_nonNegativeInteger,
((METANS,u'image-count'), None): cnv_nonNegativeInteger,
((METANS,u'name'), None): cnv_string,
((METANS,u'non-whitespace-character-count'), None): cnv_nonNegativeInteger,
((METANS,u'object-count'), None): cnv_nonNegativeInteger,
((METANS,u'ole-object-count'), None): cnv_nonNegativeInteger,
((METANS,u'page-count'), None): cnv_nonNegativeInteger,
((METANS,u'paragraph-count'), None): cnv_nonNegativeInteger,
((METANS,u'row-count'), None): cnv_nonNegativeInteger,
((METANS,u'sentence-count'), None): cnv_nonNegativeInteger,
((METANS,u'syllable-count'), None): cnv_nonNegativeInteger,
((METANS,u'table-count'), None): cnv_nonNegativeInteger,
((METANS,u'value-type'), None): cnv_metavaluetype,
((METANS,u'word-count'), None): cnv_nonNegativeInteger,
((NUMBERNS,u'automatic-order'), None): cnv_boolean,
((NUMBERNS,u'calendar'), None): cnv_string,
((NUMBERNS,u'country'), None): cnv_token,
((NUMBERNS,u'decimal-places'), None): cnv_integer,
((NUMBERNS,u'decimal-replacement'), None): cnv_string,
((NUMBERNS,u'denominator-value'), None): cnv_integer,
((NUMBERNS,u'display-factor'), None): cnv_double,
((NUMBERNS,u'format-source'), None): cnv_string,
((NUMBERNS,u'grouping'), None): cnv_boolean,
((NUMBERNS,u'language'), None): cnv_token,
((NUMBERNS,u'min-denominator-digits'), None): cnv_integer,
((NUMBERNS,u'min-exponent-digits'), None): cnv_integer,
((NUMBERNS,u'min-integer-digits'), None): cnv_integer,
((NUMBERNS,u'min-numerator-digits'), None): cnv_integer,
((NUMBERNS,u'position'), None): cnv_integer,
((NUMBERNS,u'possessive-form'), None): cnv_boolean,
((NUMBERNS,u'style'), None): cnv_string,
((NUMBERNS,u'textual'), None): cnv_boolean,
((NUMBERNS,u'title'), None): cnv_string,
((NUMBERNS,u'transliteration-country'), None): cnv_token,
((NUMBERNS,u'transliteration-format'), None): cnv_string,
((NUMBERNS,u'transliteration-language'), None): cnv_token,
((NUMBERNS,u'transliteration-style'), None): cnv_string,
((NUMBERNS,u'truncate-on-overflow'), None): cnv_boolean,
((OFFICENS,u'automatic-update'), None): cnv_boolean,
((OFFICENS,u'boolean-value'), None): cnv_boolean,
((OFFICENS,u'conversion-mode'), None): cnv_string,
((OFFICENS,u'currency'), None): cnv_string,
((OFFICENS,u'date-value'), None): cnv_dateTime,
((OFFICENS,u'dde-application'), None): cnv_string,
((OFFICENS,u'dde-item'), None): cnv_string,
((OFFICENS,u'dde-topic'), None): cnv_string,
((OFFICENS,u'display'), None): cnv_boolean,
((OFFICENS,u'mimetype'), None): cnv_string,
((OFFICENS,u'name'), None): cnv_string,
((OFFICENS,u'process-content'), None): cnv_boolean,
((OFFICENS,u'server-map'), None): cnv_boolean,
((OFFICENS,u'string-value'), None): cnv_string,
((OFFICENS,u'target-frame'), None): cnv_string,
((OFFICENS,u'target-frame-name'), None): cnv_string,
((OFFICENS,u'time-value'), None): cnv_duration,
((OFFICENS,u'title'), None): cnv_string,
((OFFICENS,u'value'), None): cnv_double,
((OFFICENS,u'value-type'), None): cnv_string,
((OFFICENS,u'version'), None): cnv_string,
((PRESENTATIONNS,u'action'), None): cnv_string,
((PRESENTATIONNS,u'animations'), None): cnv_string,
((PRESENTATIONNS,u'background-objects-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'background-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'class'), None): cnv_string,
((PRESENTATIONNS,u'class-names'), None): cnv_NCNames,
((PRESENTATIONNS,u'delay'), None): cnv_duration,
((PRESENTATIONNS,u'direction'), None): cnv_string,
((PRESENTATIONNS,u'display-date-time'), None): cnv_boolean,
((PRESENTATIONNS,u'display-footer'), None): cnv_boolean,
((PRESENTATIONNS,u'display-header'), None): cnv_boolean,
((PRESENTATIONNS,u'display-page-number'), None): cnv_boolean,
((PRESENTATIONNS,u'duration'), None): cnv_string,
((PRESENTATIONNS,u'effect'), None): cnv_string,
((PRESENTATIONNS,u'endless'), None): cnv_boolean,
((PRESENTATIONNS,u'force-manual'), None): cnv_boolean,
((PRESENTATIONNS,u'full-screen'), None): cnv_boolean,
((PRESENTATIONNS,u'group-id'), None): cnv_string,
((PRESENTATIONNS,u'master-element'), None): cnv_IDREF,
((PRESENTATIONNS,u'mouse-as-pen'), None): cnv_boolean,
((PRESENTATIONNS,u'mouse-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'name'), None): cnv_string,
((PRESENTATIONNS,u'node-type'), None): cnv_string,
((PRESENTATIONNS,u'object'), None): cnv_string,
((PRESENTATIONNS,u'pages'), None): cnv_string,
((PRESENTATIONNS,u'path-id'), None): cnv_string,
((PRESENTATIONNS,u'pause'), None): cnv_duration,
((PRESENTATIONNS,u'placeholder'), None): cnv_boolean,
((PRESENTATIONNS,u'play-full'), None): cnv_boolean,
((PRESENTATIONNS,u'presentation-page-layout-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS,u'preset-class'), None): cnv_string,
((PRESENTATIONNS,u'preset-id'), None): cnv_string,
((PRESENTATIONNS,u'preset-sub-type'), None): cnv_string,
((PRESENTATIONNS,u'show'), None): cnv_string,
((PRESENTATIONNS,u'show-end-of-presentation-slide'), None): cnv_boolean,
((PRESENTATIONNS,u'show-logo'), None): cnv_boolean,
((PRESENTATIONNS,u'source'), None): cnv_string,
((PRESENTATIONNS,u'speed'), None): cnv_string,
((PRESENTATIONNS,u'start-page'), None): cnv_string,
((PRESENTATIONNS,u'start-scale'), None): cnv_string,
((PRESENTATIONNS,u'start-with-navigator'), None): cnv_boolean,
((PRESENTATIONNS,u'stay-on-top'), None): cnv_boolean,
((PRESENTATIONNS,u'style-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS,u'transition-on-click'), None): cnv_string,
((PRESENTATIONNS,u'transition-speed'), None): cnv_string,
((PRESENTATIONNS,u'transition-style'), None): cnv_string,
((PRESENTATIONNS,u'transition-type'), None): cnv_string,
((PRESENTATIONNS,u'use-date-time-name'), None): cnv_string,
((PRESENTATIONNS,u'use-footer-name'), None): cnv_string,
((PRESENTATIONNS,u'use-header-name'), None): cnv_string,
((PRESENTATIONNS,u'user-transformed'), None): cnv_boolean,
((PRESENTATIONNS,u'verb'), None): cnv_nonNegativeInteger,
((PRESENTATIONNS,u'visibility'), None): cnv_string,
((SCRIPTNS,u'event-name'), None): cnv_formula,
((SCRIPTNS,u'language'), None): cnv_formula,
((SCRIPTNS,u'macro-name'), None): cnv_string,
((SMILNS,u'accelerate'), None): cnv_double,
((SMILNS,u'accumulate'), None): cnv_string,
((SMILNS,u'additive'), None): cnv_string,
((SMILNS,u'attributeName'), None): cnv_string,
((SMILNS,u'autoReverse'), None): cnv_boolean,
((SMILNS,u'begin'), None): cnv_string,
((SMILNS,u'by'), None): cnv_string,
((SMILNS,u'calcMode'), None): cnv_string,
((SMILNS,u'decelerate'), None): cnv_double,
((SMILNS,u'direction'), None): cnv_string,
((SMILNS,u'dur'), None): cnv_string,
((SMILNS,u'end'), None): cnv_string,
((SMILNS,u'endsync'), None): cnv_string,
((SMILNS,u'fadeColor'), None): cnv_string,
((SMILNS,u'fill'), None): cnv_string,
((SMILNS,u'fillDefault'), None): cnv_string,
((SMILNS,u'from'), None): cnv_string,
((SMILNS,u'keySplines'), None): cnv_string,
((SMILNS,u'keyTimes'), None): cnv_string,
((SMILNS,u'mode'), None): cnv_string,
((SMILNS,u'repeatCount'), None): cnv_nonNegativeInteger,
((SMILNS,u'repeatDur'), None): cnv_string,
((SMILNS,u'restart'), None): cnv_string,
((SMILNS,u'restartDefault'), None): cnv_string,
((SMILNS,u'subtype'), None): cnv_string,
((SMILNS,u'targetElement'), None): cnv_IDREF,
((SMILNS,u'to'), None): cnv_string,
((SMILNS,u'type'), None): cnv_string,
((SMILNS,u'values'), None): cnv_string,
((STYLENS,u'adjustment'), None): cnv_string,
((STYLENS,u'apply-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'auto-text-indent'), None): cnv_boolean,
((STYLENS,u'auto-update'), None): cnv_boolean,
((STYLENS,u'background-transparency'), None): cnv_string,
((STYLENS,u'base-cell-address'), None): cnv_string,
((STYLENS,u'border-line-width-bottom'), None): cnv_string,
((STYLENS,u'border-line-width'), None): cnv_string,
((STYLENS,u'border-line-width-left'), None): cnv_string,
((STYLENS,u'border-line-width-right'), None): cnv_string,
((STYLENS,u'border-line-width-top'), None): cnv_string,
((STYLENS,u'cell-protect'), None): cnv_string,
((STYLENS,u'char'), None): cnv_string,
((STYLENS,u'class'), None): cnv_string,
((STYLENS,u'color'), None): cnv_string,
((STYLENS,u'column-width'), None): cnv_string,
((STYLENS,u'condition'), None): cnv_string,
((STYLENS,u'country-asian'), None): cnv_string,
((STYLENS,u'country-complex'), None): cnv_string,
((STYLENS,u'data-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'decimal-places'), None): cnv_string,
((STYLENS,u'default-outline-level'), None): cnv_positiveInteger,
((STYLENS,u'diagonal-bl-tr'), None): cnv_string,
((STYLENS,u'diagonal-bl-tr-widths'), None): cnv_string,
((STYLENS,u'diagonal-tl-br'), None): cnv_string,
((STYLENS,u'diagonal-tl-br-widths'), None): cnv_string,
((STYLENS,u'direction'), None): cnv_string,
((STYLENS,u'display'), None): cnv_boolean,
((STYLENS,u'display-name'), None): cnv_string,
((STYLENS,u'distance-after-sep'), None): cnv_length,
((STYLENS,u'distance-before-sep'), None): cnv_length,
((STYLENS,u'distance'), None): cnv_length,
((STYLENS,u'dynamic-spacing'), None): cnv_boolean,
((STYLENS,u'editable'), None): cnv_boolean,
((STYLENS,u'family'), None): cnv_family,
((STYLENS,u'filter-name'), None): cnv_string,
((STYLENS,u'first-page-number'), None): cnv_string,
((STYLENS,u'flow-with-text'), None): cnv_boolean,
((STYLENS,u'font-adornments'), None): cnv_string,
((STYLENS,u'font-charset'), None): cnv_string,
((STYLENS,u'font-charset-asian'), None): cnv_string,
((STYLENS,u'font-charset-complex'), None): cnv_string,
((STYLENS,u'font-family-asian'), None): cnv_string,
((STYLENS,u'font-family-complex'), None): cnv_string,
((STYLENS,u'font-family-generic-asian'), None): cnv_string,
((STYLENS,u'font-family-generic'), None): cnv_string,
((STYLENS,u'font-family-generic-complex'), None): cnv_string,
((STYLENS,u'font-independent-line-spacing'), None): cnv_boolean,
((STYLENS,u'font-name-asian'), None): cnv_string,
((STYLENS,u'font-name'), None): cnv_string,
((STYLENS,u'font-name-complex'), None): cnv_string,
((STYLENS,u'font-pitch-asian'), None): cnv_string,
((STYLENS,u'font-pitch'), None): cnv_string,
((STYLENS,u'font-pitch-complex'), None): cnv_string,
((STYLENS,u'font-relief'), None): cnv_string,
((STYLENS,u'font-size-asian'), None): cnv_string,
((STYLENS,u'font-size-complex'), None): cnv_string,
((STYLENS,u'font-size-rel-asian'), None): cnv_length,
((STYLENS,u'font-size-rel'), None): cnv_length,
((STYLENS,u'font-size-rel-complex'), None): cnv_length,
((STYLENS,u'font-style-asian'), None): cnv_string,
((STYLENS,u'font-style-complex'), None): cnv_string,
((STYLENS,u'font-style-name-asian'), None): cnv_string,
((STYLENS,u'font-style-name'), None): cnv_string,
((STYLENS,u'font-style-name-complex'), None): cnv_string,
((STYLENS,u'font-weight-asian'), None): cnv_string,
((STYLENS,u'font-weight-complex'), None): cnv_string,
((STYLENS,u'footnote-max-height'), None): cnv_length,
((STYLENS,u'glyph-orientation-vertical'), None): cnv_string,
((STYLENS,u'height'), None): cnv_string,
((STYLENS,u'horizontal-pos'), None): cnv_string,
((STYLENS,u'horizontal-rel'), None): cnv_string,
((STYLENS,u'justify-single-word'), None): cnv_boolean,
((STYLENS,u'language-asian'), None): cnv_string,
((STYLENS,u'language-complex'), None): cnv_string,
((STYLENS,u'layout-grid-base-height'), None): cnv_length,
((STYLENS,u'layout-grid-color'), None): cnv_string,
((STYLENS,u'layout-grid-display'), None): cnv_boolean,
((STYLENS,u'layout-grid-lines'), None): cnv_string,
((STYLENS,u'layout-grid-mode'), None): cnv_string,
((STYLENS,u'layout-grid-print'), None): cnv_boolean,
((STYLENS,u'layout-grid-ruby-below'), None): cnv_boolean,
((STYLENS,u'layout-grid-ruby-height'), None): cnv_length,
((STYLENS,u'leader-char'), None): cnv_string,
((STYLENS,u'leader-color'), None): cnv_string,
((STYLENS,u'leader-style'), None): cnv_string,
((STYLENS,u'leader-text'), None): cnv_string,
((STYLENS,u'leader-text-style'), None): cnv_StyleNameRef,
((STYLENS,u'leader-type'), None): cnv_string,
((STYLENS,u'leader-width'), None): cnv_string,
((STYLENS,u'legend-expansion-aspect-ratio'), None): cnv_double,
((STYLENS,u'legend-expansion'), None): cnv_string,
((STYLENS,u'length'), None): cnv_positiveInteger,
((STYLENS,u'letter-kerning'), None): cnv_boolean,
((STYLENS,u'line-break'), None): cnv_string,
((STYLENS,u'line-height-at-least'), None): cnv_string,
((STYLENS,u'line-spacing'), None): cnv_length,
((STYLENS,u'line-style'), None): cnv_string,
((STYLENS,u'lines'), None): cnv_positiveInteger,
((STYLENS,u'list-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'master-page-name'), None): cnv_StyleNameRef,
((STYLENS,u'may-break-between-rows'), None): cnv_boolean,
((STYLENS,u'min-row-height'), None): cnv_string,
((STYLENS,u'mirror'), None): cnv_string,
((STYLENS,u'name'), None): cnv_NCName,
((STYLENS,u'name'), (STYLENS,u'font-face')): cnv_string,
((STYLENS,u'next-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'num-format'), None): cnv_string,
((STYLENS,u'num-letter-sync'), None): cnv_boolean,
((STYLENS,u'num-prefix'), None): cnv_string,
((STYLENS,u'num-suffix'), None): cnv_string,
((STYLENS,u'number-wrapped-paragraphs'), None): cnv_string,
((STYLENS,u'overflow-behavior'), None): cnv_string,
((STYLENS,u'page-layout-name'), None): cnv_StyleNameRef,
((STYLENS,u'page-number'), None): cnv_string,
((STYLENS,u'page-usage'), None): cnv_string,
((STYLENS,u'paper-tray-name'), None): cnv_string,
((STYLENS,u'parent-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'position'), (STYLENS,u'tab-stop')): cnv_length,
((STYLENS,u'position'), None): cnv_string,
((STYLENS,u'print'), None): cnv_string,
((STYLENS,u'print-content'), None): cnv_boolean,
((STYLENS,u'print-orientation'), None): cnv_string,
((STYLENS,u'print-page-order'), None): cnv_string,
((STYLENS,u'protect'), None): cnv_boolean,
((STYLENS,u'punctuation-wrap'), None): cnv_string,
((STYLENS,u'register-true'), None): cnv_boolean,
((STYLENS,u'register-truth-ref-style-name'), None): cnv_string,
((STYLENS,u'rel-column-width'), None): cnv_string,
((STYLENS,u'rel-height'), None): cnv_string,
((STYLENS,u'rel-width'), None): cnv_string,
((STYLENS,u'repeat'), None): cnv_string,
((STYLENS,u'repeat-content'), None): cnv_boolean,
((STYLENS,u'rotation-align'), None): cnv_string,
((STYLENS,u'rotation-angle'), None): cnv_string,
((STYLENS,u'row-height'), None): cnv_string,
((STYLENS,u'ruby-align'), None): cnv_string,
((STYLENS,u'ruby-position'), None): cnv_string,
((STYLENS,u'run-through'), None): cnv_string,
((STYLENS,u'scale-to'), None): cnv_string,
((STYLENS,u'scale-to-pages'), None): cnv_string,
((STYLENS,u'script-type'), None): cnv_string,
((STYLENS,u'shadow'), None): cnv_string,
((STYLENS,u'shrink-to-fit'), None): cnv_boolean,
((STYLENS,u'snap-to-layout-grid'), None): cnv_boolean,
((STYLENS,u'style'), None): cnv_string,
((STYLENS,u'style-name'), None): cnv_StyleNameRef,
((STYLENS,u'tab-stop-distance'), None): cnv_string,
((STYLENS,u'table-centering'), None): cnv_string,
((STYLENS,u'text-align-source'), None): cnv_string,
((STYLENS,u'text-autospace'), None): cnv_string,
((STYLENS,u'text-blinking'), None): cnv_boolean,
((STYLENS,u'text-combine'), None): cnv_string,
((STYLENS,u'text-combine-end-char'), None): cnv_string,
((STYLENS,u'text-combine-start-char'), None): cnv_string,
((STYLENS,u'text-emphasize'), None): cnv_string,
((STYLENS,u'text-line-through-color'), None): cnv_string,
((STYLENS,u'text-line-through-mode'), None): cnv_string,
((STYLENS,u'text-line-through-style'), None): cnv_string,
((STYLENS,u'text-line-through-text'), None): cnv_string,
((STYLENS,u'text-line-through-text-style'), None): cnv_string,
((STYLENS,u'text-line-through-type'), None): cnv_string,
((STYLENS,u'text-line-through-width'), None): cnv_string,
((STYLENS,u'text-outline'), None): cnv_boolean,
((STYLENS,u'text-position'), None): cnv_string,
((STYLENS,u'text-rotation-angle'), None): cnv_string,
((STYLENS,u'text-rotation-scale'), None): cnv_string,
((STYLENS,u'text-scale'), None): cnv_string,
((STYLENS,u'text-underline-color'), None): cnv_string,
((STYLENS,u'text-underline-mode'), None): cnv_string,
((STYLENS,u'text-underline-style'), None): cnv_string,
((STYLENS,u'text-underline-type'), None): cnv_string,
((STYLENS,u'text-underline-width'), None): cnv_string,
((STYLENS,u'type'), None): cnv_string,
((STYLENS,u'use-optimal-column-width'), None): cnv_boolean,
((STYLENS,u'use-optimal-row-height'), None): cnv_boolean,
((STYLENS,u'use-window-font-color'), None): cnv_boolean,
((STYLENS,u'vertical-align'), None): cnv_string,
((STYLENS,u'vertical-pos'), None): cnv_string,
((STYLENS,u'vertical-rel'), None): cnv_string,
((STYLENS,u'volatile'), None): cnv_boolean,
((STYLENS,u'width'), None): cnv_string,
((STYLENS,u'wrap'), None): cnv_string,
((STYLENS,u'wrap-contour'), None): cnv_boolean,
((STYLENS,u'wrap-contour-mode'), None): cnv_string,
((STYLENS,u'wrap-dynamic-threshold'), None): cnv_length,
((STYLENS,u'writing-mode-automatic'), None): cnv_boolean,
((STYLENS,u'writing-mode'), None): cnv_string,
((SVGNS,u'accent-height'), None): cnv_integer,
((SVGNS,u'alphabetic'), None): cnv_integer,
((SVGNS,u'ascent'), None): cnv_integer,
((SVGNS,u'bbox'), None): cnv_string,
((SVGNS,u'cap-height'), None): cnv_integer,
((SVGNS,u'cx'), None): cnv_string,
((SVGNS,u'cy'), None): cnv_string,
((SVGNS,u'd'), None): cnv_string,
((SVGNS,u'descent'), None): cnv_integer,
((SVGNS,u'fill-rule'), None): cnv_string,
((SVGNS,u'font-family'), None): cnv_string,
((SVGNS,u'font-size'), None): cnv_string,
((SVGNS,u'font-stretch'), None): cnv_string,
((SVGNS,u'font-style'), None): cnv_string,
((SVGNS,u'font-variant'), None): cnv_string,
((SVGNS,u'font-weight'), None): cnv_string,
((SVGNS,u'fx'), None): cnv_string,
((SVGNS,u'fy'), None): cnv_string,
((SVGNS,u'gradientTransform'), None): cnv_string,
((SVGNS,u'gradientUnits'), None): cnv_string,
((SVGNS,u'hanging'), None): cnv_integer,
((SVGNS,u'height'), None): cnv_length,
((SVGNS,u'ideographic'), None): cnv_integer,
((SVGNS,u'mathematical'), None): cnv_integer,
((SVGNS,u'name'), None): cnv_string,
((SVGNS,u'offset'), None): cnv_string,
((SVGNS,u'origin'), None): cnv_string,
((SVGNS,u'overline-position'), None): cnv_integer,
((SVGNS,u'overline-thickness'), None): cnv_integer,
((SVGNS,u'panose-1'), None): cnv_string,
((SVGNS,u'path'), None): cnv_string,
((SVGNS,u'r'), None): cnv_length,
((SVGNS,u'rx'), None): cnv_length,
((SVGNS,u'ry'), None): cnv_length,
((SVGNS,u'slope'), None): cnv_integer,
((SVGNS,u'spreadMethod'), None): cnv_string,
((SVGNS,u'stemh'), None): cnv_integer,
((SVGNS,u'stemv'), None): cnv_integer,
((SVGNS,u'stop-color'), None): cnv_string,
((SVGNS,u'stop-opacity'), None): cnv_double,
((SVGNS,u'strikethrough-position'), None): cnv_integer,
((SVGNS,u'strikethrough-thickness'), None): cnv_integer,
((SVGNS,u'string'), None): cnv_string,
((SVGNS,u'stroke-color'), None): cnv_string,
((SVGNS,u'stroke-opacity'), None): cnv_string,
((SVGNS,u'stroke-width'), None): cnv_length,
((SVGNS,u'type'), None): cnv_string,
((SVGNS,u'underline-position'), None): cnv_integer,
((SVGNS,u'underline-thickness'), None): cnv_integer,
((SVGNS,u'unicode-range'), None): cnv_string,
((SVGNS,u'units-per-em'), None): cnv_integer,
((SVGNS,u'v-alphabetic'), None): cnv_integer,
((SVGNS,u'v-hanging'), None): cnv_integer,
((SVGNS,u'v-ideographic'), None): cnv_integer,
((SVGNS,u'v-mathematical'), None): cnv_integer,
((SVGNS,u'viewBox'), None): cnv_viewbox,
((SVGNS,u'width'), None): cnv_length,
((SVGNS,u'widths'), None): cnv_string,
((SVGNS,u'x'), None): cnv_length,
((SVGNS,u'x-height'), None): cnv_integer,
((SVGNS,u'x1'), None): cnv_lengthorpercent,
((SVGNS,u'x2'), None): cnv_lengthorpercent,
((SVGNS,u'y'), None): cnv_length,
((SVGNS,u'y1'), None): cnv_lengthorpercent,
((SVGNS,u'y2'), None): cnv_lengthorpercent,
((TABLENS,u'acceptance-state'), None): cnv_string,
((TABLENS,u'add-empty-lines'), None): cnv_boolean,
((TABLENS,u'algorithm'), None): cnv_formula,
((TABLENS,u'align'), None): cnv_string,
((TABLENS,u'allow-empty-cell'), None): cnv_boolean,
((TABLENS,u'application-data'), None): cnv_string,
((TABLENS,u'automatic-find-labels'), None): cnv_boolean,
((TABLENS,u'base-cell-address'), None): cnv_string,
((TABLENS,u'bind-styles-to-content'), None): cnv_boolean,
((TABLENS,u'border-color'), None): cnv_string,
((TABLENS,u'border-model'), None): cnv_string,
((TABLENS,u'buttons'), None): cnv_string,
((TABLENS,u'buttons'), None): cnv_string,
((TABLENS,u'case-sensitive'), None): cnv_boolean,
((TABLENS,u'case-sensitive'), None): cnv_string,
((TABLENS,u'cell-address'), None): cnv_string,
((TABLENS,u'cell-range-address'), None): cnv_string,
((TABLENS,u'cell-range-address'), None): cnv_string,
((TABLENS,u'cell-range'), None): cnv_string,
((TABLENS,u'column'), None): cnv_integer,
((TABLENS,u'comment'), None): cnv_string,
((TABLENS,u'condition'), None): cnv_formula,
((TABLENS,u'condition-source'), None): cnv_string,
((TABLENS,u'condition-source-range-address'), None): cnv_string,
((TABLENS,u'contains-error'), None): cnv_boolean,
((TABLENS,u'contains-header'), None): cnv_boolean,
((TABLENS,u'content-validation-name'), None): cnv_string,
((TABLENS,u'copy-back'), None): cnv_boolean,
((TABLENS,u'copy-formulas'), None): cnv_boolean,
((TABLENS,u'copy-styles'), None): cnv_boolean,
((TABLENS,u'count'), None): cnv_positiveInteger,
((TABLENS,u'country'), None): cnv_token,
((TABLENS,u'data-cell-range-address'), None): cnv_string,
((TABLENS,u'data-field'), None): cnv_string,
((TABLENS,u'data-type'), None): cnv_string,
((TABLENS,u'database-name'), None): cnv_string,
((TABLENS,u'database-table-name'), None): cnv_string,
((TABLENS,u'date-end'), None): cnv_string,
((TABLENS,u'date-start'), None): cnv_string,
((TABLENS,u'date-value'), None): cnv_date,
((TABLENS,u'default-cell-style-name'), None): cnv_StyleNameRef,
((TABLENS,u'direction'), None): cnv_string,
((TABLENS,u'display-border'), None): cnv_boolean,
((TABLENS,u'display'), None): cnv_boolean,
((TABLENS,u'display-duplicates'), None): cnv_boolean,
((TABLENS,u'display-filter-buttons'), None): cnv_boolean,
((TABLENS,u'display-list'), None): cnv_string,
((TABLENS,u'display-member-mode'), None): cnv_string,
((TABLENS,u'drill-down-on-double-click'), None): cnv_boolean,
((TABLENS,u'enabled'), None): cnv_boolean,
((TABLENS,u'end-cell-address'), None): cnv_string,
((TABLENS,u'end'), None): cnv_string,
((TABLENS,u'end-column'), None): cnv_integer,
((TABLENS,u'end-position'), None): cnv_integer,
((TABLENS,u'end-row'), None): cnv_integer,
((TABLENS,u'end-table'), None): cnv_integer,
((TABLENS,u'end-x'), None): cnv_length,
((TABLENS,u'end-y'), None): cnv_length,
((TABLENS,u'execute'), None): cnv_boolean,
((TABLENS,u'expression'), None): cnv_formula,
((TABLENS,u'field-name'), None): cnv_string,
((TABLENS,u'field-number'), None): cnv_nonNegativeInteger,
((TABLENS,u'field-number'), None): cnv_string,
((TABLENS,u'filter-name'), None): cnv_string,
((TABLENS,u'filter-options'), None): cnv_string,
((TABLENS,u'formula'), None): cnv_formula,
((TABLENS,u'function'), None): cnv_string,
((TABLENS,u'function'), None): cnv_string,
((TABLENS,u'grand-total'), None): cnv_string,
((TABLENS,u'group-by-field-number'), None): cnv_nonNegativeInteger,
((TABLENS,u'grouped-by'), None): cnv_string,
((TABLENS,u'has-persistent-data'), None): cnv_boolean,
((TABLENS,u'id'), None): cnv_string,
((TABLENS,u'identify-categories'), None): cnv_boolean,
((TABLENS,u'ignore-empty-rows'), None): cnv_boolean,
((TABLENS,u'index'), None): cnv_nonNegativeInteger,
((TABLENS,u'is-active'), None): cnv_boolean,
((TABLENS,u'is-data-layout-field'), None): cnv_string,
((TABLENS,u'is-selection'), None): cnv_boolean,
((TABLENS,u'is-sub-table'), None): cnv_boolean,
((TABLENS,u'label-cell-range-address'), None): cnv_string,
((TABLENS,u'language'), None): cnv_token,
((TABLENS,u'language'), None): cnv_token,
((TABLENS,u'last-column-spanned'), None): cnv_positiveInteger,
((TABLENS,u'last-row-spanned'), None): cnv_positiveInteger,
((TABLENS,u'layout-mode'), None): cnv_string,
((TABLENS,u'link-to-source-data'), None): cnv_boolean,
((TABLENS,u'marked-invalid'), None): cnv_boolean,
((TABLENS,u'matrix-covered'), None): cnv_boolean,
((TABLENS,u'maximum-difference'), None): cnv_double,
((TABLENS,u'member-count'), None): cnv_nonNegativeInteger,
((TABLENS,u'member-name'), None): cnv_string,
((TABLENS,u'member-type'), None): cnv_string,
((TABLENS,u'message-type'), None): cnv_string,
((TABLENS,u'mode'), None): cnv_string,
((TABLENS,u'multi-deletion-spanned'), None): cnv_integer,
((TABLENS,u'name'), None): cnv_string,
((TABLENS,u'name'), None): cnv_string,
((TABLENS,u'null-year'), None): cnv_positiveInteger,
((TABLENS,u'number-columns-repeated'), None): cnv_positiveInteger,
((TABLENS,u'number-columns-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-matrix-columns-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-matrix-rows-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-rows-repeated'), None): cnv_positiveInteger,
((TABLENS,u'number-rows-spanned'), None): cnv_positiveInteger,
((TABLENS,u'object-name'), None): cnv_string,
((TABLENS,u'on-update-keep-size'), None): cnv_boolean,
((TABLENS,u'on-update-keep-styles'), None): cnv_boolean,
((TABLENS,u'operator'), None): cnv_string,
((TABLENS,u'operator'), None): cnv_string,
((TABLENS,u'order'), None): cnv_string,
((TABLENS,u'orientation'), None): cnv_string,
((TABLENS,u'orientation'), None): cnv_string,
((TABLENS,u'page-breaks-on-group-change'), None): cnv_boolean,
((TABLENS,u'parse-sql-statement'), None): cnv_boolean,
((TABLENS,u'password'), None): cnv_string,
((TABLENS,u'position'), None): cnv_integer,
((TABLENS,u'precision-as-shown'), None): cnv_boolean,
((TABLENS,u'print'), None): cnv_boolean,
((TABLENS,u'print-ranges'), None): cnv_string,
((TABLENS,u'protect'), None): cnv_boolean,
((TABLENS,u'protected'), None): cnv_boolean,
((TABLENS,u'protection-key'), None): cnv_string,
((TABLENS,u'query-name'), None): cnv_string,
((TABLENS,u'range-usable-as'), None): cnv_string,
((TABLENS,u'refresh-delay'), None): cnv_boolean,
((TABLENS,u'refresh-delay'), None): cnv_duration,
((TABLENS,u'rejecting-change-id'), None): cnv_string,
((TABLENS,u'row'), None): cnv_integer,
((TABLENS,u'scenario-ranges'), None): cnv_string,
((TABLENS,u'search-criteria-must-apply-to-whole-cell'), None): cnv_boolean,
((TABLENS,u'selected-page'), None): cnv_string,
((TABLENS,u'show-details'), None): cnv_boolean,
((TABLENS,u'show-empty'), None): cnv_boolean,
((TABLENS,u'show-empty'), None): cnv_string,
((TABLENS,u'show-filter-button'), None): cnv_boolean,
((TABLENS,u'sort-mode'), None): cnv_string,
((TABLENS,u'source-cell-range-addresses'), None): cnv_string,
((TABLENS,u'source-cell-range-addresses'), None): cnv_string,
((TABLENS,u'source-field-name'), None): cnv_string,
((TABLENS,u'source-field-name'), None): cnv_string,
((TABLENS,u'source-name'), None): cnv_string,
((TABLENS,u'sql-statement'), None): cnv_string,
((TABLENS,u'start'), None): cnv_string,
((TABLENS,u'start-column'), None): cnv_integer,
((TABLENS,u'start-position'), None): cnv_integer,
((TABLENS,u'start-row'), None): cnv_integer,
((TABLENS,u'start-table'), None): cnv_integer,
((TABLENS,u'status'), None): cnv_string,
((TABLENS,u'step'), None): cnv_double,
((TABLENS,u'steps'), None): cnv_positiveInteger,
((TABLENS,u'structure-protected'), None): cnv_boolean,
((TABLENS,u'style-name'), None): cnv_StyleNameRef,
((TABLENS,u'table-background'), None): cnv_boolean,
((TABLENS,u'table'), None): cnv_integer,
((TABLENS,u'table-name'), None): cnv_string,
((TABLENS,u'target-cell-address'), None): cnv_string,
((TABLENS,u'target-cell-address'), None): cnv_string,
((TABLENS,u'target-range-address'), None): cnv_string,
((TABLENS,u'target-range-address'), None): cnv_string,
((TABLENS,u'title'), None): cnv_string,
((TABLENS,u'track-changes'), None): cnv_boolean,
((TABLENS,u'type'), None): cnv_string,
((TABLENS,u'use-labels'), None): cnv_string,
((TABLENS,u'use-regular-expressions'), None): cnv_boolean,
((TABLENS,u'used-hierarchy'), None): cnv_integer,
((TABLENS,u'user-name'), None): cnv_string,
((TABLENS,u'value'), None): cnv_string,
((TABLENS,u'value'), None): cnv_string,
((TABLENS,u'value-type'), None): cnv_string,
((TABLENS,u'visibility'), None): cnv_string,
((TEXTNS,u'active'), None): cnv_boolean,
((TEXTNS,u'address'), None): cnv_string,
((TEXTNS,u'alphabetical-separators'), None): cnv_boolean,
((TEXTNS,u'anchor-page-number'), None): cnv_positiveInteger,
((TEXTNS,u'anchor-type'), None): cnv_string,
((TEXTNS,u'animation'), None): cnv_string,
((TEXTNS,u'animation-delay'), None): cnv_string,
((TEXTNS,u'animation-direction'), None): cnv_string,
((TEXTNS,u'animation-repeat'), None): cnv_string,
((TEXTNS,u'animation-start-inside'), None): cnv_boolean,
((TEXTNS,u'animation-steps'), None): cnv_length,
((TEXTNS,u'animation-stop-inside'), None): cnv_boolean,
((TEXTNS,u'annote'), None): cnv_string,
((TEXTNS,u'author'), None): cnv_string,
((TEXTNS,u'bibliography-data-field'), None): cnv_string,
((TEXTNS,u'bibliography-type'), None): cnv_string,
((TEXTNS,u'booktitle'), None): cnv_string,
((TEXTNS,u'bullet-char'), None): cnv_string,
((TEXTNS,u'bullet-relative-size'), None): cnv_string,
((TEXTNS,u'c'), None): cnv_nonNegativeInteger,
((TEXTNS,u'capitalize-entries'), None): cnv_boolean,
((TEXTNS,u'caption-sequence-format'), None): cnv_string,
((TEXTNS,u'caption-sequence-name'), None): cnv_string,
((TEXTNS,u'change-id'), None): cnv_IDREF,
((TEXTNS,u'chapter'), None): cnv_string,
((TEXTNS,u'citation-body-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'citation-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'class-names'), None): cnv_NCNames,
((TEXTNS,u'column-name'), None): cnv_string,
((TEXTNS,u'combine-entries'), None): cnv_boolean,
((TEXTNS,u'combine-entries-with-dash'), None): cnv_boolean,
((TEXTNS,u'combine-entries-with-pp'), None): cnv_boolean,
((TEXTNS,u'comma-separated'), None): cnv_boolean,
((TEXTNS,u'cond-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'condition'), None): cnv_formula,
((TEXTNS,u'connection-name'), None): cnv_string,
((TEXTNS,u'consecutive-numbering'), None): cnv_boolean,
((TEXTNS,u'continue-numbering'), None): cnv_boolean,
((TEXTNS,u'copy-outline-levels'), None): cnv_boolean,
((TEXTNS,u'count-empty-lines'), None): cnv_boolean,
((TEXTNS,u'count-in-text-boxes'), None): cnv_boolean,
((TEXTNS,u'current-value'), None): cnv_boolean,
((TEXTNS,u'custom1'), None): cnv_string,
((TEXTNS,u'custom2'), None): cnv_string,
((TEXTNS,u'custom3'), None): cnv_string,
((TEXTNS,u'custom4'), None): cnv_string,
((TEXTNS,u'custom5'), None): cnv_string,
((TEXTNS,u'database-name'), None): cnv_string,
((TEXTNS,u'date-adjust'), None): cnv_duration,
((TEXTNS,u'date-value'), None): cnv_date,
# ((TEXTNS,u'date-value'), None): cnv_dateTime,
((TEXTNS,u'default-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'description'), None): cnv_string,
((TEXTNS,u'display'), None): cnv_string,
((TEXTNS,u'display-levels'), None): cnv_positiveInteger,
((TEXTNS,u'display-outline-level'), None): cnv_nonNegativeInteger,
((TEXTNS,u'dont-balance-text-columns'), None): cnv_boolean,
((TEXTNS,u'duration'), None): cnv_duration,
((TEXTNS,u'edition'), None): cnv_string,
((TEXTNS,u'editor'), None): cnv_string,
((TEXTNS,u'filter-name'), None): cnv_string,
((TEXTNS,u'first-row-end-column'), None): cnv_string,
((TEXTNS,u'first-row-start-column'), None): cnv_string,
((TEXTNS,u'fixed'), None): cnv_boolean,
((TEXTNS,u'footnotes-position'), None): cnv_string,
((TEXTNS,u'formula'), None): cnv_formula,
((TEXTNS,u'global'), None): cnv_boolean,
((TEXTNS,u'howpublished'), None): cnv_string,
((TEXTNS,u'id'), None): cnv_ID,
# ((TEXTNS,u'id'), None): cnv_string,
((TEXTNS,u'identifier'), None): cnv_string,
((TEXTNS,u'ignore-case'), None): cnv_boolean,
((TEXTNS,u'increment'), None): cnv_nonNegativeInteger,
((TEXTNS,u'index-name'), None): cnv_string,
((TEXTNS,u'index-scope'), None): cnv_string,
((TEXTNS,u'institution'), None): cnv_string,
((TEXTNS,u'is-hidden'), None): cnv_boolean,
((TEXTNS,u'is-list-header'), None): cnv_boolean,
((TEXTNS,u'isbn'), None): cnv_string,
((TEXTNS,u'issn'), None): cnv_string,
((TEXTNS,u'issn'), None): cnv_string,
((TEXTNS,u'journal'), None): cnv_string,
((TEXTNS,u'key'), None): cnv_string,
((TEXTNS,u'key1'), None): cnv_string,
((TEXTNS,u'key1-phonetic'), None): cnv_string,
((TEXTNS,u'key2'), None): cnv_string,
((TEXTNS,u'key2-phonetic'), None): cnv_string,
((TEXTNS,u'kind'), None): cnv_string,
((TEXTNS,u'label'), None): cnv_string,
((TEXTNS,u'last-row-end-column'), None): cnv_string,
((TEXTNS,u'last-row-start-column'), None): cnv_string,
((TEXTNS,u'level'), None): cnv_positiveInteger,
((TEXTNS,u'line-break'), None): cnv_boolean,
((TEXTNS,u'line-number'), None): cnv_string,
((TEXTNS,u'main-entry'), None): cnv_boolean,
((TEXTNS,u'main-entry-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'master-page-name'), None): cnv_StyleNameRef,
((TEXTNS,u'min-label-distance'), None): cnv_string,
((TEXTNS,u'min-label-width'), None): cnv_string,
((TEXTNS,u'month'), None): cnv_string,
((TEXTNS,u'name'), None): cnv_string,
((TEXTNS,u'note-class'), None): cnv_textnoteclass,
((TEXTNS,u'note'), None): cnv_string,
((TEXTNS,u'number'), None): cnv_string,
((TEXTNS,u'number-lines'), None): cnv_boolean,
((TEXTNS,u'number-position'), None): cnv_string,
((TEXTNS,u'numbered-entries'), None): cnv_boolean,
((TEXTNS,u'offset'), None): cnv_string,
((TEXTNS,u'organizations'), None): cnv_string,
((TEXTNS,u'outline-level'), None): cnv_string,
((TEXTNS,u'page-adjust'), None): cnv_integer,
((TEXTNS,u'pages'), None): cnv_string,
((TEXTNS,u'paragraph-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'placeholder-type'), None): cnv_string,
((TEXTNS,u'prefix'), None): cnv_string,
((TEXTNS,u'protected'), None): cnv_boolean,
((TEXTNS,u'protection-key'), None): cnv_string,
((TEXTNS,u'publisher'), None): cnv_string,
((TEXTNS,u'ref-name'), None): cnv_string,
((TEXTNS,u'reference-format'), None): cnv_string,
((TEXTNS,u'relative-tab-stop-position'), None): cnv_boolean,
((TEXTNS,u'report-type'), None): cnv_string,
((TEXTNS,u'restart-numbering'), None): cnv_boolean,
((TEXTNS,u'restart-on-page'), None): cnv_boolean,
((TEXTNS,u'row-number'), None): cnv_nonNegativeInteger,
((TEXTNS,u'school'), None): cnv_string,
((TEXTNS,u'section-name'), None): cnv_string,
((TEXTNS,u'select-page'), None): cnv_string,
((TEXTNS,u'separation-character'), None): cnv_string,
((TEXTNS,u'series'), None): cnv_string,
((TEXTNS,u'sort-algorithm'), None): cnv_string,
((TEXTNS,u'sort-ascending'), None): cnv_boolean,
((TEXTNS,u'sort-by-position'), None): cnv_boolean,
((TEXTNS,u'space-before'), None): cnv_string,
((TEXTNS,u'start-numbering-at'), None): cnv_string,
((TEXTNS,u'start-value'), None): cnv_nonNegativeInteger,
((TEXTNS,u'start-value'), None): cnv_positiveInteger,
((TEXTNS,u'string-value'), None): cnv_string,
((TEXTNS,u'string-value-if-false'), None): cnv_string,
((TEXTNS,u'string-value-if-true'), None): cnv_string,
((TEXTNS,u'string-value-phonetic'), None): cnv_string,
((TEXTNS,u'style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'suffix'), None): cnv_string,
((TEXTNS,u'tab-ref'), None): cnv_nonNegativeInteger,
((TEXTNS,u'table-name'), None): cnv_string,
((TEXTNS,u'table-type'), None): cnv_string,
((TEXTNS,u'time-adjust'), None): cnv_duration,
((TEXTNS,u'time-value'), None): cnv_dateTime,
((TEXTNS,u'time-value'), None): cnv_time,
((TEXTNS,u'title'), None): cnv_string,
((TEXTNS,u'track-changes'), None): cnv_boolean,
((TEXTNS,u'url'), None): cnv_string,
((TEXTNS,u'use-caption'), None): cnv_boolean,
((TEXTNS,u'use-chart-objects'), None): cnv_boolean,
((TEXTNS,u'use-draw-objects'), None): cnv_boolean,
((TEXTNS,u'use-floating-frames'), None): cnv_boolean,
((TEXTNS,u'use-graphics'), None): cnv_boolean,
((TEXTNS,u'use-index-marks'), None): cnv_boolean,
((TEXTNS,u'use-index-source-styles'), None): cnv_boolean,
((TEXTNS,u'use-keys-as-entries'), None): cnv_boolean,
((TEXTNS,u'use-math-objects'), None): cnv_boolean,
((TEXTNS,u'use-objects'), None): cnv_boolean,
((TEXTNS,u'use-other-objects'), None): cnv_boolean,
((TEXTNS,u'use-outline-level'), None): cnv_boolean,
((TEXTNS,u'use-soft-page-breaks'), None): cnv_boolean,
((TEXTNS,u'use-spreadsheet-objects'), None): cnv_boolean,
((TEXTNS,u'use-tables'), None): cnv_boolean,
((TEXTNS,u'value'), None): cnv_nonNegativeInteger,
((TEXTNS,u'visited-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'volume'), None): cnv_string,
((TEXTNS,u'year'), None): cnv_string,
((XFORMSNS,u'bind'), None): cnv_string,
((XLINKNS,u'actuate'), None): cnv_string,
((XLINKNS,u'href'), None): cnv_anyURI,
((XLINKNS,u'show'), None): cnv_xlinkshow,
((XLINKNS,u'title'), None): cnv_string,
((XLINKNS,u'type'), None): cnv_string,
}
class AttrConverters:
    """Dispatches attribute values to the converter registered in ``attrconverters``."""

    def convert(self, attribute, value, element):
        """Check/convert *value* for *attribute* on *element*.

        Lookup order: a converter registered for (attribute, element.qname)
        wins over one registered for (attribute, None); with no match the
        value is simply coerced to unicode.
        """
        for key in ((attribute, element.qname), (attribute, None)):
            converter = attrconverters.get(key)
            if converter is not None:
                return converter(attribute, value, element)
        return unicode(value)
| ashang/calibre | src/odf/attrconverters.py | Python | gpl-3.0 | 69,460 |
import zstackwoodpecker.operations.baremetal_operations as bare_operations
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import test_stub
import os
# Module-level handle to the helper VM so error_cleanup() can reach it on failure.
vm = None
def test():
    """End-to-end PXE provisioning test for a single baremetal chassis.

    Flow: create a helper VM hosting a virtual BMC, register it as a
    chassis, provision once to collect hardware info, generate config
    items and a host config, then provision a second time to install the
    system and verify the chassis walks through the expected states.
    """
    global vm
    # Create VM
    vm = test_stub.create_vm()
    vm.check()
    # Create Virtual BMC
    test_stub.create_vbmc(vm = vm, port = 6230)
    # Create Chassis
    chassis = os.environ.get('ipminame')
    test_stub.create_chassis(chassis_name = chassis)
    # NOTE(review): hack_ks presumably patches the kickstart/PXE config
    # served for this BMC port -- confirm in test_stub.
    test_stub.hack_ks(port = 6230)
    chassis_uuid = test_lib.lib_get_chassis_by_name(chassis).uuid
    # First time Provision: this pass only gathers hardware info.
    bare_operations.provision_baremetal(chassis_uuid)
    hwinfo = test_stub.check_hwinfo(chassis_uuid)
    if not hwinfo:
        test_util.test_fail('Fail to get hardware info during the first provision')
    # Generate cfgItems and the host config used for the unattended install.
    cfgItems, pxe_mac = test_stub.generate_cfgItems(chassis_uuid=chassis_uuid)
    host_cfg = test_stub.create_hostcfg(chassis_uuid=chassis_uuid, unattended=True, password="password", cfgItems=cfgItems)
    # The kickstart file is named after the PXE MAC with ':' replaced by '-'.
    test_stub.hack_ks(port = 6230, ks_file = str(pxe_mac.replace(':','-')))
    # Second time Provision to install system
    bare_operations.provision_baremetal(chassis_uuid)
    # The chassis must pass through Rebooting -> Provisioning -> Provisioned.
    if not test_stub.verify_chassis_status(chassis_uuid, "Rebooting"):
        test_util.test_fail('Chassis failed to Rebooting after the second provision')
    if not test_stub.verify_chassis_status(chassis_uuid, "Provisioning"):
        test_util.test_fail('Chassis failed to Provisioning after the second provision')
    if not test_stub.verify_chassis_status(chassis_uuid, "Provisioned"):
        test_util.test_fail('Chassis failed to Provisioned after the second provision')
    # Sanity-check the helper VM still works, then tear everything down.
    vm.stop()
    vm.start()
    test_stub.delete_vbmc(vm = vm)
    bare_operations.delete_chassis(chassis_uuid)
    vm.destroy()
    test_util.test_pass('Create PXE Test Success')
def error_cleanup():
    """Tear down test resources after a failure: vBMC, chassis, and the VM.

    Runs only when the VM was actually created; the chassis name comes
    from the same 'ipminame' environment variable used in test().
    """
    global vm
    if vm:
        test_stub.delete_vbmc(vm = vm)
        chassis = os.environ.get('ipminame')
        chassis_uuid = test_lib.lib_get_chassis_by_name(chassis).uuid
        bare_operations.delete_chassis(chassis_uuid)
        vm.destroy()
| zstackorg/zstack-woodpecker | integrationtest/vm/baremetal/test_single_baremetal_installation_no_nic_no_bond.py | Python | apache-2.0 | 2,162 |
# -*- coding: utf-8 -*-
"""
Local (development) settings

- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .common import *  # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
# Mirror the flag into the template engine so template errors surface in dev.
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
# Host/port for a local debugging SMTP server (only used if the backend sends).
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# Default: print outgoing emails to the console instead of sending them.
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
# Per-process in-memory cache; fine for development, not shared across processes.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
# 10.0.2.2 is commonly the host address as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| boweeb/nhweb | config/settings/local.py | Python | bsd-3-clause | 1,911 |
from setuptools import setup
setup(
    # general meta
    name='ebs-deploy',
    version='2.0.1',
    author='Brian C. Dilley',
    author_email='[email protected]',
    description='Python based command line tools for managing '
                'Amazon Elastic Beanstalk applications.',
    platforms='any',
    url='https://github.com/briandilley/ebs-deploy',
    download_url='https://github.com/briandilley/ebs-deploy',
    # packages
    packages=[
        'ebs_deploy',
        'ebs_deploy.commands'
    ],
    # dependencies
    install_requires=[
        'boto>=2.45.0',
        'pyyaml>=3.10'
    ],
    # additional files to include
    include_package_data=True,
    # the scripts
    scripts=['scripts/ebs-deploy'],
    # PyPI trove classifiers (audience metadata)
    classifiers=['Intended Audience :: Developers']
)
| briandilley/ebs-deploy | setup.py | Python | mit | 805 |
# -*- coding: utf-8 -*-
from . import base
class TriggerApi(base.Api):
    """Client for the Cryptsy trigger endpoint."""
    # Endpoint path fragment; base.Api presumably joins this into the URL
    # (trailing comma makes it a one-element tuple) — TODO confirm against base.Api.
    _path = 'trigger',
    def __call__(self, trigger_id=None):
        """Fetch one trigger, or list all triggers - Authenticated.
        :param trigger_id: (optional) Trigger id; omit to list all triggers.
        :return: Decoded API response for the trigger(s).
        """
        return self._get(trigger_id)
    def create(self, market_id, ordertype, quantity, comparison, price, orderprice, expires=''):
        """Create a trigger - Authenticated.
        :param market_id: Market the trigger applies to.
        :param ordertype: Order type (sent to the API as ``type``).
        :param quantity: Quantity for the resulting order.
        :param comparison: Price comparison operator for firing the trigger.
        :param price: Trigger price.
        :param orderprice: Price of the order placed when the trigger fires.
        :param expires: (optional) Expiry; empty string means no expiry.
        :return: Decoded API response.
        """
        return self._post(market_id=market_id, type=ordertype, quantity=quantity,
                          comparison=comparison, price=price, orderprice=orderprice,
                          expires=expires)
    def remove(self, trigger_id):
        """Delete the trigger with the given id - Authenticated."""
        return self._delete(trigger_id)
| katakumpo/cryptsy_api | cryptsy_api/trigger.py | Python | mit | 1,109 |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation, MultiSource)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
# Experiment name is derived from this script's filename (e.g. 'e549').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# PATH = "/home/jack/experiments/neuralnilm/figures"
# How often (in training iterations) to save diagnostic plots.
SAVE_PLOT_INTERVAL = 25000
UKDALE_FILENAME = '/data/dk3810/ukdale.h5'
# Power ceiling used to scale the target appliance; presumably watts — TODO confirm.
MAX_TARGET_POWER = 300
# Aliases under which the target appliance appears across UK-DALE buildings.
TARGET_APPLIANCE = ['fridge freezer', 'fridge', 'freezer']
SEQ_LENGTH = 512
N_SEQ_PER_BATCH = 64
TRAIN_BUILDINGS = [1, 2, 3, 4]
VALIDATION_BUILDINGS = [5]
SKIP_PROBABILITY_FOR_TARGET = 0.5
# Date windows of usable data for each UK-DALE building.
WINDOW_PER_BUILDING = {
    1: ("2013-03-17", "2014-12-01"),
    2: ("2013-05-22", "2013-10-01"),
    3: ("2013-02-27", "2013-04-01"),
    4: ("2013-03-09", "2013-09-20"),
    5: ("2014-06-29", "2014-08-27")
}
# Fixed standardisation statistics so every source scales input identically.
INPUT_STATS = {
    'mean': np.array([297.87216187], dtype=np.float32),
    'std': np.array([374.43884277], dtype=np.float32)
}
# Synthetic data source: combines activations of several appliances into
# artificial aggregate sequences, outputting only the target appliance.
real_appliance_source1 = RealApplianceSource(
    filename=UKDALE_FILENAME,
    appliances=[
        TARGET_APPLIANCE,
        ['washer dryer', 'washing machine'],
        'dish washer',
        'kettle',
        'microwave'
    ],
    max_appliance_powers=[MAX_TARGET_POWER, 2400, 2500, 2600, 1500],
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 1800, 1800, 12, 12],
    min_off_durations=[12, 600, 1800, 12, 12],
    divide_input_by_max_input_power=False,
    window_per_building=WINDOW_PER_BUILDING,
    seq_length=SEQ_LENGTH,
    output_one_appliance=True,
    train_buildings=TRAIN_BUILDINGS,
    validation_buildings=VALIDATION_BUILDINGS,
    n_seq_per_batch=N_SEQ_PER_BATCH,
    skip_probability=0.75,
    skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
    target_is_start_and_end_and_mean=True,
    standardise_input=True,
    input_stats=INPUT_STATS
)
# Real data source: aggregate and target appliance recorded at the same
# location (no synthetic mixing).
same_location_source1 = SameLocation(
    filename=UKDALE_FILENAME,
    target_appliance=TARGET_APPLIANCE,
    window_per_building=WINDOW_PER_BUILDING,
    seq_length=SEQ_LENGTH,
    train_buildings=TRAIN_BUILDINGS,
    validation_buildings=VALIDATION_BUILDINGS,
    n_seq_per_batch=N_SEQ_PER_BATCH,
    skip_probability=SKIP_PROBABILITY_FOR_TARGET,
    target_is_start_and_end_and_mean=True,
    standardise_input=True,
    offset_probability=1,
    divide_target_by=MAX_TARGET_POWER,
    input_stats=INPUT_STATS
)
# 50/50 mix of synthetic and real data for training; validation draws only
# from the real (SameLocation) source.
multi_source = MultiSource(
    sources=[
        {
            'source': real_appliance_source1,
            'train_probability': 0.5,
            'validation_probability': 0
        },
        {
            'source': same_location_source1,
            'train_probability': 0.5,
            'validation_probability': 1
        }
    ],
    standardisation_source=same_location_source1
)
def only_train_on_real_data(net, iteration):
    """Epoch callback: switch training entirely onto the real-data source.

    Zeroes the sampling probability of the synthetic source (index 0) and
    gives the real SameLocation source (index 1) probability 1.
    """
    message = "Iteration {}: Now only training on real data.".format(iteration)
    net.logger.info(message)
    synthetic, real = net.source.sources[0], net.source.sources[1]
    synthetic['train_probability'] = 0.0
    real['train_probability'] = 1.0
# Base network configuration shared by all experiments in this script.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # Mean squared error over the whole batch.
    loss_function=lambda x, t: squared_error(x, t).mean(),
    updates_func=nesterov_momentum,
    learning_rate=1e-5,
    # Learning-rate decay schedule, keyed by iteration number.
    learning_rate_changes_by_iteration={
        400000: 1e-6,
        500000: 1e-7
    },
    # At 350k iterations, stop sampling synthetic data entirely.
    epoch_callbacks={
        350000: only_train_on_real_data
    },
    do_save_activations=True,
    auto_reshape=False,
    layers_config=[
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': PadLayer,
            'width': 4
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 16,
            'filter_size': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 16,
            'filter_size': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1),  # back to (batch, time, features)
            'label': 'dimshuffle3'
        },
        {
            'type': DenseLayer,
            'num_units': 512 * 8,
            'nonlinearity': rectify,
            'label': 'dense0'
        },
        {
            'type': DenseLayer,
            'num_units': 512 * 6,
            'nonlinearity': rectify,
            'label': 'dense1'
        },
        {
            'type': DenseLayer,
            'num_units': 512 * 4,
            'nonlinearity': rectify,
            'label': 'dense2'
        },
        {
            'type': DenseLayer,
            'num_units': 512,
            'nonlinearity': rectify
        },
        {
            # Output: 3 units — presumably (start, end, mean) of the target
            # activation, matching target_is_start_and_end_and_mean — TODO confirm.
            'type': DenseLayer,
            'num_units': 3,
            'nonlinearity': None
        }
    ]
)
def exp_a(name):
    """Build the Net for experiment 'a'.

    Starts from the shared ``net_dict`` configuration and attaches the
    experiment name, the mixed data source and a start/end/mean plotter.
    """
    config = deepcopy(net_dict)
    config['experiment_name'] = name
    config['source'] = multi_source
    config['plotter'] = StartEndMeanPlotter(
        n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
    return Net(**config)
def main():
    """Run each configured experiment in turn, logging failures and
    continuing with the next one."""
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment presumably returns the source of the call to make
        # (e.g. "exp_a('e549a')") which is executed via eval() below — verify.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # Log and move on to the next experiment.
            logger.exception("Exception")
            # raise
        finally:
            # NOTE(review): logging.shutdown() runs after *every* experiment,
            # so later iterations would log through a shut-down logging system.
            # Harmless with a single experiment ('a') — confirm before adding more.
            logging.shutdown()
# Script entry point.
if __name__ == "__main__":
    main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e549.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| JackKelly/neuralnilm_prototype | scripts/e549.py | Python | mit | 7,665 |
from django.contrib import admin
from flooding_base.models import Application
from flooding_base.models import Configuration
from flooding_base.models import DataSourceDummy
from flooding_base.models import DataSourceEI
from flooding_base.models import GroupConfigurationPermission
from flooding_base.models import Map
from flooding_base.models import Setting
from flooding_base.models import Site
from flooding_base.models import SubApplication
from flooding_base.models import Text
class ConfigurationAdmin(admin.ModelAdmin):
    """Admin list view for Configuration: name, id, source type and whether
    a data source is attached."""
    list_display = ('name', 'id', 'datasourcetype', 'hasDataSource', )
class DataSourceEIAdmin(admin.ModelAdmin):
    """Admin list view for DataSourceEI, surfacing its connection settings."""
    list_display = ('configuration', 'id', 'connectorurl',
                    'databaseurl', 'databaseurltagname',
                    'usecustomfilterresponse')
# Register all flooding_base models with the admin site; only Configuration
# and DataSourceEI get customised ModelAdmin classes.
admin.site.register(Application)
admin.site.register(Configuration, ConfigurationAdmin)
admin.site.register(DataSourceDummy)
admin.site.register(DataSourceEI, DataSourceEIAdmin)
admin.site.register(GroupConfigurationPermission)
admin.site.register(Map)
admin.site.register(Setting)
admin.site.register(Site)
admin.site.register(SubApplication)
admin.site.register(Text)
| lizardsystem/flooding-lib | flooding_base/admin.py | Python | gpl-3.0 | 1,181 |
from hubcheck.pageobjects.po_generic_page import GenericPage
class LoginPage1(GenericPage):
    """Page object for a hub's /login page."""
    def __init__(self,browser,catalog):
        super(LoginPage1,self).__init__(browser,catalog)
        self.path = '/login'
        # load hub's classes
        LoginPage_Locators = self.load_class('LoginPage_Locators')
        Login = self.load_class('Login')
        # update this object's locator
        self.locators.update(LoginPage_Locators.locators)
        # setup page object's components
        self.auth = Login(self,{'base':'auth'})
    def login_as(self,username,password,remember=False):
        """Log in through the page's auth widget.

        :param username: account name to type into the form
        :param password: account password
        :param remember: tick the "remember me" box when True
        """
        self.auth.login_as(username,password,remember)
class LoginPage1_Locators_Base_1(object):
    """Locator set for the LoginPage object (hubForm-based markup)."""
    # 'auth' locates the login form container on the page.
    locators = dict(auth="css=#hubForm")
class LoginPage1_Locators_Base_2(object):
    """Locator set for the Login component (#authentication markup)."""
    locators = dict(auth="css=#authentication")
class LoginPage1_Locators_Base_3(object):
    """Locator set for the Login component (#login_form markup)."""
    locators = dict(auth="css=#login_form")
class LoginPage1_Locators_Base_4(object):
    """Locator set for the Login component (.login_form class-based markup)."""
    locators = dict(auth="css=.login_form")
| codedsk/hubcheck | hubcheck/pageobjects/po_login.py | Python | mit | 1,242 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import warnings
from operator import itemgetter
from tabulate import tabulate
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
"""
This module defines classes for reading/manipulating/writing the main sections
of FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program
control tags.
XANES and EXAFS input files, are available, for non-spin case at this time.
"""
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
# BUGFIX: a missing comma between "FPRIME" and "MDFF" caused implicit string
# concatenation to "FPRIMEMDFF", so MDFF was never accepted as a valid tag
# (and Tags.__setitem__ would warn on it spuriously).
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
                   "REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
                   "SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
                   "CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
                   "LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
                   "RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
                   "XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
                   "DIMS", "AFOLP", "EDGE", "COMPTON", "DANES",
                   "FPRIME", "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
                   "EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
                   "UNFREEZEF", "CHSHIFT", "DEBYE",
                   "INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
                   "ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
                   "PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
                   "DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
                   "STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
                   "SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
                   "CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
                   "SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
    """
    Creates Header for the FEFF input file.
    Has the following format::
        * This feff.inp file generated by pymatgen, www.materialsproject.org
        TITLE comment:
        TITLE Source: CoO19128.cif
        TITLE Structure Summary: (Co2 O2)
        TITLE Reduced formula: CoO
        TITLE space group: P1, space number: 1
        TITLE abc: 3.297078 3.297078 5.254213
        TITLE angles: 90.0 90.0 120.0
        TITLE sites: 4
        * 1 Co 0.666666 0.333332 0.496324
        * 2 Co 0.333333 0.666667 0.996324
        * 3 O 0.666666 0.333332 0.878676
        * 4 O 0.333333 0.666667 0.378675
    Args:
        struct: Structure object, See pymatgen.core.structure.Structure.
        source: User supplied identifier, i.e. for Materials Project this
            would be the material ID number
        comment: Comment for first header line
    """
    def __init__(self, struct, source='', comment=''):
        # Only fully-ordered structures can be written out as atomic coords.
        if struct.is_ordered:
            self.struct = struct
            self.source = source
            # Symmetry data (space group name/number) goes into the header.
            sym = SpacegroupAnalyzer(struct)
            data = sym.get_symmetry_dataset()
            self.space_number = data["number"]
            self.space_group = data["international"]
            self.comment = comment or "None given"
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
    @staticmethod
    def from_cif_file(cif_file, source='', comment=''):
        """
        Static method to create Header object from cif_file
        Args:
            cif_file: cif_file path and name
            source: User supplied identifier, i.e. for Materials Project this
                would be the material ID number
            comment: User comment that goes in header
        Returns:
            Header Object
        """
        r = CifParser(cif_file)
        # Uses the first structure in the CIF file.
        structure = r.get_structures()[0]
        return Header(structure, source, comment)
    @property
    def structure_symmetry(self):
        """
        Returns space number and space group
        Returns:
            Space number and space group list
        """
        return self.space_group, self.space_number
    @property
    def formula(self):
        """
        Formula of structure
        """
        return self.struct.composition.formula
    @staticmethod
    def from_file(filename):
        """
        Returns Header object from file
        """
        hs = Header.header_string_from_file(filename)
        return Header.from_string(hs)
    @staticmethod
    def header_string_from_file(filename='feff.inp'):
        """
        Reads Header string from either a HEADER file or feff.inp file
        Will also read a header from a non-pymatgen generated feff.inp file
        Args:
            filename: File name containing the Header data.
        Returns:
            Reads header string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            feff_header_str = []
            ln = 0
            # Checks to see if generated by pymatgen (marker on first line)
            try:
                feffpmg = f[0].find("pymatgen")
                if feffpmg == -1:
                    feffpmg = False
            except IndexError:
                # Empty file: treat as non-pymatgen.
                feffpmg = False
            # Reads pymatgen generated header or feff.inp file
            if feffpmg:
                # Line 9 carries "TITLE sites: N"; header is 9 + N lines long.
                nsites = int(f[8].split()[2])
                for line in f:
                    ln += 1
                    if ln <= nsites + 9:
                        feff_header_str.append(line)
            else:
                # Reads header from header from feff.inp file from unknown
                # source: take leading lines starting with '*' or 'T' only.
                end = 0
                for line in f:
                    if (line[0] == "*" or line[0] == "T") and end == 0:
                        feff_header_str.append(line.replace("\r", ""))
                    else:
                        end = 1
        return ''.join(feff_header_str)
    @staticmethod
    def from_string(header_str):
        """
        Reads Header string and returns Header object if header was
        generated by pymatgen.
        Note: Checks to see if generated by pymatgen, if not it is impossible
        to generate structure object so it is not possible to generate
        header object and routine ends
        Args:
            header_str: pymatgen generated feff.inp header
        Returns:
            Structure object.
        """
        lines = tuple(clean_lines(header_str.split("\n"), False))
        comment1 = lines[0]
        feffpmg = comment1.find("pymatgen")
        if feffpmg == -1:
            feffpmg = False
        if feffpmg:
            # Header layout is fixed by __str__ below; parse by line index.
            comment2 = ' '.join(lines[1].split()[2:])
            source = ' '.join(lines[2].split()[2:])
            basis_vec = lines[6].split(":")[-1].split()
            # a, b, c
            a = float(basis_vec[0])
            b = float(basis_vec[1])
            c = float(basis_vec[2])
            lengths = [a, b, c]
            # alpha, beta, gamma
            basis_ang = lines[7].split(":")[-1].split()
            alpha = float(basis_ang[0])
            beta = float(basis_ang[1])
            gamma = float(basis_ang[2])
            angles = [alpha, beta, gamma]
            # NOTE(review): Lattice.from_lengths_and_angles is deprecated in
            # newer pymatgen releases — confirm the targeted version.
            lattice = Lattice.from_lengths_and_angles(lengths, angles)
            natoms = int(lines[8].split(":")[-1].split()[0])
            atomic_symbols = []
            for i in range(9, 9 + natoms):
                atomic_symbols.append(lines[i].split()[2])
            # read the atomic coordinates
            coords = []
            for i in range(natoms):
                toks = lines[i + 9].split()
                coords.append([float(s) for s in toks[3:]])
            struct = Structure(lattice, atomic_symbols, coords, False,
                               False, False)
            h = Header(struct, source, comment2)
            return h
        else:
            # NOTE(review): returning a str instead of raising forces callers
            # to type-check the result; an exception would be cleaner — confirm
            # no caller relies on the string return.
            return "Header not generated by pymatgen, cannot return header object"
    def __str__(self):
        """
        String representation of Header.
        """
        # Fixed-width 6-decimal formatting for all numeric fields.
        to_s = lambda x: "%0.6f" % x
        output = ["* This FEFF.inp file generated by pymatgen",
                  ''.join(["TITLE comment: ", self.comment]),
                  ''.join(["TITLE Source: ", self.source]),
                  "TITLE Structure Summary: {}"
                  .format(self.struct.composition.formula),
                  "TITLE Reduced formula: {}"
                  .format(self.struct.composition.reduced_formula),
                  "TITLE space group: ({}), space number: ({})"
                  .format(self.space_group, self.space_number),
                  "TITLE abc:{}".format(" ".join(
                      [to_s(i).rjust(10) for i in self.struct.lattice.abc])),
                  "TITLE angles:{}".format(" ".join(
                      [to_s(i).rjust(10) for i in self.struct.lattice.angles])),
                  "TITLE sites: {}".format(self.struct.num_sites)]
        # One commented line per site with fractional coordinates.
        for i, site in enumerate(self.struct):
            output.append(" ".join(["*", str(i + 1), site.species_string,
                                    " ".join([to_s(j).rjust(12)
                                              for j in site.frac_coords])]))
        return "\n".join(output)
    def write_file(self, filename='HEADER'):
        """
        Writes Header into filename on disk.
        Args:
            filename: Filename and path for file to be written to disk
        """
        with open(filename, "w") as f:
            f.write(str(self) + "\n")
class Atoms(MSONable):
    """
    Atomic cluster centered around the absorbing atom.
    """
    def __init__(self, struct, absorbing_atom, radius):
        """
        Args:
            struct (Structure): input structure
            absorbing_atom (str/int): Symbol for absorbing atom or site index
            radius (float): radius of the atom cluster in Angstroms.
        """
        if struct.is_ordered:
            self.struct = struct
            self.pot_dict = get_atom_map(struct)
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
        self.absorbing_atom, self.center_index = \
            get_absorbing_atom_symbol_index(absorbing_atom, struct)
        self.radius = radius
        self._cluster = self._set_cluster()
    def _set_cluster(self):
        """
        Compute and set the cluster of atoms as a Molecule object. The siteato
        coordinates are translated such that the absorbing atom(aka central
        atom) is at the origin.
        Returns:
            Molecule
        """
        center = self.struct[self.center_index].coords
        # All sites within self.radius of the absorbing atom.
        sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
        symbols = [self.absorbing_atom]
        coords = [[0, 0, 0]]
        for i, site_dist in enumerate(sphere):
            # NOTE(review): the class [^aA-zZ] also admits chars between 'Z'
            # and 'a' (e.g. '_'); likely intended [^a-zA-Z] — confirm.
            site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
            symbols.append(site_symbol)
            coords.append(site_dist[0].coords - center)
        return Molecule(symbols, coords)
    @property
    def cluster(self):
        """
        Returns the atomic cluster as a Molecule object.
        """
        return self._cluster
    @staticmethod
    def atoms_string_from_file(filename):
        """
        Reads atomic shells from file such as feff.inp or ATOMS file
        The lines are arranged as follows:
        x y z ipot Atom Symbol Distance Number
        with distance being the shell radius and ipot an integer identifying
        the potential used.
        Args:
            filename: File name containing atomic coord data.
        Returns:
            Atoms string.
        """
        with zopen(filename, "rt") as fobject:
            f = fobject.readlines()
            coords = 0
            atoms_str = []
            # Copy everything from the "ATOMS" card up to (excluding) "END".
            for line in f:
                if coords == 0:
                    find_atoms = line.find("ATOMS")
                    if find_atoms >= 0:
                        coords = 1
                if coords == 1 and not ("END" in line):
                    atoms_str.append(line.replace("\r", ""))
        return ''.join(atoms_str)
    @staticmethod
    def cluster_from_file(filename):
        """
        Parse the feff input file and return the atomic cluster as a Molecule
        object.
        Args:
            filename (str): path the feff input file
        Returns:
            Molecule: the atomic cluster as Molecule object. The absorbing atom
                is the one at the origin.
        """
        atoms_string = Atoms.atoms_string_from_file(filename)
        # Skip the 3 header lines of the ATOMS card.
        line_list = [l.split() for l in atoms_string.splitlines()[3:]]
        coords = []
        symbols = []
        for l in line_list:
            if l:
                coords.append([float(i) for i in l[:3]])
                symbols.append(l[4])
        return Molecule(symbols, coords)
    def get_lines(self):
        """
        Returns a list of string representations of the atomic configuration
        information(x, y, z, ipot, atom_symbol, distance, id).
        Returns:
            list: list of strings, sorted by the distance from the absorbing
                atom.
        """
        # Absorbing atom first: at the origin, ipot 0, distance "0.0".
        lines = [["{:f}".format(self._cluster[0].x),
                  "{:f}".format(self._cluster[0].y),
                  "{:f}".format(self._cluster[0].z),
                  0, self.absorbing_atom, "0.0", 0]]
        for i, site in enumerate(self._cluster[1:]):
            site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
            ipot = self.pot_dict[site_symbol]
            lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
                          "{:f}".format(site.z), ipot, site_symbol,
                          "{:f}".format(self._cluster.get_distance(0, i + 1)), i + 1])
        # Sort by the distance column (index 5).
        return sorted(lines, key=itemgetter(5))
    def __str__(self):
        """
        String representation of Atoms file.
        """
        lines_sorted = self.get_lines()
        # TODO: remove the formatting and update the unittests
        lines_formatted = str(tabulate(lines_sorted,
                                       headers=["* x", "y", "z", "ipot",
                                                "Atom", "Distance", "Number"]))
        atom_list = lines_formatted.replace("--", "**")
        return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
    def write_file(self, filename='ATOMS'):
        """
        Write Atoms list to file.
        Args:
            filename: path for file to be written
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class Tags(dict):
    """
    FEFF control parameters. Behaves like a dict of tag name -> value.
    """
    def __init__(self, params=None):
        """
        Args:
            params: A set of input parameters as a dictionary.
        """
        super().__init__()
        if params:
            self.update(params)
    def __setitem__(self, key, val):
        """
        Add parameter-val pair. Warns if parameter is not in list of valid
        Feff tags. Also cleans the parameter and val by stripping leading and
        trailing white spaces.
        Arg:
            key: dict key value
            value: value associated with key in dictionary
        """
        if key.strip().upper() not in VALID_FEFF_TAGS:
            warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
        super().__setitem__(key.strip(),
                            Tags.proc_val(key.strip(), val.strip())
                            if isinstance(val, str) else val)
    def as_dict(self):
        """
        Dict representation.
        Returns:
            Dictionary of parameters from fefftags object
        """
        tags_dict = dict(self)
        tags_dict['@module'] = self.__class__.__module__
        tags_dict['@class'] = self.__class__.__name__
        return tags_dict
    @staticmethod
    def from_dict(d):
        """
        Creates Tags object from a dictionary.
        Args:
            d: Dict of feff parameters and values.
        Returns:
            Tags object
        """
        i = Tags()
        for k, v in d.items():
            if k not in ("@module", "@class"):
                i[k] = v
        return i
    def get_string(self, sort_keys=False, pretty=False):
        """
        Returns a string representation of the Tags.  The reason why this
        method is different from the __str__ method is to provide options
        for pretty printing.
        Args:
            sort_keys: Set to True to sort the Feff parameters alphabetically.
                Defaults to False.
            pretty: Set to True for pretty aligned output. Defaults to False.
        Returns:
            String representation of Tags.
        """
        keys = self.keys()
        if sort_keys:
            keys = sorted(keys)
        lines = []
        for k in keys:
            if isinstance(self[k], dict):
                # ELNES/EXELFS cards span multiple lines with a fixed layout.
                if k in ["ELNES", "EXELFS"]:
                    lines.append([k, self._stringify_val(self[k]["ENERGY"])])
                    beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
                    beam_energy_list = beam_energy.split()
                    if int(beam_energy_list[1]) == 0:  # aver=0, specific beam direction
                        lines.append([beam_energy])
                        lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
                    else:
                        # no cross terms for orientation averaged spectrum
                        beam_energy_list[2] = str(0)
                        lines.append([self._stringify_val(beam_energy_list)])
                    lines.append([self._stringify_val(self[k]["ANGLES"])])
                    lines.append([self._stringify_val(self[k]["MESH"])])
                    lines.append([self._stringify_val(self[k]["POSITION"])])
            else:
                lines.append([k, self._stringify_val(self[k])])
        if pretty:
            return tabulate(lines)
        else:
            return str_delimited(lines, None, " ")
    @staticmethod
    def _stringify_val(val):
        """
        Convert the given value to string: lists become space-joined items.
        """
        if isinstance(val, list):
            return " ".join([str(i) for i in val])
        else:
            return str(val)
    def __str__(self):
        return self.get_string()
    def write_file(self, filename='PARAMETERS'):
        """
        Write Tags to a Feff parameter tag file.
        Args:
            filename: filename and path to write to.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__() + "\n")
    @staticmethod
    def from_file(filename="feff.inp"):
        """
        Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
        Args:
            filename: Filename for either PARAMETER or feff.inp file
        Returns:
            Feff_tag object
        """
        with zopen(filename, "rt") as f:
            lines = list(clean_lines(f.readlines()))
        params = {}
        eels_params = []
        # ieels marks the line where an ELNES/EXELFS card starts; the card
        # body spans up to 5 following lines (ieels_max).
        ieels = -1
        ieels_max = -1
        for i, line in enumerate(lines):
            m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = Tags.proc_val(key, val)
                if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
                    if key in ["ELNES", "EXELFS"]:
                        ieels = i
                        ieels_max = ieels + 5
                    else:
                        params[key] = val
            if ieels >= 0:
                if i >= ieels and i <= ieels_max:
                    if i == ieels + 1:
                        # aver flag == 1 shortens the card by one line.
                        if int(line.split()[1]) == 1:
                            ieels_max -= 1
                    eels_params.append(line)
        if eels_params:
            if len(eels_params) == 6:
                eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
            else:
                eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
            eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
            for k, v in zip(eels_keys, eels_params[1:]):
                eels_dict[k] = str(v)
            params[str(eels_params[0].split()[0])] = eels_dict
        return Tags(params)
    @staticmethod
    def proc_val(key, val):
        """
        Static helper method to convert Feff parameters to proper types, e.g.
        integers, floats, lists, etc.
        Args:
            key: Feff parameter key
            val: Actual value of Feff parameter.
        """
        # Everything except ELNES/EXELFS is treated as a (possibly 1-element)
        # list of numbers; parse failures fall back to a capitalized string.
        list_type_keys = list(VALID_FEFF_TAGS)
        del list_type_keys[list_type_keys.index("ELNES")]
        del list_type_keys[list_type_keys.index("EXELFS")]
        boolean_type_keys = ()
        float_type_keys = ("S02", "EXAFS", "RPATH")
        def smart_int_or_float(numstr):
            # Prefer int unless the token looks like a float (dot or exponent).
            if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
                return float(numstr)
            else:
                return int(numstr)
        try:
            if key.lower() == 'cif':
                m = re.search(r"\w+.cif", val)
                return m.group(0)
            if key in list_type_keys:
                output = list()
                toks = re.split(r"\s+", val)
                for tok in toks:
                    # "N*x" expands to N repetitions of x.
                    m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
                    if m:
                        output.extend([smart_int_or_float(m.group(2))] *
                                      int(m.group(1)))
                    else:
                        output.append(smart_int_or_float(tok))
                return output
            if key in boolean_type_keys:
                m = re.search(r"^\W+([TtFf])", val)
                if m:
                    if m.group(1) == "T" or m.group(1) == "t":
                        return True
                    else:
                        return False
                raise ValueError(key + " should be a boolean type!")
            if key in float_type_keys:
                return float(val)
        except ValueError:
            return val.capitalize()
        return val.capitalize()
    def diff(self, other):
        """
        Diff function. Compares two PARAMETER files and indicates which
        parameters are the same and which are not. Useful for checking whether
        two runs were done using the same parameters.
        Args:
            other: The other PARAMETER dictionary to compare to.
        Returns:
            Dict of the format {"Same" : parameters_that_are_the_same,
            "Different": parameters_that_are_different} Note that the
            parameters are return as full dictionaries of values.
        """
        similar_param = {}
        different_param = {}
        for k1, v1 in self.items():
            if k1 not in other:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": "Default"}
            elif v1 != other[k1]:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": other[k1]}
            else:
                similar_param[k1] = v1
        for k2, v2 in other.items():
            if k2 not in similar_param and k2 not in different_param:
                if k2 not in self:
                    different_param[k2] = {"FEFF_TAGS1": "Default",
                                           "FEFF_TAGS2": v2}
        return {"Same": similar_param, "Different": different_param}
    def __add__(self, other):
        """
        Add all the values of another Tags object to this object
        Facilitates the use of "standard" Tags
        """
        params = dict(self)
        for k, v in other.items():
            if k in self and v != self[k]:
                raise ValueError("Tags have conflicting values!")
            else:
                params[k] = v
        return Tags(params)
class Potential(MSONable):
    """
    FEFF atomic potential.
    """
    def __init__(self, struct, absorbing_atom):
        """
        Args:
            struct (Structure): Structure object.
            absorbing_atom (str/int): Absorbing atom symbol or site index
        """
        if struct.is_ordered:
            self.struct = struct
            self.pot_dict = get_atom_map(struct)
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
        # Only the symbol is needed here; the site index is discarded.
        self.absorbing_atom, _ = \
            get_absorbing_atom_symbol_index(absorbing_atom, struct)
    @staticmethod
    def pot_string_from_file(filename='feff.inp'):
        """
        Reads Potential parameters from a feff.inp or FEFFPOT file.
        The lines are arranged as follows:
        ipot   Z   element   lmax1   lmax2   stoichometry   spinph
        Args:
            filename: file name containing potential data.
        Returns:
            FEFFPOT string.
        """
        with zopen(filename, "rt") as f_object:
            f = f_object.readlines()
            ln = -1
            pot_str = ["POTENTIALS\n"]
            pot_tag = -1
            # pot_data counts the next expected ipot index (rows must be
            # numbered 0, 1, 2, ...); pot_data_over flips to 0 once the
            # potential table has ended.
            pot_data = 0
            pot_data_over = 1
            # Recognised separator/header lines inside the POTENTIALS card.
            sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
                                re.compile('^[*]+.*[*]+$')]
            for line in f:
                if pot_data_over == 1:
                    ln += 1
                    if pot_tag == -1:
                        pot_tag = line.find("POTENTIALS")
                        ln = 0
                    if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
                        try:
                            if len(sep_line_pattern[0].findall(line)) > 0 or \
                                    len(sep_line_pattern[1].findall(line)) > 0:
                                pot_str.append(line)
                            elif int(line.split()[0]) == pot_data:
                                pot_data += 1
                                pot_str.append(line.replace("\r", ""))
                        except (ValueError, IndexError):
                            if pot_data > 0:
                                pot_data_over = 0
        return ''.join(pot_str).rstrip('\n')
    @staticmethod
    def pot_dict_from_string(pot_data):
        """
        Creates atomic symbol/potential number dictionary
        forward and reverse
        Arg:
            pot_data: potential data in string format
        Returns:
            forward and reverse atom symbol and potential number dictionaries.
        """
        pot_dict = {}
        pot_dict_reverse = {}
        begin = 0
        ln = -1
        for line in pot_data.split("\n"):
            try:
                # Table starts at the row whose ipot is "0" (absorbing atom);
                # that row itself is skipped (ln > 0 below).
                if begin == 0 and line.split()[0] == "0":
                    begin += 1
                    ln = 0
                if begin == 1:
                    ln += 1
                if ln > 0:
                    atom = line.split()[2]
                    index = int(line.split()[0])
                    pot_dict[atom] = index
                    pot_dict_reverse[index] = atom
            except (ValueError, IndexError):
                # Non-data lines (headers, separators, blanks) are ignored.
                pass
        return pot_dict, pot_dict_reverse
    def __str__(self):
        """
        Returns a string representation of potential parameters to be used in
        the feff.inp file,
        determined from structure object.
        The lines are arranged as follows:
        ipot   Z   element   lmax1   lmax2   stoichiometry   spinph
        Returns:
            String representation of Atomic Coordinate Shells.
        """
        central_element = Element(self.absorbing_atom)
        # ipot 0 is always the absorbing atom; stoichiometry .0001 marks it.
        ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
        for el, amt in self.struct.composition.items():
            ipot = self.pot_dict[el.symbol]
            ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
        ipot_sorted = sorted(ipotrow, key=itemgetter(0))
        ipotrow = str(tabulate(ipot_sorted,
                               headers=["*ipot", "Z", "tag", "lmax1",
                                        "lmax2", "xnatph(stoichometry)",
                                        "spinph"]))
        ipotlist = ipotrow.replace("--", "**")
        ipotlist = ''.join(["POTENTIALS\n", ipotlist])
        return ipotlist
    def write_file(self, filename='POTENTIALS'):
        """
        Write to file.
        Args:
            filename: filename and path to write potential file to.
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class Paths(MSONable):
    """
    Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
    """
    def __init__(self, atoms, paths, degeneracies=None):
        """
        Args:
            atoms (Atoms): Atoms object
            paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
                cluster(the molecular cluster created by Atoms class).
                e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
            degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
        """
        self.atoms = atoms
        self.paths = paths
        self.degeneracies = degeneracies or [1] * len(paths)
        assert len(self.degeneracies) == len(self.paths)
    def __str__(self):
        lines = ["PATH", "---------------"]
        # max possible, to avoid name collision count down from max value.
        path_index = 9999
        for i, legs in enumerate(self.paths):
            lines.append("{} {} {}".format(path_index, len(legs), self.degeneracies[i]))
            lines.append("x y z ipot label")
            for l in legs:
                coords = self.atoms.cluster[l].coords.tolist()
                tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
                element = str(self.atoms.cluster[l].specie.name)
                # the potential index for the absorbing atom(the one at the cluster origin) is 0
                potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
                tmp = "{} {} {}".format(tmp, potential, element)
                lines.append(tmp)
            path_index -= 1
        return "\n".join(lines)
    def write_file(self, filename="paths.dat"):
        """
        Write paths.dat.
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class FeffParserError(Exception):
    """
    Exception raised when FEFF input/output content cannot be parsed.

    The offending condition is described by ``msg``.
    """

    def __init__(self, msg):
        # keep the message reachable via the legacy ``.msg`` attribute
        self.msg = msg

    def __str__(self):
        return "FeffParserError : {}".format(self.msg)
def get_atom_map(structure):
    """
    Returns a dict that maps each atomic symbol to a unique integer starting
    from 1, in order of first appearance in the structure.

    Args:
        structure (Structure)

    Returns:
        dict: symbol -> 1-based potential index
    """
    # Single O(n) pass replaces the old side-effecting list comprehension,
    # whose per-element `.count` scan made deduplication O(n^2).
    atom_map = {}
    for site in structure:
        symbol = site.specie.symbol
        if symbol not in atom_map:
            atom_map[symbol] = len(atom_map) + 1
    return atom_map
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """
    Return the absorbing atom symbol and site index in the given structure.

    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)

    Returns:
        str, int: symbol and site index

    Raises:
        ValueError: if ``absorbing_atom`` is neither a string nor an integer.
    """
    if isinstance(absorbing_atom, str):
        # symbol given -> look up the first matching site index
        site_index = structure.indices_from_symbol(absorbing_atom)[0]
        return absorbing_atom, site_index
    if isinstance(absorbing_atom, int):
        # index given -> read the symbol off the site
        return str(structure[absorbing_atom].specie), absorbing_atom
    raise ValueError("absorbing_atom must be either specie symbol or site index")
| blondegeek/pymatgen | pymatgen/io/feff/inputs.py | Python | mit | 32,854 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from keras.datasets import mnist
from hyperemble.neural_net import VanillaNeuralNet
def test_vanilla_neural_net():
    """Smoke-test VanillaNeuralNet on MNIST: expect above 92% test accuracy."""
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # flatten the 28x28 images into 784-dim float vectors scaled to [0, 1]
    X_train = X_train.reshape(60000, 784).astype('float32') / 255
    X_test = X_test.reshape(10000, 784).astype('float32') / 255
    model = VanillaNeuralNet(n_layers=2, hidden_dim=200,
                             keep_prob=0.8, loss_func="auto",
                             verbose=1, batch_size=128, random_state=1)
    model.fit(X_train, y_train)
    accuracy = model.score(X_test, y_test)
    assert accuracy > 0.92
| hduongtrong/hyperemble | hyperemble/neural_net/tests/test_neural_net.py | Python | bsd-2-clause | 760 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013, Sascha Peilicke <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import
__doc__ = 'Generate distribution packages from PyPI'
__docformat__ = 'restructuredtext en'
__author__ = 'Sascha Peilicke <[email protected]>'
__version__ = '0.5.0'
import argparse
import datetime
import distutils.core
import glob
import os
import pickle
import pkg_resources
import pprint
import pwd
import re
import setuptools.sandbox
import shutil
import sys
import tarfile
import tempfile
import urllib
# Prefer the Python 3 module name; fall back to the Python 2 one.
try:
    import xmlrpc.client as xmlrpclib
except ImportError:  # narrow: a bare except would also swallow KeyboardInterrupt/SystemExit
    import xmlrpclib
import zipfile
import jinja2
import py2pack.proxy
TEMPLATE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates') # absolute template path
pypi = xmlrpclib.ServerProxy('https://pypi.python.org/pypi') # XML RPC connection to PyPI
env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR)) # Jinja2 template environment
env.filters['parenthesize_version'] = \
lambda s: re.sub('([=<>]+)(.+)', r' (\1 \2)', s)
env.filters['basename'] = \
lambda s: s[s.rfind('/') + 1:]
SPDX_LICENSES_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'spdx_license_map.p') # absolute template path
SDPX_LICENSES = pickle.load(open(SPDX_LICENSES_FILE, 'rb'))
def list(args=None):
    """Print every package name registered on PyPI.

    NOTE(review): the name shadows the builtin ``list``. It is kept because
    argparse binds this exact function as the 'list' sub-command callback
    (``parser_list.set_defaults(func=list)``) in ``main``.

    Args:
        args: parsed argparse namespace (unused).
    """
    print('listing all PyPI packages...')
    for package in pypi.list_packages():  # nothing fancy
        print(package)
def search(args):
    """Search PyPI for packages matching ``args.name`` and print each hit."""
    print('searching for package {0}...'.format(args.name))
    hits = pypi.search({'name': args.name})
    for match in hits:
        print('found {0}-{1}'.format(match['name'], match['version']))
def show(args):
    """Pretty-print the full PyPI metadata for one package release."""
    check_or_set_version(args)
    print('showing package {0}...'.format(args.name))
    metadata = pypi.release_data(args.name, args.version)  # fetch all meta data
    pprint.pprint(metadata)
def fetch(args):
    """Download the newest source release of ``args.name`` into the CWD.

    Exits with status 1 when PyPI offers no source distribution at all.
    """
    check_or_set_version(args)
    url = newest_download_url(args)
    if not url:
        print("unable to find a source release for {0}!".format(args.name))  # pass out if nothing is found
        sys.exit(1)
    print('downloading package {0}-{1}...'.format(args.name, args.version))
    print('from {0}'.format(url['url']))
    # Python 2 API; under Python 3 this would be urllib.request.urlretrieve.
    urllib.urlretrieve(url['url'], url['filename'])  # download the object behind the URL
def _parse_setup_py(file, data):
    """Scrape metadata keyword arguments out of a setup.py without running it.

    Regex-matches the interesting ``setup(...)`` keywords and stores the
    evaluated right-hand sides into ``data`` (keys: is_extension, scripts,
    test_suite, install_requires, extras_require, data_files, entry_points).

    WARNING: the matched expressions are fed to ``eval``. setup.py comes from
    an untrusted tarball, so even literal-looking values can execute code;
    ``ast.literal_eval`` would be the safer primitive here.

    Args:
        file: binary file-like object positioned at the start of setup.py.
        data: dict to be updated in place.
    """
    contents = file.read().decode('utf-8')
    # the mere presence of ext_modules marks a C-extension package
    match = re.search("ext_modules", contents)
    if match:
        data["is_extension"] = True
    match = re.search("[(,]\s*scripts\s*=\s*(\[.*?\])", contents, flags=re.DOTALL)
    if match:
        data["scripts"] = eval(match.group(1))
    match = re.search("test_suite\s*=\s*(.*)", contents)
    if match:
        data["test_suite"] = eval(match.group(1))
    match = re.search("install_requires\s*=\s*(\[.*?\])", contents, flags=re.DOTALL)
    if match:
        data["install_requires"] = eval(match.group(1))
    match = re.search("extras_require\s*=\s*(\{.*?\})", contents, flags=re.DOTALL)
    if match:
        data["extras_require"] = eval(match.group(1))
    match = re.search("data_files\s*=\s*(\[.*?\])", contents, flags=re.DOTALL)
    if match:
        data["data_files"] = eval(match.group(1))
    # entry_points may be a dict literal or an .ini-style (triple-)quoted string
    match = re.search('entry_points\s*=\s*(\{.*?\}|""".*?"""|".*?")', contents, flags=re.DOTALL)
    if match:
        data["entry_points"] = eval(match.group(1))
def _run_setup_py(tarfile, setup_filename, data):
    """Execute the archive's setup.py in a sandbox and harvest its metadata.

    Riskier than ``_parse_setup_py`` because arbitrary setup.py code runs;
    only reached via the ``--run`` command-line switch. Results are stored
    into ``data``.

    Args:
        tarfile: open tar/zip archive object supporting ``extractall``.
        setup_filename: archive-relative path of setup.py.
        data: dict to be updated in place.
    """
    tempdir = tempfile.mkdtemp()
    # extract inside a setuptools sandbox so setup.py side effects stay local
    setuptools.sandbox.DirectorySandbox(tempdir).run(lambda: tarfile.extractall(tempdir))
    setup_filename = os.path.join(tempdir, setup_filename)
    # stop distutils after the configuration stage; only the metadata is wanted
    distutils.core._setup_stop_after = "config"
    setuptools.sandbox.run_setup(setup_filename, "")
    dist = distutils.core._setup_distribution
    shutil.rmtree(tempdir)
    if dist.ext_modules:
        data["is_extension"] = True
    if dist.scripts:
        data["scripts"] = dist.scripts
    if dist.test_suite:
        data["test_suite"] = dist.test_suite
    if dist.install_requires:
        data["install_requires"] = dist.install_requires
    if dist.extras_require:
        data["extras_require"] = dist.extras_require
    if dist.data_files:
        data["data_files"] = dist.data_files
    if dist.entry_points:
        data["entry_points"] = dist.entry_points
def _canonicalize_setup_data(data):
    """Normalize raw setup.py values in ``data`` into predictable shapes.

    Handles install_requires/extras_require given as strings, data_files
    given without target directories or with prefix-relative paths, and
    entry_points given as .ini-style text.
    """
    def _sanitize_requirements(req):
        """Reduce a requirement spec to [name, comparator, lowest version]."""
        version_dep = None
        version_comp = None
        pkg = pkg_resources.Requirement.parse(req)
        for dep in pkg.specs:
            version = pkg_resources.parse_version(dep[1])
            # try to use the lowest version available
            # i.e. for ">=0.8.4,>=0.9.7", select "0.8.4"
            if (not version_dep or
                    version < pkg_resources.parse_version(version_dep)):
                version_dep = dep[1]
                version_comp = dep[0]
        # NOTE: on Python 2 filter() returns a list here; drops the None
        # placeholders when the requirement carries no version constraint
        return filter(lambda x: x is not None,
                      [pkg.unsafe_name, version_comp, version_dep])
    if "install_requires" in data:
        # install_requires may be a string, convert to list of strings:
        if isinstance(data["install_requires"], str):
            data["install_requires"] = data["install_requires"].splitlines()
        # find lowest version and take care of spaces between name and version
        data["install_requires"] = [" ".join(_sanitize_requirements(req))
                                    for req in data["install_requires"]]
    if "extras_require" in data:
        # extras_require value may be a string, convert to list of strings:
        for (key, value) in data["extras_require"].items():
            if isinstance(value, str):
                data["extras_require"][key] = value.splitlines()
            # find lowest version and take care of spaces between name and ver
            data["extras_require"][key] = [
                " ".join(_sanitize_requirements(req))
                for req in data["extras_require"][key]]
    if "data_files" in data:
        # data_files may be a sequence of files without a target directory:
        if len(data["data_files"]) and isinstance(data["data_files"][0], str):
            data["data_files"] = [("", data["data_files"])]
        # directory paths may be relative to the installation prefix:
        prefix = sys.exec_prefix if "is_extension" in data else sys.prefix
        data["data_files"] = [
            (dir if (len(dir) and dir[0] == '/') else os.path.join(prefix, dir), files)
            for (dir, files) in data["data_files"]]
    if "entry_points" in data:
        # entry_points may be a string with .ini-style sections, convert to a dict:
        if isinstance(data["entry_points"], str):
            data["entry_points"] = pkg_resources.EntryPoint.parse_map(data["entry_points"])
        if "console_scripts" in data["entry_points"]:
            try:
                data["console_scripts"] = data["entry_points"]["console_scripts"].keys()
            except AttributeError:
                pass
def _augment_data_from_tarball(args, filename, data):
    """Enrich ``data`` with details read from a local sdist archive.

    Supports both tarballs and zip files: parses (or, with --run, executes)
    the contained setup.py, records documentation files
    (AUTHORS/ChangeLog/LICENSE/README/...) and sets ``testsuite`` when any
    member name hints at tests. Silently returns for unknown archive types.
    """
    setup_filename = "{0}-{1}/setup.py".format(args.name, args.version)
    docs_re = re.compile("{0}-{1}\/((?:AUTHOR|ChangeLog|CHANGES|COPYING|LICENSE|NEWS|README).*)".format(args.name, args.version), re.IGNORECASE)
    shell_metachars_re = re.compile("[|&;()<>\s]")
    if tarfile.is_tarfile(filename):
        with tarfile.open(filename) as f:
            names = f.getnames()
            if args.run:
                _run_setup_py(f, setup_filename, data)
            else:
                _parse_setup_py(f.extractfile(setup_filename), data)
            _canonicalize_setup_data(data)
    elif zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename) as f:
            names = f.namelist()
            if args.run:
                _run_setup_py(f, setup_filename, data)
            else:
                with f.open(setup_filename) as s:
                    _parse_setup_py(s, data)
            _canonicalize_setup_data(data)
    else:
        return
    for name in names:
        match = re.match(docs_re, name)
        if match:
            if "doc_files" not in data:
                data["doc_files"] = []
            if re.search(shell_metachars_re, match.group(1)):  # quote filename if it contains shell metacharacters
                data["doc_files"].append("'" + match.group(1) + "'")
            else:
                data["doc_files"].append(match.group(1))
        if "test" in name.lower():  # Very broad check for testsuites
            data["testsuite"] = True
def _normalize_license(data):
    """Replace data['license'] with its SPDX identifier, or '' when unknown."""
    raw_license = data.get('license', None)
    if raw_license and raw_license in SDPX_LICENSES:
        data['license'] = SDPX_LICENSES[raw_license]
    else:
        # unknown or missing license string: blank it out for the template
        data['license'] = ""
def generate(args):
    """Render a spec/dsc file for the given package from its PyPI metadata.

    Fetches release metadata, enriches it with information from a local
    source tarball (if one is present in the CWD) and renders the chosen
    Jinja2 template into ``args.filename``.
    """
    check_or_set_version(args)
    if not args.template:
        args.template = file_template_list()[0]
    if not args.filename:
        args.filename = args.name + '.' + args.template.rsplit('.', 1)[1]  # take template file ending
    print('generating spec file for {0}...'.format(args.name))
    data = pypi.release_data(args.name, args.version)  # fetch all meta data
    url = newest_download_url(args)
    if url:
        data['source_url'] = url['url']
    else:
        # no source release found anywhere: fall back to a guessed file name
        data['source_url'] = args.name + '-' + args.version + '.zip'
    data['year'] = datetime.datetime.now().year  # set current year
    data['user_name'] = pwd.getpwuid(os.getuid())[4]  # set system user (packager)
    # strip a trailing dot from the summary (raw strings for the regex)
    data['summary_no_ending_dot'] = re.sub(r'(.*)\.', r'\g<1>', data.get('summary', ""))
    # if a local tarball is present, mine it for additional information
    tarball_file = glob.glob("{0}-{1}.*".format(args.name, args.version))
    if tarball_file:
        _augment_data_from_tarball(args, tarball_file[0], data)
    _normalize_license(data)
    template = env.get_template(args.template)
    result = template.render(data).encode('utf-8')  # render template and encode properly
    # context manager replaces the old open/try/finally and guarantees close
    with open(args.filename, 'wb') as outfile:
        outfile.write(result)
def check_or_set_version(args):
    """Ensure ``args.version`` is set, defaulting to the newest PyPI release.

    Exits with status 1 when the package has no releases at all.
    """
    if not args.version:  # take first version found
        releases = pypi.package_releases(args.name)
        if len(releases) == 0:
            print("unable to find a suitable release for {0}!".format(args.name))
            sys.exit(1)
        else:
            # reuse the list already fetched instead of a second XML-RPC round-trip
            args.version = releases[0]  # first entry is the current release number
def newest_download_url(args):
    """Return the download-info dict for the package's source release.

    Prefers a PyPI-hosted sdist; falls back to the upstream ``download_url``
    from the release metadata. Returns {} when neither exists.
    """
    for url in pypi.package_urls(args.name, args.version):  # Fetch all download URLs
        if url['packagetype'] == 'sdist':  # Found the source URL we care for
            return url
    # No PyPI tarball release, let's see if an upstream download URL is provided:
    data = pypi.release_data(args.name, args.version)  # Fetch all meta data
    if 'download_url' in data:
        filename = os.path.basename(data['download_url'])
        return {'url': data['download_url'], 'filename': filename}
    return {}  # We're all out of bubblegum
def file_template_list():
    """Return the names of all bundled template files (dotfiles excluded)."""
    entries = os.listdir(TEMPLATE_DIR)
    return [entry for entry in entries if not entry.startswith('.')]
def main():
    """Command-line entry point: build the argparse command tree and dispatch.

    Each sub-command binds its handler via ``set_defaults(func=...)``; after
    parsing, the chosen handler is invoked with the parsed namespace.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__))
    parser.add_argument('--proxy', help='HTTP proxy to use')
    subparsers = parser.add_subparsers(title='commands')
    parser_list = subparsers.add_parser('list', help='list all packages on PyPI')
    parser_list.set_defaults(func=list)
    parser_search = subparsers.add_parser('search', help='search for packages on PyPI')
    parser_search.add_argument('name', help='package name (with optional version)')
    parser_search.set_defaults(func=search)
    parser_show = subparsers.add_parser('show', help='show metadata for package')
    parser_show.add_argument('name', help='package name')
    parser_show.add_argument('version', nargs='?', help='package version (optional)')
    parser_show.set_defaults(func=show)
    parser_fetch = subparsers.add_parser('fetch', help='download package source tarball from PyPI')
    parser_fetch.add_argument('name', help='package name')
    parser_fetch.add_argument('version', nargs='?', help='package version (optional)')
    parser_fetch.set_defaults(func=fetch)
    parser_generate = subparsers.add_parser('generate', help='generate RPM spec or DEB dsc file for a package')
    parser_generate.add_argument('name', help='package name')
    parser_generate.add_argument('version', nargs='?', help='package version (optional)')
    parser_generate.add_argument('-t', '--template', choices=file_template_list(), default='opensuse.spec', help='file template')
    parser_generate.add_argument('-f', '--filename', help='spec filename (optional)')
    parser_generate.add_argument('-r', '--run', action='store_true', help='run setup.py (optional, risky!)')
    parser_generate.set_defaults(func=generate)
    parser_help = subparsers.add_parser('help', help='show this help')
    parser_help.set_defaults(func=lambda args: parser.print_help())
    args = parser.parse_args()
    # set HTTP proxy if one is provided
    if args.proxy:
        try:
            # quick reachability probe before wiring the proxy in (Python 2 urllib)
            urllib.urlopen(args.proxy)
        except IOError:
            print('the proxy \'{0}\' is not responding'.format(args.proxy))
            sys.exit(1)
        transport = py2pack.proxy.ProxiedTransport()
        transport.set_proxy(args.proxy)
        pypi._ServerProxy__transport = transport  # Evil, but should do the trick
    args.func(args)
# fallback if run directly
if __name__ == '__main__':
    main()
| frispete/py2pack | py2pack/__init__.py | Python | gpl-2.0 | 14,913 |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing for the timboektu project (legacy Django `patterns()` syntax;
# all views live in the `books` app, except the admin).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'timboektu.views.home', name='home'),
    # url(r'^timboektu/', include('timboektu.foo.urls')),
    url(r'^$', 'timboektu.books.views.index'),
    url(r'^post/(?P<post_id>\d+)/$', 'timboektu.books.views.detail'),
    url(r'^post/new/$', 'timboektu.books.views.new'),
    # hash-addressed post management (edit/renew/confirm use an opaque hash)
    url(r'^post/edit/(?P<post_hash>.+)/$', 'timboektu.books.views.edit'),
    url(r'^post/renew/(?P<post_hash>.+)/$', 'timboektu.books.views.renew'),
    url(r'^department/(?P<department_id>\d+)/$', 'timboektu.books.views.department'),
    url(r'^about/$', 'timboektu.books.views.about'),
    url(r'^contribute/$', 'timboektu.books.views.contribute'),
    url(r'^post/confirm/(?P<post_hash>.+)/$', 'timboektu.books.views.confirm'),
    url(r'^delete/$', 'timboektu.books.views.delete'),
    url(r'^locations/$', 'timboektu.books.views.locations'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
| phoxicle/timboektu | timboektu/urls.py | Python | mit | 1,283 |
#
# Foris
# Copyright (C) 2019 CZ.NIC, z.s.p.o. <http://www.nic.cz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from foris.config_handlers import dns
from foris.state import current_state
from .base import ConfigPageMixin
class DNSConfigPage(ConfigPageMixin, dns.DNSHandler):
    """Configuration page for DNS settings, with an AJAX connection test."""

    slug = "dns"
    menu_order = 19
    template = "config/dns"
    template_type = "jinja2"

    def _action_check_connection(self):
        # ask the backend to run only the DNS portion of the connection test
        request_data = {"test_kinds": ["dns"]}
        return current_state.backend.perform("wan", "connection_test_trigger", request_data)

    def call_ajax_action(self, action):
        """Dispatch a named AJAX action; only 'check-connection' is known."""
        if action != "check-connection":
            raise ValueError("Unknown AJAX action.")
        return self._action_check_connection()
| CZ-NIC/foris | foris/config/pages/dns.py | Python | gpl-3.0 | 1,320 |
# Best of NHK - by misty 2013-2020.
# import python libraries
import urllib
import urllib2
import re
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
import sys
import os
import datetime
import time
import calendar
import json
from random import randrange
#print(randrange(10))
addon01 = xbmcaddon.Addon('plugin.video.bestofnhk')
addonname = addon01.getAddonInfo('name')
addon_id = 'plugin.video.bestofnhk'
from t0mm0.common.addon import Addon
addon = Addon(addon_id, sys.argv)
from t0mm0.common.net import Net
net = Net()
settings = xbmcaddon.Addon(id='plugin.video.bestofnhk')
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
pluginhandle = int(sys.argv[1])
# globals
#host1 = 'http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/'
host2 = 'https://www3.nhk.or.jp/'
host3 = 'https://ak.c.ooyala.com/'
host4 = 'https://player.ooyala.com/player/all/'
host5 = 'https://www.nhk.or.jp/rj/podcast/rss/'
host6 = 'https://www.jibtv.com'
host7 = ''
host8 = 'https://api.nhk.or.jp/nhkworld/vodesdlist/v7a/'
host9 = 'https://www3.nhk.or.jp/nhkworld/assets/images/vod/icon/png320/'
host10 = 'https://api.nhk.or.jp/nhkworld/pg/v6a/'
host11 = 'https://www3.nhk.or.jp/nhkworld/upld/thumbnails/en/news/programs/'
host12 = 'https://api.nhk.or.jp/nhkworld/vodcliplist/v7a/'
host13 = 'https://api.nhk.or.jp/nhkworld/rdonews/v6a/'
host14 = 'https://api.nhk.or.jp/nhkworld/vodplaylist/v7a/'
apikey = 'apikey=EJfK8jdS57GqlupFgAfAAwr573q01y6k'
feat = 'nhkworld/rss/news/english/features_'
nhk_icon = addon01.getAddonInfo('icon') # icon.png in addon directory
jib_icon = 'http://www.jamaipanese.com/wp-content/uploads/2009/05/jibbywithfreesby.jpg'
download_path = settings.getSetting('download_folder')
Time = str(time.strftime ('%H:%M:%S%p/%Z/%c'))
str_Yr = str(time.strftime ('%Y'))
str_Mth = str(time.strftime ('%m'))
Yr = int(time.strftime ('%Y'))
Mth = int(time.strftime ('%m'))
Dy = int(time.strftime ('%d'))
Hr = str(time.strftime ('%H'))
Min = str(time.strftime ('%M'))
Date = str(time.strftime ('%m/%d/%Y'))
TimeZone = settings.getSetting('tz')
day = ''
tz_C = 0
#print "Date and time is: " + Date + " " + Time
use_color = settings.getSetting('usecolor')
month = { 1:'01_jan', 2:'02_feb', 3:'03_mar', 4:'04_apr', 5:'05_may', 6:'06_jun',
7:'07_jul', 8:'08_aug', 9:'09_sep', 10:'10_oct', 11:'11_nov', 12:'12_dec' }
lang = { 0:'Arabic', 1:'Bengali', 2:'Burmese', 3:'Chinese', 4:'English', 5:'French', 6:'Hindi',
7:'Indonesia', 8:'Japanese', 9:'Korean', 10:'Persian', 11:'Portuguese', 12:'Russian',
13:'Spanish', 14:'Swahili', 15:'Thai', 16:'Urdu', 17:'Vietnamese' }
lang_key = { 0:'ar', 1:'bn', 2:'my', 3:'zh', 4:'en', 5:'fr', 6:'hi', 7:'id', 8:'ja',
9:'ko', 10:'fa', 11:'pt', 12:'ru', 13:'es', 14:'sw', 15:'th', 16:'ur', 17:'vi' }
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
s_dict={}; e_dict={}; v_dict={}; p_dict={}; t_dict={}; req1_dict={}
z = randrange(133)
img = {0:'video/2053171/images/O0YyXBlk2CyLbEjZZFPyrl7dpA4jknc0bhYu9iWl.jpeg', 1:'video/2079016/images/Taf9c2PrMleSaB7DdYE6YX5acL9y6b6ywddV57tG.jpeg',
2:'video/3019104/images/U8ftOKW2BDqmlqgXLm2P5vSomn7Pj4MhPkh17I6g.jpeg', 3:'video/2079012/images/uUdt6pekz0KBANhUhV9uA1KJWYfgFO1yRUC8EC3r.jpeg',
4:'video/2064032/images/SL3FKzFGgrte1t78Ncs0uGnPHhIZor4sfTHhSShu.jpeg', 5:'video/2053166/images/Bk575HEqWeQQtCAxdoCFLxcgHd4nKYj3Lsb7OAIj.jpeg',
6:'video/9111005/images/QRBsV1cIdplD4mt87yjNc5X9VlhHAwP4XE4gA56R.jpeg', 7:'video/2029117/images/96a53dfd1b6e98e048d17d01899b3ee53eb26a31.jpg',
8:'video/3019073/images/RTpidoWBkKSMN7KPvXKFkTJ0hGn4caxfelZxkqId.jpeg', 9:'video/2007398/images/pYmhSPIcfIgcMHkp7Aos6PJyocFkwPPCdSDnBaVf.jpeg',
10:'video/2064028/images/FcblA0xVQcIrmChpDhMPXlzzcDYNVRIhWu0uBHIT.jpeg', 11:'video/3004615/images/kijkIzg7gcFqjUKcw78vmbCdwMH54C7qXIxP4ZJC.jpeg',
12:'video/2029125/images/1sBRY2CltpgNfNobToUXbd8J2jXwgi5ouDW98ae5.jpeg', 13:'video/2007390/images/MOwAz7U062d8vtllEdXmx59nkRyDySRjnPNF6Un2.jpeg',
14:'video/2029123/images/Kjn122N5bKfqjkHUvCOZnoXpn7mXs62S0oM5bDuA.jpeg', 15:'video/2029111/images/b22ba676282bb4922538ef23e3516624b9537266.jpg',
16:'video/2059049/images/e96bcf43c15390d21a21514372a0cdc2a32b78ec.jpg', 17:'video/2007150/images/ZPDvrpcvYW4zXNRwxQCOa57pYB1tKp6obtWVcoSc.jpeg',
18:'video/2046073/images/e3ac6115be9d6ff77816168850fa89a47f85c320.jpg', 19:'video/6119006/images/rbP8zsNXShZKpOmdBx4hbmvXnjMXZiNzti3yk3Iy.jpeg',
20:'video/2059076/images/bW5X1HuzEQpGwdEh8T2bqk4JAnrPzZXyFELkUICM.jpeg', 21:'video/2029106/images/422641daa78a4651c2263214f85b38dfaa66f76b.jpg',
22:'video/2064011/images/4d7bc151e9017385078a6686eed9c546dfc4fd7b.jpg', 23:'video/2032183/images/GVK4GGPV0aScRCsR7g2MmqauK6BAFDIl7CTQ6Wuz.jpeg',
24:'video/2064009/images/0f4dc9f98599c3187279a9f16cef5ef2d5f6e964.jpg', 25:'video/3004582/images/fNvoGcevk9GKYVtUvPWHrjrZlI60BKhVB89EMc01.jpeg',
26:'video/2066020/images/uw3jRuNpdqXiffNPt0jV7j2DT7B8hjzRdFMVg4Gs.jpeg', 27:'video/6023026/images/BLdfRUgKQzluaZ1NHVWKRvsjFzBLwb0XmzluQTKH.jpeg',
28:'video/6024014/images/BtNgodMdLv8x8zA7zxtxvaFjVp7yYx7yp4UGgqaV.jpeg', 29:'video/2046084/images/2c8aa6a76b36041de06ee0811388aaaad0455b6c.jpg',
30:'video/3004513/images/dfe21def666d8e6edd836448d193b24352748753.jpg', 31:'video/2029110/images/a832fbe4dffe156ae0ee2c39ba17f8b56a2a43f0.jpg',
32:'video/3004454/images/JvKM3qNkjHwNstK962ZFg8CJj6EdIdfpOPU8s05J.jpeg', 33:'video/3019109/images/kWsJOoQf3c0N1ZfehhmqOoTlZdV9mYr7k6i29X7J.jpeg',
34:'video/2079003/images/v3MprittwgQSlIJo24sg8WihWKDrXMJzS08VXoK3.jpeg', 35:'video/5003094/images/dc4d6ed666c0a2ae39cb8088d72a30b5845ef001.jpg',
36:'video/6121002/images/7BKeV3WZz2RigltNN9tyehK4jZmaSwp67Um0VkAp.jpeg', 37:'video/6121001/images/1t9R5PktLQFjTwnHLgBTp3fh38kJjm3siHQI6gh4.jpeg',
38:'video/6120002/images/DlurEdgrWEfSocwkpeRFKo1N3bqMHUQ6ELgT78yK.jpeg', 39:'video/2061342/images/1omTb6Ed1z3VNPG9ch0j61avgSubEYQMPXAolFXi.jpeg',
40:'video/2029104/images/55bc04d408b7cab74b50097c8faf699d09f8c7f8.jpg', 41:'video/2061340/images/kRokNUDEb1hl5pd05moiZxUMyavYfysvEsgCNzB1.jpeg',
42:'video/2061339/images/7RXc1jtahLNWS9Fkj4Pam94aTqwoeCNhpu0yQr11.jpeg', 43:'video/2079017/images/RnyYCYFvjp7stAb2cxNfCgbMoVCTT79olm1s0DrZ.jpeg',
44:'video/2061336/images/6ZzrtTySligelx9MPrMw74eGoJRmlWHHSbiAVTso.jpeg', 45:'video/2004351/images/WOtHa3l8JjwQVu2wyKe9QSTlGDvDpfcNRILTqUbr.jpeg',
46:'video/2079005/images/gs2C2vYUzDsM5VHxYHNXLhPnS2QV6OvqmjFT3S7Z.jpeg', 47:'video/3004649/images/B19jqkG9OdtB0fokEBONKqKkcO1rQQL8eCTFlONN.jpeg',
48:'video/2068019/images/f8iZwgghpyRckVqwWDsLICyJgLin0aOpT6tRA12H.jpeg', 49:'video/2029133/images/0zqSOqrKdxl6xvnR6A02RdEmoMyMuCLeCMStFDXD.jpeg',
50:'video/2032193/images/rrwDGEq4ezLP9sEDjfXnlO5hvsbeQPnI1UyQ8dcH.jpeg', 51:'video/2029130/images/UWs2eFqi0ZA2gJwNltyNRZtlTEdxPw87qUhgVeX2.jpeg',
52:'video/2029101/images/jyaxRwRUi0tX7roRDDMdhjbksA270Puwt2yRIrrr.jpeg', 53:'video/3019098/images/l4OSOiFVsQTCrUPMnPsVDFfUoF6p1M4nnZYKsMCa.jpeg',
54:'video/3019022/images/RtS14vWB3k8hqPNDbvL1lb3AbeT0YztV9j5TzluY.jpeg', 55:'video/2029129/images/YDWoSvV5N2jbhLocflS2Re679cOwrd2nGCkrHXkT.jpeg',
56:'video/6119005/images/lqgxlgys9FYYaoPN1wZVvtdX5Yyon1L7qs7y5UNz.jpeg', 57:'video/2004348/images/LKPkufOawbxnLU4W7rvuA5hulI6qfcrrC9VL6yff.jpeg',
58:'video/2064037/images/TWFLdKetLMscu1r2AggZvtfADHER63BFxgLlq69C.jpeg', 59:'video/2077009/images/JWsKAGlGXZmGU40FVqhacdgNSLmAY1326RWwT0iP.jpeg',
60:'video/2068005/images/32123c4f569adc22d474221774161fb060c95f6b.jpg', 61:'video/2049081/images/RnR1rQGVFYPAaImzvQb4TWrPTYsd2vK83yu63BHJ.jpeg',
62:'video/2029096/images/gMa8UPu7mlHneaSymkUK8kxuGR5uRsP9fH6SJlL8.jpeg', 63:'video/2079014/images/TlMDcaIyXA9cLR8E8qKeeEB0XN0ysdagbRhKW4g6.jpeg',
64:'video/3004627/images/f9sGULvKDDXpsnd5i9R1azA5WzAAD9Pw97AMMSWQ.jpeg', 65:'video/2064044/images/EXDDHFml3l4j8grDxG1RnpuqVOQ90dq9q9V0xYtt.jpeg',
66:'video/3019094/images/teN6LhiVrbnn3AzU0qsKddVqWB2WPIH0GGXMx0qc.jpeg', 67:'video/5006021/images/GQGMOVB4zU5TGuyuvexIRq3DcpYjbfqzHhN6ms9E.jpeg',
68:'video/2046110/images/mWVIYrEdxFgJ56lEjzHXO9ZnihsAA5353mQy1sH6.jpeg', 69:'video/2029126/images/4adqaDqA6DYygeog5Lkbcff0ikdhVb5oLDmabtbg.jpeg',
70:'video/2007400/images/qI57eZb8qVW8mIZwiKRbKj1O90ljgOm0EmAWQLxG.jpeg', 71:'video/2059068/images/KXYNyzYI2wzoZsonH5vIjnXNZdV0njeiVTIpYMzL.jpeg',
72:'video/2049078/images/famGpHlgLEKMNYHjmWFgfST7Bqf5Ligcdgawa5Ss.jpeg', 73:'video/3019074/images/ND398dFbHryGO6kfDSLbiV58luvEVlUr4LZYHqkA.jpeg',
74:'video/2059067/images/f127ca6bbeb6731c8cafe65607a4de64f84fc313.jpg', 75:'video/6028012/images/LL9A0yzYNb6fCfIjK7BAVVuXjDPaaKglQhvPpMbm.jpeg',
76:'video/2019246/images/NJtK3bQSubIeuNgVreVoDslOJNfD3DUWpmvv7v3g.jpeg', 77:'video/2068013/images/239a78e4e1fd4d8be718216dae8d0345024ea386.jpg',
78:'video/2069041/images/tyoGF0aKVEso7sWKKIIaNkxAZ1sqI8GUsJg2W4WS.jpeg', 79:'video/2064033/images/CsB3KuplnwlZcryK12wZ53F1PAomEOx3aIedMzn4.jpeg',
80:'video/2032196/images/1LvUl0VER2KP6L10DaoDp1P29oBMEKIPaW4q1uBH.jpeg', 81:'video/3019090/images/2ALyiUoFOirRTHkhwNbjs4VU11nvjs2nJL8T4ORe.jpeg',
82:'video/2007395/images/FfOFVZcD5RAcuPel2c3hWy6YNhAFiVeAUaf3mMSh.jpeg', 83:'video/2007392/images/ifHYXp1n4d9qOeLNYXnX9JPvY5Ck7w3GKH1j37BI.jpeg',
84:'video/2064026/images/1FYeggnqyeD6YuvjhOg9vDq0PsIazcJEJWBXNobe.jpeg', 85:'video/2049073/images/xWeutxOXmXYbaPTCj9A7lSEEqQCu47VpyuUxxZ7e.jpeg',
86:'video/2059057/images/bfdc4c4facd1529b3af6a201c6f2296f16e3bc25.jpg', 87:'video/2068012/images/35cae975434fa2665422df87e0be145031c05de6.jpg',
88:'video/2029082/images/5dd7424f8379756e4a8451c3bc7afb024738c107.jpg', 89:'video/3019047/images/197d33a607e9d3a5d2496f33a8d4ba258f2cfcac.jpg',
90:'video/2032192/images/vYc2DH6YojWdHFRpeBtBmGs7Ju0futMz2q2VwZs9.jpeg', 91:'video/2059022/images/c1e92ccc362ac6bd2e7ae83a124f302c246aed54.jpg',
92:'video/2049067/images/4NvPbRuPgfF5hUF20h9B5JBqbc1XGH6d4IvVS7P6.jpeg', 93:'video/3004602/images/4btVByLuOUkVUG1oH7H4Fo1z3KRm4GAvUkiVgWeH.jpeg',
94:'video/2049072/images/I6enwJ36WjnMWFYCiytaeyGaY896iC1PawnMxRTA.jpeg', 95:'video/2032188/images/uoMw0pOirBZ9zCDobxVcZg7grp8Vfeam7atLyCyl.jpeg',
96:'video/2049070/images/WgQ6hvh8JdEN4qCmQZgDMjTFcZa2mUkbUND3n6kt.jpeg', 97:'video/2053152/images/uhoKqmORsWlrNb55Zl4mD7kawbAREl1AimIusNlP.jpeg',
98:'video/2059078/images/HchRYMxthJoLr6ffPzB5cUARoSSmK7ql0OCLNziv.jpeg', 99:'video/2069031/images/636be35e59212946c5347c767b15a35d66376c53.jpg',
100:'video/2066022/images/byk6FKVsUQTednDtfFCbgIDA77RSnfbrIAmBbRMN.jpeg', 101:'video/2007332/images/508a921cdbf4f6db1bb99830dd82f5aacd9dda73.jpg',
102:'video/2058493/images/py3WqcNHgyxLxSPJdXGbUnRhLUOh0qShTjlZJnAu.jpeg', 103:'video/2064013/images/c79dab82b8c5e1f41761e016e9293add4b15577d.jpg',
104:'video/2064007/images/9e8737acfa9c31fd1dbcb233169fc954d46bcf54.jpg', 105:'video/2029122/images/Jr4nJXlNYNQP4uYqyFneQRP57AhXT65143YKwrKz.jpeg',
106:'video/6024002/images/lFG6zaUl4S02uBrj1f2LDNlbSACmS0LfcArn0dYz.jpeg', 107:'video/6024001/images/ITMzW2Gy6tEPDImwqUISTBojEp7GQUqqeIoMm858.jpeg',
108:'video/2029118/images/pU2ZuIUfJRMmlNk0gfzdG469zMjUtLaNfF9rIUeO.jpeg', 109:'video/2007318/images/123e3cae17b12efe83e4b38daec8547f08599235.jpg',
110:'video/3004536/images/0fda24c5880fed5dbdc4e7426c749e87651bd0ec.jpg', 111:'video/2007346/images/701d1b40d4f1b7c0289d1a522894afc8813f4bff.jpg',
112:'video/2007326/images/12a25a21f641af5f113b7f54f749be2fb4b346d9.jpg', 113:'special/fukushima/images/A0x2Rq7Ojwqs1t3FfrBL2VFSLrv7g3MtZjGiQP0e.jpeg',
114:'special/fukushima/images/ywoBGITSx4BaZ6188SIk4lOspfdv1OBaRC6fUjSx.jpeg', 115:'special/nagano/images/EU6h6X8Fo8xrJ9QDNcLNlVIv9M6AeVrAJiK2DXk4.jpeg',
116:'special/mie/images/olQ5Hvn5hD4cAtgIbu3zoGIPTpp2soPgHI8INb5j.jpeg', 117:'special/wakayama/images/JmKwKWVFYPJy4mXSKcYMnAwoutcpBDfo5wEQ7iwO.jpeg',
118:'special/nara/images/AxLkpaG1qvqeVRrq4XXegascly15Ro3zOv2TbSak.jpeg', 119:'special/nara/images/UhBehrFRqUPtQYeO5V3ZIYS0wyHvHwFNjfOnPJoz.jpeg',
120:'special/tokushima/images/wBuFKSZepjzTKAYbK4iTa9CVRvnPELSCmNclZgfY.jpeg', 121:'video/9999047/images/wRqD9soSv5Y6qLWlWQz8PArEZEdwlCPfZ70N1QG0.jpeg',
122:'video/5001288/images/3fcYxSufUgJO1WLnY4i1wpGQqUUbTRK41pvFFgFB.jpeg', 123:'video/2088002/images/6ChBBGG155obtxNbtER3yWzCZm4Vm4Nxd9Z8TVfS.jpeg',
124:'video/9999531/images/vOLpDIarmpojU3y19WN6SsugpsGAqCwnltD42VBF.jpeg', 125:'video/9999418/images/JVLcrYw8swn65XEDuyaHBNxlaobLWoh4YtlYKmiO.jpeg',
126:'video/9999191/images/c1HyNwXrF5dG0x0E1G7UPvMTjioaHzekHt2sCGUg.jpeg', 127:'video/9999147/images/8gdGgpy5DrQh2kj8NlkOVwPmC6cBTeb4O0etaYr4.jpeg',
128:'video/9999131/images/Bpo4ViEOlwZ15faYJTo51KQcThgfInyr8rSTQnIu.jpeg', 129:'video/9999116/images/mXeye2lD9XIGULOuST1HAlH06U2UtuY9RN7jprsc.jpeg',
130:'video/9999083/images/6MYzRXETwdnpdxwS4VnAI56OAYir4KDlTDCk1Nfh.jpeg', 131:'video/9999061/images/Y3RWptbrnVyFXIZJTWzWncPp7elOU8vrj3vfXgiq.jpeg',
132:'video/9999074/images/gR2x5A8J3ra1NsytdCemK59Kd8Mfzeg3k181MqOJ.jpeg'}
# NHK World Schedule Time Zone and DST correction
#print "Time zone is: " + TimeZone
# TZ message box
# Warn once when no time zone was chosen in the addon settings; the schedule
# offset then stays at its GMT default (tz_C == 0).
if TimeZone == " ":
    print "TimeZone is not selected."
    line1 = "The schedule will not be correct for your time zone."
    line2 = "Please set your time zone in the Best of NHK addon settings."
    line3 = "Changes take effect after close and re-open of Best of NHK."
    xbmcgui.Dialog().ok(addonname, line1, line2, line3)
else:
    pass
# TZ and DST calc
isdst = time.localtime().tm_isdst
#print "isdst is: " + str(isdst)
tz_link = TimeZone
# Pull the "(GMT...)" token out of the settings string, e.g. "(GMT-05:00) Eastern".
match=re.compile('\((.+?)\) .+?').findall(tz_link)
for tz_gmt in match:
    try:
        # plain "GMT" zone: only DST needs compensating
        if isdst == int(1) and tz_gmt == 'GMT':
            tz_corrected = -60
        elif isdst == int(0) and tz_gmt == 'GMT':
            tz_corrected = 0
        # NOTE(review): the bare except below is load-bearing. For any
        # "GMT+HH:MM"/"GMT-HH:MM" zone neither branch above assigns
        # tz_corrected, so this print raises NameError and control falls
        # into the handler, which parses the numeric offset instead.
        print int(tz_corrected)
        tz_C = tz_corrected
    except:
        t = tz_gmt[4:]  # the "HH:MM" part after "GMT+"/"GMT-"
        (H,M) = t.split(':')
        result = int(H) + int(M)/60.0  # offset in fractional hours
        print "result = "+str(result)
        # convert to minutes, flipping the sign and folding in 1h of DST
        if isdst == int(1) and tz_gmt[3:4] == '-':
            tz_corrected = (result - 1) * 60
        elif isdst == int(0) and tz_gmt[3:4] == '-':
            tz_corrected = result * 60
        elif isdst == int(1) and tz_gmt[3:4] == '+':
            tz_corrected = (result + 1) * -60
        elif isdst == int(0) and tz_gmt[3:4] == '+':
            tz_corrected = result * -60
        #print int(tz_corrected)
        tz_C = tz_corrected
# Build the EPG query window: local midnight expressed as GMT epoch seconds,
# spanning one day minus one minute.
d_atetime = datetime.datetime(Yr,Mth,Dy,00,00,00)
e_poch_midnt = calendar.timegm(d_atetime.timetuple())
start_time = int(e_poch_midnt) + int(60*tz_C) # e_poch_midnt = GMT midnight
end_time = int(start_time) + ((60*60*24)-60) # date+23:59:00
sch = 'https://api.nhk.or.jp/nhkworld/epg/v7a/world/s'+str(int(start_time))+'-e'+str(int(end_time))+'.json?%s' % apikey
now = 'https://api.nhk.or.jp/nhkworld/epg/v7a/world/now.json?%s' % apikey
# Main Menu
def CATEGORIES():
    """Build the add-on's top-level menu, one folder per feature."""
    top_menu = (
        ('NHK World Live Schedule', '', 'schedule', nhk_icon),
        ('NHK World Live Stream', '', 'live_strm', nhk_icon),
        ('NHK World Chinese Language Stream', '', 'other_live_strm', nhk_icon),
        ('NHK World On Demand', '', 'vod_cats', nhk_icon),
        ('JIBTV On Demand', 'https://www.jibtv.com/programs/', 'jibtv', jib_icon),
        ('NHK World News', '', 'news', nhk_icon),
        ('NHK Radio News', '', 'audio', nhk_icon),
        ('NHK Videos on Youtube', '', 'youtube1', nhk_icon),
    )
    # Entries are added in declaration order, exactly as before.
    for label, target, target_mode, icon in top_menu:
        addDir(label, target, target_mode, icon)
# Create content list
def addDir(name,url,mode,iconimage):
    # Add one folder row to the Kodi listing; selecting it re-invokes this
    # plugin with the given url/mode/name query parameters.
    params = {'url':url, 'mode':mode, 'name':name}
    # Fanart comes from the module-level img dict indexed by the global z
    # (presumably a rotating background image -- TODO confirm how z is set).
    addon.add_directory(params, {'title': str(name)}, img = iconimage, fanart = 'https://www3.nhk.or.jp/nhkworld/en/ondemand/'+img[z])
    #addon.add_directory(params, {'title': str(name)}, img = iconimage, fanart = 'https://www.jnto.go.jp/eng/wallpaper/'+str_Yr+'/img/type-a/1920-1080/'+month[Mth]+'.jpg')
def addDir1(name,url,mode,iconimage):
    # Like addDir, but uses the given icon as both thumbnail and fanart and
    # forwards it in the query string for the next plugin invocation.
    params = {'url':url, 'mode':mode, 'name':name, 'iconimage':iconimage}
    addon.add_directory(params, {'title': str(name)}, img = iconimage, fanart = iconimage)
def addDir2(name,url,mode,plot,iconimage):
    # Folder entry that also carries a plot summary (shown in Kodi's info view)
    # both in the listing metadata and in the query string.
    params = {'url':url, 'mode':mode, 'name':name, 'plot':plot, 'iconimage':iconimage}
    addon.add_directory(params, {'title': str(name), 'plot': plot}, img = iconimage, fanart = iconimage)
def addLink(name,url,plot,img,fanart):
    # Non-folder video row.  Note the 'url' query is set to fanart and the
    # url argument itself is unused -- these rows are informational
    # (schedule entries), not expected to resolve to playable media.
    addon.add_item({'url': fanart}, {'title': name, 'plot': plot}, img = img, fanart = fanart, resolved=False, total_items=0, playlist=False, item_type='video',
                is_folder=False)
def addDirYT(title, url):
    # Folder entry that tunnels into the YouTube add-on: url is a
    # plugin://plugin.video.youtube/... path handled by that plugin.
    liz=xbmcgui.ListItem(title)
    liz.setProperty('IsPlayable', 'false')
    liz.setInfo(type="Video", infoLabels={"label":title,"title":title} )
    liz.setArt({'thumb':nhk_icon,'fanart':'https://www3.nhk.or.jp/nhkworld/en/ondemand/'+img[z]})
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=True)
# NHK World Live Schedule
def IDX_SCHED(url):
# File write for textbox
root = addon.get_path()
sch_path = os.path.join(root, 'nhk_schedule.txt')
try:
with open (sch_path) as f: pass
print 'File "nhk_schedule.txt" already exists.'
except IOError as e:
print 'Creating new file "nhk_schedule.txt".'
req_now = urllib2.urlopen(now)
pl_now = json.load(req_now)
req = urllib2.urlopen(sch)
sch_json = json.load(req)
f = open(sch_path, 'w')
f.write('[B]Currently streaming:[/B]' + '\n' + '\n')
pubDate = int(pl_now['channel']['item'][0]['pubDate'])
name = pl_now['channel']['item'][0]['title']
desc = pl_now['channel']['item'][0]['description']
sub_name = pl_now['channel']['item'][0]['subtitle']
show_time = str(datetime.datetime.fromtimestamp(pubDate/1000).strftime('%H:%M'))
if sub_name == "":
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
else:
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
f.write('[B]Next:[/B]' + '\n' + '\n')
try:
for i in range(1,4):
pubDate = int(pl_now['channel']['item'][i]['pubDate'])
name = pl_now['channel']['item'][i]['title']
desc = pl_now['channel']['item'][i]['description']
sub_name = pl_now['channel']['item'][i]['subtitle']
show_time = str(datetime.datetime.fromtimestamp(pubDate/1000).strftime('%H:%M'))
if sub_name == "":
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
else:
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
except:
pass
f.write('[B]Today\'s schedule:[/B]' + '\n' + '\n')
try:
for i in range(200):
pubDate = int(sch_json['channel']['item'][i]['pubDate'])
name = sch_json['channel']['item'][i]['title']
desc = sch_json['channel']['item'][i]['description']
sub_name = sch_json['channel']['item'][i]['subtitle']
show_time = str(datetime.datetime.fromtimestamp(pubDate/1000).strftime('%H:%M'))
if sub_name == "":
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
else:
if use_color == "true":
f.write('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B][/COLOR]' + ' - ' + '[COLOR green]' + desc.encode('UTF-8') + '[/COLOR]' + '\n' + '\n')
else:
f.write('[B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B]' + ' - ' + desc.encode('UTF-8') + '\n' + '\n')
except:
pass
f.close()
TextBox()
class TextBox:
    """Display nhk_schedule.txt in Kodi's built-in text viewer window.

    Instantiating the class performs the whole action: activate the viewer
    window, wait for it to initialise, then fill the heading and body
    controls from the schedule file written by IDX_SCHED.
    """
    # constants
    WINDOW = 10147          # Kodi text viewer window id
    CONTROL_LABEL = 1       # heading control inside that window
    CONTROL_TEXTBOX = 5     # scrollable text control
    def __init__(self, *args, **kwargs):
        # activate the text viewer window
        xbmc.executebuiltin('ActivateWindow(%d)' % ( self.WINDOW, ))
        # get window
        self.win = xbmcgui.Window(self.WINDOW)
        # give window time to initialize
        xbmc.sleep(1000)
        self.setControls()
    def setControls(self):
        # set heading
        # Date is a module-level global holding today's date string.
        heading = 'NHK World Schedule for ' + Date
        self.win.getControl(self.CONTROL_LABEL).setLabel(heading)
        # read & set text
        root = addon.get_path()
        sch_path = os.path.join(root, 'nhk_schedule.txt')
        f = open(sch_path)
        text = f.read()
        self.win.getControl(self.CONTROL_TEXTBOX).setText(text)
# live streams
def IDX_LIVE_STRM():
    # Build the live-stream menu: two playable quality variants of the live
    # channel, followed by informational rows for the current and upcoming
    # programmes taken from the "now" EPG feed.
    req_now = urllib2.urlopen(now)
    pl_now = json.load(req_now)
    pubDate = int(pl_now['channel']['item'][0]['pubDate'])
    name = pl_now['channel']['item'][0]['title']
    desc = pl_now['channel']['item'][0]['description']
    sub_name = pl_now['channel']['item'][0]['subtitle']
    thumbnl_ = pl_now['channel']['item'][0]['thumbnail']
    thumbnl = host2[:-1]+thumbnl_
    show_time = str(datetime.datetime.fromtimestamp(pubDate/1000).strftime('%H:%M'))
    # menu
    # Viewers whose time zone is Japan get the ojp feed; all others the xjp feed.
    if TimeZone == '(GMT+09:00) Osaka, Sapporo, Tokyo':
        media_item_list(name.encode('UTF-8') + ' - 720', 'https://nhkwlive-ojp.akamaized.net/hls/live/2003459/nhkwlive-ojp-en/index_2M.m3u8', desc.encode('UTF-8'), thumbnl, thumbnl)
        media_item_list(name.encode('UTF-8') + ' - 1080', 'https://nhkwlive-ojp.akamaized.net/hls/live/2003459/nhkwlive-ojp-en/index_4M.m3u8', desc.encode('UTF-8'), thumbnl, thumbnl)
    else:
        media_item_list(name.encode('UTF-8') + ' - 720', 'https://nhkwlive-xjp.akamaized.net/hls/live/2003458/nhkwlive-xjp-en/index_2M.m3u8', desc.encode('UTF-8'), thumbnl, thumbnl)
        # NOTE(review): this 1080 variant points at the ojp (Japan) host while
        # the 720 line above uses xjp -- looks inconsistent; confirm intended.
        media_item_list(name.encode('UTF-8') + ' - 1080', 'https://nhkwlive-ojp.akamaized.net/hls/live/2003459/nhkwlive-ojp-en/index_4M.m3u8', desc.encode('UTF-8'), thumbnl, thumbnl)
    try:
        # Current programme as a non-playable info row (addLink items carry
        # no resolvable url).
        if sub_name == "":
            if use_color == "true":
                addLink('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B][/COLOR]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
            else:
                addLink('[B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
        else:
            if use_color == "true":
                addLink('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B][/COLOR]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
            else:
                addLink('[B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
        # Up to 19 upcoming programmes; IndexError past the end of the feed
        # is swallowed by the enclosing except.
        for i in range(1,20):
            pubDate = int(pl_now['channel']['item'][i]['pubDate'])
            name = pl_now['channel']['item'][i]['title']
            desc = pl_now['channel']['item'][i]['description']
            sub_name = pl_now['channel']['item'][i]['subtitle']
            thumbnl_ = pl_now['channel']['item'][i]['thumbnail']
            thumbnl = host2[:-1]+thumbnl_
            show_time = str(datetime.datetime.fromtimestamp(pubDate/1000).strftime('%H:%M'))
            if sub_name == "":
                if use_color == "true":
                    addLink('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B][/COLOR]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
                else:
                    addLink('[B]' + show_time + ' - ' + name.encode('UTF-8') + '[/B]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
            else:
                if use_color == "true":
                    addLink('[COLOR blue][B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B][/COLOR]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
                else:
                    addLink('[B]' + show_time + ' - ' + name.encode('UTF-8') + ' - ' + sub_name.encode('UTF-8') + '[/B]', '', desc.encode('UTF-8'), thumbnl, thumbnl)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
def IDX_OTHER_LIVE_STRM():
    # Single playable entry for NHK's Chinese-language live HLS stream.
    fanart = 'https://www3.nhk.or.jp/nhkworld/en/ondemand/'+img[z]
    media_item_list('NHK Live - Chinese', 'https://nhkw-zh-hlscomp.akamaized.net/8thz5iufork8wjip/playlist.m3u8', '', nhk_icon, fanart)
def IDX_VOD_CATS(url):
    """Build the Video On Demand menu: fixed listings first, then the genre
    categories, in the same order as before."""
    # Fixed, non-category listings.
    for label, feed, feed_mode, icon in (
        ('On Demand Full Listing', host8+'all/all/en/all/all.json?%s' % apikey, 'vod', nhk_icon),
        ('Latest Episodes', host8+'all/all/en/all/12.json?%s' % apikey, 'vod', nhk_icon),
        ('Most Watched', host8+'mostwatch/all/en/all/12.json?%s' % apikey, 'vod', nhk_icon),
        ('Playlists', host14+'en/all.json?%s' % apikey, 'p_lists', nhk_icon),
        ('Video Clips', host12+'all/all/en/all/all.json?%s' % apikey, 'vod', nhk_icon),
    ):
        addDir(label, feed, feed_mode, icon)
    # Genre categories keyed by NHK's numeric category id.  Category 27
    # ("Interactive") and 11 ("News") are deliberately not listed here;
    # news is routed through the dedicated news browser entry below.
    for label, cat_id in (
        ('Art & Design', '19'),
        ('Biz/Tech', '14'),
        ('Culture & Lifestyle', '20'),
        ('Current Affairs', '12'),
        ('Debate', '13'),
        ('Disaster Preparedness', '29'),
        ('Documentary', '15'),
        ('Drama', '26'),
        ('Entertainment', '21'),
        ('Food', '17'),
        ('Interview', '16'),
        ('Learn Japanese', '28'),
    ):
        addDir(label, host8+'category/'+cat_id+'/en/all/all.json?%s' % apikey, 'vod', host9+cat_id+'.png')
    # The news entry opens the dedicated news menu rather than a category feed.
    addDir('NHK World News', '', 'news', host9+'11.png')
    for label, cat_id in (
        ('Pop Culture & Fashion', '22'),
        ('Science & Nature', '23'),
        ('Sport', '25'),
        ('Travel', '18'),
    ):
        addDir(label, host8+'category/'+cat_id+'/en/all/all.json?%s' % apikey, 'vod', host9+cat_id+'.png')
# video on demand
def IDX_VOD(url,mode):
    """List every episode in an NHK VOD JSON feed as a 'vod_resolve' folder.

    Iterates the episode array directly instead of probing indices with
    range(5000): no arbitrary 5000-item cap, and IndexError is no longer
    used as a loop terminator.  The blanket try/except is retained so a
    malformed entry behaves as before (the listing simply stops).
    """
    req = urllib2.urlopen(url)
    vod_json = json.load(req)
    try:
        for episode in vod_json['data']['episodes']:
            series_ = episode['title']
            ep_name_ = episode['sub_title']
            plot_ = episode['description']
            thumbnl_ = episode['image_l']
            vid_id = episode['vod_id']
            # Strip the HTML markup / escape residue NHK embeds in titles.
            series = (series_).encode('UTF-8').replace('[\'','').replace('\']','').replace('<br />',' ').replace('<span style="font-style: italic;">', '').replace('</span>','')
            ep_name = (ep_name_).encode('UTF-8').replace('<br>',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','').replace('<b>','').replace('</b>','')
            plot = (plot_).encode('UTF-8').replace('<br>',' ').replace('♡',' ').replace('[\'','').replace('\']','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('<em>','').replace('</em>','').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','').replace('<b>','').replace('</b>','')#.replace('["','').replace('"]','')
            thumbnl = host2[:-1]+thumbnl_
            # Title is "series - episode", dropping whichever half is empty.
            if series == '':
                addDir2(ep_name, vid_id, 'vod_resolve', plot, thumbnl)
            elif ep_name == '':
                addDir2(series, vid_id, 'vod_resolve', plot, thumbnl)
            else:
                addDir2(series + ' - ' + ep_name, vid_id, 'vod_resolve', plot, thumbnl)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
def VOD_RESOLVE(name,url,plot,iconimage):
    # Resolve an on-demand vod_id into playable HLS urls.
    # Step 1: fetch the embed page for the vod_id and scrape the programme
    # uuid out of its javascript.
    vid_id = str(url)
    req = urllib2.Request('https://movie-s.nhk.or.jp/v/refid/nhkworld/prefid/'+vid_id+'?embed=js&targetId=videoplayer&de-responsive=true&de-callback-method=nwCustomCallback&de-appid='+vid_id+'&de-subtitle-on=false', headers=hdr)
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    match = re.compile("'data-de-program-uuid','(.+?)'").findall(link)
    for p_uuid_ in match:
        p_uuid = str(p_uuid_).replace("['" , "").replace("']" , "")
        # Step 2: query the programme API with that uuid for the asset files.
        req = urllib2.urlopen('https://movie-s.nhk.or.jp/ws/ws_program/api/67f5b750-b419-11e9-8a16-0e45e8988f42/apiv/5/mode/json?v='+p_uuid)
        vod_json = json.load(req)
        # 720p variant from the first asset file; the rtmp play_path is
        # rewritten into an Akamai chunklist url.  Missing keys skip quietly.
        try:
            v1 = vod_json['response']['WsProgramResponse']['program']['asset']['assetFiles'][0]['rtmp']['play_path']
            vlink_1 = v1.split('?')
            vlink1 = 'https://nhkw-mzvod.akamaized.net/www60/mz-nhk10/definst/' + vlink_1[0] + '/chunklist.m3u8'
            media_item_list('720: '+ name, vlink1, plot, iconimage, iconimage)
        except:
            pass
        # 1080p variant from the reference file, same rewrite.
        try:
            v2 = vod_json['response']['WsProgramResponse']['program']['asset']['referenceFile']['rtmp']['play_path']
            vlink_2 = v2.split('?')
            vlink2 = 'https://nhkw-mzvod.akamaized.net/www60/mz-nhk10/definst/' + vlink_2[0] + '/chunklist.m3u8'
            media_item_list('1080: '+ name, vlink2, plot, iconimage, iconimage)
        except:
            pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
#playlists
def IDX_P_LISTS(url):
    """List NHK on-demand playlists from the playlist JSON feed.

    Iterates the playlist array directly rather than probing indices with
    range(100), so feeds with more than 100 playlists are no longer
    silently truncated and IndexError is no longer used as a loop
    terminator.  The blanket try/except is retained so a malformed entry
    behaves as before (the listing simply stops).
    """
    req = urllib2.urlopen(url)
    plists_json = json.load(req)
    try:
        for plist in plists_json['data']['playlist']:
            p_list_id_ = plist['playlist_id']
            series_ = plist['title']
            thumbnl_ = plist['image_original']
            # content_type is never used, but the lookup is kept so an entry
            # missing the key still aborts the listing exactly as before.
            c_type = plist['content_type']
            p_list_id = host14+'en/'+p_list_id_+'.json?%s' % apikey
            series = (series_).encode('UTF-8').replace('[\'','').replace('\']','').replace('<br />',' ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<span style="font-style: italic;">', '').replace('</span>','')
            thumbnl = host2[:-1]+thumbnl_
            addDir2(series, p_list_id, 'p_resolve', '', thumbnl)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
def P_RESOLVE(name,url,mode,plot,iconimage):
    # List the tracks of one playlist as 'p_resolve2' folders.
    # NOTE: results are also stashed in the module-level dicts s_dict/e_dict/
    # v_dict/p_dict/t_dict/req1_dict (presumably defined earlier in the file
    # -- TODO confirm); req1_dict holds the embed-page url for each track.
    req = urllib2.urlopen(url)
    vod_json = json.load(req)
    try:
        # Index probing up to 100; IndexError past the last track is
        # swallowed by the enclosing except and ends the listing.
        for i in range(100):
            series_ = vod_json['data']['playlist'][0]['track'][i]['title_clean']
            ep_name_ = vod_json['data']['playlist'][0]['track'][i]['sub_title_clean']
            plot_ = vod_json['data']['playlist'][0]['track'][i]['description_clean']
            thumbnl_ = vod_json['data']['playlist'][0]['track'][i]['image_l']
            vid_id = vod_json['data']['playlist'][0]['track'][i]['vod_id']
            # Strip HTML markup / escape residue from the feed text.
            series = (series_).encode('UTF-8').replace('[\'','').replace('\']','').replace('<br />',' ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<span style="font-style: italic;">', '').replace('</span>','').replace('\\xc3\\xa9','e')
            ep_name = (ep_name_).encode('UTF-8').replace('<br>',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','').replace('<b>','').replace('</b>','')
            plot = (plot_).encode('UTF-8').replace('<br>',' ').replace('♡',' ').replace('[\'','').replace('\']','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('<em>','').replace('</em>','').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','').replace('<b>','').replace('</b>','')#.replace('["','').replace('"]','')
            thumbnl = host2[:-1]+thumbnl_
            s_dict[i]=series; e_dict[i]=ep_name; v_dict[i]=vid_id; p_dict[i]=plot; t_dict[i]=thumbnl
            req1_dict[i] = 'https://movie-s.nhk.or.jp/v/refid/nhkworld/prefid/'+v_dict[i]+'?embed=js&targetId=videoplayer&de-responsive=true&de-callback-method=nwCustomCallback&de-appid='+v_dict[i]+'&de-subtitle-on=false'
            # Title is "series - episode", dropping whichever half is empty.
            if s_dict[i] == '':
                addDir2(e_dict[i],req1_dict[i],'p_resolve2',p_dict[i],t_dict[i])
            elif e_dict[i] == '':
                addDir2(s_dict[i],req1_dict[i],'p_resolve2',p_dict[i],t_dict[i])
            else:
                addDir2(s_dict[i]+' - '+e_dict[i],req1_dict[i],'p_resolve2',p_dict[i],t_dict[i])
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
def P_RESOLVE2(name,url,mode,plot,iconimage):
    # Resolve one playlist track to playable HLS urls.  Same two-step flow
    # as VOD_RESOLVE, except the embed-page url arrives ready-built (it was
    # assembled in P_RESOLVE and passed through the query string).
    req = urllib2.Request(url, headers=hdr)
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    # Scrape the programme uuid out of the embed page's javascript.
    match = re.compile("'data-de-program-uuid','(.+?)'").findall(link)
    for p_uuid_ in match:
        p_uuid = str(p_uuid_).replace("['" , "").replace("']" , "")
        req = urllib2.urlopen('https://movie-s.nhk.or.jp/ws/ws_program/api/67f5b750-b419-11e9-8a16-0e45e8988f42/apiv/5/mode/json?v='+p_uuid)
        vod_json = json.load(req)
        # 720p from the first asset file; rtmp play_path rewritten to the
        # Akamai chunklist url.  Missing keys skip the variant quietly.
        try:
            v1 = vod_json['response']['WsProgramResponse']['program']['asset']['assetFiles'][0]['rtmp']['play_path']
            vlink_1 = v1.split('?')
            vlink1 = 'https://nhkw-mzvod.akamaized.net/www60/mz-nhk10/definst/' + vlink_1[0] + '/chunklist.m3u8'
            media_item_list('720: '+ name, vlink1, plot, iconimage, iconimage)
        except:
            pass
        # 1080p from the reference file, same rewrite.
        try:
            v2 = vod_json['response']['WsProgramResponse']['program']['asset']['referenceFile']['rtmp']['play_path']
            vlink_2 = v2.split('?')
            vlink2 = 'https://nhkw-mzvod.akamaized.net/www60/mz-nhk10/definst/' + vlink_2[0] + '/chunklist.m3u8'
            media_item_list('1080: '+ name, vlink2, plot, iconimage, iconimage)
        except:
            pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
# jibtv
def IDX_JIBTV(url):
    # Scrape the JIBTV programmes page.  Four regex variants cover the
    # CRLF/LF and trailing-newline-in-title markup permutations observed on
    # the site; each match yields (detail-page path, thumbnail path, title).
    link = net.http_GET(url).content
    match1 = re.compile('<tr data-href="(.+?)">\r\n\t*<td class="text-center w-40"><img src="(.+?)" class="img-responsive img-rounded" width="100%" /></td>\r\n\t*<td><span class="font-500">(.+?)</span><span ').findall(link)
    match2 = re.compile('<tr data-href="(.+?)">\r\n\t*<td class="text-center w-40"><img src="(.+?)" class="img-responsive img-rounded" width="100%" /></td>\r\n\t*<td><span class="font-500">(.+?)\r\n</span><span ').findall(link)
    match3 = re.compile('<tr data-href="(.+?)">\n\t*<td class="text-center w-40"><img src="(.+?)" class="img-responsive img-rounded" width="100%" /></td>\n\t*<td><span class="font-500">(.+?)</span><span ').findall(link)
    match4 = re.compile('<tr data-href="(.+?)">\n\t*<td class="text-center w-40"><img src="(.+?)" class="img-responsive img-rounded" width="100%" /></td>\n\t*<td><span class="font-500">(.+?)\n</span><span ').findall(link)
    for vid_page_, thumbnl_, title_ in match1 + match2 + match3 + match4:
        # Paths are site-relative; host6 is the JIBTV base url.
        vid_page = host6+vid_page_
        thumbnl = host6+thumbnl_
        title = (title_).encode('UTF-8').replace('<br>',' - ').replace('<br />',' - ')
        addDir1(title, vid_page, 'jib_feat', thumbnl)
def JIB_FEAT(url,iconimage):
    # Resolve one JIBTV programme page to playable items.
    link = net.http_GET(url).content
    try:
        # Title/description come from the page's OpenGraph meta tags.
        title_ = re.compile('<meta property="og:title" content="(.+?)"').findall(link)
        titl_e = ''.join(title_)
        title = (titl_e).encode('UTF-8').replace('<br />',' - ')
        desc_ = re.compile('<meta property="og:description" content="(.+?)"').findall(link)
        plot = ''.join(desc_)
        # The meta id is embedded in a play({ meta_id: NNN }) javascript call;
        # only the first three digits are used as the id -- presumably the
        # page repeats the id and this truncation recovers it (TODO confirm).
        meta_id = re.compile('play\(\{ meta_id: (.+?) \}\)').findall(link)
        vid_id_ = ''.join(meta_id)
        vid_id = vid_id_[0:3]
        try:
            # Single-video programme: fetch its media url directly.
            link2 = net.http_GET('https://jibtv-vcms.logica.io/api/v1/metas/%s/medias' % vid_id).content
            vid_src_ = re.compile('"url":"(.+?)"').findall(link2)
            vid_src = ''.join(vid_src_)
            media_item_list(title, vid_src, plot, iconimage, iconimage)
        except:
            # Playlist programme: pull the first two (meta_id, name) pairs
            # from the playlist API and resolve each to its media url.
            link2 = net.http_GET('https://jibtv-vcms.logica.io/api/v1/metas/%s/playlist' % vid_id).content
            match1=re.compile('"metas":\[\{"meta_id":(.+?),"name":"(.+?)".+?,\{"meta_id":(.+?),"name":"(.+?)"').findall(link2)
            for vid_id1, title1, vid_id2, title2 in match1:
                link3 = net.http_GET('https://jibtv-vcms.logica.io/api/v1/metas/%s/medias' % vid_id1).content
                link4 = net.http_GET('https://jibtv-vcms.logica.io/api/v1/metas/%s/medias' % vid_id2).content
                vid_src1_ = re.compile('"url":"(.+?)"').findall(link3)
                vid_src1 = ''.join(vid_src1_)
                media_item_list(title1, vid_src1, plot, iconimage, iconimage)
                vid_src2_ = re.compile('"url":"(.+?)"').findall(link4)
                vid_src2 = ''.join(vid_src2_)
                media_item_list(title2, vid_src2, plot, iconimage, iconimage)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
# New NHK News
def IDX_NEWS(url):
    """Build the news menu: three JSON-backed folders followed by direct
    links to the regular news programmes.  Each programme link is wrapped
    individually so one failing feed cannot break the rest of the menu."""
    addDir('NHK Top Stories', host2+'nhkworld/data/en/news/all.json', 'topnews', nhk_icon)
    addDir('News At a Glance', host2+'nhkworld/en/news/ataglance/index.json', 'glance', nhk_icon)
    addDir('News Videos', host10+'list/en/newsvideos/all/all.json?%s' % apikey, 'topnews', nhk_icon)
    # (programme title, feed id, artwork file) -- note Biz Stream uses a
    # different artwork filename than its feed id.
    programmes = (
        ('Newsline', '1001', '1001.jpg'),
        ('Newsroom Tokyo', '1002', '1002.jpg'),
        ('Newsline Asia 24', '1003', '1003.jpg'),
        ('Newsline Biz', '1004', '1004.jpg'),
        ('Newsline In Depth', '1005', '1005.jpg'),
        ('Biz Stream', '2074', '2074_2.jpg'),
    )
    for prog_title, prog_id, art in programmes:
        try:
            media_item_list(prog_title, host2+'nhkworld/data/en/news/programs/'+prog_id+'.xml', '', host11+art, host11+art)
        except:
            pass
# Disabled legacy resolver: the function below is wrapped in a bare string
# literal, so it is never defined.  NOTE(review): the 'the_news' dispatch
# branch at the bottom of this file still calls THE_NEWS and would raise
# NameError if that mode were ever triggered -- confirm the mode is unused.
'''
def THE_NEWS(url):
    req = urllib2.Request(url, headers=hdr)
    file = urllib2.urlopen(req)
    data = file.read()
    file.close()
    dom = parseString(data)
    v_url = dom.getElementsByTagName('file.high')[0].toxml()
    image = dom.getElementsByTagName('image')[0].toxml()
    name = dom.getElementsByTagName('media.title')[0].toxml()
    vid_url = v_url.replace('<file.high><![CDATA[','').replace(']]></file.high>','').replace('rtmp://flv.nhk.or.jp/ondemand/flv','https://nhkworld-vh.akamaihd.net/i').replace('hq.mp4',',l,h,q.mp4.csmil/master.m3u8')
    thumbnl = host2 + image.replace('<image><![CDATA[/','').replace(']]></image>','')
    name_ = name.replace('<media.title>','').replace('</media.title>','').replace("_#039_","'").replace('_quot_','"')
    media_item_list(name_,vid_url,'',thumbnl,thumbnl)
'''
# Latest top news stories
def IDX_TOPNEWS(url):
    """List news stories that carry a video as 'tn_resolve' folders.

    Iterates the story array directly instead of probing indices with
    range(500): no arbitrary 500-item cap, and IndexError is no longer used
    as a loop terminator.  The blanket try/except is retained so a
    malformed entry behaves as before (the listing simply stops).
    """
    req = urllib2.Request(url, headers=hdr)
    file = urllib2.urlopen(req)
    top_json = json.load(file)
    try:
        for story in top_json['data']:
            # Skip text-only stories; only entries with a video are listed.
            if story['videos']:
                ep_name_ = story['title']
                plot_ = story['description']
                thumbnl_ = story['thumbnails']['middle']
                xml_link = story['videos']['config']
                # Strip HTML markup / escape residue from the feed text.
                ep_name = (ep_name_).encode('UTF-8').replace('<br>',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','')
                plot = (plot_).encode('UTF-8').replace('<br>',' ').replace('♡',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('<em>','').replace('</em>','').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','')
                thumbnl = host2[:-1]+thumbnl_
                addDir2(ep_name, xml_link, 'tn_resolve', plot, thumbnl)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
# News at a glance
def IDX_GLANCE(url):
    """List "News At a Glance" items that carry a video as 'g_resolve' folders.

    Iterates the item array directly instead of probing indices with
    range(5000), so IndexError is no longer used as a loop terminator.  The
    blanket try/except is retained so a malformed entry behaves as before
    (the listing simply stops).
    """
    req = urllib2.Request(url, headers=hdr)
    file = urllib2.urlopen(req)
    g_json = json.load(file)
    try:
        for story in g_json['data']:
            # Skip text-only stories; only entries with a video are listed.
            if story['video']:
                ep_name_ = story['title']
                plot_ = story['description']
                thumbnl_ = story['image']['main_pc']
                xml_link = story['video']['config']
                # Strip HTML markup / escape residue from the feed text.
                ep_name = (ep_name_).encode('UTF-8').replace('<br>',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('\\xd7','x').replace('\\xc3\\x97','x').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','')
                plot = (plot_).encode('UTF-8').replace('<br>',' ').replace('♡',' ').replace('[\'','').replace('\']','').replace('["','').replace('"]','').replace("\\\'","'").replace('<br />',' ').replace('&','&').replace('<span style="font-style: italic;">','').replace('</span>','').replace('\\xe0','a').replace('\\xc3\\x89','E').replace('\\xe9','e').replace('\\xef\\xbd\\x9e',' ~ ').replace('<em>','').replace('</em>','').replace('\\xc3','').replace('<i>','').replace('</i>','').replace('<p>','').replace('</p>','')
                thumbnl = host2[:-1]+thumbnl_
                addDir2(ep_name, xml_link, 'g_resolve', plot, thumbnl)
    except:
        pass
    xbmcplugin.setContent(pluginhandle, 'episodes')
def RESOLVE(name,url,mode,plot,iconimage):
    # Shared resolver for top-news ('tn_resolve') and at-a-glance
    # ('g_resolve') items: fetch the per-story config XML, take the
    # high-quality file reference and rewrite the legacy rtmp location to
    # the Akamai HLS equivalent.
    req = urllib2.Request((host2[:-1]+url), headers=hdr)
    file = urllib2.urlopen(req)
    data = file.read()
    file.close()
    dom = parseString(data)
    v_url = dom.getElementsByTagName('file.high')[0].toxml()
    if mode == 'tn_resolve':
        # Top-news config wraps the path in CDATA and ends in 'HQ.mp4'.
        vid_url = v_url.replace('<file.high><![CDATA[','').replace(']]></file.high>','').replace('rtmp://flv.nhk.or.jp/ondemand/flv','https://nhkworld-vh.akamaihd.net/i').replace('HQ.mp4',',L,H,Q.mp4.csmil/master.m3u8')
    else:
        # At-a-glance config has a bare element; append master.m3u8 instead.
        vid_url = v_url.replace('<file.high>','').replace('</file.high>','').replace('rtmp://flv.nhk.or.jp/ondemand/flv','https://nhkworld-vh.akamaihd.net/i').replace('mp4','mp4/master.m3u8')
    media_item_list(name,vid_url,plot,iconimage,iconimage)
    xbmcplugin.setContent(pluginhandle, 'episodes')
# Pre-recorded NHK World Radio in 17 languages
def IDX_RADIO(url):
    # One audio entry per language.  lang and lang_key are parallel
    # module-level lists (display name / feed path key) -- presumably 17
    # entries each, matching the hard-coded range; TODO confirm.
    fanart = 'https://www3.nhk.or.jp/nhkworld/en/ondemand/'+img[z]
    for i in range(17):
        media_item_list('NHK Radio News in '+lang[i], host13+lang_key[i]+'/news.json?%s' % apikey,'','',fanart)
def IDX_YOUTUBE1():
    """List NHK-related YouTube channels, playlists and searches.

    Each entry tunnels into the YouTube add-on via a plugin:// url.  The
    table preserves the original ordering; the displayed label of the
    Supreme Skills entry had a typo ("Supreme Skils"), fixed here to match
    its own search query.
    """
    entries = (
        ("NHK World Channel", "plugin://plugin.video.youtube/user/NHKWorld/"),
        ("Youtube Search for 'NHK World'", 'plugin://plugin.video.youtube/search/?q=NHK World'),
        ("NHK Videos - Select Playlists in next menu", "plugin://plugin.video.youtube/channel/UCMsBttS0NCgp7HXuAeN22QQ/"),
        ("NHK Online", 'plugin://plugin.video.youtube/user/NHKonline/'),
        ("UNESCO/NHK", "plugin://plugin.video.youtube/playlist/PLWuYED1WVJIPKU_tUlzLTfkbNnAtkDOhS/"),
        ("Core Kyoto", "plugin://plugin.video.youtube/search/?q='Core Kyoto'+NHK"),
        ("Cycle Around Japan", "plugin://plugin.video.youtube/search/?q='Cycle Around Japan'+NHK"),
        ("Japan Railway Journal", "plugin://plugin.video.youtube/search/?q=NHK Japan Railway Journal"),
        ("J-Trip Plan", "plugin://plugin.video.youtube/search/?q=NHK J-Trip Plan"),
        ("NHK Documentary", 'plugin://plugin.video.youtube/search/?q=NHK Documentary'),
        ("Japan's Top Inventions", 'plugin://plugin.video.youtube/search/?q=intitle:"Japan\'s Top Inventions"'),
        ("Japanology", 'plugin://plugin.video.youtube/search/?q=intitle:"Japanology"'),
        ("Begin Japanology", "plugin://plugin.video.youtube/search/?q=Begin Japanology"),
        ("Japanology Plus", "plugin://plugin.video.youtube/search/?q=Japanology Plus"),
        ("Seasoning the Seasons", 'plugin://plugin.video.youtube/search/?q=intitle:"Seasoning the Seasons"'),
        ("Tokyo Eye", "plugin://plugin.video.youtube/search/?q=Tokyo Eye"),
        ("Trails to Tsukiji", "plugin://plugin.video.youtube/search/?q=Trails to Tsukiji"),
        ("Trails to Oishii Tokyo", "plugin://plugin.video.youtube/search/?q=Trails to Oishii Tokyo"),
        ("Dining with the Chef", "plugin://plugin.video.youtube/search/?q=Dining with the chef"),
        ("Journeys in Japan", 'plugin://plugin.video.youtube/search/?q=intitle:"journeys in japan"'),
        ("Train Cruise", "plugin://plugin.video.youtube/search/?q='Train Cruise'+NHK"),
        ("Cool Japan", 'plugin://plugin.video.youtube/search/?q=intitle:"cool japan"'),
        ("At Home with Venetia in Kyoto", "plugin://plugin.video.youtube/search/?q=At Home with Venetia in Kyoto"),
        ("Japan from Above", 'plugin://plugin.video.youtube/search/?q=Japan from above'),
        ("Blends", "plugin://plugin.video.youtube/search/?q=NHK Blends"),
        ("Somewhere Street", "plugin://plugin.video.youtube/search/?q=NHK Somewhere Street"),
        ("Supreme Skills", "plugin://plugin.video.youtube/search/?q='Supreme Skills'+NHK"),
        ("NHK Documentary - Silk Road", "plugin://plugin.video.youtube/playlist/PLB8KCZnnrFKmP6CPynDrFVheEt9VOBPk4/"),
        ("NHK Documentary - Silk Road II", "plugin://plugin.video.youtube/playlist/PLdwCuEoZ_6l7FvbsfjidxMIybBrF5jnb5/"),
    )
    for yt_title, yt_url in entries:
        addDirYT(title=yt_title, url=yt_url)
# Create media items list
def media_item_list(name,url,plot,img,fanart):
    # Add one playable item.  Behaviour branches on the MODULE-LEVEL 'mode'
    # query value (not a parameter), so the same call means different things
    # depending on which menu is being built.
    if mode=='audio':
        # Radio: 'url' is a JSON feed; pull the audio path out of it and
        # build the Akamai HLS url for a music item.
        req = urllib2.Request(url, headers=hdr)
        file = urllib2.urlopen(req)
        radio_json = json.load(file)
        a_link = radio_json['data']['audio']
        radionews_url = 'https://nhkworld-vh.akamaihd.net/i'+a_link+'/master.m3u8'
        addon.add_music_item({'url': radionews_url}, {'title': name}, context_replace = nhk_icon, fanart = fanart, playlist=False)
    elif mode=='news':
        # News programmes: 'url' is a config XML; extract the high-quality
        # file reference and thumbnail, rewriting legacy rtmp to Akamai HLS.
        req = urllib2.Request(url, headers=hdr)
        file = urllib2.urlopen(req)
        data = file.read()
        file.close()
        dom = parseString(data)
        v_url = dom.getElementsByTagName('file.high')[0].toxml()
        image = dom.getElementsByTagName('image')[0].toxml()
        vid_url = v_url.replace('<file.high><![CDATA[','').replace(']]></file.high>','').replace('rtmp://flv.nhk.or.jp/ondemand/flv','https://nhkworld-vh.akamaihd.net/i').replace('hq.mp4',',l,h,q.mp4.csmil/master.m3u8')
        thumbnl = host2 + image.replace('<image><![CDATA[/','').replace(']]></image>','')
        addon.add_video_item({'url': vid_url}, {'title': name, 'plot': plot}, img = thumbnl, fanart = thumbnl, playlist=False)
    else:
        # Everything else: 'url' is already a playable stream url.
        addon.add_video_item({'url': url}, {'title': name, 'plot': plot}, img = img, fanart = fanart, playlist=False)
# Downloader
#def download_media():
# print df
# Query play, mode, url and name
# Pull the routing parameters out of the plugin invocation query string.
play = addon.queries.get('play', None)
mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
iconimage = addon.queries.get('iconimage', '')
plot = addon.queries.get('plot', '')
# Debug trace of the incoming request (Python 2 print statements).
print "Play: " +str(play)
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
# Program flow control
# Note: ``play`` and ``mode`` are handled independently; a request may
# both resolve a media URL and fall into one of the mode branches.
if play:
    addon.resolve_url(url.encode('UTF-8')) # <<< Play resolved media url
# Dispatch on ``mode`` to the matching listing/resolver function.
if mode=='main':
    print ""
    CATEGORIES()
elif mode=='schedule':
    print ""+url
    IDX_SCHED(url)
elif mode=='live_strm':
    print ""+url
    IDX_LIVE_STRM()
elif mode=='other_live_strm':
    print ""+url
    IDX_OTHER_LIVE_STRM()
elif mode=='p_lists':
    print ""+url
    IDX_P_LISTS(url)
elif mode=='p_resolve':
    print ""+url
    P_RESOLVE(name,url,mode,plot,iconimage)
elif mode=='p_resolve2':
    print ""+url
    P_RESOLVE2(name,url,mode,plot,iconimage)
elif mode=='vod':
    print ""+url
    IDX_VOD(url,mode)
elif mode=='vod_cats':
    print ""+url
    IDX_VOD_CATS(url)
elif mode=='vod_resolve':
    print ""+url
    VOD_RESOLVE(name,url,plot,iconimage)
elif mode=='jibtv':
    print ""+url
    IDX_JIBTV(url)
elif mode=='jib_feat':
    print ""+url
    JIB_FEAT(url,iconimage)
elif mode=='youtube1':
    print ""+url
    IDX_YOUTUBE1()
elif mode=='youtube2':
    print ""+url
    IDX_YOUTUBE2()
elif mode=='news':
    print ""+url
    IDX_NEWS(url)
elif mode=='the_news':
    print ""+url
    THE_NEWS(url)
elif mode=='newsroom':
    # 'newsroom' deliberately shares the generic news index handler.
    print ""+url
    IDX_NEWS(url)
elif mode=='topnews':
    print ""+url
    IDX_TOPNEWS(url)
elif mode=='tn_resolve':
    print ""+url
    RESOLVE(name,url,mode,plot,iconimage)
elif mode=='glance':
    print ""+url
    IDX_GLANCE(url)
elif mode=='g_resolve':
    print ""+url
    RESOLVE(name,url,mode,plot,iconimage)
elif mode=='audio':
    print ""+url
    IDX_RADIO(url)
# Listings (non-playback requests) must close the directory.
if not play:
addon.end_of_directory() | misty-/addons | plugin.video.bestofnhk/default.py | Python | gpl-3.0 | 54,172 |
from .base import Base
class Repository(Base):
    """Client-side wrapper for the Beanstalk ``repositories`` REST endpoints."""

    def find(self, id=None):
        """Fetch one repository by ``id``, or every repository when ``id``
        is None/falsy.

        :returns: the decoded JSON response from the API.
        """
        if id:
            url = 'repositories/{0}.json'.format(id)
        else:
            url = 'repositories.json'
        return self._do_get(url)

    def find_by_name(self, repository_name):
        """Return the repository dict whose name is ``repository_name``,
        or None when no repository matches.
        """
        for repo in self.find():
            if repo['repository']['name'] == repository_name:
                return repo
        return None

    def get_id_by_name(self, repository_name):
        """Return the id of the repository named ``repository_name``,
        or None when no repository matches.
        """
        repo = self.find_by_name(repository_name)
        # A missing repository simply propagates as None.
        return repo['repository']['id'] if repo is not None else None

    def create(self, name, title, color_label='label-white', vcs='subversion',
               create_structure=False):
        """Create a new repository via POST.

        NOTE(review): ``create_structure`` is accepted but never included
        in the request payload; it is kept in the signature for backward
        compatibility — confirm against the Beanstalk API docs whether it
        should be posted.
        """
        url = 'repositories.json'
        data = {
            'repository': {
                'name': name,
                'title': title,
                'type_id': vcs,
                'color_label': color_label
            }
        }
        return self._do_post(url, data)
| sherzberg/python-beanstalk-api | beanstalk/api/repository.py | Python | gpl-3.0 | 1,040 |
# Natural Language Toolkit: Probabilistic Chart Parsers
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for associating probabilities with tree
structures that represent the internal organization of a text. The
probabilistic parser module defines ``BottomUpProbabilisticChartParser``.
``BottomUpProbabilisticChartParser`` is an abstract class that implements
a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges,
and adds them to the chart one at a time. The ordering of this queue
is based on the probabilities associated with the edges, allowing the
parser to expand more likely edges before less likely ones. Each
subclass implements a different queue ordering, producing different
search strategies. Currently the following subclasses are defined:
- ``InsideChartParser`` searches edges in decreasing order of
their trees' inside probabilities.
- ``RandomChartParser`` searches edges in random order.
- ``LongestChartParser`` searches edges in decreasing order of their
location's length.
The ``BottomUpProbabilisticChartParser`` constructor has an optional
argument beam_size. If non-zero, this controls the size of the beam
(aka the edge queue). This option is most useful with InsideChartParser.
"""
from __future__ import print_function, unicode_literals
##//////////////////////////////////////////////////////
## Bottom-Up PCFG Chart Parser
##//////////////////////////////////////////////////////
# [XX] This might not be implemented quite right -- it would be better
# to associate probabilities with child pointer lists.
from functools import reduce
from nltk.tree import Tree, ProbabilisticTree
from nltk.grammar import Nonterminal, WeightedGrammar
from nltk.parse.api import ParserI
from nltk.parse.chart import Chart, LeafEdge, TreeEdge, AbstractChartRule
from nltk.compat import python_2_unicode_compatible
# Probabilistic edges
class ProbabilisticLeafEdge(LeafEdge):
    """A leaf edge whose associated probability is always 1.0."""

    def prob(self):
        """Return the probability of this edge (leaves are certain)."""
        return 1.0
class ProbabilisticTreeEdge(TreeEdge):
    """A tree edge that carries the inside probability of its partial tree."""

    def __init__(self, prob, *args, **kwargs):
        TreeEdge.__init__(self, *args, **kwargs)
        self._prob = prob
        # Fold the probability into the comparison key so that two edges
        # differing only in probability compare as unequal.
        self._comparison_key = (self._comparison_key, prob)

    def prob(self):
        """Return the inside probability attached to this edge."""
        return self._prob

    @staticmethod
    def from_production(production, index, p):
        """Build a zero-dot edge for ``production`` anchored at ``index``
        with probability ``p``."""
        return ProbabilisticTreeEdge(
            p, (index, index), production.lhs(), production.rhs(), 0)
# Rules using probabilistic edges
class ProbabilisticBottomUpInitRule(AbstractChartRule):
    """Seed the chart with a leaf edge for every input token."""

    NUM_EDGES = 0

    def apply_iter(self, chart, grammar):
        for position in range(chart.num_leaves()):
            leaf_edge = ProbabilisticLeafEdge(chart.leaf(position), position)
            # Yield only edges that were actually new to the chart.
            if chart.insert(leaf_edge, ()):
                yield leaf_edge
class ProbabilisticBottomUpPredictRule(AbstractChartRule):
    """For a complete edge, predict every production whose RHS begins with
    that edge's LHS category."""

    NUM_EDGES = 1

    def apply_iter(self, chart, grammar, edge):
        # Prediction is only triggered by complete edges.
        if edge.is_incomplete():
            return
        for production in grammar.productions():
            if production.rhs()[0] == edge.lhs():
                predicted = ProbabilisticTreeEdge.from_production(
                    production, edge.start(), production.prob())
                if chart.insert(predicted, ()):
                    yield predicted
class ProbabilisticFundamentalRule(AbstractChartRule):
    """Combine an incomplete left edge with an adjacent complete right edge,
    multiplying their probabilities."""

    NUM_EDGES = 2

    def apply_iter(self, chart, grammar, left_edge, right_edge):
        # The edges must be adjacent, and the left edge must be waiting for
        # exactly the category that the right edge has completed.
        applicable = (left_edge.end() == right_edge.start()
                      and left_edge.nextsym() == right_edge.lhs()
                      and left_edge.is_incomplete()
                      and right_edge.is_complete())
        if not applicable:
            return

        # The combined edge advances the dot by one and carries the product
        # of the two edges' probabilities.
        combined = ProbabilisticTreeEdge(
            left_edge.prob() * right_edge.prob(),
            span=(left_edge.start(), right_edge.end()),
            lhs=left_edge.lhs(), rhs=left_edge.rhs(),
            dot=left_edge.dot() + 1)

        # Insert with one child-pointer list per derivation of the left edge;
        # yield the edge once if any insertion changed the chart.
        inserted = False
        for child_pointers in chart.child_pointer_lists(left_edge):
            if chart.insert(combined, child_pointers + (right_edge,)):
                inserted = True
        if inserted:
            yield combined
@python_2_unicode_compatible
class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule):
    """Single-edge driver for the probabilistic fundamental rule: given one
    edge, find every partner edge in the chart it can combine with."""

    NUM_EDGES = 1

    _fundamental_rule = ProbabilisticFundamentalRule()

    def apply_iter(self, chart, grammar, edge1):
        rule = self._fundamental_rule
        if edge1.is_incomplete():
            # ``edge1`` acts as the left edge; look rightwards for complete
            # edges of the category it is waiting for.
            for partner in chart.select(start=edge1.end(), is_complete=True,
                                        lhs=edge1.nextsym()):
                for produced in rule.apply_iter(chart, grammar, edge1, partner):
                    yield produced
        else:
            # ``edge1`` acts as the right edge; look leftwards for incomplete
            # edges waiting for its category.
            for partner in chart.select(end=edge1.start(), is_complete=False,
                                        nextsym=edge1.lhs()):
                for produced in rule.apply_iter(chart, grammar, partner, edge1):
                    yield produced

    def __str__(self):
        return 'Fundamental Rule'
class BottomUpProbabilisticChartParser(ParserI):
    """
    An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to
    record partial results.  ``BottomUpProbabilisticChartParser`` maintains
    a queue of edges that can be added to the chart.  This queue is
    initialized with edges for each token in the text that is being
    parsed.  ``BottomUpProbabilisticChartParser`` inserts these edges into
    the chart one at a time, starting with the most likely edges, and
    proceeding to less likely edges.  For each edge that is added to
    the chart, it may become possible to insert additional edges into
    the chart; these are added to the queue.  This process continues
    until enough complete parses have been generated, or until the
    queue is empty.

    The sorting order for the queue is not specified by
    ``BottomUpProbabilisticChartParser``.  Different sorting orders will
    result in different search strategies.  The sorting order for the
    queue is defined by the method ``sort_queue``; subclasses are required
    to provide a definition for this method.

    :type _grammar: PCFG
    :ivar _grammar: The grammar used to parse sentences.
    :type _trace: int
    :ivar _trace: The level of tracing output that should be generated
        when parsing a text.
    """
    def __init__(self, grammar, beam_size=0, trace=0):
        """
        Create a new ``BottomUpProbabilisticChartParser``, that uses
        ``grammar`` to parse texts.

        :type grammar: PCFG
        :param grammar: The grammar used to parse texts.
        :type beam_size: int
        :param beam_size: The maximum length for the parser's edge queue;
            0 means the queue is never pruned.
        :type trace: int
        :param trace: The level of tracing that should be used when
            parsing a text.  ``0`` will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        if not isinstance(grammar, WeightedGrammar):
            raise ValueError("The grammar must be probabilistic WeightedGrammar")
        self._grammar = grammar
        self.beam_size = beam_size
        self._trace = trace

    def grammar(self):
        # Accessor for the grammar supplied at construction time.
        return self._grammar

    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        :type trace: int
        :param trace: The trace level.  A trace level of ``0`` will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        :rtype: None
        """
        self._trace = trace

    # TODO: change this to conform more with the standard ChartParser
    def nbest_parse(self, tokens, n=None):
        """
        Parse ``tokens`` and return the resulting trees sorted by
        descending probability; at most ``n`` trees when ``n`` is given.
        """
        self._grammar.check_coverage(tokens)
        chart = Chart(list(tokens))
        grammar = self._grammar

        # Chart parser rules.
        bu_init = ProbabilisticBottomUpInitRule()
        bu = ProbabilisticBottomUpPredictRule()
        fr = SingleEdgeProbabilisticFundamentalRule()

        # Our queue!
        queue = []

        # Initialize the chart.
        for edge in bu_init.apply_iter(chart, grammar):
            if self._trace > 1:
                print('  %-50s [%s]' % (chart.pp_edge(edge,width=2),
                                        edge.prob()))
            queue.append(edge)

        while len(queue) > 0:
            # Re-sort the queue.
            self.sort_queue(queue, chart)

            # Prune the queue to the correct size if a beam was defined
            if self.beam_size:
                self._prune(queue, chart)

            # Get the best edge.  sort_queue() places the edge to try next
            # at the END of the queue, which is where we pop.
            edge = queue.pop()
            if self._trace > 0:
                print('  %-50s [%s]' % (chart.pp_edge(edge,width=2),
                                        edge.prob()))

            # Apply BU & FR to it.
            queue.extend(bu.apply(chart, grammar, edge))
            queue.extend(fr.apply(chart, grammar, edge))

        # Get a list of complete parses.
        parses = chart.parses(grammar.start(), ProbabilisticTree)

        # Assign probabilities to the trees.
        prod_probs = {}
        for prod in grammar.productions():
            prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
        for parse in parses:
            self._setprob(parse, prod_probs)

        # Sort by probability
        parses.sort(reverse=True, key=lambda tree: tree.prob())

        return parses[:n]

    def _setprob(self, tree, prod_probs):
        # Recursively attach probabilities to ``tree`` and its subtrees by
        # looking up each production's probability in ``prod_probs``.
        if tree.prob() is not None: return

        # Get the prob of the CFG production.
        lhs = Nonterminal(tree.node)
        rhs = []
        for child in tree:
            if isinstance(child, Tree):
                rhs.append(Nonterminal(child.node))
            else:
                rhs.append(child)
        prob = prod_probs[lhs, tuple(rhs)]

        # Get the probs of children.
        for child in tree:
            if isinstance(child, Tree):
                self._setprob(child, prod_probs)
                prob *= child.prob()

        tree.set_prob(prob)

    def sort_queue(self, queue, chart):
        """
        Sort the given queue of ``Edge`` objects, placing the edge that should
        be tried first at the beginning of the queue.  This method
        will be called after each ``Edge`` is added to the queue.

        :param queue: The queue of ``Edge`` objects to sort.  Each edge in
            this queue is an edge that could be added to the chart by
            the fundamental rule; but that has not yet been added.
        :type queue: list(Edge)
        :param chart: The chart being used to parse the text.  This
            chart can be used to provide extra information for sorting
            the queue.
        :type chart: Chart
        :rtype: None
        """
        raise NotImplementedError()

    def _prune(self, queue, chart):
        """ Discard items in the queue if the queue is longer than the beam."""
        if len(queue) > self.beam_size:
            split = len(queue)-self.beam_size
            if self._trace > 2:
                for edge in queue[:split]:
                    print('  %-50s [DISCARDED]' % chart.pp_edge(edge,2))
            # Edges at the front of the (sorted) queue are the least
            # promising, so those are the ones dropped.
            del queue[:split]
class InsideChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that explores edges in
    descending order of their trees' inside probabilities.  The inside
    probability of a tree generated by production *p* with children
    *c[1], c[2], ..., c[n]* is *P(p)P(c[1])P(c[2])...P(c[n])*; the inside
    probability of a token is 1 if it is present in the text and 0 if it
    is absent.  Ordering the queue this way yields a
    lowest-cost-first search strategy.
    """
    # Constructor inherited from BottomUpProbabilisticChartParser.

    def sort_queue(self, queue, chart):
        """
        Order ``queue`` ascending by inside probability, so that the most
        probable edge sits at the end (where the parser pops next).

        :param queue: The queue of ``Edge`` objects to sort.  Each edge in
            this queue could be added to the chart by the fundamental
            rule, but has not been yet.
        :type queue: list(Edge)
        :param chart: The chart being used to parse the text; unused by
            this strategy but part of the ``sort_queue`` contract.
        :type chart: Chart
        :rtype: None
        """
        queue.sort(key=lambda candidate: candidate.prob())
# Eventually, this will become some sort of inside-outside parser:
# class InsideOutsideParser(BottomUpProbabilisticChartParser):
# def __init__(self, grammar, trace=0):
# # Inherit docs.
# BottomUpProbabilisticChartParser.__init__(self, grammar, trace)
#
# # Find the best path from S to each nonterminal
# bestp = {}
# for production in grammar.productions(): bestp[production.lhs()]=0
# bestp[grammar.start()] = 1.0
#
# for i in range(len(grammar.productions())):
# for production in grammar.productions():
# lhs = production.lhs()
# for elt in production.rhs():
# bestp[elt] = max(bestp[lhs]*production.prob(),
# bestp.get(elt,0))
#
# self._bestp = bestp
# for (k,v) in self._bestp.items(): print k,v
#
# def _sortkey(self, edge):
# return edge.structure()[PROB] * self._bestp[edge.lhs()]
#
# def sort_queue(self, queue, chart):
# queue.sort(key=self._sortkey)
import random
class RandomChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries edges in random
    order, i.e. a random search strategy.
    """
    # Constructor inherited from BottomUpProbabilisticChartParser.

    def sort_queue(self, queue, chart):
        # Swap a uniformly chosen edge into the last slot; the parser
        # always pops from the end, so this picks a random edge next.
        pick = random.randint(0, len(queue)-1)
        queue[-1], queue[pick] = queue[pick], queue[-1]
class UnsortedChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that leaves the edge queue
    untouched, trying edges in whatever order they were enqueued.
    """
    # Constructor inherited from BottomUpProbabilisticChartParser.

    def sort_queue(self, queue, chart):
        # Deliberately a no-op: insertion order is the search order.
        return
class LongestChartParser(BottomUpProbabilisticChartParser):
    """
    A bottom-up parser for ``PCFG`` grammars that tries longer edges
    before shorter ones — a form of best-first search.
    """
    # Constructor inherited from BottomUpProbabilisticChartParser.

    def sort_queue(self, queue, chart):
        # Ascending by span length: the longest edge ends up last, which
        # is where the parser pops.
        queue.sort(key=lambda candidate: candidate.length())
##//////////////////////////////////////////////////////
## Test Code
##//////////////////////////////////////////////////////
def demo(choice=None, draw_parses=None, print_parses=None):
    """
    A demonstration of the probabilistic parsers.  The user is
    prompted to select which demo to run, and how many parses should
    be found; and then each parser is run on the same demo, and a
    summary of the results are displayed.

    :param choice: index of the demo sentence/grammar pair to use;
        prompted interactively on stdin when None.
    :param draw_parses: whether to display the parse trees graphically;
        prompted interactively when None.
    :param print_parses: whether to print the parse trees; prompted
        interactively when None.
    """
    import sys, time
    from nltk import tokenize, toy_pcfg1, toy_pcfg2
    from nltk.parse import pchart

    # Define two demos.  Each demo has a sentence and a grammar.
    demos = [('I saw John with my telescope', toy_pcfg1),
             ('the boy saw Jack with Bob under the table with a telescope',
              toy_pcfg2)]

    if choice is None:
        # Ask the user which demo they want to use.
        print()
        for i in range(len(demos)):
            print('%3s: %s' % (i+1, demos[i][0]))
            print('     %r' % demos[i][1])
            print()
        print('Which demo (%d-%d)? ' % (1, len(demos)), end=' ')
        choice = int(sys.stdin.readline().strip())-1
    try:
        sent, grammar = demos[choice]
    except:
        # NOTE(review): bare except also hides e.g. KeyboardInterrupt here.
        print('Bad sentence number')
        return

    # Tokenize the sentence.
    tokens = sent.split()

    # Define a list of parsers.  We'll use all parsers.
    parsers = [
        pchart.InsideChartParser(grammar),
        pchart.RandomChartParser(grammar),
        pchart.UnsortedChartParser(grammar),
        pchart.LongestChartParser(grammar),
        pchart.InsideChartParser(grammar, beam_size = len(tokens)+1)   # was BeamParser
        ]

    # Run the parsers on the tokenized sentence.
    times = []
    average_p = []
    num_parses = []
    all_parses = {}
    for parser in parsers:
        print('\ns: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar))
        parser.trace(3)
        t = time.time()
        parses = parser.nbest_parse(tokens)
        times.append(time.time()-t)
        # Mean probability over the parses found (0 when none were found).
        p = (reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses) if parses else 0)
        average_p.append(p)
        num_parses.append(len(parses))
        # Collect the union of parses found by any parser.
        for p in parses: all_parses[p.freeze()] = 1

    # Print some summary statistics
    print()
    print('       Parser      Beam | Time (secs)   # Parses   Average P(parse)')
    print('------------------------+------------------------------------------')
    for i in range(len(parsers)):
        print('%18s %4d |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__,
                                               parsers[i].beam_size,
                                               times[i],num_parses[i],average_p[i]))
    parses = all_parses.keys()
    if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
    else: p = 0
    print('------------------------+------------------------------------------')
    print('%18s |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p))

    if draw_parses is None:
        # Ask the user if we should draw the parses.
        print()
        print('Draw parses (y/n)? ', end=' ')
        draw_parses = sys.stdin.readline().strip().lower().startswith('y')
    if draw_parses:
        from nltk.draw.tree import draw_trees
        print('  please wait...')
        draw_trees(*parses)

    if print_parses is None:
        # Ask the user if we should print the parses.
        print()
        print('Print parses (y/n)? ', end=' ')
        print_parses = sys.stdin.readline().strip().lower().startswith('y')
    if print_parses:
        for parse in parses:
            print(parse)
# Run the interactive demonstration when this module is executed directly.
if __name__ == '__main__':
    demo()
| bbengfort/TextBlob | textblob/nltk/parse/pchart.py | Python | mit | 18,791 |
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt

# Measured driving frequencies (Hz) and the sound level (dB) read at each.
Freq = np.array([20, 30, 40, 45, 48, 50, 52, 55, 60, 62, 65, 68, 70, 80,
                 85, 88, 90, 95, 98, 100, 105, 110, 120, 125, 130, 140,
                 145, 160])
Db = np.array([81.7, 85.1, 94, 103.8, 110.8, 112.7, 110.9, 105.8, 96.2,
               95.9, 94.7, 94.6, 95.2, 99.2, 102.6, 105.3, 106.9, 117.5,
               119.3, 117.3, 110.2, 108.1, 108.4, 108.9, 109.7, 118.6,
               120, 116.8])

# Axis labels and title (user-facing text kept verbatim).
plt.xlabel('Frecuencia')
plt.ylabel('Decibel')
plt.title('DecibelvsFreq a 0.5volts')

# Optionally annotate every sample with its coordinates:
# for i in range(len(Freq)):
#     plt.text(Freq[i], Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))

plt.axis([0, 370, 50, 130])
plt.plot(Freq, Db, 'bo', Freq, Db, 'k')
plt.grid(True)
plt.show()
| P1R/cinves | TrabajoFinal/tubo350cm/2-DbvsFreq/F-Db-Maximos/DbvsFreq-Ampde0.5.py | Python | apache-2.0 | 628 |
"""
Pyramid security concern.
see http://docs.pylonsproject.org/projects/pyramid/en/latest/tutorials/wiki2/authorization.html
"""
import logging
from pyramid.security import Allow
from .models import DBSession, User, Group
log = logging.getLogger(__name__)
class GroupFinder(object):
    """
    Callable that maps a user login to the names of its groups.

    Used as the pyramid ``groupfinder`` callback; results are cached in a
    class-level dict until :meth:`reset` is called.
    """
    # login -> list of group names, shared by all instances until reset().
    _users = {}

    def reset(self):
        """
        Drop every cached login/groups association.
        """
        self._users = {}

    def __call__(self, login, request):
        """
        :param login: user login
        :type login: unicode
        :param request: pyramid request
        :type login: :class:`pyramid.request.Request`
        :return: list of groups name.
        :rtype: list of unicode
        """
        try:
            return self._users[login]
        except KeyError:
            pass
        user = User.by_login(DBSession(), login)
        groups = [group.name for group in user.groups] if user else []
        self._users[login] = groups
        return groups


groupfinder = GroupFinder()
class RootFactory(object):
    """
    Pyramid root factory exposing the application ACL.

    :param request: pyramid request
    :type request: :class:`pyramid.request.Request`
    """
    __name__ = None
    __parent__ = None
    # Class-level cache of the ACL, built lazily on the first request.
    _acl = None

    def __init__(self, request):
        client = request.client_addr
        method = request.method
        try:
            route_url = request.current_route_url()
        except ValueError as exc:
            # current_route_url() raises when no route matched; log the
            # error text in place of the URL.
            route_url = str(exc)
        log.info('[%s] %s %s' % (client, method, route_url))
        self.__acl__ = self.get_acl(request)

    def get_acl(self, request):
        """
        Return the ACL, building it from the SQL database on first use
        and serving the cached copy afterwards.

        :param request: pyramid request
        :type request: :class:`pyramid.request.Request`
        :return: ACLs in pyramid format: (Allow, group name, permission name)
        :rtype: list of tuple
        """
        if RootFactory._acl is None:
            session = DBSession()
            entries = []
            for group in Group.all(session):
                entries.extend((Allow, group.name, permission.name)
                               for permission in group.permissions)
            RootFactory._acl = entries
        return RootFactory._acl
| sayoun/pyvac | pyvac/security.py | Python | bsd-3-clause | 2,292 |
# -*- coding: utf-8 *-*
from .base import RedisBase
# Sentinel: lets lrem() distinguish "argument omitted" from any real value.
NOT_SET = object()


class ListCommands(RedisBase):
    """Redis list commands (BLPOP, LPUSH, LRANGE, ...).

    Mixin meant to be combined with the other ``RedisBase`` command mixins.
    """

    # LIST COMMANDS
    def _blocking_pop(self, command, keys, timeout):
        """Shared implementation of BLPOP/BRPOP.

        Normalizes ``keys`` to a flat argument list (a single key string
        becomes a one-element list), appends ``timeout`` (``None`` is
        treated as 0, i.e. block indefinitely) and issues ``command``.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command(command, *keys)

    def blpop(self, keys, timeout=0):
        """
        LPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to LPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        return self._blocking_pop('BLPOP', keys, timeout)

    def brpop(self, keys, timeout=0):
        """
        RPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to RPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        return self._blocking_pop('BRPOP', keys, timeout)

    def brpoplpush(self, src, dst, timeout=0):
        """
        Pop a value off the tail of ``src``, push it on the head of ``dst``
        and then return it.

        This command blocks until a value is in ``src`` or until ``timeout``
        seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
        forever.
        """
        if timeout is None:
            timeout = 0
        return self.execute_command('BRPOPLPUSH', src, dst, timeout)

    def lindex(self, name, index):
        """
        Return the item from list ``name`` at position ``index``

        Negative indexes are supported and will return an item at the
        end of the list
        """
        return self.execute_command('LINDEX', name, index)

    def linsert(self, name, where, refvalue, value):
        """
        Insert ``value`` in list ``name`` either immediately before or after
        [``where``] ``refvalue``

        Returns the new length of the list on success or -1 if ``refvalue``
        is not in the list.
        """
        return self.execute_command('LINSERT', name, where, refvalue, value)

    def llen(self, name):
        "Return the length of the list ``name``"
        return self.execute_command('LLEN', name)

    def lpop(self, name):
        "Remove and return the first item of the list ``name``"
        return self.execute_command('LPOP', name)

    def lpush(self, name, *values):
        "Push ``values`` onto the head of the list ``name``"
        return self.execute_command('LPUSH', name, *values)

    def lpushx(self, name, value):
        "Push ``value`` onto the head of the list ``name`` if ``name`` exists"
        return self.execute_command('LPUSHX', name, value)

    def lrange(self, name, start, end):
        """
        Return a slice of the list ``name`` between
        position ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LRANGE', name, start, end)

    def lrem(self, name, count, value=NOT_SET):
        """
        Remove the first ``count`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The count argument influences the operation in the following ways:
            count > 0: Remove elements equal to value moving from head to tail.
            count < 0: Remove elements equal to value moving from tail to head.
            count = 0: Remove all elements equal to value.

        If using non-strict Redis (strict_redis=False), ``count`` and
        ``value`` are swapped, and ``count`` then defaults to 0.
        """
        if not self.strict_redis:
            # Legacy (non-strict) argument order was LREM name value count;
            # swap back and default the missing count to 0.
            count, value = value, count
            if count is NOT_SET:
                count = 0
        elif value is NOT_SET:
            raise ValueError("value must be set.")
        return self.execute_command('LREM', name, count, value)

    def lset(self, name, index, value):
        "Set ``position`` of list ``name`` to ``value``"
        return self.execute_command('LSET', name, index, value)

    def ltrim(self, name, start, end):
        """
        Trim the list ``name``, removing all values not within the slice
        between ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LTRIM', name, start, end)

    def rpop(self, name):
        "Remove and return the last item of the list ``name``"
        return self.execute_command('RPOP', name)

    def rpoplpush(self, src, dst):
        """
        RPOP a value off of the ``src`` list and atomically LPUSH it
        on to the ``dst`` list.  Returns the value.
        """
        return self.execute_command('RPOPLPUSH', src, dst)

    def rpush(self, name, *values):
        "Push ``values`` onto the tail of the list ``name``"
        return self.execute_command('RPUSH', name, *values)

    def rpushx(self, name, value):
        "Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
        return self.execute_command('RPUSHX', name, value)
| katakumpo/niceredis | niceredis/client/list.py | Python | mit | 5,489 |
from past.builtins import basestring
import os.path
import nineml
root = os.path.abspath(os.path.join(os.path.dirname(__file__), 'catalog'))
class NineMLCatalogSpecifiedMultipleNamesError(Exception):
    """Raised when an element name is supplied both as the ``name`` keyword
    argument and as a '#' fragment inside the catalog path string."""
    pass
def load(path, name=None):
    """Read a 9ML document from the catalog at ``path`` and return it.

    When an element name is given — either via ``name`` or as a ``#name``
    fragment inside ``path`` — the named element of the document is
    returned instead of the whole document.
    """
    full_path, element_name = get_full_path(path, name)
    document = nineml.read(full_path)
    if element_name is None:
        return document
    return document[element_name]
def get_full_path(path, name=None):
    """Resolve a catalog path to ``(absolute_xml_path, element_name)``.

    ``path`` may be a '/'-separated string (optionally ending in '.xml'
    and optionally carrying a ``#element`` fragment), or an already-split
    sequence of path components.

    :raises NineMLCatalogSpecifiedMultipleNamesError: if an element name is
        supplied both via ``name`` and via a '#' fragment in ``path``.
    """
    if isinstance(path, basestring):
        if '#' in path:
            # Split on the first '#' only: extra '#' characters in the
            # fragment previously caused an unpacking ValueError.
            path, fragment = path.split('#', 1)
            if name is not None:
                raise NineMLCatalogSpecifiedMultipleNamesError(
                    "Name specified both in kwarg ('{}') and in path string "
                    "'{}' (i.e. section following #)".format(name, fragment))
            name = fragment
        if path.endswith('.xml'):
            path = path[:-4]
        path = path.strip('/').split('/')
    return os.path.join(root, *path) + '.xml', name
| tclose/NineMLCatalog | ninemlcatalog/base.py | Python | mit | 1,059 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.