| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
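Each row below pairs repository metadata with a Python source sample split into prefix / middle / suffix fields (a fill-in-the-middle style layout). As a rough illustration only (values taken from the first row, long code fields truncated), a single record could be represented as:

    row = {
        "repo_name": "SUSE/azure-sdk-for-python",
        "path": "azure-mgmt-cdn/azure/mgmt/cdn/models/sso_uri.py",
        "language": "Python",
        "license": "mit",
        "size": 893,
        "score": 0,
        "prefix": "# coding=utf-8\n...",  # code before the masked span
        "middle": "l):\n...",             # the masked span itself
        "suffix": "...",                   # code after the masked span
    }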
SUSE/azure-sdk-for-python | azure-mgmt-cdn/azure/mgmt/cdn/models/sso_uri.py | Python | mit | 893 | 0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SsoUri(Model):
"""SSO URI required to login to the supplemental portal.
    :param sso_uri_value: The URI used to login to the supplemental portal.
:type sso_uri_value: str
"""
_attribute_map = {
'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
}
def __init__(self, sso_uri_value=None):
self.sso_uri_value = sso_uri_value
RDCEP/hybrid-dile-server | lib/utils/geolocation.py | Python | apache-2.0 | 5,668 | 0.010233
import math
class GeoLocation:
'''
Class representing a coordinate on a sphere, most likely Earth.
    This class is based on the code sample in this paper:
http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
The owner of that website, Jan Philip Matuschek, is the full owner of
his intellectual property. The python port was realized by jfein:
https://github.com/jfein/PyGeoTools/blob/master/geolocation.py
'''
MIN_LAT = math.radians(-90)
MAX_LAT = math.radians(90)
MIN_LON = math.radians(-180)
MAX_LON = math.radians(180)
EARTH_RADIUS = 6378.1 # kilometers
CONV_FACTOR = 0.621371
@classmethod
def from_degrees(cls, deg_lat, deg_lon):
rad_lat = math.radians(deg_lat)
rad_lon = math.radians(deg_lon)
return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)
@classmethod
def from_radians(cls, rad_lat, rad_lon):
deg_lat = math.degrees(rad_lat)
deg_lon = math.degrees(rad_lon)
return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)
def __init__(
self,
rad_lat,
rad_lon,
deg_lat,
deg_lon
):
self.rad_lat = float(rad_lat)
self.rad_lon = float(rad_lon)
self.deg_lat = float(deg_lat)
self.deg_lon = float(deg_lon)
self._check_bounds()
def __str__(self):
degree_sign= u'\N{DEGREE SIGN}'
return ("({0:.4f}deg, {1:.4f}deg) = ({2:.6f}rad, {3:.6f}rad)").format(
self.deg_lat, self.deg_lon, self.rad_lat, self.rad_lon)
def _check_bounds(self):
if (self.rad_lat < GeoLocation.MIN_LAT
or self.rad_lat > GeoLocation.MAX_LAT
or self.rad_lon < GeoLocation.MIN_LON
or self.rad_lon > GeoLocation.MAX_LON):
raise Exception("Illegal arguments")
def distance_to(self, other, unit="kilometers", radius=EARTH_RADIUS):
'''
Computes the great circle distance between this GeoLocation instance
and the other.
'''
distance = radius * math.acos(
math.sin(self.rad_lat) * math.sin(other.rad_lat) +
math.cos(self.rad_lat) *
math.cos(other.rad_lat) *
            math.cos(self.rad_lon - other.rad_lon)
)
if unit.lower() == "kilometers":
return distance
elif unit.lower() == "miles":
            return distance*GeoLocation.CONV_FACTOR  # convert kilometers to miles
else:
raise Exception("Illegal arguments")
def bounding_locations(self, dist, unit="kilometers", radius=EARTH_RADIUS):
'''
        Computes the bounding coordinates of all points on the surface
of a sphere that has a great circle distance to the point represented
by this GeoLocation instance that is less or equal to the distance argument.
Param:
distance - the distance from the point represented by this GeoLocation
instance. Must be measured in the same unit as the radius
argument (which is kilometers by default)
radius - the radius of the sphere. defaults to Earth's radius.
        Returns a list of two GeoLocations - the SW corner and the NE corner - that
represents the bounding box.
'''
if unit.lower() == "kilometers":
distance = dist
elif unit.lower() == "miles":
distance = dist/GeoLocation.CONV_FACTOR
else:
raise Exception("Illegal arguments")
if radius < 0 or distance < 0:
raise Exception("Illegal arguments")
# angular distance in radians on a great circle
rad_dist = distance / radius
min_lat = self.rad_lat - rad_dist
max_lat = self.rad_lat + rad_dist
if min_lat > GeoLocation.MIN_LAT and max_lat < GeoLocation.MAX_LAT:
delta_lon = math.asin(math.sin(rad_dist) / math.cos(self.rad_lat))
min_lon = self.rad_lon - delta_lon
if min_lon < GeoLocation.MIN_LON:
min_lon += 2 * math.pi
max_lon = self.rad_lon + delta_lon
if max_lon > GeoLocation.MAX_LON:
max_lon -= 2 * math.pi
# a pole is within the distance
else:
min_lat = max(min_lat, GeoLocation.MIN_LAT)
max_lat = min(max_lat, GeoLocation.MAX_LAT)
min_lon = GeoLocation.MIN_LON
max_lon = GeoLocation.MAX_LON
return [ GeoLocation.from_radians(min_lat, min_lon) ,
GeoLocation.from_radians(max_lat, max_lon) ]
if __name__ == '__main__':
# Test degree to radian conversion
loc1 = GeoLocation.from_degrees(26.062951, -80.238853)
loc2 = GeoLocation.from_radians(loc1.rad_lat, loc1.rad_lon)
assert (loc1.rad_lat == loc2.rad_lat and loc1.rad_lon == loc2.rad_lon
and loc1.deg_lat == loc2.deg_lat and loc1.deg_lon == loc2.deg_lon)
# Test distance between two locations
loc1 = GeoLocation.from_degrees(26.062951, -80.238853)
loc2 = GeoLocation.from_degrees(26.060484,-80.207268)
assert loc1.distance_to(loc2) == loc2.distance_to(loc1)
# Test bounding box
loc = GeoLocation.from_degrees(22.5,-135.0)
distance = 1 # 1 kilometer
SW_loc, NE_loc = loc.bounding_locations(distance)
print loc.distance_to(SW_loc)
print loc.distance_to(NE_loc)
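    # --- Illustrative usage sketch (not from the original file) ---
    # bounding_locations() gives a cheap SW/NE box to pre-filter candidates
    # before the exact great-circle check; this assumes the box does not wrap
    # the antimeridian (i.e. SW longitude <= NE longitude).
    center = GeoLocation.from_degrees(22.5, -135.0)
    sw, ne = center.bounding_locations(10)  # 10 km box
    nearby = []
    for lat, lon in [(22.51, -135.01), (23.5, -135.0)]:
        p = GeoLocation.from_degrees(lat, lon)
        if (sw.deg_lat <= p.deg_lat <= ne.deg_lat
                and sw.deg_lon <= p.deg_lon <= ne.deg_lon
                and center.distance_to(p) <= 10):
            nearby.append((lat, lon))
    print nearby  # expected: only the point ~1.5 km away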
Crompulence/cpl-library | test/valgrind/test_valgrind.py | Python | gpl-3.0 | 3,969 | 0.006551
import pytest
from cplpy import run_test, prepare_config
import subprocess as sp
import os
import glob
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def get_subprocess_error(e):
print("subprocess ERROR")
import json
error = json.loads(e[7:])
print(error['code'], error['message'])
# -----MAPPING TESTS-----
# EXPLANATION: These tests fail due to no_procs(MD) != k*no_procs(CFD),
# k in [1,2,3,...] in one direction.
MD_EXEC = "./md"
CFD_EXEC = "./cfd"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def prepare_config_fix():
#Try to setup code
mdcodes = "array_stuff.f90 md_recvsend_cells.f90"
bldmd = ("mpif90 " + mdcodes
+ "-I" + os.environ["CPL_PATH"]
+ "/include -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./md")
cfdcodes = "array_stuff.f90 cfd_sendrecv_cells.f90"
bldcfd= ("mpif90 " + cfdcodes
+ " -I" + os.environ["CPL_PATH"] + "/include "
+ " -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f md cfd", shell=True)
out = sp.check_output(bldmd, shell=True)
out = sp.check_output(bldcfd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
def test_memory_leak():
#Try to run code
cmd = ("mpiexec -n 4 valgrind --leak-check=full --log-file='vg_md.%q{PMI_RANK}' ./md "
+ ": -n 2 valgrind --leak-check=full --log-file='vg_cfd.%q{PMI_RANK}' ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f vg_*", shell=True)
out = sp.check_output(cmd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
#Check error
files = glob.glob("vg_*")
for filename in files:
with open(filename,'r') as f:
filestr = f.read()
findstr= "definitely lost:"
indx = filestr.find(findstr)
line = filestr[indx+len(findstr):].split("\n")[0]
print(line)
assert int(line.split(" ")[1]) == 0
#@pytest.fixture()
#def prepare_config_fix(tmpdir):
# prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
# #Build code
# try:
# check_output("./build.sh", stderr=STDOUT, shell=True)
# except:
# raise
#@pytest.mark.parametrize("cfdprocs, mdprocs, err_msg", [
# ((1, 2, 1), (2, 2, 1), "")])
#def test_valgrind(prepare_config_fix, cfdprocs, mdprocs, err_msg):
# MD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0}
# MD_PARAMS["npx"], MD_PARAMS["npy"], MD_PARAMS["npz"] = mdprocs
# CFD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0,
# "ncx": 24, "ncy": 24, "ncz": 24,
# "which_test": "cell_test"}
# CFD_PARAMS["npx"], CFD_PARAMS["npy"], CFD_PARAMS["npz"] = cfdprocs
# CONFIG_PARAMS = {"cfd_bcx": 1, "cfd_bcy": 1, "cfd_bcz": 1,
#                  "olap_xlo": 1, "olap_xhi": 24,
# "olap_ylo": 1, "olap_yhi": 4,
# "olap_zlo": 1, "olap_zhi": 24,
# "cnst_xlo": 1, "cnst_xhi": 1,
# "cnst_ylo": 1, "cnst_yhi": 1,
# "cnst_zlo": 1, "cnst_zhi": 1,
# "tstep_ratio": 50, }
# parametrizeConfig(template_dir, config_params)
|
mozilla/build-relengapi | relengapi/blueprints/tooltool/__init__.py | Python | mpl-2.0 | 14,685 | 0.000885
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
import random
import re
import sqlalchemy as sa
import structlog
from flask import Blueprint
from flask import current_app
from flask import g
from flask import redirect
from flask import url_for
from flask.ext.login import current_user
from flask.ext.login import login_required
from werkzeug import Response
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from relengapi.blueprints.tooltool import grooming
from relengapi.blueprints.tooltool import tables
from relengapi.blueprints.tooltool import types
from relengapi.blueprints.tooltool import util
from relengapi.lib import angular
from relengapi.lib import api
from relengapi.lib import time
from relengapi.lib.permissions import p
metadata = {
'repository_of_record': 'https://git.mozilla.org/?p=build/tooltool.git;a=summary',
'bug_report_url': 'http://goo.gl/XZpyie', # bugzilla saved new-bug form
}
bp = Blueprint('tooltool', __name__,
template_folder='templates',
static_folder='static')
is_valid_sha512 = re.compile(r'^[0-9a-f]{128}$').match
p.tooltool.download.public.doc("Download PUBLIC files from tooltool")
p.tooltool.upload.public.doc("Upload PUBLIC files to tooltool")
# note that internal does not imply public; that's up to the user.
p.tooltool.download.internal.doc("Download INTERNAL files from tooltool")
p.tooltool.upload.internal.doc("Upload INTERNAL files to tooltool")
p.tooltool.manage.doc("Manage tooltool files, including deleting and changing visibility levels")
# This value should be fairly short (and its value is included in the
# `upload_batch` docstring). Uploads cannot be validated until this
# time has elapsed, otherwise a malicious uploader could alter a file
# after it had been verified.
UPLOAD_EXPIRES_IN = 60
GET_EXPIRES_IN = 60
logger = structlog.get_logger()
def get_region_and_bucket(region_arg):
cfg = current_app.config['TOOLTOOL_REGIONS']
if region_arg and region_arg in cfg:
return region_arg, cfg[region_arg]
# no region specified, so return one at random
return random.choice(cfg.items())
bp.root_widget_template(
'tooltool_root_widget.html', priority=100,
condition=lambda: not current_user.is_anonymous)
@bp.route('/')
@login_required
def root():
return angular.template('tooltool.html',
url_for('.static', filename='tooltool.js'),
url_for('.static', filename='tooltool.css'))
@bp.route('/upload')
@api.apimethod([types.UploadBatch], unicode)
def search_batches(q):
"""Search upload batches. The required query parameter ``q`` can match a
substring of an author's email or a batch message."""
tbl = tables.Batch
q = tbl.query.filter(sa.or_(
tbl.author.contains(q),
tbl.message.contains(q)))
return [row.to_json() for row in q.all()]
@bp.route('/upload/<int:id>')
@api.apimethod(types.UploadBatch, int)
def get_batch(id):
"""Get a specific upload batch by id."""
row = tables.Batch.query.filter(tables.Batch.id == id).first()
if not row:
raise NotFound
return row.to_json()
@bp.route('/upload', methods=['POST'])
@api.apimethod(types.UploadBatch, unicode, body=types.UploadBatch)
def upload_batch(region=None, body=None):
"""Create a new upload batch. The response object will contain a
``put_url`` for each file which needs to be uploaded -- which may not be
all! The caller is then responsible for uploading to those URLs. The
resulting signed URLs are valid for one hour, so uploads should begin
within that timeframe. Consider using Amazon's MD5-verification
capabilities to ensure that the uploaded files are transferred correctly,
although the tooltool server will verify the integrity anyway. The
    upload must have the header ``Content-Type: application/octet-stream``.
The query argument ``region=us-west-1`` indicates a preference for URLs
in that region, although if the region is not available then URLs in
other regions may be returned.
The returned URLs are only valid for 60 seconds, so all upload requests
must begin within that timeframe. Clients should therefore perform all
uploads in parallel, rather than sequentially. This limitation is in
place to prevent malicious modification of files after they have been
verified."""
region, bucket = get_region_and_bucket(region)
if not body.message:
raise BadRequest("message must be non-empty")
if not body.files:
raise BadRequest("a batch must include at least one file")
if body.author:
raise BadRequest("Author must not be specified for upload")
try:
body.author = current_user.authenticated_email
except AttributeError:
# no authenticated_email -> use the stringified user (probably a token
# ID)
body.author = str(current_user)
# verify permissions based on visibilities
visibilities = set(f.visibility for f in body.files.itervalues())
for v in visibilities:
prm = p.get('tooltool.upload.{}'.format(v))
if not prm or not prm.can():
raise Forbidden("no permission to upload {} files".format(v))
session = g.db.session(tables.DB_DECLARATIVE_BASE)
batch = tables.Batch(
uploaded=time.now(),
author=body.author,
message=body.message)
s3 = current_app.aws.connect_to('s3', region)
for filename, info in body.files.iteritems():
log = logger.bind(tooltool_sha512=info.digest, tooltool_operation='upload',
tooltool_batch_id=batch.id, mozdef=True)
if info.algorithm != 'sha512':
raise BadRequest("'sha512' is the only allowed digest algorithm")
if not is_valid_sha512(info.digest):
raise BadRequest("Invalid sha512 digest")
digest = info.digest
file = tables.File.query.filter(tables.File.sha512 == digest).first()
if file and file.visibility != info.visibility:
raise BadRequest("Cannot change a file's visibility level")
if file and file.instances != []:
if file.size != info.size:
raise BadRequest("Size mismatch for {}".format(filename))
else:
if not file:
file = tables.File(
sha512=digest,
visibility=info.visibility,
size=info.size)
session.add(file)
log.info("generating signed S3 PUT URL to {} for {}; expiring in {}s".format(
info.digest[:10], current_user, UPLOAD_EXPIRES_IN))
info.put_url = s3.generate_url(
method='PUT', expires_in=UPLOAD_EXPIRES_IN, bucket=bucket,
key=util.keyname(info.digest),
headers={'Content-Type': 'application/octet-stream'})
# The PendingUpload row needs to reflect the updated expiration
# time, even if there's an existing pending upload that expires
# earlier. The `merge` method does a SELECT and then either UPDATEs
# or INSERTs the row. However, merge needs the file_id, rather than
# just a reference to the file object; and for that, we need to flush
# the inserted file.
session.flush()
pu = tables.PendingUpload(
file_id=file.id,
region=region,
expires=time.now() + datetime.timedelta(seconds=UPLOAD_EXPIRES_IN))
session.merge(pu)
session.add(tables.BatchFile(filename=filename, file=file, batch=batch))
session.add(batch)
session.commit()
body.id = batch.id
return body
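# Illustrative client-side sketch (an assumption, not part of this module): the
# flow described in the docstring above is roughly "POST the batch, then PUT
# each returned put_url within UPLOAD_EXPIRES_IN seconds". The host, endpoint
# path and the ``{"result": ...}`` response envelope below are assumptions.
#
#   import requests
#   resp = requests.post("https://relengapi.example.com/tooltool/upload",
#                        json=batch_description).json()
#   for name, info in resp["result"]["files"].items():
#       if info.get("put_url"):
#           with open(name, "rb") as fh:
#               requests.put(info["put_url"], data=fh,
#                            headers={"Content-Type": "application/octet-stream"})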
@bp.route('/upload/complete/sha512/<digest>')
@api.apimethod(unicode, unicode, status_code=202)
def upload_complete(digest):
"""Signal that a file has been uploaded and the s
kyrus/PyMyo | modules/b64decode/command.py | Python | bsd-3-clause | 886 | 0.021445
###############################################################################
## File : b64decode.py
## Description: Base64 decode a supplied list of strings
## :
## Created_On : Wed Sep 26 12:33:16 2012
## Created_By : Rich Smith ([email protected])
## Modified_On: Tue Jan 29 16:42:41 2013
## Modified_By: Rich Smith ([email protected])
## License : BSD-3
##
##
###############################################################################
import base64
__author__ = "[email protected]"
__version__ = 1.0
__updated__ = "26/09/2012"
__help__ = "Module for decoding a string from Base64 representation"
__alias__ = ["b64d"]
def Command(pymyo, name, *args):
"""
Base64 decode each argument supplied
"""
for s in args:
try:
pymyo.output( base64.decodestring(s) )
except:
pymyo.error("Error decoding %s"%(s) )
sebcourtois/pypeline-tool-devkit | pytaya/core/rendering.py | Python | gpl-3.0 | 12,494 | 0.006163
import maya.cmds;mc = maya.cmds
import pymel.core;pm = pymel.core
from pytaya.core.general import listForNone
from pytd.util.logutils import logMsg
from pytd.util.sysutils import grouper
def fileNodesFromObjects(oObjList):
return fileNodesFromShaders(shadersFromObjects(oObjList))
def fileNodesFromShaders(oMatList):
oFileNodeList = set()
for oMat in oMatList:
oFileNodeList.update(oMat.listHistory(type="file"))
return list(oFileNodeList)
def shadersFromObjects(objList, connectedTo=""):
sAttrName = connectedTo
if not objList:
return []
oMatSgList = shadingGroupsFromObjects(objList)
oMatList = []
for oMatSg in oMatSgList:
sName = oMatSg.attr(sAttrName).name() if connectedTo else oMatSg.name()
oMatList.extend(pm.ls(listForNone(mc.listConnections(sName, source=True,
destination=False)),
type=mc.listNodeTypes('shader', ex="texture")))
return oMatList
def shadingGroupsFromObjects(objList):
oShdGrpList = set()
for obj in objList:
oObj = obj if isinstance(obj, pm.PyNode) else pm.PyNode(obj)
oShdGrpList.update(shadingGroupsForObject(oObj))
return list(oShdGrpList)
def shadingGroupsForObject(oObj, warn=True):
oShdGrpList = []
oShape = None
if isinstance(oObj, pm.general.MeshFace):
indiceList = oObj.indices()
for oShdEng in oObj.listHistory(type="shadingEngine"):
if set(indiceList).intersection(set(oShdEng.members()[0].indices())):
oShdGrpList.append(oShdEng)
elif isinstance(oObj, pm.general.NurbsSurfaceFace):
oShape = oObj.node()
elif isinstance(oObj, pm.nt.Transform):
oShape = oObj.getShape()
elif isinstance(oObj, (pm.nt.Mesh, pm.nt.NurbsSurface)):
oShape = oObj
elif warn:
logMsg("Can't get shading groups from {}".format(repr(oObj)) , warning=True)
if not oShdGrpList:
if oShape:
oShdGrpList = oShape.shadingGroups()
if not oShdGrpList:
oShdGrpList = oShape.connections(type="shadingEngine")
return oShdGrpList
def conformShadingNetworkToNamespace(oMeshList, sNamespaceToMatch , **kwargs):
bForce = kwargs.get("force", False)
oShadingGroupMembersDct = {}
oMatNotConformList = []
for oShape in oMeshList:
# print "\nfor shape: ", oShape
oMatSGList = shadingGroupsForObject(oShape)
for oMatSG in oMatSGList:
# print "for shadingGroup: ", oMatSG
oMatList = pm.ls(oMatSG.inputs(), type=mc.listNodeTypes('shader', ex="texture"))
oMat = oMatList[0]
##ignore shadingGroups where materials are defaultNode
if oMat.isDefaultNode():
continue
##ignore shadingGroups where materials are already in namespace to match
sMatNamespace = oMat.namespace()
# print "sMatNamespace", sMatNamespace
# print "sNamespaceToMatch", sNamespaceToMatch
if sMatNamespace == sNamespaceToMatch:
continue
else:
oMatNotConformList.append(oMat)
oMembers = oMatSG.members()
for oMember in oMembers:
# print "member :", oMember
if oMember.node() == oShape:
oShadingGroupMembersDct.setdefault(oMatSG, []).append(oMember)
# for k, v in oShadingGroupMembersDct.iteritems():
# print "for shadingGroup: ", k, ", specific members are: ", v
if oMatNotConformList:
if bForce:
pass
else:
result = pm.confirmDialog(title='Materials not conform to Namespace...'
, message="Found materials not conform to Namespace,\nCopy Shading Network, Conform to Namespace & Assign ?"
, button=["OK", 'Cancel']
, defaultButton='Cancel'
, cancelButton='Cancel'
, dismissString='Cancel')
if result == "Cancel":
pm.warning("Materials Namespace conformation cancelled.")
return bForce
else:
bForce = True
else:
if sNamespaceToMatch:
logMsg('Materials already conformed to Namespace: "{0}"'.format(sNamespaceToMatch) , warning=True)
return bForce
##Force current namespace to the one to match to duplicate in this namespace
mc.namespace(set=":")
mc.namespace(set=sNamespaceToMatch if sNamespaceToMatch else ":")
oMatNotConformList = []
oShapeAssignedList = []
for oMatSG, oMembers in oShadingGroupMembersDct.iteritems():
oNewMatSGs = pm.duplicate(oMatSG, rr=True, un=True)
oNewMatSG = oNewMatSGs[0]
# print "old shadingGroup: ", oMatSG
# print "new shadingGroup: ", oNewMatSGs[0]
# print "oMembers", oMembers
# print oMembers[0]
for oMember in oMembers:
oShape = oMember.node()
if oShape not in oShapeAssignedList:
oShapeAssignedList.append(oShape)
try:
pm.sets(oNewMatSG, e=True, forceElement=oShape)
logMsg('Material "{0}" assigned first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
except:
logMsg('Could not assign material "{0}" first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
try:
pm.sets(oNewMatSG, e=True, forceElement=oMembers)
logMsg('Material "{0}" assigned to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
except:
logMsg('Could not assign material "{0}" to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
mc.namespace(set=":")
return bForce
def transferUvAndShaders(oSrcGrp, oDestGrp):
notCompatibleShapeList = []
sSourceNameSpace = oSrcGrp.namespace()
notFoundList = []
transferList = []
oTargetList = pm.ls(oDestGrp, dag=True, tr=True)
#searchCount = len(oTargetList)
for oTargetXfm in oTargetList:
oShape = oTargetXfm.getShape(ni=True)
if isinstance(oShape, pm.nt.Mesh):
sXfmName = oTargetXfm.nodeName()
sSourceName = sSourceNameSpace + sXfmName
oSourceXfm = pm.PyNode(sSourceName)
if oSourceXfm:
transferList.append((oSourceXfm, oTargetXfm))
# print oSourceXfm, oTargetXfm
else:
notFoundList.append(oTargetXfm)
print 'No match found for "{0}"'.format(sXfmName)
print "Searching... {0}".format(oTargetXfm.nodeName())
# oSet = fncTools.checkSet("noMatchFound")
# if notFoundList:
# pm.sets(oSet, addElement=notFoundList)
result = pm.confirmDialog(title='Transfer Uvs',
message='Found {0}/{1} mismatches :'.format(len(notFoundList), len(transferList)),
                              button=['Ok', 'Cancel'],
defaultButton='Cancel',
cancelButton='Cancel',
dismissString='Cancel')
if result == 'Cancel':
return
else :
for oSourceXfm, oTargetXfm in transferList:
oSourceShape = oSourceXfm.getShape(ni=True)
oHistList = oTargetXfm.listHistory()
oShapeList = pm.ls(oHistList, type="mesh")
oTargetShape = None
            bShapeOrig = False
oTargetCurrentShape = oTargetXfm.getShape(ni=True)
if len(oShapeList) > 1:
for oShape in oShapeList:
if oShape.getAttr("intermediateObject") and oShape.attr("worldMesh").outputs():
bShapeOrig = True
oShape.setAttr("intermediateObject", False)
oTargetShape = oShape
break
else:
oTargetShape = oTargetCurrentShape
if oTargetShape:
try:
Collisionc/sickbeard_mp4_automator | post_process/sample.py | Python | mit | 214 | 0.028037
#!/usr/bin/python
import os
import json
def main():
print("Sample Post Script")
files = json.loads(os.environ.get('MH_FILES'))
for filename in files:
print(filename)
if __name__ == "__main__":
    main()
pudo/kompromatron | kompromatron/web.py | Python | mit | 177 | 0.00565
from kompromatron.core import app
from kompromatron.views.base import base
# app.register_blueprint(entities)
# app.register_blueprint(relations)
#app.register_blueprint(base)
HoboSci/OBIS-Capelin | 1Loop_on_date_python_script.py | Python | mit | 18,420 | 0.014327
import os
import platform
import subprocess
import datetime as dt
import time
import calendar
import sys
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# General Parameters - Tools - Proxy Network - Output Directory
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Path declaration to the motu-client.py opensource-TOOLS to connect to MOTU CopernicusMarineHub.
# Input the 'motu-client.py' absolute path. By default, usually in "Downloads" dir. after having followed the article on "python basic requirements":
# http://marine.copernicus.eu/faq/what-are-the-motu-and-python-requirements/?idpage=169
motu_cl = 'C:/Users\Sam\Downloads/motu-client-python-1.5.00-20180223190259664-bin.tar/motu-client-python/motu-client.py'
# File to log unsuccessful data extraction request(s)
logfile = 'logfile.txt'
# Copernicus Marine API Key - Login Credentials
username_cmems = 'XXX'
password_cmems = 'XXX'
# Proxy Configuration
# Please replace "False" by "True" if you use a proxy to connect to internet and fill in the below variables.
proxy_flag = False
proxy_server_url = "http://your_proxy_address"
proxy_server_port = "8080"
proxy_user_login = "your_proxy_login"
proxy_user_password = "your_proxy_password"
# Output directory name to store the Copernicus Marine data - (do not use whitespace character)
# If only 'copernicus' is given (not in absolute path), then it will be converted automatically into '$HOME/copernicus/'
local_storage_directory_name = 'glorys_data'
# - - - - - - - - - - - - - - - - - - - - - - - - -
# Product(s), Dataset(s) and MOTU server Parameters
# - - - - - - - - - - - - - - - - - - - - - - - - -
# CMEMS MOTU server ID & Service ID
# /!\ To find the information about the motu server name, you can simply rely on the "VIEW SCRIPT" button of the Copernicus Marine Online Catalogue (http://marine.copernicus.eu), using its DataExtraction WebInterface (also called GUI). It will generate the parameters based on your selection/extraction.
# Please refer to this article to understand how to call/trigger this webservice/feature to generate the right parameters : http://marine.copernicus.eu/faq/how-to-write-and-run-the-script-to-download-cmems-products-through-subset-or-direct-download-mechanisms/?idpage=169
# -m MOTU, --motu=MOTU the motu server to use (url)
# -s SERVICE_ID, --service-id=SERVICE_ID
# The service identifier (string)
motu_serv_id = "http://nrtcmems.mercator-ocean.fr/motu-web/Motu"
service_prod_id = "GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS"
# CMEMS Dataset ID and Variables
# Define a dict to get {file name(Type_): [variable(-v), dataset(-d)]}
# (more details on how to get these parameters here http://bit.ly/2cUe9dT) - dead link
# /!\ Same comment as above. Please check this article for other examples : http://marine.copernicus.eu/faq/can-you-give-a-few-examples-of-command-lines-to-download/?idpage=169
# I would also highly recommend you to check this one to get an in-depth understanding of how it works
# (/!\ all CMEMS products are NOT hosted by a single server - they are grouped by product family, and you can always rely on the "VIEW SCRIPT" button to get the right parameters)
# -v VARIABLE, --variable=VARIABLE
# The variable (list of strings)
# -d PRODUCT_ID, --product-id=PRODUCT_ID
# The product (data set) to download (string)
dict_id = {"Northward-Velocity_dailymean": \
["-v vo", "-d global-analysis-forecast-phy-001-024"],\
"Temperature_hourly": \
["-v sea_water_potential_temperature", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"]
}
# And I can already imagine your next question: What if I'd like to download several variables from different datasets?
# Well, Let's take an example then !
# Let's say that you want to download from the daily dataset global-analysis-forecast-phy-001-024, the salinity, the Sea Surface Height, and the Temperature.
# And you also want to download the same variables (except salinity which is not available) for the hourly dataset global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh.
# Then it will give us the following dictionary :
# (to use it, just uncomment the following lines - deleting the "#" at the beginning)
# dict_id = {"Salinity_daily_": \
# ["-v so", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_daily_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024"], \
# "Temperature_daily_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_hourly_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Temperature_hourly_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Eastward-Velocity_dailymean_": \
# ["-v uo", "-d global-analysis-forecast-phy-001-024"]
# }
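# Illustrative sketch (an assumption, not part of the original script): each
# dict_id entry is typically turned into one motu-client call in the main loop
# further below, combining the credentials, server/dataset IDs and geographic
# bounds defined in this file, roughly:
#
#   for file_prefix, (var_opt, dataset_opt) in dict_id.items():
#       cmd = " ".join([PYTHON, motu_cl,
#                       "-u", username_cmems, "-p", password_cmems,
#                       "-m", motu_serv_id, "-s", service_prod_id,
#                       var_opt, dataset_opt,
#                       "-x", xmin_longitude, "-X", xmax_longitude,
#                       "-y", ymin_latitude, "-Y", ymax_latitude,
#                       "-z", zmin_depth, "-Z", zmax_depth])
#       # date range, output directory/name and proxy options are appended as
#       # well; see the motu-client documentation for those flags.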
# - - - - - - - - - - - - - - - - - - - - - -
# Geographical Area Parameters and Timerange
# - - - - - - - - - - - - - - - - - - - - - -
# -y LATITUDE_MIN, --latitude-min=LATITUDE_MIN
# The min latitude (float in the interval [-90 ; 90])
# -Y LATITUDE_MAX, --latitude-max=LATITUDE_MAX
# The max latitude (float in the interval [-90 ; 90])
# -x LONGITUDE_MIN, --longitude-min=LONGITUDE_MIN
# The min longitude (float in the interval [-180 ; 180])
# -X LONGITUDE_MAX, --longitude-max=LONGITUDE_MAX
# The max longitude (float in the interval [-180 ; 180])
# -z DEPTH_MIN, --depth-min=DEPTH_MIN
# The min depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# -Z DEPTH_MAX, --depth-max=DEPTH_MAX
# The max depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# Area : x east-west longitude, y north-south latitude, z depth
xmin_longitude = "-45"
xmax_longitude = "-20"
ymin_latitude = "57"
ymax_latitude = "61"
zmin_depth = "0.494"
zmax_depth = "0.4942"
# Date - Timerange
yyyystart = 2007
mmstart = 01
yyyyend = 2007
mmend = 12
hhstart = " 12:00:00"
hhend = " 12:00:00"
dd = 1
# Output files
out_path= "C:\Users\Sam\Downloads\glorys_data"
pre_name= "TestPythonExtr_"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main Program
#
# Motu Client Call through Python Loop
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Specific comment For WINDOWS USER:
# If you're using this script for the first time, you
# shouldn't be worried by the following. Just save your
# script (ctrl + s), quit (alt + F4) and launch it
# (WinKey + R then input cmd then ENTER) by typing
# "C:\Python27\python script_name.py"
#
# For users, be careful if you have to modify the lines below.
# CMEMS Central Service Desk will be happy to help you
# either via email ([email protected])
# or via the CMEMS Forum (http://bit.ly/1L1Iy5f)
# Get PYTHON PATH depending on OS
if platform.system() == "Windows":
PYTHON = "C:/Python27/python.exe"
else:
PYTHON = "/usr/bin/python"
# Check motu-client.py file exists
if not os.path.exists(motu_cl):
print "\n[ERROR] Path to motu-client.py cannot be found: %s\n\n[INFO] Please correct value of 'motu_cl' variable."%motu_cl
print "\n\n[INFO] If you haven't downloaded the motu-client-python yet, get the latest version here:\nhttps://github.com/clstoulouse/motu-client-python/releases/latest\n"
sys.exit()
# Check if output directory is well formated and if it exists, otherwise create it
absolute_path_substring = ['/home/', 'C:\\']
if local_storage_directory_name[-1] != '/':
local_storage_directory_name = local_storage_directory_name + "/"
if not any(x in local_storage_directory_name for x in absolute_path_substring):
local_storage_directory_name = os.path.expanduser('~') + "/" + local_storage_directory_name
if not os.path.exists(local_storage_directory_name):
os.makedirs(local_storage_directory_name)
# Flags to let the ser
chrigu6/vocabulary | vocabulary/trainer/forms.py | Python | gpl-3.0 | 1,356 | 0.0059
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from trainer.models import Language
class AddWordForm(forms.Form):
language = forms.ModelChoiceField(queryset=Language.objects.all())
word = forms.CharField(required=True)
class CreateSetForm(forms.Form):
    name = forms.CharField(initial="")
class UserCreateForm(UserCreationForm):
email = forms.EmailField(required=True)
first_name = forms.CharField(required=True)
last_name = forms.CharField(required=True)
class Meta:
model = User
fields = ("username", "email", "first_name", "last_name", "password1", "password2")
def save(self, commit=True):
user = super(UserCreateForm,self).save(commit=False)
user.email = self.cleaned_data["email"]
user.name = self.cleaned_data["first_name"]
        user.prename = self.cleaned_data["last_name"]
if commit:
user.save()
return user
class LoginForm(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(widget=forms.PasswordInput())
class UploadFileForm(forms.Form):
language = forms.ModelChoiceField(label='Language', queryset=Language.objects.all(), required=True)
file = forms.FileField(required=True)
sillygod/my-travel-in-learning-python | proxy.py | Python | gpl-2.0 | 1,022 | 0.004892
import logging
from flask import (
Flask,
request,
Response
)
import requests
app = Flask(__name__)
@app.route('/<path:url>', methods=['GET', 'POST', 'PUT', 'PATCH'])
def proxy(url):
# extract the request info and change its destination
# how to deal with socketio
if url == "socket.io/":
target = request.base_url
else:
# target = f"http://localhost:80/{url}"
target = f"http://www.google.com/{url}"
data = request.data or request.form
logging.debug(f'url: {url}, target: {target}')
    truely_request = requests.Request(method=request.method, url=target,
                                      headers=request.headers, data=data,
                                      cookies=request.cookies)
resp = requests.Session().send(truely_request.prepare())
logging.debug(resp.content)
response = app.make_response((resp.content, resp.status_code, resp.headers.items()))
    for k, v in resp.cookies.items():
response.set_cookie(k, v)
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9999)
chenzeyuczy/keba | src/SysuLesson/wsgi.py | Python | gpl-2.0 | 397 | 0
"""
WSGI config for SysuLesson project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SysuLesson.settings")
application = get_wsgi_application()
anirudhagar13/PES-Portal | pes_portal/club/admin.py | Python | apache-2.0 | 548 | 0.010949
from django.contrib import admin
'''from tester.models import Club,Member,Signup,Event
class admin_club(admin.ModelAdmin):
list_display=["club_name"]
class admin_event(admin.ModelAdmin):
list_display=["event_name"]
class admin_student(admin.ModelAdmin):
list_display=["usn","name"]
class admin_member(admin.ModelAdmin):
list_display=["club_id","usn"]
admin.site.register(Club,admin_club)
admin.site.register(Member,admin_member)
admin.site.register(Signup,admin_student)
admin.site.register(Event,admin_event)
'''
syl20bnr/nupic | nupic/research/TP.py | Python | gpl-3.0 | 132,910 | 0.00793
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file TP.py
Temporal pooler implementation.
This is the Python implementation and is used as the base class for the C++
implementation.
"""
import copy
import cPickle as pickle
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.research.TrivialPredictor import TrivialPredictor
from nupic.support.consoleprinter import ConsolePrinterMixin
# Default verbosity while running unit tests
VERBOSITY = 0
# The current TP version used to track the checkpoint state.
TP_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
dtype = GetNTAReal()
class TP(ConsolePrinterMixin):
"""
Class implementing the temporal pooler algorithm as described in the
published Cortical Learning Algorithm documentation. The implementation here
attempts to closely match the pseudocode in the documentation. This
implementation does contain several additional bells and whistles such as
a column confidence measure.
@todo Document other constructor parameters.
@todo Have some higher level flags for fast learning, HiLo, Pooling, etc.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
doPooling=False,
segUpdateValidDuration=5,
burnIn=2,
collectStats=False,
seed=42,
verbosity=VERBOSITY,
checkSynapseConsistency=False, # for cpp only -- ignored
trivialPredictionMethods= '',
pamLength=1,
maxInfBacktrack=10,
maxLrnBacktrack=5,
maxAge=100000,
maxSeqLength=32,
maxSegmentsPerCell=-1,
maxSynapsesPerSegment=-1,
outputType='normal',
):
"""
Construct the TP
@param pamLength Number of time steps to remain in "Pay Attention Mode" after
we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do
not burst unpredicted columns during learning, which in turn
prevents us from falling into a previously learned sequence
for a while (until we run through another 'pamLength' steps).
                        The advantage of PAM mode is that it requires fewer
presentations to learn a set of sequences which share
elements. The disadvantage of PAM mode is that if a learned
                        sequence is immediately followed by a set of elements that
should be learned as a 2nd sequence, the first pamLength
elements of that sequence will not be learned as part of that
2nd sequence.
@param maxAge Controls global decay. Global decay will only decay segments
that have not been activated for maxAge iterations, and will
only do the global decay loop every maxAge iterations. The
default (maxAge=1) reverts to the behavior where global decay
is applied every iteration to every segment. Using maxAge > 1
can significantly speed up the TP when global decay is used.
@param maxSeqLength If not 0, we will never learn more than maxSeqLength inputs
in a row without starting over at start cells. This sets an
upper bound on the length of learned sequences and thus is
another means (besides maxAge and globalDecay) by which to
limit how much the TP tries to learn.
@param maxSegmentsPerCell The maximum number of segments allowed on a cell. This
is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and
maxAge must be set to 0. When this is used (> 0),
maxSynapsesPerSegment must also be > 0.
@param maxSynapsesPerSegment The maximum number of synapses allowed in a segment.
This is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and maxAge
must be set to 0. When this is used (> 0), maxSegmentsPerCell
must also be > 0.
@param outputType Can be one of the following: 'normal', 'activeState',
'activeState1CellPerCol'.
'normal': output the OR of the active and predicted state.
'activeState': output only the active state.
'activeState1CellPerCol': output only the active state, and at
most 1 cell/column. If more than 1 cell is active in a column,
the one with the highest confidence is sent up.
Default is 'normal'.
@param trivialPredictionMethods List (as string) of trivial predictions to compute alongside
the full TP. See TrivialPredictor.py for a list of allowed
methods.
@param doPooling If True, pooling is enabled. False is the default.
@param burnIn Used for evaluating the prediction score. Default is 2.
@param collectStats If True, collect training / inference stats. Default is
False.
"""
## @todo document
self.version = TP_VERSION
ConsolePrinterMixin.__init__(self, verbosity)
# Check arguments
assert pamLength > 0, "This implementation must have pamLength > 0"
# Fixed size CLA mode?
if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
assert (globalDecay == 0.0)
assert (maxAge == 0)
assert maxSynapsesPerSegment >= newSynapseCount, ("TP requires that "
"maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % (
maxSynapsesPerSegment, newSynapseCount))
# Seed random number generator
if seed >= 0:
      self._random = Random(seed)
else:
self._random = Random(numpy.random.randint(256))
# Store creation parameters
## @todo document
self.numberOfCols = numberOfCols
## @todo document
self.cellsPerColumn = cellsPerColumn
self._numberOfCells = numberOfCols * cellsPerColumn
## @todo document
self.initialPerm = numpy.float32(initialPerm)
## @todo document
self.connectedPerm = numpy.float32(connectedPerm)
## @todo document
    self.minThreshold = minThreshold
## @todo document
self.newSynapseCount = newSynapseCount
## @todo document
self.permanenceInc = numpy.float32(permanenceInc)
## @todo document
self.permanenceDec = numpy.float32(permanenceDec)
## @
CiscoSystems/avos | openstack_dashboard/dashboards/identity/users/tests.py | Python | apache-2.0 | 26,197 | 0
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:identity:users:index')
USER_CREATE_URL = reverse('horizon:identity:users:create')
USER_UPDATE_URL = reverse('horizon:identity:users:update', args=[1])
class UsersViewTests(test.BaseAdminViewTests):
def _get_default_domain(self):
domain = {"id": self.request.session.get('domain_context',
None),
"name": self.request.session.get('domain_context_name',
None)}
return api.base.APIDictWrapper(domain)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_stubs({api.keystone: ('user_list',)})
def test_index(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
api.keystone.user_list(IgnoreArg(),
domain=domain_id).AndReturn(users)
self.mox.ReplayAll()
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'identity/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertItemsEqual(user.domain_id, domain_id)
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
    @test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'add_tenant_user_role',
'tenant_list',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create_with_empty_email(self):
        user = self.users.get(id="5")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': "",
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
maateen/TextSuggestBangla | textsuggest/processors/command.py | Python | gpl-3.0 | 219 | 0.031963
import subprocess as sp
def matches(text):
return text.startswith('#')
def process(text):
text = text[1:]
    result = sp.check_output(text, shell=True).decode('utf-8').rstrip().replace('\\n', '\n')
return result
Moguri/prototype-nitrogen | game/main.py | Python | apache-2.0 | 1,902 | 0.001577
import os
import sys
from direct.showbase.ShowBase import ShowBase
import panda3d.core as p3d
import blenderpanda
import inputmapper
from nitrogen import gamestates
if hasattr(sys, 'frozen'):
APP_ROOT_DIR = os.path.dirname(sys.executable)
else:
APP_ROOT_DIR = os.path.dirname(__file__)
if not APP_ROOT_DIR:
print("empty app_root_dir")
sys.exit()
# prc files to load sorted by load order
CONFIG_ROOT_DIR = os.path.join(APP_ROOT_DIR, 'config')
CONFIG_FILES = [
os.path.join(CONFIG_ROOT_DIR, 'game.prc'),
    os.path.join(CONFIG_ROOT_DIR, 'user.prc'),
]
for config_file in CONFIG_FILES:
if os.path.exists(config_file):
print("Loading config file:", config_file)
config_file = p3d.Filename.from_os_specific(config_file)
p3d.load_prc_file(config_file)
else:
print("Could not find config file", config_file)
class GameApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
blenderpanda.init(self)
self.input_mapper = inputmapper.InputMapper(os.path.join(CONFIG_ROOT_DIR, 'input.conf'))
self.accept('quit', sys.exit)
self.disableMouse()
winprops = self.win.get_properties()
self.win.move_pointer(0, winprops.get_x_size() // 2, winprops.get_y_size() // 2)
winprops = p3d.WindowProperties()
winprops.set_mouse_mode(p3d.WindowProperties.M_confined)
self.win.request_properties(winprops)
self.current_state = gamestates.MainState()
def update_gamestate(task):
self.current_state.update(p3d.ClockObject.get_global_clock().get_dt())
return task.cont
self.taskMgr.add(update_gamestate, 'GameState')
def change_state(self, next_state):
self.current_state.cleanup()
self.current_state = next_state()
def main():
app = GameApp()
app.run()
if __name__ == '__main__':
main()
DavidHHShao/slack | tests/unit/http_client/test_raise_error.py | Python | mit | 3,437 | 0.005237
# Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import slack.http_client
from slack.exception import SlackError, \
InvalidAuthError, \
NotAuthedError, \
AccountInactiveError, \
ChannelNotFoundError, \
ChannelArchivedError, \
NotInChannelError, \
RateLimitedError
class TestRaiseErrorClient(unittest.TestCase):
def test_ok_response(self):
# does not raise error if response is ok
slack.http_client._raise_error_if_not_ok({ 'ok': True })
def test_invalid_auth(self):
self.assertRaises(InvalidAuthError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'invalid_auth' })
def test_not_authed(self):
self.assertRaises(NotAuthedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_authed' })
def test_account_inactive(self):
self.assertRaises(AccountInactiveError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'account_inactive' })
def test_channel_not_found(self):
self.assertRaises(ChannelNotFoundError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'channel_not_found' })
def test_is_archived(self):
self.assertRaises(ChannelArchivedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'is_archived' })
def test_not_in_channel(self):
self.assertRaises(NotInChannelError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_in_channel' })
def test_rate_limited(self):
self.assertRaises(RateLimitedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'rate_limited' })
def test_slack_error(self):
self.assertRaises(SlackError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'unknown_error' })
miguelarauj1o/8-Queens | eightqueens/__main__.py | Python | mit | 6,082 | 0.010209
from __future__ import division
import state
import time
import csv
import random
import sys
POPULATION_SIZE = 100
MAX_COLLISION = 28
VALID_ARGS = "emg"
class FitnessListener():
def __init__(self, qtd=0):
self._qtd = qtd
def log(self):
self._qtd += 1
def retrive_qtd(self):
return self._qtd
def reset(self):
self._qtd = 0
def copy(self):
return FitnessListener(self._qtd)
def choose_method(string):
if "roulette".startswith(string):
method = "roulette"
elif "tourney".startswith(string):
method = "tourney"
else:
sys.exit(string + " is not a valid population generation method.")
return method
def choose_generations(string):
try:
generations = int(string)
except ValueError:
sys.exit("Argument " + string + " is not an integer.\nThe argument provided with --generations must be an integer.")
else:
return generations
def make_config(arguments):
elitist = None
method = None
max_generations = None
mutation_rate = 0.8
    # flag to allow "invalid" arguments if they come
    # right after options that take their own argument
next_is_literal_argument = False
err = False
for index, arg in enumerate(arguments[1:]):
index += 1
if arg[:2] == "--":
argstr = arg[2:]
if argstr == "elitist":
elitist = True
elif argstr == "method":
if len(arguments) > index+1:
methodstr = arguments[index+1]
method = choose_method(methodstr)
next_is_literal_argument = True
else:
sys.exit("--method used, but no method specified for population generation")
elif argstr == "generations":
if len(arguments) > index+1:
genstr = arguments[index+1]
max_generations = choose_generations(genstr)
next_is_literal_argument = True
else:
sys.exit("--generations used, but no number of generations specified")
elif argstr == "mutation":
mutation_rate = arguments[index+1]
                next_is_literal_argument = True
else:
sys.exit("argument " + argstr + " is invalid")
elif arg[:1] == "-":
argstr = arg[1:]
err = False
for c in argstr:
if c not in VALID_ARGS:
print "Unknown command-line argument", c
err = True
if not err:
if 'e' in argstr:
elitist = True
if 'm' in argstr:
if 'm' not in argstr[:-1] and len(arguments) > index+1:
methodstr = arguments[index+1]
method = choose_method(methodstr)
next_is_literal_argument = True
elif 'm' in argstr[:-1]:
sys.exit("-m option must be immediately followed by method name")
else:
sys.exit("-m used, but no method specified for population generation")
if 'g' in argstr:
if 'g' not in argstr[:-1] and len(arguments) > index+1:
genstr = arguments[index+1]
max_generations = choose_generations(genstr)
next_is_literal_argument = True
elif 'g' in argstr[:-1]:
sys.exit("-g option must be immediately followed by number of generations")
else:
sys.exit("-g used, but no number of generations specified")
else:
sys.exit(1)
        # if the argument was not valid, raise an error,
        # unless the flag to accept "invalid" arguments is set
elif not next_is_literal_argument:
print "Unknown command-line argument", arg
err = True
        # but if the accept-invalid-argument flag was set,
        # we need to clear it again
else:
next_is_literal_argument = False
if err:
sys.exit(1)
else:
return elitist, method, max_generations, mutation_rate
def register_loop(population,generation,results_file):
maxfitness = max([x.fitness() for x in population])
print "Generation %d, Max fitness: %d" % (generation, max([x.fitness() for x in population]))
avgfitness = sum([x.fitness() for x in population])/len(population)
print "Average fitness:", avgfitness
results_file.writerow([generation, maxfitness, avgfitness])
if __name__ == '__main__':
random.seed(time.time())
generation = 1
listener = FitnessListener()
elitist, method, max_generations, mutation_rate = make_config(sys.argv)
population = [state.State(listener=listener, crossover_rate = 1.0, mutation_rate = mutation_rate) for x in range(POPULATION_SIZE)]
if elitist == None:
elitist = False
if method == None:
method = "roulette"
with open('results' + str(int(time.time())) + '.csv', 'w+') as csvfile:
results_file = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
results_file.writerow(['Generation', 'Max Fitness', 'Avg Fitness'])
while not MAX_COLLISION in [x.fitness() for x in population] and ((generation <= max_generations) if max_generations else True):
register_loop(population = population,generation = generation,results_file = results_file)
population = state.generateNextPopulation(listener=listener, population=population, n=POPULATION_SIZE, method=method, elitist=elitist)
generation += 1
register_loop(population = population,generation = generation,results_file = results_file)
for x in population:
if x.fitness() == MAX_COLLISION:
print x.state
|
kvchen/keffbot-py
|
plugins/event.py
|
Python
|
mit
| 87
| 0.011494
|
"""!ev
|
ent [num]: Displays the next upcoming H
|
@B event."""
__match__ = r"!event( .*)"
|
kyelewisstgc/EventMaster-Python
|
tests/test_unit.py
|
Python
|
mit
| 1,220
| 0.007377
|
# This doesn't work- not updated with eventmaster.py updates
# TODO: Fix This :)
# Import Libraries
import eventmaster
import time
import random
import sys
import unittest
import sys
class InputsTestCase(unittest.TestCase):
def setUp(self):
        self.s3 = E2S3.E2S3Switcher()
self.s3.set_verbose(0)
self.s3.set_CommsXML_IP("127.0.0.1")
self.s3.set_CommsXML_Port(9876)
if not self.s3.connect(): return -1
while self.s3.is_ready() != 1: time.sleep(1)
def test_set_valid_name_on_invalid_input(self):
test_str = "PYTEST-{0
|
!s}".format(random.randint(1,10))
self.assertRaises(ValueError, lambda: self.s3.get_input(99).set_Name(test_str))
def test_set_valid_name_on_valid_input(self):
test_str = "PYTEST-{0!s}".format(random.randint(1,10))
while(self.s3.has_been_processed(self.s3.get_input(0).set_Name(test_str))==0): time.sleep(1)
time.sleep(1)
self.assertEqual(test_str, self.s3.get_input(0).get_Name())
def test_set_invalid_name_on_valid_input(self):
MyObject = type('MyObject', (object,), {})
self.assertEqual(self.s3.get_input(0).set_Name(MyObject), None)
print unittest.main()
sys.exit()
|
lepklin/twitter-db-ui
|
removeDouble.py
|
Python
|
mit
| 1,784
| 0.019619
|
#!/usr/bin/python
import psycopg2
import sys
import pprint
import geocoder
def printProgress(iteration, total, prefix='', suffix='', decimals=2, barLength=100):
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
print("\n")
def main():
#Define our connection string
conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'"
# print the connection string we will use to connect
#print "Connecting to database\n ->%s" % (conn_string)
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect("dbname='twitterdb' user='test' host='localhost' password='test'")
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
#print "Connected!\n"
# execute our Query
cursor.execute("SELECT user_id FROM users2 ")
rows = cursor.fetchall()
i = 0
l = len(rows)
printProgress(i, l, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
for r in rows:
print(r[0])
cursor2 = conn.cursor()
cursor2.execute("delete from users2 where user_id=(%s) and friends_count!=(select max(friends_count) from users2 where user_id=(%s))",(str(r[0]),str(r[0]),))
#cursor3 = conn.cursor()
#rows2= cursor2.fetchall()
printProgress(i, l, prefix='Progress:', suffix='Complete', barLength=50)
i +=1
#337125576
conn.commit()
main()
|
saraghav/blue-box
|
TensorFlow/input_data.py
|
Python
|
apache-2.0
| 5,847
| 0.015735
|
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import urllib
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
    rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
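# Editor's illustrative note: dense_to_one_hot(numpy.array([0, 2]), num_classes=3)
# returns [[1., 0., 0.], [0., 0., 1.]].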
def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False):
if fake_data:
self._num_examples = 10000
else:
assert images.shape[0] == labels.shape[0], (
"images.shape: %s labels.shape: %s" % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1.0 for _ in xrange(784)]
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False):
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True)
data_sets.validation = DataSet([], [], fake_data=True)
data_sets.test = DataSet([], [], fake_data=True)
return data_sets
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
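# Illustrative usage (editor's sketch; the data directory name is arbitrary):
#   mnist = read_data_sets('MNIST_data', one_hot=True)
#   batch_xs, batch_ys = mnist.train.next_batch(100)  # flattened 28x28 images + one-hot labels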
|
j0gurt/ggrc-core
|
src/ggrc/converters/query_helper.py
|
Python
|
apache-2.0
| 33,268
| 0.006523
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module contains a class for handling request queries."""
# flake8: noqa
import collections
import datetime
import functools
import operator
import flask
import sqlalchemy as sa
from sqlalchemy.orm import undefer
from ggrc import db
from ggrc import models
from ggrc.login import is_creator
from ggrc.fulltext.mysql import MysqlRecordProperty as Record
from ggrc.models import inflector
from ggrc.models.reflection import AttributeInfo
from ggrc.models.relationship_helper import RelationshipHelper
from ggrc.models.custom_attribute_definition import CustomAttributeDefinition
from ggrc.models.custom_attribute_value import CustomAttributeValue
from ggrc.converters import get_exportables
from ggrc.rbac import context_query_filter
from ggrc.utils import query_helpers, benchmark, convert_date_format
from ggrc_basic_permissions import UserRole
class BadQueryException(Exception):
pass
# pylint: disable=too-few-public-methods
class QueryHelper(object):
"""Helper class for handling request queries
Primary use for this class is to get list of object ids for each object type
defined in the query. All objects must pass the query filters if they are
defined.
query object = [
{
object_name: search class name,
permissions: either read or update, if none are given it defaults to read
order_by: [
{
"name": the name of the field by which to do the sorting
"desc": optional; if True, invert the sorting order
}
]
limit: [from, to] - limit the result list to a slice result[from, to]
filters: {
relevant_filters:
these filters will return all ids of the "search class name" object
that are mapped to objects defined in the dictionary inside the list.
[ list of filters joined by OR expression
[ list of filters joined by AND expression
{
"object_name": class of relevant object,
"slugs": list of relevant object slugs,
optional and if exists will be converted into ids
"ids": list of relevant object ids
}
]
],
object_filters: {
TODO: allow filtering by title, description and other object fields
}
}
}
]
After the query is done (by `get_ids` method), the results are appended to
each query object:
query object with results = [
{
object_name: search class name,
(all other object query fields)
ids: [ list of filtered objects ids ]
}
]
The result fields may or may not be present in the resulting query depending
on the attributes of `get` method.
"""
def __init__(self, query, ca_disabled=False):
self.object_map = {o.__name__: o for o in models.all_models.all_models}
self.query = self._clean_query(query)
self.ca_disabled = ca_disabled
self._set_attr_name_map()
self._count = 0
def _set_attr_name_map(self):
""" build a map for attributes names and display names
Dict containing all display_name to attr_name mappings
for all objects used in the current query
Example:
{ Program: {"Program URL": "url", "Code": "slug", ...} ...}
"""
self.attr_name_map = {}
for object_query in self.query:
object_name = object_query["object_name"]
object_class = self.object_map[object_name]
aliases = AttributeInfo.gather_aliases(object_class)
self.attr_name_map[object_class] = {}
for key, value in aliases.items():
filter_by = None
if isinstance(value, dict):
filter_name = value.get("filter_by", None)
if filter_name is not None:
filter_by = getattr(object_class, filter_name, None)
name = value["display_name"]
else:
name = value
if name:
self.attr_name_map[object_class][name.lower()] = (key.lower(),
filter_by)
def _clean_query(self, query):
""" sanitize the query object """
for object_query in query:
filters = object_query.get("filters", {}).get("expression")
self._clean_filters(filters)
|
self._macro_expand_object_query(object_query)
return query
def _clean_filters(self, expression):
"""Prepare the filter expression for building the query."""
if not expression or not isinstance(expression, dict):
return
slugs = expression.get("slugs")
if slugs:
ids = expression.get("ids", [])
ids.extend(self._slugs_to_ids(expression["object_name"], slugs))
expression["ids"] = ids
try:
expression["ids"] = [int(id_) for id_ in expressi
|
on.get("ids", [])]
except ValueError as error:
# catch missing relevant filter (undefined id)
if expression.get("op", {}).get("name", "") == "relevant":
raise BadQueryException(u"Invalid relevant filter for {}".format(
expression.get("object_name", "")))
raise error
self._clean_filters(expression.get("left"))
self._clean_filters(expression.get("right"))
def _expression_keys(self, exp):
"""Return the list of keys specified in the expression."""
operator_name = exp.get("op", {}).get("name", None)
if operator_name in ["AND", "OR"]:
return self._expression_keys(exp["left"]).union(
self._expression_keys(exp["right"]))
left = exp.get("left", None)
if left is not None and isinstance(left, collections.Hashable):
return set([left])
else:
return set()
def _macro_expand_object_query(self, object_query):
"""Expand object query."""
def expand_task_dates(exp):
"""Parse task dates from the specified expression."""
if not isinstance(exp, dict) or "op" not in exp:
return
operator_name = exp["op"]["name"]
if operator_name in ["AND", "OR"]:
expand_task_dates(exp["left"])
expand_task_dates(exp["right"])
elif isinstance(exp["left"], (str, unicode)):
key = exp["left"]
if key in ["start", "end"]:
parts = exp["right"].split("/")
if len(parts) == 3:
try:
month, day, year = [int(part) for part in parts]
except Exception:
raise BadQueryException(
"Date must consist of numbers")
exp["left"] = key + "_date"
exp["right"] = datetime.date(year, month, day)
elif len(parts) == 2:
month, day = parts
exp["op"] = {"name": u"AND"}
exp["left"] = {
"op": {"name": operator_name},
"left": "relative_" + key + "_month",
"right": month,
}
exp["right"] = {
"op": {"name": operator_name},
"left": "relative_" + key + "_day",
"right": day,
}
elif len(parts) == 1:
exp["left"] = "relative_" + key + "_day"
else:
raise BadQueryException(u"Field {} should be a date of one of the"
u" following forms: DD, MM/DD, MM/DD/YYYY"
.format(key))
if object_query["object_name"] == "TaskGroupTask":
filters = object_query.get("filters")
if filters is not None:
exp = filters["expression"]
keys = self._expression_keys(exp)
if "start" in keys or "end" in keys:
expand_task_dates(exp)
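  # Editor's note (illustrative): for a TaskGroupTask query, a filter such as
  #   {"op": {"name": "="}, "left": "start", "right": "05/20/2016"}
  # is rewritten by expand_task_dates into
  #   {"op": {"name": "="}, "left": "start_date", "right": datetime.date(2016, 5, 20)},
  # while a yearless "05/20" becomes an AND of relative_start_month == "05"
  # and relative_start_day == "20".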
def get_ids(self):
"""Get a list of filtered object IDs.
self.query should contain a list of queries for different objects which
will get evaluated and turned into a list of object IDs.
Returns:
list of dicts: same query as the input with all ids that match the filter
"""
for object_query in self.query:
objects = self._get_objects(object_query)
object_query["ids"] = [o.id for o in objects]
return self.query
@staticmethod
def _get_type_query(model, permission_type):
"""Filter
|
BaReinhard/Hacktoberfest-Data-Structure-and-Algorithms
|
data_structures/binary_tree/python/binary_tree.py
|
Python
|
gpl-3.0
| 694
| 0.048991
|
class BinaryTree:
def __init__(self,rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
def insertLeft(self,newNode):
        if self.leftChild == None:
self.leftChild = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
t.leftChild = self.leftChild
self.leftChild = t
def insertRight(self,newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
            t.rightChild = self.rightChild
self.rightChild = t
def getRightChild(self):
return self.rightChild
def getLeftChild(self):
return self.leftChild
def setRootVal(self,obj):
self.key = obj
def getRootVal(self):
return self.key
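# Brief usage sketch (editor's addition, illustrative only): inserting over an
# existing child pushes the old subtree one level down, as the methods above show.
if __name__ == '__main__':
    tree = BinaryTree('a')
    tree.insertLeft('b')
    tree.insertLeft('c')   # 'c' becomes the left child; 'b' is pushed below it
    tree.insertRight('d')
    print(tree.getLeftChild().getRootVal())                 # c
    print(tree.getLeftChild().getLeftChild().getRootVal())  # b
    print(tree.getRightChild().getRootVal())                # d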
|
umd-mith/bagcat
|
setup.py
|
Python
|
mit
| 647
| 0.027821
|
from sys import version, exit
from setuptools import setup
requirements = open("requirements.txt").read().split()
with open("README.md") as f:
long_description = f.read()
setup(
name = 'bagcat',
version = '0.0.6',
url = 'https://github.com/umd-mith/bagcat/',
author = 'Ed Summers',
author_email = '[email protected]',
py_modules = ['bagcat',],
install_requires = requirements,
description = "A command line utility for managing BagIt packages in Amazon S3",
long_
|
description=long_description,
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["bagcat=bagcat:main"]},
)
|
Nzaga/home-assistant
|
tests/components/automation/test_init.py
|
Python
|
mit
| 2,584
| 0
|
"""
tests.test_component_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.automation as automation
import homeassistant.components.automation.event as event
from homeassistant.const import CONF_PLATFORM, ATTR_ENTITY_ID
class TestAutomationEvent(unittest.TestCase):
""" Test the event automation. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_fails_if_unknown_platform(self):
self.assertFalse(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'i_do_not_exist'
}
}))
def test_service_data_not_a_dict(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_DATA: 100
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_service_specify_data(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_DATA: {'some': 'data'}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
        self.assertEqual('data', self.calls[0].data['some'])
def test_service_specify_entity_id(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
                event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_ENTITY_ID: 'hello.world'
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world'], self.calls[0].data[ATTR_ENTITY_ID])
|
AustinTSchaffer/DailyProgrammer
|
LeetCode/FindMinimumInRotatedSortedArray2/app.py
|
Python
|
mit
| 277
| 0.00361
|
from typing import List
class Solution:
def findMin(self, nums: List[int]) -> int:
first = nums[0]
# Iterate until the next number is less than current.
for num in nums:
if num < first:
return num
return first
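# Quick sanity checks (editor's illustrative addition, not part of the original
# submission); they exercise the duplicate-heavy rotated cases this variant allows.
if __name__ == "__main__":
    assert Solution().findMin([2, 2, 2, 0, 1]) == 0
    assert Solution().findMin([4, 5, 6, 7, 0, 1, 4]) == 0
    assert Solution().findMin([1, 1, 1]) == 1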
| |
Francis-Liu/animated-broccoli
|
nova/db/sqlalchemy/api.py
|
Python
|
apache-2.0
| 240,590
| 0.001176
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import threading
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options as oslo_db_options
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import and_
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
api_db_opts = [
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the Nova API database.',
secret=True),
    cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
                    'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('max_pool_size',
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(oslo_db_options.database_opts, 'database')
CONF.register_opts(api_db_opts, group='api_database')
LOG = logging.getLogger(__name__)
_ENGINE_FACADE = {'main': None, 'api': None}
_MAIN_FACADE = 'main'
_API_FACADE = 'api'
_LOCK = threading.Lock()
def _create_facade(conf_group):
# NOTE(dheeraj): This fragment is copied from oslo.db
return db_session.EngineFacade(
sql_connection=conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
autocommit=True,
expire_on_commit=False,
mysql_sql_mode=conf_group.mysql_sql_mode,
idle_timeout=conf_group.idle_timeout,
connection_debug=conf_group.connection_debug,
max_pool_size=conf_group.max_pool_size,
max_overflow=conf_group.max_overflow,
pool_timeout=conf_group.pool_timeout,
sqlite_synchronous=conf_group.sqlite_synchronous,
connection_trace=conf_group.connection_trace,
max_retries=conf_group.max_retries,
retry_interval=conf_group.retry_interval)
def _create_facade_lazily(facade, conf_group):
global _LOCK, _ENGINE_FACADE
if _ENGINE_FACADE[facade] is None:
with _LOCK:
if _ENGINE_FACADE[facade] is None:
_ENGINE_FACADE[facade] = _create_facade(conf_group)
return _ENGINE_FACADE[facade]
def get_engine(use_slave=False):
conf_group = CONF.database
facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
return facade.get_engine(use_slave=use_slave)
def get_api_engine():
conf_group = CONF.api_database
facade = _create_facade_lazily(_API_FACADE, conf_group)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
conf_group = CONF.database
facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
return facade.get_session(use_slave=use_slave, **kwargs)
def get_api_session(**kwargs):
conf_group = CONF.api_database
facade = _create_facade_lazily(_API_FACADE, conf_group)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
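# Illustrative usage (editor's note, not from the original module): the decorator
# wraps DB API functions whose first positional argument is the request context,
# e.g.
#
#     @require_context
#     def instance_get_all_by_host(context, host):
#         ...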
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use
|
SysTheron/adhocracy
|
src/adhocracy/tests/test_pep8.py
|
Python
|
agpl-3.0
| 2,111
| 0
|
#!/usr/bin/env python
import os.path
import unittest
import pep8
SRC_PATH = os.path.dirname(os.path.dirname(__file__))
EXCLUDE = ['.svn', 'CVS', '.bzr', '.hg', '.git',
'Paste-1.7.5.1-py2.6.egg', 'PasteDeploy-1.5.0-py2.6.egg', 'data']
class AdhocracyStyleGuide(pep8.StyleGuide):
def ignore_code(self, code):
IGNORED = [
'E111', # indentation is not a multiple of four
'E121', # continuation line indentation is not a multiple of four
'E122', # continuation line missing indentation or outdented
'E123', # closing bracket does not match indentation of opening
# bracket
'E124', # closing bracket does not match visual indentation
            'E126', # continuation line over-indented for hanging indent
            'E127', # continuation line over-indented for visual indent
            'E128', # continuation line under-indented for visual indent
'E225', # missing whitespace around operator
'E226', # missing optional whitespace around operator
            'E231', # missing whitespace after ','
            'E241', # multiple spaces after ','
'E251', # no spaces around keyword
'E261', # at least two spaces before inline comment
'E301', # expected 1 blank line
'E302', # expected 2 blank lines
'E303', # too many blank lines
'E501', # line too long
'E701', # multiple statements on one line
'E702', # multiple statements on one line
            'E711', # comparison to None should be 'if cond is None:'
            'E712', # comparison to True should be 'if cond is True:' or
# 'if cond:'
'W291', # trailing whitespace
'W292', # no newline at end of file
'W293', # blank line contains whitespace
'W391', # blank line at end of file
]
return code in IGNORED
class TestPep8(unittest.TestCase):
def test_pep8(self):
sg = AdhocracyStyleGuide(exclude=EXCLUDE)
sg.input_dir(SRC_PATH)
self.assertEqual(sg.options.report.get_count(), 0)
|
dvklopfenstein/PrincetonAlgorithms
|
py/AlgsSedgewickWayne/WeightedQuickUnionUF.py
|
Python
|
gpl-2.0
| 16,078
| 0.003483
|
"""Weighted Quick Union Algorithm takes steps to avoid tall trees."""
from AlgsSedgewickWayne.BaseComp import BaseComp
class WeightedQuickUnionUF(BaseComp):
""" UNION FIND: Weighted Quick-union [lazy approach] to avoid tall trees."""
def __init__(self, N): # $ = N
"""Initialize union-find data structure w/N objects (0 to N-1)."""
super(WeightedQuickUnionUF, self).__init__("WeightedQuickUnionUF")
    self.ID = range(N) # Set id of each object to itself.
# Keep track of SIZE(# objects in tree) of each tree rooted at i
self.SZ = [1]*N # Needed to determine which tree is smaller/bigger
def _root(self, i):
"""Chase parent pointers until reach root."""
d = 0 # Used for informative prints for educational purposes
while i != self.ID[i]: # depth of i array accesses
i = self.ID[i]
d += 1
return BaseComp.NtRoot(rootnode=i, depth=d)
def connected(self, p, q): # $ = lg N
"""Return if p and q are in the same connected component (i.e. have the same root)."""
return self._root(p).rootnode == self._root(q).rootnode # Runs depth of p & q array accesses
def union(self, p, q): # $ = lg N
"""Add connection between p and q."""
# Runs Depth of p and q array accesses...
p_root = self._root(p).rootnode
q_root = self._root(q).rootnode
if p_root == q_root:
return
# IMPROVEMENT #1: Modification to Quick-Union to make it weighted: 4:03
# Balance trees by linking root of smaller tree to root of larger tree
# Modified quick-union:
# * Link root of smaller tree to root of larger tree.
# * Update the SZ[] array.
# Each union involves changing only one array entry
if self.SZ[p_root] < self.SZ[q_root]: # Make ID[p_root] a child of q_root
self.ID[p_root] = q_root # link root of smaller tree(p_root) to root of larger tree(q_root)
self.SZ[q_root] += self.SZ[p_root] # Larger tree size increases
else: # Make ID[q_root] a child of p_root
self.ID[q_root] = p_root # link root of smaller tree(q_root) to root of larger tree(p_root)
self.SZ[p_root] += self.SZ[q_root]
def __str__(self):
"""Print the size vector as well as the ID vector."""
return '\n'.join([
super(WeightedQuickUnionUF, self).__str__(),
"siz: " + ' '.join('{SZ:>2}'.format(SZ=e) for e in self.SZ)])
# algorithm init union find
# ----------- ---- ----- ----
# quick-find N N 1
# quick-union N N* N <- worst case, if tree is tall
# weighted QU N lg N lg N
#--------------------------------------------------------------------------
# Lecture Week 1 Union-Find: Dynamic Connectivity (10:22)
#--------------------------------------------------------------------------
# 00:55 STEPS TO DEVELOPING A USABLE ALGORITHM:
# * Model the problem.
# * Find an algorithm to solve it.
# * Fast enough? Fits in memory?
# * If not, figure out why.
# * Find a way to address the problem.
# * Iterate until satisfied. (Find a new algorithm)
#
# union(4, 3) 0 1--2 3--4
# union(3, 8) | |
# union(6, 5) 5--6 7 8 9
# union(4, 4)
# union(2, 1)
# connected(0, 7) NO
# connected(8, 9) Yes
#
# union(5, 0) 0--1--2 3--4
# union(7, 2) | | | | |
# union(6, 1) 5--6 7 8 9
# union(1, 0)
# connected(0, 7) Yes
# DYNAMIC CONNECTIVITY APPLICATIONS: (04:50) Week 1 Lecture "Dynamic Connectivity(1:22)
# * Pixels in a digital photo
# * Computers in a network.
# * Friends in a social network.
# * Transistors in a computer chip.
# * Elements in a mathematical set.
# * Variable names in Fortran program.
# * Metallic sites in a composite system.
# 04:51 WHEN PROGRAMMING, CONVENIENT TO NAME OBJECTS 0 TO N-1:
# * Use integers as array index.
# * Suppress details not relevant to union-find.
# Can use symbol table to translate from site names to integers:
# Stay tuned (Chapter 3)
#
# 05:33 MODELING THE CONNECTIONS
# We assume "is connected to" is an **equivalence relation**:
# * **Reflexive**: p is connected to p
# * **Symmetric**: If p is connect to q, then q is connected to p.
# * **Transitive**: If p is connected to q and q is connected to r, then p is connected to r.
# 06:17 CONNECTED COMPONENTS
# Maximal set of objects that are mutually connected.
#
# 0 1 2-3
# / |/|
# 4-5 6 7
#
# 3 Connected Components: {0} {1 4 5} {2 3 6 7}
#
# PROPERTY: Any two objects in the component are connected,
# and there is no object outside that is connected to those objects
# 07:53 Union-find data type (API)
# **Goal:** Design efficient data structure for union-find
# * Number of objects N can be huge.
# * Number of operations(e.g. union, connected) M can be huge.
# * Find queries and union commands may be intermixed.
#
# public class UF
# UP(int N) # init union-find data structure w/N objects(0 to N-1)
# void union(int p, int q) # Add connection between p and q
# boolean connected(int p, int q) # are p and q in the same component
# 10:15 QUESTION: How many connected components result after performing the
# following sequence of union operations on a set of 10 items?
#
# 1-2 3-4 5-6 7-8 2-8 0-5 1-9
#
# ANSWER: 3; { 1 2 7 8 9 }, {3 4}, AND {0 5 6}
#
# 0 1--2 3--4
# | \
# 5--6 7--8--9
#
# UNION-FIND APPLICATIONS: (00:27) Week 1 Lecture "Union-Find Applications" (1:22)
# * Percolation
# * Games (Go, Hex)
# * Dynamic connectivity
# * Least common ancestor
# * Equivalence of finite state automata
# * Hoshen-Kopelman algorithm in physics.
# * Hinley-Milner polymorphic type inference.
# * Kruskal's minimum spanning tree algorithm.
# * Compiling equivalence statements in Fortran.
# * Morphological attribute openings and closings.
# * Matlab's bwlabel() function in image processing.
###########################################################################
# Lecture Week 1 Quick-Union Improvements (13:02)
###########################################################################
#
# 00:22 IMPROVEMENT 1: WEIGHTING
#
# WEIGHTED QUICK-UNION.
# * Modify quick-union to avoid tall trees.
# * Keep track of size of each tree (number of objects).
# * Balance by linking root of smaller tree to root of larger tree.
# reasonable alternatives: union by height or "rank"
# 01:21 WEIGHTED QUICK-UNION DEMO
# ------------------------------
# i 0 1 2 3 4 5 6 7 8 9
# INI: id[] 0 1 2 3 4 5 6 7 8 9
#
# 0 1 2 3 4 5 6 7 8 9
#
# 03:21 -- union(4, 3) --------
# WAS: id[] 0 1 2 3 4 5 6 7 8 9
# NOW: id[] 0 1 2 4 4 5 6 7 8 9
# . . . X . . . . . .
#
# 0 1 2 4 5 6 7 8 9
# |
# 3
#
#
# 01:45 -- union(3, 8) --------
# WAS: id[] 0 1 2 4 4 5 6 7 8 9
# NOW: id[] 0 1 2 4 4 5 6 7 4 9
# . . . . . . . . X .
#
# 0 1 2 4 5 6 7 9
# / \
# 3 8
#
#
# 01:58 -- union(6, 5) --------
# WAS: id[] 0 1 2 4 4 5 6 7 4 9
# NOW: id[] 0 1 2 4 4 6 6 7 4 9
# . . . . . X . . . .
#
# 0 1 2 4 6 7 9
# / \ |
# 3 8 5
#
# 02:04 -- union(9, 4) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 9
# NOW: id[] 0 1 2 4 4 6 6 7 4 4
# . . . . . . . . . X
#
# 0 1 2 4 6 7
# /|\ |
# 3 8 9 5
#
#
# 02:12 -- union(2, 1) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 4
# NOW: id[] 0 2 2 4 4 6 6 7 4 4
# . X . . . . . . . .
#
# 0 2 4 6 7
# | /|\ |
# 1 3 8 9 5
#
#
# 02:17 -- union(5, 0) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 4
# NOW: id[] 6 2 2 4 4 6 6 7 4 4
# X . . . . . . . . .
#
# 2 4 6 7
# | /|\ / \
# 1 3 8 9 0 5
#
#
# 02:29 -- union(7, 2) --------
# WAS: id[] 6 2 2 4 4 6 6 7 4 4
# NOW: id[] 6 2 2 4 4 6 6 2 4 4
# . . . . . . . X . .
#
# 2 4 6
# / \ /|\ / \
# 1 7 3 8 9 0 5
#
#
# 02:37 -- union(6, 1) --------
# WAS: id[] 6 2 2 4 4 6 6 2 4 4
# NOW: id[] 6 2 6 4 4 6 6 2 4 4
# . . X . . . . . . .
#
# 2 4 6
# / \ /|\ /|\
# 1 7 3 8 9 0 2 5
# / \
# 1 7
#
#
# 02:37 -- union(6, 1) --------
# WAS: id[]
|
btrent/knave
|
pychess/Utils/Rating.py
|
Python
|
gpl-3.0
| 2,135
| 0.007026
|
from pychess.Utils.const import *
class Rating ():
def __init__(self, ratingtype, elo, deviation=DEVIATION_NONE, wins=0,
losses=0, draws=0, bestElo=0, bestTime=0):
self.type = ratingtype
for v in (elo, deviation, wins, losses, draws, bestElo, bestTime):
assert v == None or type(v) == int, v
self.elo = elo
self.deviation = deviation
self.wins = wins
self.losses = losses
self.draws = draws
self.bestElo = bestElo
self.bestTime = bestTime
def get_elo (self):
return self._elo
def set_elo (self, elo):
self._elo = elo
def __repr__ (self):
r = "type=%s, elo=%s" % (self.type, self.elo)
if self.deviation != None:
r += ", deviation=%s" % str(self.deviation)
if self.wins > 0:
r += ", wins=%s" % str(self.wins)
if self.losses > 0:
r += ", losses=%s" % str(self.losses)
if self.draws > 0:
r += ", draws=%s" % str(self.draws)
if self.bestElo > 0:
r += ", bestElo=%s" % str(self.bestElo)
if self.bestTime > 0:
r += ", bestTime=%s" % str(self.bestTime)
return r
def copy (self):
        return Rating(self.type, self.elo, deviation=self.deviation,
                      wins=self.wins, losses=self.losses, draws=self.draws,
                      bestElo=self.bestElo, bestTime=self.bestTime)
def update (self, rating):
if self.type != rating.type:
raise TypeError
elif self.elo != rating.elo:
self.elo = rating.elo
elif self.deviation != rating.deviation:
self.deviation = rating.deviation
elif self.wins != rating.wins:
self.wins = rating.wins
elif self.losses != rating.losses:
self.losses = rating.losses
elif self.draws != rating.draws:
self.draws = rating.draws
elif self.bestElo != rating.bestElo:
self.bestElo = rating.bestElo
elif self.bestTime != rating.bestTime:
self.bestTime = rating.bestTime
|
jtovar2/demo_app
|
backend/resources/org_api.py
|
Python
|
mit
| 2,570
| 0.003113
|
from flask import Flask, request, abort
import json
import ndb_util
import model
from google.appengine.api import users
from google.appengine.ext import ndb
from flask_restful import Resource
#TODO auth stuff
class OrganizationApi(Resource):
def get(self, id=None):
id = str(id)
if id is None:
print "soo id is None"
abort(401)
org_key = ndb.Key('Organization', id)
org = org_key.get()
if org is None:
print 'org doesnt exists'
abort(401)
client_id = users.get_current_user().user_id()
        # maybe the client that's making the HTTP request is a user that works for the org
user_key = ndb.Key('User', client_id)
if client_id != id and user_key not in org.workers:
abort(401)
print str(type(org.workers)) + ' ' + str(org.workers) + ' ' + str(user_key)
return org.to_json()
def put(self, id=None):
id = str(id)
client_id = users.get_current_user().user_id()
if id is None or client_id != id:
print id + ' ' + client_id
print "first one"
abort(401)
org_key = ndb.Key('Organization', id)
org = org_key.get()
print org
if org is None:
print "second one"
abort(401)
body = request.get_json(force=True)
body['id'] = id
if body['workers'] > 0:
body['workers'] = self._generate_kind_keys(body['workers'], 'User')
org = org.entity_from_dict(body)
if org is False:
print "third one"
abort(401)
else:
key = org.put()
print key
return org.to_json()
def post(self):
body = request.get_json(force=True)
body['id'] = users.get_current_user().user_id()
|
org_key = ndb.Key('Organization', body['id'])
if org_key.get() != None:
abort(401)
org = model.Organization()
|
org = org.entity_from_dict(body)
print org
if org is False:
            abort(401)
else:
org.put()
return org.to_json()
def delete(self,id=None):
id = str(id)
client_id = users.get_current_user().user_id()
if id is None or client_id != id:
abort(401)
org_key = ndb.Key('Organization', id)
org_key.delete()
return '', 200
def _generate_kind_keys(self, ids, kind):
keys = []
for id in ids:
keys.append(ndb.Key(kind, id))
return keys
|
oaubert/advene
|
lib/advene/gui/edit/transcribe.py
|
Python
|
gpl-2.0
| 51,178
| 0.007386
|
#
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <[email protected]>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Transcription view.
"""
import logging
logger = logging.getLogger(__name__)
import sys
import re
import os
import operator
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
try:
from gi.repository import GtkSource
except ImportError:
GtkSource=None
import urllib.request, urllib.parse, urllib.error
import advene.core.config as config
# Advene part
from advene.model.package import Package
from advene.model.schema import AnnotationType
import advene.util.importer
import advene.util.helper as helper
from advene.util.tools import unescape_string, is_uri
from gettext import gettext as _
from advene.gui.views import AdhocView
from advene.gui.util import dialog, get_pixmap_button, get_small_stock_button, name2color
from advene.gui.util import decode_drop_parameters
from advene.gui.edit.properties import EditWidget
from advene.gui.util.completer import Completer
from advene.gui.widget import TimestampRepresentation
name="Note-taking view plugin"
def register(controller):
controller.register_viewclass(TranscriptionEdit)
class TranscriptionImporter(advene.util.importer.GenericImporter):
"""Transcription importer.
"""
def __init__(self, transcription_edit=None, **kw):
super(TranscriptionImporter, self).__init__(**kw)
self.transcription_edit=transcription_edit
self.name = _("Transcription importer")
def process_file(self, filename):
if filename != 'transcription':
return None
if self.package is None:
self.init_package()
self.convert(self.transcription_edit.parse_transcription())
return self.package
class TranscriptionEdit(AdhocView):
view_name = _("Note taking")
view_id = 'transcribe'
tooltip = _("Take notes on the fly as a timestamped transcription")
def __init__ (self, controller=None, parameters=None, filename=None):
super(TranscriptionEdit, self).__init__(controller=controller)
self.close_on_package_load = False
self.contextual_actions = (
(_("Save view"), self.save_view),
(_("Save default options"), self.save_default_options),
)
self.controller=controller
self.package=controller.package
self.sourcefile=None
self.empty_re = re.compile(r'^\s*$')
self.options = {
'timestamp': True, # _("If checked, click inserts timestamp marks"))
'play-on-scroll': False,
'empty-annotations': True, # _("Do not generate annotations for empty text"))
'delay': config.data.reaction_time,
            # Marks will be automatically inserted if no keypress occurred in the 3 previous seconds.
'automatic-mark-insertion-delay': 1500,
'insert-on-double-click': True,
'insert-on-single-click': False,
'mark-prefix': "",
'mark-suffix': "",
'autoscroll': True,
'autoinsert': True,
'snapshot-size': 32,
'font-size': 0,
'annotation-type-id': None,
}
self.colors = {
'default': name2color('lightblue'),
'ignore': name2color('tomato'),
'current': name2color('green'),
}
self.marks = []
self.current_mark = None
opt, arg = self.load_parameters(parameters)
self.options.update(opt)
self.button_height=20
# When modifying an offset with Control+Scroll, store the last value.
# If play-on-scroll, then set the destination upon Control release
self.timestamp_play = None
self.widget=self.build_widget()
self.update_font_size()
if filename is not None:
self.load_transcription(filename=filename)
for n, v in arg:
if n == 'text':
self.load_transcription(buffer=v)
def get_element_height(self, element):
return self.button_height
def get_save_arguments(self):
arguments = [ ('text', "".join(self.generate_transcription())) ]
return self.options, arguments
def edit_preferences(self, *p):
cache=dict(self.options)
ew=EditWidget(cache.__setitem__, cache.get)
ew.set_name(_("Preferences"))
ew.add_checkbox(_("Timestamp"), "timestamp", _("Click inserts timestamp marks"))
ew.add_checkbox(_("Insert on double-click"), 'insert-on-double-click', _("A double click inserts the mark"))
ew.add_checkbox(_("Insert on single-click"), 'insert-on-single-click', _("A single click inserts the mark"))
ew.add_entry(_("Mark prefix"), 'mark-prefix', _("Text to insert before a mark (use \\n for newline)"))
ew.add_entry(_("Mark suffix"), 'mark-suffix', _("Text to insert after a mark (use \\n for newline)"))
ew.add_checkbox(_("Play on scroll"), "play-on-scroll", _("Play the new position upon timestamp modification"))
ew.add_checkbox(_("Generate empty annotations"), "empty-annotations", _("If checked, generate annotations for empty text"))
ew.add_spin(_("Reaction time"), "delay", _("Reaction time (substracted from current player time, except when paused.)"), -5000, 5000)
ew.add_checkbox(_("Auto-insert"), "autoinsert", _("Automatic timestamp mark insertion"))
ew.add_spin(_("Automatic insertion delay"), 'automatic-mark-insertion-delay', _("If autoinsert is active, timestamp marks will be automatically inserted when text is entered after no interaction since this delay (in ms).\n1000 is typically a good value."), 0, 100000)
ew.add_spin(_("Font size"), "font-size", _("Font size for text (0 for standard size)"), 0, 48)
res=ew.popup()
if res:
if cache['font-size'] != self.options['font-size']:
# Font-size was changed. Update the textview.
self.update_font_size(cache['font-size'])
self.options.update(cache)
return True
|
def update_font_size(self, size=None):
if size is None:
size=self.options['font-size']
if size == 0:
# Get the default value from a temporary textview
t=Gtk.TextView()
            size=int(t.get_pango_context().get_font_description().get_size() / Pango.SCALE)
del t
f=self.textview.get_pango_context().get_font_description()
f.set_size(size * Pango.SCALE)
self.textview.modify_font(f)
def show_searchbox(self, *p):
self.searchbox.show()
self.searchbox.entry.grab_focus()
return True
def highlight_search_forward(self, searched):
"""Highlight with the searched_string tag the given string.
"""
b=self.textview.get_buffer()
begin, end=b.get_bounds()
        # Remove searched_string tag occurrences that may be left from
# a previous invocation
b.remove_tag_by_name("searched_string", begin, end)
finished=False
while not finished:
res=begin.forward_search(searched, Gtk.TextSearchFlags.TEXT_ONLY)
if not res:
finished=True
else:
matchStart, matchEnd = res
b.apply_tag_by_name("searched_string", matchStart, matchEnd)
begin=matchEnd
def textview_drag_received(self, wid
|
MauricioAlmeida/maoaberta
|
maoaberta/organizations/models.py
|
Python
|
gpl-2.0
| 2,083
| 0.00096
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from projects.models import Project
class Necessity(models.Model):
"""
Item or service that an organization regularly needs
"""
name = models.CharField(verbose_name=_('Name'), max_length=20)
satisfied = models.BooleanField(verbose_name=_('Satisfied'), default=False)
def __repr__(self):
return '<Necessity({!r}, satisfied={!r})>'.format(self.name, self.satisfied)
def __str__(self):
return self.name
class Organization(models.Model):
name = models.CharField(
max_length=64, verbose_name=_('Name'), help_text=_('Organization name')
)
description = models.TextField(
verbose_name=_('Description'), help_text=_('Organization description')
)
photo = models.ImageField(verbose_name=_('Photo'), upload_to='organization_photos')
coordinator = models.ForeignKey(
'contributors.Contributor', verbose_name=_('Coordinator'),
help_text=_('Person responsible for the organization')
)
projects = models.ManyToManyField(Project, blank=True)
necessities = models.ManyToManyField(Necessity, blank=True)
necessity_description = models.TextField(
verbose_name=_('Necessity description'),
help_text=_('Text to explain the organization material needs')
)
email = models.EmailField(
verbose_name=_('Organization email'), blank=True,
help_text=_('Contact email for the organization')
)
homepage_url = models.URLField(
verbose_name=_('Homepage URL'), blank=True,
help_text=_('Organization homepage link'),
)
    facebook_url = models.URLField(
        verbose_name=_('Facebook URL'), blank=True,
        help_text=_('Organization facebook link')
    )
)
twitter_url = models.URLField(
verbose_name=_('Twitter URL'), blank=True,
help_text=_('Organization twitter link')
)
def __repr__(self):
return '<Organization({})>'.format(self.name)
def __str__(self):
return self.name
|
osborne6/luminotes
|
view/Updates_rss.py
|
Python
|
gpl-3.0
| 1,836
| 0.03976
|
import cgi
from urllib import urlencode
from Rss_channel import Rss_channel
from Rss_item import Rss_item
class Updates_rss( Rss_channel ):
def __init__(
self,
recent_notes,
notebook_id,
notebook_name,
https_url,
):
if notebook_name == u"Luminotes":
notebook_path = u"/"
elif notebook_name == u"Luminotes user guide":
notebook_path = u"/guide"
elif notebook_name == u"Luminotes blog":
notebook_path = u"/blog"
else:
notebook_path = u"/notebooks/%s" % notebook_id
notebook_path = https_url + notebook_path
Rss_channel.__init__(
self,
cgi.escape( notebook_name ),
notebook_path,
u"Luminotes notebook",
recent_notes and [ Rss_item(
title = u"Note updated",
link = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
description = cgi.escape( u'A note in <a href="%s">this notebook</a> has been updated. <a href="%s?note_id=%s">View the note.</a>' % ( notebook_path, notebook_path, note_id ) ),
date = revision.strftime( "%Y-%m-%dT%H:%M:%SZ" ),
        guid = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
) for ( note_id, revision ) in recent_notes ] or [ Rss_item(
title = u"Unknown notebook",
link = None,
description = cgi.escape( u'Sorry, that notebook is unknown.' ),
        date = None,
guid = None,
) ],
)
@staticmethod
def note_link( notebook_id, notebook_name, note_id, revision, https_url ):
query = urlencode( [
( u"notebook_id", notebook_id ),
( u"notebook_name", notebook_name.encode( "utf8" ) ),
( u"note_id", note_id ),
( u"revision", unicode( revision ) ),
] )
return cgi.escape( u"%s/notebooks/get_update_link?%s" % ( https_url, query ) )
|
Asurada2015/TFAPI_translation
|
math_ops_basicoperation/tf_pow.py
|
Python
|
apache-2.0
| 416
| 0.002747
|
import tensorflow as tf
"""tf.pow(x,y,name=None)
功能:计算x各元素的y次方。
输入:x,y为张量,可以为`float3
|
2`, `float64`, `int32`, `int64`,`complex64`,`complex128`类型。"""
x = tf.constant([[2, 3, 5], [2, 3, 5]], tf.float64)
y = tf.constant([[2, 3, 4]], tf.float64)
z = tf.pow(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
"""[[ 4. 27. 625.]
|
[ 4. 27. 625.]]"""
|
erudit/zenon
|
docs/conf.py
|
Python
|
gpl-3.0
| 9,700
| 0.005984
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Érudit.org documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 14 17:16:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../eruditorg'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'base.settings.base')
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Érudit.org'
copyright = '2016 Érudit'
author = 'David Cormier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ruditorgdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ruditorg.tex', 'Érudit.org Documentation',
'Érudit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
|
mikitex70/pelican-plugins
|
readtime/__init__.py
|
Python
|
agpl-3.0
| 24
| 0
|
from .readtime import *
| |
davidt/reviewboard
|
reviewboard/hostingsvcs/tests/test_redmine.py
|
Python
|
mit
| 899
| 0
|
from __future__ import unicode_literals
from reviewboard.hostingsvcs.tests.testcases import ServiceTests
class RedmineTests(ServiceTests):
"""Unit tests for the Redmine hosting service."""
service_name = 'redmine'
fixtures = ['test_scmtools']
def test_service_support(self):
        """Testing the Redmine service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertFalse(self.service_class.supports_repositories)
def test_bug_tracker_field(self):
"""Testing the Redmine bug tracker field value"""
self.assertFalse(
self.service_class.get_bug_tracker_requires_username())
self.assertEqual(
self.service_class.get_bug_tracker_field(None, {
'redmine_url': 'http://redmine.example.com',
}),
'http://redmine.example.com/issues/%s')
|
izrik/tudor
|
tests/logic_t/layer/LogicLayer/test_search.py
|
Python
|
gpl-2.0
| 2,413
| 0
|
#!/usr/bin/env python
import unittest
from tests.logic_t.layer.LogicLayer.util import generate_ll
class SearchTest(unittest.TestCase):
def setUp(self):
self.ll = generate_ll()
self.pl = self.ll.pl
self.admin = self.pl.create_user('[email protected]', None, True)
self.pl.add(self.admin)
self.pl.commit()
def test_empty_db_yields_no_results(self):
# when
results = self.ll.search('something', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
def test_matching_summary_yields_task(self):
# given
task = self.pl.create_task('one two three')
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
        self.assertEqual([task], results2)
def test_no_matching_summary_yields_nothing(self):
# given
task = self.pl.create_task('one two three')
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('four', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
def test_non_admin_may_access_own_tasks(self):
# given
        user1 = self.pl.create_user('[email protected]', None, False)
self.pl.add(user1)
task = self.pl.create_task('one two three')
task.users.append(user1)
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', user1)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([task], results2)
def test_non_admin_may_not_access_other_tasks(self):
# given
user1 = self.pl.create_user('[email protected]', None, False)
self.pl.add(user1)
user2 = self.pl.create_user('[email protected]', None, False)
self.pl.add(user2)
task = self.pl.create_task('one two three')
task.users.append(user1)
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', user2)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
|
mmikitka/usdanutrient
|
usdanutrient/importservice.py
|
Python
|
gpl-3.0
| 15,884
| 0.003463
|
import inspect
import os
import re
import sys
import csv
import sqlalchemy.orm.exc
from sqlalchemy.orm.session import make_transient
from sqlalchemy import and_, Boolean, Date, func, Integer, Numeric
from datetime import date
from decimal import Decimal
import model
def db_import_file(engine, table_class, fname, col_order):
with open(fname) as f:
rows = []
for line in f:
values = line.split('^')
row = {}
for ind in range(len(col_order)):
col_name = col_order[ind]
col_value = None
wrapped_value = values[ind].strip().decode('windows-1252')
match = re.match('[~]{0,1}([^~]*)[~]{0,1}', wrapped_value)
if match:
col_value = match.group(1)
else:
if len(wrapped_value):
raise ValueError(
"Unexpected value, '{}'; regular expression did not match line:\n{}.".format(
wrapped_value, line))
if type(table_class.__dict__[col_name].type) is Integer:
if col_value == '':
col_value = None
else:
col_value = int(col_value)
elif type(table_class.__dict__[col_name].type) is Numeric:
if col_value == '':
col_value = None
else:
col_value = Decimal(col_value)
elif type(table_class.__dict__[col_name].type) is Date:
match_date = re.match('([\d]{2})/([\d]{4})', col_value)
if match_date:
month = match_date.group(1)
year = match_date.group(2)
col_value = date(int(year), int(month), 1)
else:
col_value = None
elif type(table_class.__dict__[col_name].type) is Boolean:
if (col_value.upper() == 'N'
or col_value == '0'
or not col_value):
col_value = False
else:
col_value = True
row[col_name] = col_value
rows.append(row)
engine.execute(table_class.__table__.insert(), rows)
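# Hedged illustration (the sample record below is hypothetical, modelled on the
# USDA SR flat-file layout this importer targets): columns are '^'-delimited and
# text values are wrapped in '~', e.g. a line such as
#   ~01001~^~0100~^~Butter, salted~
# is split on '^' and each value is unwrapped by the regex above to
# '01001', '0100', 'Butter, salted' before type coercion.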
def db_import_custom_file(processing_callback, callback_args):
fname = callback_args['fname']
engine = callback_args['engine']
table_class = callback_args['table_class']
bulk = callback_args['bulk']
print("Processing file '{}'".format(fname))
with open(fname) as f:
csvreader = csv.reader(f, delimiter='|')
rows_out = []
for row_in in csvreader:
row_out = processing_callback(row_in, callback_args)
if row_out:
rows_out.append(row_out)
if not bulk:
engine.execute(table_class.__table__.insert(), rows_out)
rows_out = []
if bulk and rows_out:
engine.execute(table_class.__table__.insert(), rows_out)
def process_row_generic(row_in, args):
row_out = {}
col_order = args['col_order']
table_class = args['table_class']
for ind in range(len(col_order)):
col_name = col_order[ind]
col_value = row_in[ind]
if type(table_class.__dict__[col_name].type) is Integer:
if col_value == '':
col_value = None
else:
col_value = int(col_value)
if type(table_class.__dict__[col_name].type) is Numeric:
if col_value == '':
col_value = None
else:
col_value = Decimal(col_value)
row_out[col_name] = col_value
    return row_out
def process_row_local_food(row_in, args):
session = args['session']
result = None
foods = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0])
for food in foods:
session.delete(food)
session.commit()
food_group = session.\
query(model.FoodGroup).\
        filter(model.FoodGroup.name == row_in[3]).\
one()
result = {
'long_desc': row_in[0],
'short_desc': row_in[1],
'manufacturer': row_in[2],
'group_id': food_group.id,
'refuse_pct': row_in[4]
}
return result
def process_row_local_food_weight(row_in, args):
session = args['session']
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
session.\
query(model.Weight).\
filter(and_(
model.Weight.food_id == food.id,
model.Weight.measurement_desc == row_in[2],
)).\
delete()
session.commit()
prev_sequence = session.\
query(func.max(model.Weight.sequence)).\
filter(model.Weight.food_id == food.id).\
scalar()
sequence = 1
if prev_sequence:
sequence = int(prev_sequence) + 1
return {
'food_id': food.id,
'sequence': sequence,
'amount': row_in[1],
'measurement_desc': row_in[2],
'grams': row_in[3]
}
def process_row_local_food_weight_alias(row_in, args):
session = args['session']
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
session.\
query(model.Weight).\
filter(and_(
model.Weight.food_id == food.id,
model.Weight.measurement_desc == row_in[2],
)).\
delete()
session.commit()
weight = session.\
query(model.Weight).\
filter(model.Weight.food_id == food.id).\
filter(model.Weight.measurement_desc == row_in[1]).\
one()
prev_sequence = session.\
query(func.max(model.Weight.sequence)).\
filter(model.Weight.food_id == food.id).\
scalar()
sequence = 1
if prev_sequence:
sequence = int(prev_sequence) + 1
return {
'food_id': food.id,
'sequence': sequence,
'amount': weight.amount,
'measurement_desc': row_in[2],
'grams': weight.grams,
'num_data_points': weight.num_data_points,
'std_dev': weight.std_dev
}
def db_import_nutrient_category_map_file(engine, session, fname):
print("Processing file '{}'".format(fname))
# Sigh. There are two instances of the nutrient, 'Energy', each
# with a different unit of measurement: kcal and kJ. Rename
# the nutrient before proceeding.
energies = session.\
query(model.Nutrient).\
filter(model.Nutrient.name == 'Energy')
for energy in energies:
if energy.units == 'kcal':
energy.name = 'Energy (kcal)'
elif energy.units == 'kJ':
energy.name = 'Energy (kJ)'
session.add(energy)
session.commit()
with open(fname) as f:
csvreader = csv.reader(f, delimiter='|')
rows_out = []
for row_in in csvreader:
nutrient = session.\
query(model.Nutrient).\
filter(model.Nutrient.name == row_in[0]).\
one()
category = session.\
query(model.NutrientCategory).\
filter(model.NutrientCategory.name == row_in[1]).\
one()
nutrient.category_id = category.id
session.add(nutrient)
session.commit()
def process_row_local_food_nutrient_data(row_in, args):
session = args['session']
try:
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
except sqlalchemy.orm.exc.NoResultFound:
raise ValueError("Unable to find USDA Food '{}'".format(row_in[0]))
except sqlalchemy.orm.exc.MultipleResultsFound:
raise ValueError("Multiple results of food '{}'".format(row_in[0]))
try:
nutrien
|
secnot/rectpack
|
rectpack/packer.py
|
Python
|
apache-2.0
| 17,585
| 0.006028
|
from .maxrects import MaxRectsBssf
import operator
import itertools
import collections
import decimal
# Float to Decimal helper
def float2dec(ft, decimal_digits):
"""
Convert float (or int) to Decimal (rounding up) with the
requested number of decimal digits.
Arguments:
ft (float, int): Number to convert
        decimal_digits (int): Number of digits after decimal point
    Return:
        Decimal: Number converted to decimal
"""
with decimal.localcontext() as ctx:
ctx.rounding = decimal.ROUND_UP
places = decimal.Decimal(10)**(-decimal_digits)
return decimal.Decimal.from_float(float(ft)).quantize(places)
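# Illustrative usage (hedged addition, not part of the original module):
# float2dec always rounds *up* at the requested precision, so converted sizes
# never underestimate the true rectangle dimensions.
# >>> float2dec(3.141, 2)
# Decimal('3.15')
# >>> float2dec(7, 0)
# Decimal('7')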
# Sorting algos for rectangle lists
SORT_AREA = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: r[0]*r[1]) # Sort by area
SORT_PERI = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: r[0]+r[1]) # Sort by perimeter
SORT_DIFF = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: abs(r[0]-r[1])) # Sort by Diff
SORT_SSIDE = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: (min(r[0], r[1]), max(r[0], r[1]))) # Sort by short side
SORT_LSIDE = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: (max(r[0], r[1]), min(r[0], r[1]))) # Sort by long side
SORT_RATIO = lambda rectlist: sorted(rectlist, reverse=True,
                    key=lambda r: r[0]/r[1]) # Sort by side ratio
SORT_NONE = lambda rectlist: list(rectlist) # Unsorted
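# Hedged example (not in the original source): each helper takes a list of
# (width, height) tuples and returns a new, reordered list, e.g.
# >>> SORT_AREA([(2, 3), (5, 1), (4, 4)])
# [(4, 4), (2, 3), (5, 1)]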
class BinFactory(object):
def __init__(self, width, height, count, pack_algo, *args, **kwargs):
self._width = width
self._height = height
self._count = count
self._pack_algo = pack_algo
self._algo_kwargs = kwargs
self._algo_args = args
self._ref_bin = None # Reference bin used to calculate fitness
self._bid = kwargs.get("bid", None)
def _create_bin(self):
return self._pack_algo(self._width, self._height, *self._algo_args, **self._algo_kwargs)
def is_empty(self):
return self._count<1
def fitness(self, width, height):
if not self._ref_bin:
self._ref_bin = self._create_bin()
return self._ref_bin.fitness(width, height)
def fits_inside(self, width, height):
# Determine if rectangle widthxheight will fit into empty bin
if not self._ref_bin:
self._ref_bin = self._create_bin()
return self._ref_bin._fits_surface(width, height)
def new_bin(self):
if self._count > 0:
self._count -= 1
return self._create_bin()
else:
return None
def __eq__(self, other):
return self._width*self._height == other._width*other._height
def __lt__(self, other):
return self._width*self._height < other._width*other._height
def __str__(self):
return "Bin: {} {} {}".format(self._width, self._height, self._count)
class PackerBNFMixin(object):
"""
BNF (Bin Next Fit): Only one open bin at a time. If the rectangle
doesn't fit, close the current bin and go to the next.
"""
def add_rect(self, width, height, rid=None):
while True:
# if there are no open bins, try to open a new one
if len(self._open_bins)==0:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return None
# we have at least one open bin, so check if it can hold this rect
rect = self._open_bins[0].add_rect(width, height, rid=rid)
if rect is not None:
return rect
# since the rect doesn't fit, close this bin and try again
closed_bin = self._open_bins.popleft()
self._closed_bins.append(closed_bin)
class PackerBFFMixin(object):
"""
BFF (Bin First Fit): Pack rectangle in first bin it fits
"""
def add_rect(self, width, height, rid=None):
# see if this rect will fit in any of the open bins
for b in self._open_bins:
rect = b.add_rect(width, height, rid=rid)
if rect is not None:
return rect
while True:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return None
# _new_open_bin may return a bin that's too small,
# so we have to double-check
rect = new_bin.add_rect(width, height, rid=rid)
if rect is not None:
return rect
class PackerBBFMixin(object):
"""
BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness
"""
# only create this getter once
first_item = operator.itemgetter(0)
def add_rect(self, width, height, rid=None):
# Try packing into open bins
fit = ((b.fitness(width, height), b) for b in self._open_bins)
fit = (b for b in fit if b[0] is not None)
try:
_, best_bin = min(fit, key=self.first_item)
best_bin.add_rect(width, height, rid)
return True
except ValueError:
pass
# Try packing into one of the empty bins
while True:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return False
# _new_open_bin may return a bin that's too small,
# so we have to double-check
if new_bin.add_rect(width, height, rid):
return True
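# Hedged note (the class below is an illustrative sketch, not part of this
# excerpt): a concrete packer combines one of the bin-selection mixins above
# with a packer base such as PackerOnline defined below, e.g.
#
#   class MyPackerOnlineBBF(PackerBBFMixin, PackerOnline):
#       pass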
class PackerOnline(object):
"""
Rectangles are packed as soon are they are added
"""
def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
"""
Arguments:
pack_algo (PackingAlgorithm): What packing algo to use
rotation (bool): Enable/Disable rectangle rotation
"""
self._rotation = rotation
self._pack_algo = pack_algo
self.reset()
def __iter__(self):
return itertools.chain(self._closed_bins, self._open_bins)
def __len__(self):
return len(self._closed_bins)+len(self._open_bins)
def __getitem__(self, key):
"""
Return bin in selected position. (excluding empty bins)
"""
if not isinstance(key, int):
raise TypeError("Indices must be integers")
        size = len(self) # avoid recalculations
if key < 0:
key += size
if not 0 <= key < size:
raise IndexError("Index out of range")
if key < len(self._closed_bins):
return self._closed_bins[key]
else:
return self._open_bins[key-len(self._closed_bins)]
def _new_open_bin(self, width=None, height=None, rid=None):
"""
Extract the next empty bin and append it to open bins
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
"""
factories_to_delete = set() #
new_bin = None
for key, binfac in self._empty_bins.items():
# Only return the new bin if the rect fits.
# (If width or height is None, caller doesn't know the size.)
if not binfac.fits_inside(width, height):
continue
# Create bin and add to open_bins
new_bin = binfac.new_bin()
if new_bin is None:
continue
self._open_bins.append(new_bin)
# If the factory was depleted mark for deletion
if binfac.is_empty():
factories_to_delete.add(key)
break
# Delete marked factories
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
def add_bin(self, width, height, count=1, **kwargs):
# accept the same parameters as PackingAlgorithm
|
pgleeson/TestArea
|
models/Cerebellum/pythonScripts/Test_SingleGranule.py
|
Python
|
gpl-2.0
| 2,965
| 0.018887
|
#
#
# File to test behaviour of the Golgi Cell.
#
# To execute this type of file, type '..\..\..\nC.bat -python XXX.py' (Windows)
# or '../../../nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
print "See
|
http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File("../Cerebellum.ncx")
############## Main settings ##################
simConfigs = []
#simConfigs.append("Default Simulation Configuration")
simConfigs.append("Single Golgi Cell")
simDt = 0.001
simulators = ["NEURON", "GENESIS_PHYS", "GENESIS_SI"] # Note: nernst object isn't implemented in MOOSE yet
varTimestepNeuron = True
varTimestepTolerance = 0.00001
plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = False
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
verbose = verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground,
varTimestepNeuron = varTimestepNeuron,
varTimestepTolerance = varTimestepTolerance)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
analyseSims = analyseSims)
# These were discovered using analyseSims = True above.
# They need to hold for all simulators
spikeTimesToCheck = {'SingleGolgi_0': [12.2, 33.5, 93.0, 197.4, 310.1, 424.8,
508.0, 529.3, 564.5, 613.8, 668.3, 724.1, 780.2,
836.6, 893.0, 949.5, 1157.6, 1277.6, 1394.4]}
spikeTimeAccuracy = 1 # Note run time of 1500 ms...
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
|
lightning-round/salud-api
|
app/mod_profiles/common/parsers/user.py
|
Python
|
gpl-2.0
| 615
| 0
|
# -*- coding: utf-8 -*-
from flask_restful import reqparse
from app.mod_profiles.validators.generic_validators import is_valid_id
# General parser
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, required=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
parser.add_argument('profile_id', type=is_valid_id, required=True)
# Parser for the POST resource
parser_post = parser.copy()
# Parser for the PUT resource
parser_put = parser.copy()
parser_put.remove_argument('password')
parser_put.add_argument('password', type=str)
|
kyvinh/home-assistant
|
tests/components/recorder/models_original.py
|
Python
|
apache-2.0
| 5,545
| 0
|
"""Models for SQLAlchemy.
This file contains the original models definitions before schema tracking was
implemented. It is used to test the schema migration logic.
"""
import json
from datetime import datetime
import logging
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Index, Integer,
String, Text, distinct)
from sqlalchemy.ext.declarative import declarative_base
import homeassistant.util.dt as dt_util
from homeassistant.core import Event, EventOrigin, State, split_entity_id
from homeassistant.remote import JSONEncoder
# SQLAlchemy Schema
# pylint: disable=invalid-name
Base = declarative_base()
_LOGGER = logging.getLogger(__name__)
class Events(Base): # type: ignore
"""Event history data."""
__tablename__ = 'events'
event_id = Column(Integer, primary_key=True)
event_type = Column(String(32), index=True)
event_data = Column(Text)
origin = Column(String(32))
time_fired = Column(DateTime(timezone=True))
created = Column(DateTime(timezone=True), default=datetime.utcnow)
@staticmethod
def from_event(event):
"""Create an event database object from a native event."""
return Events(event_type=event.event_type,
event_data=json.dumps(event.data, cls=JSONEncoder),
origin=str(event.origin),
time_fired=event.time_fired)
def to_native(self):
"""Convert to a natve HA Event."""
try:
return Event(
self.event_type,
json.loads(self.event_data),
EventOrigin(self.origin),
_process_timestamp(self.time_fired)
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting to event: %s", self)
return None
class States(Base): # type: ignore
"""State change history."""
__tablename__ = 'states'
state_id = Column(Integer, primary_key=True)
domain = Column(String(64))
entity_id = Column(String(255))
state = Column(String(255))
attributes = Column(Text)
event_id = Column(Integer, ForeignKey('events.event_id'))
last_changed = Column(DateTime(timezone=True), default=datetime.utcnow)
last_updated = Column(DateTime(timezone=True), default=datetime.utcnow)
created = Column(DateTime(timezone=True), default=datetime.utcnow)
__table_args__ = (Index('states__state_changes',
'last_changed', 'last_updated', 'entity_id'),
Index('states__significant_changes',
'domain', 'last_updated', 'entity_id'), )
@staticmethod
def from_event(event):
"""Create object from a state_changed event."""
entity_id = event.data['entity_id']
state = event.data.get('new_state')
dbstate = States(entity_id=entity_id)
# State got deleted
if state is None:
dbstate.state = ''
dbstate.domain = split_entity_id(entity_id)[0]
dbstate.attributes = '{}'
dbstate.last_changed = event.time_fired
dbstate.last_updated = event.time_fired
else:
dbstate.domain = state.domain
dbstate.state = state.state
dbstate.attributes = json.dumps(dict(state.attributes),
cls=JSONEncoder)
dbstate.last_changed = state.last_changed
dbstate.last_updated = state.last_updated
return dbstate
def to_native(self):
"""Convert to an HA state object."""
try:
return State(
self.entity_id, self.state,
json.loads(self.attributes),
_process_timestamp(self.last_changed),
_process_timestamp(self.last_updated)
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self)
return None
class RecorderRuns(Base): # type: ignore
"""Representation of recorder run."""
__tablename__ = 'recorder_runs'
run_id = Column(Integer, primary_key=True)
start = Column(DateTime(timezone=True), default=datetime.utcnow)
end = Column(DateTime(timezone=True))
closed_incorrect = Column(Boolean, default=False)
created = Column(DateTime(timezone=True), default=datetime.utcnow)
def entity_ids(self, point_in_time=None):
"""Return the entity ids that existed in this run.
Specify point_in_time if you want to know which existed at that point
in time inside the run.
"""
from sqlalchemy.orm.session import Session
session = Session.object_session(self)
assert session is not None, 'RecorderRuns need to be persisted'
query = session.query(distinct(States.entity_id)).filter(
States.last_updated >= self.start)
if point_in_time is not None:
query = query.filter(States.last_updated < point_in_time)
elif self.end is not None:
            query = query.filter(States.last_updated < self.end)
return [row[0] for row in query]
def to_native(self):
"""Return self, native format is this model."""
return self
def _process_timestamp(ts):
"""Process a timestamp into datetime object."""
if ts is None:
return None
elif ts.tzinfo is None:
return dt_util.UTC.localize(ts)
else:
        return dt_util.as_utc(ts)
|
airbnb/airflow
|
tests/utils/test_python_virtualenv.py
|
Python
|
apache-2.0
| 2,580
| 0.003101
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow.utils.python_virtualenv import prepare_virtualenv
class TestPrepareVirtualenv(unittest.TestCase):
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=False, requirements=[]
)
self.assertEqual("/VENV/bin/python", python_bin)
mock_execute_in_subprocess.assert_called_once_with(['virtualenv', '/VENV', '--python=pythonVER'])
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv_with_system_packages(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=True, requirements=[]
)
self.assertEqual("/VENV/bin/python", python_bin)
mock_execute_in_subprocess.assert_called_once_with(
['virtualenv', '/VENV', '--system-site-packages', '--python=pythonVER']
)
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv_with_extra_packages(self, mock_execute_in_subprocess):
        python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=False,
requirements=['apache-beam[gcp]'],
)
self.assertEqual("/VENV/bin/python", python_bin)
        mock_execute_in_subprocess.assert_any_call(['virtualenv', '/VENV', '--python=pythonVER'])
mock_execute_in_subprocess.assert_called_with(['/VENV/bin/pip', 'install', 'apache-beam[gcp]'])
|
MostlyOpen/odoo_addons
|
myo_person_mng/models/person_mng.py
|
Python
|
agpl-3.0
| 3,872
| 0.00155
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models
from openerp.exceptions import UserError
class PersonManagement(models.Model):
_name = 'myo.person.mng'
name = fields.Char('Name', required=True)
alias = fields.Char('Alias', help='Common name that the Person is referred.')
    code = fields.Char(string='Person Code', required=False)
notes = fields.Text(string='Notes')
    date_inclusion = fields.Datetime("Inclusion Date", required=False, readonly=False,
                                     default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
batch_name = fields.Char('Batch Name', required=False)
country_id_2 = fields.Many2one('res.country', 'Nationality')
birthday = fields.Date("Date of Birth")
age = fields.Char(
string='Age',
compute='_compute_age',
store=True
)
estimated_age = fields.Char(string='Estimated Age', required=False)
spouse_name = fields.Char('Spouse Name')
spouse_id = fields.Many2one('myo.person', 'Spouse', ondelete='restrict')
father_name = fields.Char('Father Name')
father_id = fields.Many2one('myo.person', 'Father', ondelete='restrict')
mother_name = fields.Char('Mother Name')
mother_id = fields.Many2one('myo.person', 'Mother', ondelete='restrict')
responsible_name = fields.Char('Responsible Name')
responsible_id = fields.Many2one('myo.person', 'Responsible', ondelete='restrict')
identification_id = fields.Char('Person ID')
otherid = fields.Char('Other ID')
gender = fields.Selection(
[('M', 'Male'),
('F', 'Female')
], 'Gender'
)
marital = fields.Selection(
[('single', 'Single'),
('married', 'Married'),
('widower', 'Widower'),
('divorced', 'Divorced'),
], 'Marital Status'
)
active = fields.Boolean('Active',
help="If unchecked, it will allow you to hide the person without removing it.",
default=1)
person_id = fields.Many2one('myo.person', 'Person')
_order = 'name'
_sql_constraints = [
('code_uniq',
'UNIQUE(code)',
u'Error! The Person Code must be unique!'
)
]
@api.multi
@api.constrains('birthday')
def _check_birthday(self):
for person in self:
if person.birthday > fields.Date.today():
raise UserError(u'Error! Date of Birth must be in the past!')
@api.one
@api.depends('birthday')
def _compute_age(self):
now = datetime.now()
if self.birthday:
dob = datetime.strptime(self.birthday, '%Y-%m-%d')
delta = relativedelta(now, dob)
# self.age = str(delta.years) + "y " + str(delta.months) + "m " + str(delta.days) + "d"
self.age = str(delta.years)
else:
self.age = "No Date of Birth!"
|
GiantSpaceRobot/FindFungi
|
FindFungi-v0.23/CSV-to-Tree.py
|
Python
|
mit
| 2,781
| 0.024811
|
#!/usr/bin/env python
"""
This script accepts .csv pipeline output and gives a .ps file with a basic tree structure
"""
__author__ = "Paul Donovan"
__maintainer__ = "Paul Donovan"
__email__ = "[email protected]"
import sys
import argparse
from ete3 import NCBITaxa
#Display help and usage
parser = argparse.ArgumentParser(description="Incorrect number of command line arguments")
parser.add_argument('Sorted-LCA.csv')
parser.add_argument('Output.gv')
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
ncbi = NCBITaxa()
#The number of species you want to create the tree with
NumberOfSpecies = 10
#Read CSV results into list, remove all but the top 10 most abundant taxonomies
ResultsList = list(line.strip().split(",") for line in open(sys.argv[1]))
ResultsList = ResultsList[0:int(NumberOfSpecies) + 1] #Take first n items in list (+1 is to negate the header line)
#Open output file for writing
Output = open(sys.argv[2], "w")
#Write header line in dot format
Output.write('digraph G {\n\tsize="8,5!";\n')
#Define lists, dicts and variables
ResultTaxids = list()
TreeList = list()
BadChars = "()[]{}/|"
TaxidFreqDict = {}
Counter = 0
#Re-open CSV file, create a dictionary with taxid as key and number of reads as value
with open(sys.argv[1]) as f:
for line in f:
if not line.startswith("#"):
tok = line.strip().split(",")
TaxidFreqDict[tok[1]] = tok[2]
#Build the dot script
for line in ResultsList:
if line[0].startswith("#"):
pass
else:
ResultTaxid = line[1]
ResultTaxids.append(ResultTaxid)
        lineage = ncbi.get_lineage(ResultTaxid)
for index, taxid in enumerate(lineage):
name = ncbi.get_taxid_translator([str(taxid)])
name = name[taxid]
for char in name:
if char in BadChars:
name = name.replace(str(char),"_") #Replace ugly strings
NextIndex = int(index) + 1
if NextIndex == len(lineage):
pass
else:
NextTaxid = lineage[NextIndex]
NextName = ncbi.get_taxid_translator([str(NextTaxid)])
NextName = NextName[NextTaxid]
for char in NextName:
if char in BadChars:
NextName = NextName.replace(str(char),"_") #Replace ugly strings
NodeToNode = str('\t"' + str(name) + '" -> "' + str(NextName) + '";\n')
if any(NodeToNode in s for s in TreeList):
pass
else:
Output.write(NodeToNode)
TreeList.append(NodeToNode)
if str(NextTaxid) in TaxidFreqDict: #If there is information available about number of reads for this taxid, use it
value = TaxidFreqDict[str(NextTaxid)]
Freq = format(int(value), ",d") #Adds commas to numbers to make them more human-readable
Output.write(str('\t"' + str(NextName) + '" [xlabel="' + str(Freq) + ' reads"];\n'))
Output.write("}")
Output.close()
|
rspavel/spack
|
var/spack/repos/builtin/packages/vtk/package.py
|
Python
|
lgpl-2.1
| 12,385
| 0.000888
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Vtk(CMakePackage):
"""The Visualization Toolkit (VTK) is an open-source, freely
available software system for 3D computer graphics, image
processing and visualization. """
homepage = "http://www.vtk.org"
url = "https://www.vtk.org/files/release/9.0/VTK-9.0.0.tar.gz"
list_url = "http://www.vtk.org/download/"
maintainers = ['chuckatkins', 'danlipsa']
version('9.0.0', sha256='15def4e6f84d72f82386617fe595ec124dda3cbd13ea19a0dcd91583197d8715')
version('8.2.0', sha256='34c3dc775261be5e45a8049155f7228b6bd668106c72a3c435d95730d17d57bb')
version('8.1.2', sha256='0995fb36857dd76ccfb8bb07350c214d9f9099e80b1e66b4a8909311f24ff0db')
version('8.1.1', sha256='71a09b4340f0a9c58559fe946dc745ab68a866cf20636a41d97b6046cb736324')
version('8.1.0', sha256='6e269f07b64fb13774f5925161fb4e1f379f4e6a0131c8408c555f6b58ef3cb7')
version('8.0.1', sha256='49107352923dea6de05a7b4c3906aaf98ef39c91ad81c383136e768dcf304069')
version('7.1.0', sha256='5f3ea001204d4f714be972a810a62c0f2277fbb9d8d2f8df39562988ca37497a')
version('7.0.0', sha256='78a990a15ead79cdc752e86b83cfab7dbf5b7ef51ba409db02570dbdd9ec32c3')
version('6.3.0', sha256='92a493354c5fa66bea73b5fc014154af5d9f3f6cee8d20a826f4cd5d4b0e8a5e')
version('6.1.0', sha256='bd7df10a479606d529a8b71f466c44a2bdd11fd534c62ce0aa44fad91883fa34')
# VTK7 defaults to OpenGL2 rendering backend
variant('opengl2', default=True, description='Enable OpenGL2 backend')
    variant('osmesa', default=False, description='Enable OSMesa support')
variant('python', default=False, description='Enable Python support')
variant('qt', default=False, description='Build with support for Qt')
variant('xdmf', default=False, description='Build XDMF file support')
    variant('ffmpeg', default=False, description='Build with FFMPEG support')
variant('mpi', default=True, description='Enable MPI support')
patch('gcc.patch', when='@6.1.0')
# At the moment, we cannot build with both osmesa and qt, but as of
# VTK 8.1, that should change
conflicts('+osmesa', when='+qt')
extends('python', when='+python')
# Acceptable python versions depend on vtk version
# We need vtk at least 8.0.1 for python@3,
# and at least 9.0 for [email protected]
depends_on('[email protected]:2.9', when='@:8.0 +python', type=('build', 'run'))
depends_on('[email protected]:3.7.9', when='@8.0.1:8.9 +python',
type=('build', 'run'))
depends_on('[email protected]:', when='@9.0: +python', type=('build', 'run'))
    # We need mpi4py if building python wrappers and using MPI
depends_on('py-mpi4py', when='+python+mpi', type='run')
# python3.7 compatibility patch backported from upstream
# https://gitlab.kitware.com/vtk/vtk/commit/706f1b397df09a27ab8981ab9464547028d0c322
patch('python3.7-const-char.patch', when='@7.0.0:8.1.1 ^[email protected]:')
# The use of the OpenGL2 backend requires at least OpenGL Core Profile
# version 3.2 or higher.
depends_on('[email protected]:', when='+opengl2')
depends_on('[email protected]:', when='~opengl2')
if sys.platform != 'darwin':
depends_on('glx', when='~osmesa')
depends_on('libxt', when='~osmesa')
# Note: it is recommended to use mesa+llvm, if possible.
# mesa default is software rendering, llvm makes it faster
depends_on('mesa+osmesa', when='+osmesa')
# VTK will need Qt5OpenGL, and qt needs '-opengl' for that
depends_on('qt+opengl', when='+qt')
depends_on('boost', when='+xdmf')
depends_on('boost+mpi', when='+xdmf +mpi')
depends_on('ffmpeg', when='+ffmpeg')
depends_on('mpi', when='+mpi')
depends_on('expat')
depends_on('freetype')
depends_on('glew')
# set hl variant explicitly, similar to issue #7145
depends_on('hdf5+hl')
depends_on('jpeg')
depends_on('jsoncpp')
depends_on('libxml2')
depends_on('lz4')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('netcdf-c+mpi', when='+mpi')
depends_on('netcdf-cxx')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
depends_on('eigen', when='@8.2.0:')
depends_on('double-conversion', when='@8.2.0:')
depends_on('sqlite', when='@8.2.0:')
# For finding Fujitsu-MPI wrapper commands
patch('find_fujitsu_mpi.patch', when='@:8.2.0%fj')
def url_for_version(self, version):
url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
# VTK has some trouble finding freetype unless it is set in
# the environment
env.set('FREETYPE_DIR', self.spec['freetype'].prefix)
def cmake_args(self):
spec = self.spec
opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '')
cmake_args = [
'-DBUILD_SHARED_LIBS=ON',
'-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver),
# In general, we disable use of VTK "ThirdParty" libs, preferring
# spack-built versions whenever possible
'-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON',
# However, in a few cases we can't do without them yet
'-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBHARU=OFF',
'-DNETCDF_DIR={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_C_ROOT={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix),
# Allow downstream codes (e.g. VisIt) to override VTK's classes
'-DVTK_ALL_NEW_OBJECT_FACTORY:BOOL=ON',
# Disable wrappers for other languages.
'-DVTK_WRAP_JAVA=OFF',
'-DVTK_WRAP_TCL=OFF',
]
# Some variable names have changed
if spec.satisfies('@8.2.0:'):
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGG:BOOL=OFF',
'-DVTK_USE_SYSTEM_THEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ:BOOL=OFF',
'-DVTK_USE_SYSTEM_PUGIXML:BOOL=OFF',
])
else:
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF',
])
if '+mpi' in spec:
if spec.satisfies('@:8.2.0'):
cmake_args.extend([
'-DVTK_Group_MPI:BOOL=ON',
'-DVTK_USE_SYSTEM_DIY2:BOOL=OFF'
])
else:
cmake_args.extend([
'-DVTK_USE_MPI=ON'
])
if '+ffmpeg' in spec:
cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON'])
# Enable/Disable wrappers for Python.
if '+python' in spec:
cmake_args.extend([
'-DVTK_WRAP_PYTHON=ON',
'-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path),
])
if '+mpi' in spec:
cmake_args.append('-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON')
if spec.satisfies('@9.0.0: ^python@3:'):
cmake_args.append('-DVTK_PYTHON_VERSION=3')
else:
cmake_args.append('-DVTK_WRAP_PYTHON=OFF')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DCMAKE_MACOSX_RPATH=ON'
])
if '+qt' in spec:
qt_ver = spec['qt'].version.up_to(1)
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
cmake_args.extend([
# Enable Qt support here.
'-DVTK_QT_VERSION:STRING={0}'.format(qt_ver),
'-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe),
'-DVTK_Group_Qt:BOOL=ON',
])
# NOTE: The following definitions are required in order to allow
# VTK to build with qt~webkit versions (see the documentation for
|
voytekresearch/neurodsp
|
neurodsp/plts/utils.py
|
Python
|
apache-2.0
| 1,740
| 0.002299
|
"""Utility functions for plots."""
from functools import wraps
from os.path import join as pjoin
import matplotlib.pyplot as plt
###################################################################################################
###################################################################################################
def check_ax(ax, figsize=None):
"""Check whether a figure axes object is defined, define if not.
Parameters
----------
ax : matplotlib.Axes or None
Axes object to check if is defined.
Returns
-------
ax : matplotlib.Axes
Figure axes object to use.
"""
if not ax:
_, ax = plt.subplots(figsize=figsize)
return ax
def savefig(func):
"""Decorator function to save out figures."""
@wraps(func)
def decorated(*args, **kwargs):
        # Grab file name and path arguments, if they are in kwargs
file_name = kwargs.pop('file_name', None)
file_path = kwargs.pop('file_path', None)
# Check for an explicit argument for whether to save figure or not
        # Defaults to saving when file name given (since bool(str)->True; bool(None)->False)
save_fig = kwargs.pop('save_fig', bool(file_name))
        # Check for and collect any other plot keywords
save_kwargs = kwargs.pop('save_kwargs', {})
save_kwargs.setdefault('bbox_inches', 'tight')
# Check and collect whether to close the plot
close = kwargs.pop('close', None)
func(*args, **kwargs)
if save_fig:
full_path = pjoin(file_path, file_name) if file_path else file_name
plt.savefig(full_path, **save_kwargs)
if close:
plt.close()
return decorated
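# Hedged usage sketch (plot_signal is a hypothetical example, not part of this
# module): decorating a plot function adds file_name / file_path / save_fig
# keyword handling without touching its body.
#
# @savefig
# def plot_signal(sig, ax=None):
#     ax = check_ax(ax)
#     ax.plot(sig)
#
# plot_signal([0, 1, 0, -1], file_name='trace.png', save_fig=True)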
|
ec429/quIRC
|
gen_numerics.py
|
Python
|
gpl-3.0
| 1,245
| 0.011245
|
#!/usr/bin/python
# gen_numerics.py: generate numerics.h
import numerics
print """/*
quIRC - simple terminal-based IRC client
Copyright (C) 2010-13 Edward Cree
See quirc.c for license information
numeric: IRC numeric replies
*/
/***
This file is generated by gen_numerics.py from masters in numerics.py.
Do not make edits directly to this file! Edit the masters instead.
***/
/*
A symbolic name defined here does not necessarily imply recognition or decoding of that numeric reply.
Some numeric replies are non-normative; that is, they are not defined in the original RFC1459 or its superseding RFC2812, but instead are either defined in other, non-normative documents, or are entirely experimental. These are denoted with an X before the name (of the form RPL_X_BOGOSITY); where a numeric is being identified purely on the basis of usage "in the wild", the symbolic name will be completely arbitrary and may not align with usage elsewhere.
*/
/* Error replies */"""
errs = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericError)]
for e in errs:
print str(e)
print """
/* Command responses */"""
rpls = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericReply)]
for r in rpls:
print str(r)
|
mozilla/normandy
|
normandy/base/api/filters.py
|
Python
|
mpl-2.0
| 1,421
| 0
|
import django_filters
from rest_framework import filters
class CaseInsensitiveBooleanFilter(django_filters.Filter):
# The default django_filters boolean filter *only* accepts True and False
# which is problematic when dealing with non-Python clients. This allows
# the lower case variants, as well as 0 and 1.
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
value = True
elif lc_value in ["false", "0"]:
value = False
return qs.filter(**{self.field_name: value})
return qs
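# Hedged usage sketch (the FilterSet below is hypothetical, not from this file):
# attaching the filter lets API clients pass ?enabled=true, ?enabled=True or
# ?enabled=1 interchangeably.
#
# class RecipeFilterSet(django_filters.FilterSet):
#     enabled = CaseInsensitiveBooleanFilter(field_name="enabled")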
class AliasedOrderingFilter(filters.OrderingFilter):
aliases = {}
def get_valid_fields(self, *args, **kwargs):
valid_fields = super().get_valid_fields(*args, **kwargs)
for alias, mapping in self.aliases.items():
valid_fields.append((alias, mapping[1]))
        return valid_fields
def get_ordering(self, *args, **kwargs):
ordering = super().get_ordering(*args, **kwargs)
if ordering is not None:
return list(map(self.replace_alias, ordering))
return ordering
def replace_alias(self, term):
field = term.lstrip("-")
if field in self.aliases:
            modifier = "-" if term.startswith("-") else ""
return modifier + self.aliases[field][0]
return term
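# Hedged usage sketch (the subclass and field names below are hypothetical):
# aliases maps a public ordering term to a (model_field, label) pair, so a
# request for ?ordering=-name would be rewritten to "-latest_revision__name".
#
# class RecipeOrderingFilter(AliasedOrderingFilter):
#     aliases = {"name": ("latest_revision__name", "name")}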
|
evewspace/eve-wspace
|
evewspace/wsgi.py
|
Python
|
apache-2.0
| 397
| 0.002519
|
""
|
"
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evewspace.settings")
application = get_wsgi_application()
|
temugen/pipil
|
pipil.py
|
Python
|
mit
| 10,540
| 0.017268
|
# Copyright (C) 2011 Brad Misik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Set to False to test alternative image processor
use_PIL = True
# The port to use with IPC for opening/saving image files
nopil_port = 4859
import string
from multiprocessing import Process
import imp
import os
import tempfile
import subprocess
from time import sleep
import atexit
import socket
# Remove our temporary files when the module is unloaded
temp_files = []
def cleanup_temp():
for filename in temp_files:
os.remove(filename)
atexit.register(cleanup_temp)
try:
# Do not attempt an import here
# Tkinter can't be loaded in a process and its subprocesses simultaneously
imp.find_module('Tkinter')
_has_Tk = True
except:
_has_Tk = False
def _pil_open(filename):
image = PILImage.open(filename)
data = image.getdata()
# Only get the RGB components in case the image is ARGB
data = [tuple(color[len(color) - 3:]) for color in data]
return (data, image.size)
def _nopil_open_pipe(filename):
# Run a java utility to print out the pixels of the image to stdout
command = ['java', '-jar', 'ImagePiper.jar', 'read', filename]
image_piper = subprocess.Popen(command, stdout=subprocess.PIPE)
# Read the output from ImagePiper
stdout, stderr = image_piper.communicate()
lines = stdout.splitlines()
# Read the encoding from the first line of output
radix = int(lines.pop(0))
# Read the width and the height from the second line of output
w, h = tuple(int(x, radix) for x in lines.pop(0).split())
# Read the pixels line by line, with each line corresponding to a line from the image
data = [Color.int_to_rgb(int(pixel, radix)) for line in lines for pixel in line.split()]
return (data, (w, h))
def _bytes_to_int(bs):
return sum(ord(bs[i]) << (8 * (len(bs) - i - 1)) for i in xrange(len(bs)))
def _bytes_to_rgb(bs):
return tuple(ord(bs[i]) for i in xrange(1, 4))
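# Hedged examples (not in the original file): both helpers read big-endian
# 4-byte values from the socket stream.
# >>> _bytes_to_int('\x00\x00\x01\x00')
# 256
# >>> _bytes_to_rgb('\x00\xff\x80\x00')
# (255, 128, 0)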
def _nopil_open_socket(filename):
# Listen on a local IPv4-style socket to receive image data
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', nopil_port))
s.listen(1)
# Run a java utility to send the pixels of the image over our socket
command = ['java', '-jar', 'ImagePiper.jar', 'send', filename]
subprocess.Popen(command)
# Wait for the java utility to connect and move sockets
conn, addr = s.accept()
s.close()
# Read the width and the height
size = conn.recv(8)
size = [_bytes_to_int(size[i*4:i*4+4]) for i in xrange(2)]
w, h = size
# Read entire lines in from the socket
lines = [conn.recv(4 * w) for line in xrange(h)]
data = [_bytes_to_rgb(lines[line][i*4:i*4+4]) for line in xrange(h) for i in xrange(w)]
# Close the connection
conn.close()
return (data, (w, h))
def _pil_save(image, filename):
w, h = image.size
pil_image = PILImage.new("RGB", (w, h))
pil_image.putdata(image.data)
pil_image.save(filename, "png")
def _nopil_save(image, filename):
# Run a java utility to read in the pixels of the image and save them to a file
command = ['java', '-jar', 'ImagePiper.jar', 'write', filename]
image_piper = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
# Read the encoding from ImagePiper and create a codec for it
radix = int(image_piper.stdout.readline())
codec = IntegerCodec()
# Write the width and the height
w, h = image.size
image_piper.stdin.write("%s %s\n" % (codec.encode(w, radix), codec.encode(h, radix)))
# Write the pixels line by line
pixels = map(lambda pixel: codec.encode(Color.rgb_to_int(pixel), radix), image.data)
lines = (" ".join(pixels[image._get_index((0, line)):image._get_index((w, line))]) for line in xrange(h))
image_piper.stdin.write("\n".join(lines))
# Flush the writes
image_piper.communicate()
try:
from PIL import Image as PILImage
_has_PIL = True
except:
_has_PIL = False
_nopil_open = _nopil_open_socket
class IntegerCodec:
def __init__(self):
self._base_list = string.digits + string.letters + '_@'
def decode(self, int_string, radix):
return int(int_string, radix)
def encode(self, integer, radix):
# Only encode absolute value of integer
sign = ''
if integer < 0:
sign = '-'
integer = abs(integer)
int_string = ''
while integer != 0:
int_string = self._base_list[integer % radix] + int_string
integer /= radix
return sign + int_string
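    # Hedged example (not in the original file): encode/decode round-trip a
    # radix-16 value using the digits+letters alphabet defined above.
    # >>> codec = IntegerCodec()
    # >>> codec.encode(255, 16)
    # 'ff'
    # >>> codec.decode('ff', 16)
    # 255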
class Color:
def __init__(self, color):
if type(color) is type(0):
self.color = Color.int_to_rgb(color)
else:
self.color = color
def as_int(self):
return Color.rgb_to_int(self.color)
def as_rgb(self):
return self.color
@staticmethod
def int_to_rgb(rgb_int):
r = (rgb_int >> 16) & 255
g = (rgb_int >> 8) & 255
b = rgb_int & 255
return (r, g, b)
@staticmethod
def rgb_to_int(rgb):
r, g, b = rgb
rgb_int = r
rgb_int = (rgb_int << 8) + g
rgb_int = (rgb_int << 8) + b
return rgb_int
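    # Hedged example (not in the original file): the two static converters are
    # inverses of each other.
    # >>> Color.rgb_to_int((255, 128, 0))
    # 16744448
    # >>> Color.int_to_rgb(16744448)
    # (255, 128, 0)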
def squared_euclidean_distance(self, other):
return sum((self.color[i] - other.color[i])**2 for i in xrange(len(self.color)))
class Image:
def __init__(self, *args):
if type(args[0]) is type("string"):
# Assume we were passed a filename
self._open(args[0])
elif type(args[0]) is type(self):
# Assume we were passed another image
self._copy(args[0])
else:
# Assume we were passed a size tuple and possibly a color
self._create(*args)
def _open(self, filename):
if _has_PIL and use_PIL:
_opener = _pil_open
else:
_opener = _nopil_open
self.data, self.size = _opener(filename)
def _create(self, size, color = (0, 0, 0)):
size = tuple(int(x) for x in size)
w, h = self.size = size
self.data = [color] * w * h
def _copy(self, image):
self.size = image.size
self.data = image.data[:]
def _get_index(self, loc):
# Convert an (x, y) pair to a 1-dimensional index
loc = tuple(int(x) for x in loc)
x, y = loc
w, h = self.size
return y * w + x
def getpixel(self, loc):
return self.data[self._get_index(loc)]
def putpixel(self, loc, color):
color = tuple(min(x, 255) for x in color)
self.data[self._get_index(loc)] = color
def temp_file(self):
handle, filename = tempfile.mkstemp()
self.save(filename)
os.close(handle)
temp_files.append(filename)
return filename
def _show_in_os(self):
# Save the image to a temporary file for another process to read
filename = self.temp_file()
if os.name == 'nt':
os.startfile(filename)
else:
# Assume we are on a mac and attempt to use the open command
retcode = subprocess.call(['open', filename])
if retcode is not 0:
# The open command failed, so assume we are on Linux
subprocess.call(['xdg-open', filename])
def show(self, default=False, wait=False):
# Open the image using the user's default imaging viewing application, cannot wait
if default or not _has_Tk:
self._show_in_os()
else:
# Open the file using our own image viewer
viewer = ImageViewer(self, wait)
def save(self, filename):
if _
|
GrahamDigital/django-scheduler
|
tests/test_utils.py
|
Python
|
bsd-3-clause
| 2,204
| 0.003176
|
import datetime
from django.test import TestCase
from django.utils import timezone
from schedule.models import Event, Rule, Calendar
from schedule.utils import EventListManager
class TestEventListManager(TestCase):
def setUp(self):
weekly = Rule.objects.create(frequency="WEEKLY")
daily = Rule.objects.create(frequency="DAILY")
cal = Calendar.objects.create(name="MyCal")
self.default_tzinfo = timezone.get_default_timezone()
self.event1 = Event(**{
'title': 'Weekly Event',
'start': datetime.datetime(2009, 4, 1, 8, 0, tzinfo=self.default_tzinfo),
'end': datetime.datetime(2009, 4, 1, 9, 0, tzinfo=self.default_tzinfo),
'end_recurring_period': datetime.datetime(2009, 10, 5, 0, 0, tzinfo=self.default_tzinfo),
'rule': weekly,
'calendar': cal
})
self.event1.save()
self.event2 = Event(**{
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=self.default_tzinfo),
'end': datetime.datetime(2008, 1, 5, 10, 0, tzinfo=self.default_tzinfo),
'end_recurring_period': datetime.datetime(2009, 5, 5, 0, 0, tzinfo=self.default_tzinfo),
'rule': daily,
'calendar': cal
})
self.event2.save()
def test_occurrences_after(self):
eml = EventListManager([self.event1, self.event2])
occurrences = eml.occurrences_after(datetime.datetime(2009, 4, 1, 0, 0, tzinfo=self.default_tzinfo))
self.assertEqual(next(occurrences).event, self.event1)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event1)
occurrences = eml.occurrences_after()
self.assertEqual(list(occurrences), [])
|
khughitt/ete
|
ete_dev/tools/utils.py
|
Python
|
gpl-3.0
| 6,239
| 0.010258
|
import re
import time
import readline
import os
# CONVERT shell colors to the same curses palette
SHELL_COLORS = {
"wr": '\033[1;37;41m', # white on red
"wo": '\033[1;37;43m', # white on orange
"wm": '\033[1;37;45m', # white on magenta
"wb": '\033[1;37;46m', # white on blue
"bw": '\033[1;37;40m', # black on white
"lblue": '\033[1;34m', # light blue
"lred": '\033[1;31m', # light red
"lgreen": '\033[1;32m', # light green
"yellow": '\033[1;33m', # yellow
"cyan": '\033[36m', # cyan
"blue": '\033[34m', # blue
"green": '\033[32m', # green
"orange": '\033[33m', # orange
"red": '\033[31m', # red
"magenta": "\033[35m", # magenta
"white": "\033[0m", # white
None: "\033[0m", # end
}
def color(string, color):
return "%s%s%s" %(SHELL_COLORS[color], string, SHELL_COLORS[None])
def clear_color(string):
return re.sub("\\033\[[^m]+m", "", string)
def print_table(items, header=None, wrap=True, max_col_width=20,
wrap_style="wrap", row_line=False, fix_col_width=False):
''' Prints a matrix of data as a human readable table. Matrix
should be a list of lists containing any type of values that can
be converted into text strings.
Two different column adjustment methods are supported through
the *wrap_style* argument:
wrap: it will wrap values to fit max_col_width (by extending cell height)
cut: it will strip values to max_col_width
If the *wrap* argument is set to False, column widths are set to fit all
values in each column.
This code is free software. Updates
|
can be found at
https://gist.github.com/jhcepas/5884168
# print_table([[3,2, {"whatever":1, "bla":[1,2]}], [5,"this is a test\n of wrapping text\n with the new function",777], [1,1,1]],
# header=[ "This is column number 1", "Column number 2", "col3"],
# wrap=True, max_col_width=15, wrap_style='wrap',
# row_line=True, fix_col_width=True)
# This is co
|
lumn | Column number 2 | col3
# number 1 | |
# =============== | =============== | ===============
# 3 | 2 | {'bla': [1, 2],
# | | 'whatever': 1}
# --------------- | --------------- | ---------------
# 5 | this is a test | 777
# | of |
# | wrapping text |
# | with the new |
# | function |
# --------------- | --------------- | ---------------
# 1 | 1 | 1
# =============== | =============== | ===============
'''
def safelen(string):
return len(clear_color(string))
if isinstance(fix_col_width, list):
c2maxw = dict([(i, fix_col_width[i]) for i in xrange(len(items[0]))])
wrap = True
elif fix_col_width == True:
c2maxw = dict([(i, max_col_width) for i in xrange(len(items[0]))])
wrap = True
elif not wrap:
c2maxw = dict([(i, max([safelen(str(e[i])) for e in items])) for i in xrange(len(items[0]))])
else:
c2maxw = dict([(i, min(max_col_width, max([safelen(str(e[i])) for e in items])))
for i in xrange(len(items[0]))])
if header:
current_item = -1
row = header
if wrap and not fix_col_width:
for col, maxw in c2maxw.iteritems():
c2maxw[col] = max(maxw, safelen(header[col]))
if wrap:
c2maxw[col] = min(c2maxw[col], max_col_width)
else:
current_item = 0
row = items[current_item]
while row:
is_extra = False
values = []
extra_line = [""]*len(row)
for col, val in enumerate(row):
cwidth = c2maxw[col]
wrap_width = cwidth
val = clear_color(str(val))
try:
newline_i = val.index("\n")
except ValueError:
pass
else:
wrap_width = min(newline_i+1, wrap_width)
val = val.replace("\n", " ", 1)
if wrap and safelen(val) > wrap_width:
if wrap_style == "cut":
val = val[:wrap_width-1]+"+"
elif wrap_style == "wrap":
extra_line[col] = val[wrap_width:]
val = val[:wrap_width]
val = val.ljust(cwidth)
values.append(val)
print ' | '.join(values)
if not set(extra_line) - set(['']):
if header and current_item == -1:
print ' | '.join(['='*c2maxw[col] for col in xrange(len(row)) ])
current_item += 1
try:
row = items[current_item]
except IndexError:
row = None
else:
row = extra_line
is_extra = True
if row_line and not is_extra and not (header and current_item == 0):
if row:
print ' | '.join(['-'*c2maxw[col] for col in xrange(len(row)) ])
else:
print ' | '.join(['='*c2maxw[col] for col in xrange(len(extra_line)) ])
def ask_filename(text):
readline.set_completer(None)
fname = ""
while not os.path.exists(fname):
fname = raw_input(text)
return fname
def ask(string,valid_values,default=-1,case_sensitive=False):
""" Asks for a keyborad answer """
v = None
if not case_sensitive:
valid_values = [value.lower() for value in valid_values]
while v not in valid_values:
v = raw_input("%s [%s]" % (string,','.join(valid_values) ))
if v == '' and default>=0:
v = valid_values[default]
if not case_sensitive:
v = v.lower()
return v
def timeit(f):
def a_wrapper_accepting_arguments(*args, **kargs):
t1 = time.time()
r = f(*args, **kargs)
print " ", f.func_name, time.time() - t1, "seconds"
return r
return a_wrapper_accepting_arguments
|
Zlash65/erpnext
|
erpnext/patches/v11_0/add_market_segments.py
|
Python
|
gpl-3.0
| 317
| 0.018927
|
from __future__ import unicod
|
e_literals
import frappe
from frappe import _
from erpnext.setup.setup_wizard.operations.install_fixtures import add_market_segments
def execute():
|
frappe.reload_doc('crm', 'doctype', 'market_segment')
frappe.local.lang = frappe.db.get_default("lang") or 'en'
add_market_segments()
|
DeflatedPickle/pkinter
|
pkinter_test.py
|
Python
|
mit
| 6,451
| 0.00093
|
import tkinter as tk
from tkinter import ttk
import pkinter as pk
root = tk.Tk()
menu = tk.Menu(root, type="menubar")
filemenu = tk.Menu(menu)
filemenu.add_command(label="New")
filemenu.add_command(label="Save")
menu.add_cascade(label="File", menu=filemenu)
helpmenu = tk.Menu(menu)
helpmenu.add_checkbutton(label="About")
helpmenu.add_separator()
helpmenu.add_checkbutton(label="Changelog")
menu.add_cascade(label="Help", menu=helpmenu)
root.configure(menu=menu)
##################################################
toolbar = pk.Toolbar(root)
toolbar.pack(side="top", fill="x")
button = toolbar.add_button(text="Button")
toolbar.add_separator()
checkbutton1 = toolbar.add_checkbutton(text="CheckButton 1")
checkbutton2 = toolbar.add_checkbutton(text="CheckButton 2")
toolbar.add_separator()
radiobutton1 = toolbar.add_radiobutton(text="RadioButton 1", value=0)
radiobutton2 = toolbar.add_radiobutton(text="RadioButton 2", value=1)
radiobutton3 = toolbar.add_radiobutton(text="RadioButton 3", value=2)
toolbar.add_separator()
##################################################
statusbar = pk.Statusbar(root)
statusbar.pack(side="bottom", fill="x")
variable = tk.StringVar()
statusbar.bind_widget(button, variable, "A Button", "")
statusbar.bind_widget(checkbutton1, variable, "A Checkbutton", "")
statusbar.bind_widget(checkbutton2, variable, "Another Checkbutton", "")
statusbar.bind_widget(radiobutton1, variable, "A Radiobutton", "")
statusbar.bind_widget(radiobutton2, variable, "Another Radiobutton", "")
statusbar.bind_widget(radiobutton3, variable, "A Third Radiobutton", "")
statusbar.bind_menu(menu, variable, ["Open the File menu.", "Open the Help menu."])
statusbar.bind_menu(filemenu, variable, ["Tear-off the menu.", "Create a new file.", "Save the current file."])
statusbar.bind_menu(helpmenu, variable, ["Tear-off the menu.", "Open the About window.", "", "Open the Changelog."])
statusbar.add_variable(variable=variable)
##################################################
frame = ttk.Frame(root)
frame.pack(fill="both")
##################################################
tlf = pk.ToggledLabelFrame(frame)
tlf.grid(row=0, column=0)
##################################################
for i in range(5):
ttk.Button(tlf.frame).pack()
ls = pk.LabeledSeparator(frame, text="LabeledSeparator")
ls.grid(row=0, column=1)
##################################################
rs = pk.RoundingScale(frame, from_=0, to=5)
rs.grid(row=0, column=2)
##################################################
et = pk.EntryText(frame, text="EntryText")
et.grid(row=1, column=0)
##################################################
le = pk.LimitedEntry(frame)
le.grid(row=1, column=1)
##################################################
cpb = pk.ColourPickerButton(frame)
cpb.grid(row=1, column=2)
##################################################
el = pk.EditableLabel(frame, text="EditableLabel")
el.grid(row=2, column=0)
##################################################
cp = pk.CollapsiblePane(frame)
cp.grid(row=2, column=1)
for i in range(5):
ttk.Button(cp.frame).pack()
##################################################
hl = pk.Hyperlink(frame, text="Hyperlink")
hl.grid(row=2, column=2)
##################################################
pv = pk.PageView(frame)
pv.grid(row=3, column=0)
frame1 = ttk.Frame(pv.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left"
|
)
frame2 = ttk.Frame(pv.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(pv.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
pv.add(child=frame1)
pv.add(child=frame2)
pv.add(child=frame3)
##################################################
def func():
print("Function")
bb = pk.BoundButton(frame, text="BoundButton", key="b", command=func)
bb
|
.grid(row=3, column=1)
##################################################
ve = pk.ValidEntry(frame, valid_list=["validentry", "validEntry", "Validentry", "ValidEntry"])
ve.grid(row=3, column=2)
##################################################
cb = pk.ChoiceBook(frame)
cb.grid(row=4, column=0)
frame1 = ttk.Frame(cb.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left")
frame2 = ttk.Frame(cb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(cb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
cb.add(child=frame1, label="Frame1")
cb.add(child=frame2, label="Frame2")
cb.add(child=frame3, label="Frame3")
##################################################
pe = pk.PasswordEntry(frame, cover_character="*")
pe.grid(row=4, column=1)
##################################################
iv = pk.InvalidEntry(frame, invalid_list=["invalidentry", "invalidEntry", "Invalidentry", "InvalidEntry"])
iv.grid(row=4, column=2)
##################################################
lb = pk.ListBook(frame)
lb.grid(row=5, column=0)
frame1 = ttk.Frame(lb.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left")
frame2 = ttk.Frame(lb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(lb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
lb.add(child=frame1, label="Frame1")
lb.add(child=frame2, label="Frame2")
lb.add(child=frame3, label="Frame3")
##################################################
al = pk.AccelLabel(frame, label_text="AccelLabel", accelerator_text="Ctrl+A")
al.grid(row=5, column=1)
##################################################
ib = pk.InfoBar(frame, title="InfoBar", info="Shows information.")
ib.grid(row=5, column=2)
##################################################
lb = pk.LockButton(frame)
lb.grid(row=6, column=0)
##################################################
tb = pk.ToggleButton(frame)
tb.grid(row=6, column=1)
##################################################
ss = pk.ScaleSwitch(frame)
ss.grid(row=6, column=2)
##################################################
bs = pk.ButtonSwitch(frame)
bs.grid(row=7, column=0)
##################################################
fp = pk.FilePicker(frame)
fp.grid(row=7, column=1)
##################################################
dp = pk.DirectoryPicker(frame)
dp.grid(row=7, column=2)
##################################################
pk.center_on_screen(root)
##################################################
tp = tk.Toplevel(root)
pk.center_on_parent(tp)
##################################################
root.mainloop()
|
misko/neon
|
neon/data/video.py
|
Python
|
apache-2.0
| 865
| 0
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You
|
may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language go
|
verning permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines video dataset handling.
"""
class Video(object):
def __init__(self):
raise NotImplementedError()
|
TechWritingWhiz/indy-node
|
indy_client/test/cli/conftest.py
|
Python
|
apache-2.0
| 46,041
| 0.000673
|
import json
import os
import re
import tempfile
from typing import List
import pytest
from plenum.common.signer_did import DidSigner
from indy_client.test.agent.acme import ACME_ID, ACME_SEED
from indy_client.test.agent.acme import ACME_VERKEY
from indy_client.test.agent.faber import FABER_ID, FABER_VERKEY, FABER_SEED
from indy_client.test.agent.thrift import THRIFT_ID, THRIFT_VERKEY, THRIFT_SEED
from indy_common.config_helper import NodeConfigHelper
from ledger.genesis_txn.genesis_txn_file_util import create_genesis_txn_init_ledger
from stp_core.crypto.util import randomSeed
from stp_core.network.port_dispenser import genHa
import plenum
from plenum.common import util
from plenum.common.constants import ALIAS, NODE_IP, NODE_PORT, CLIENT_IP, \
CLIENT_PORT, SERVICES, VALIDATOR, BLS_KEY, TXN_TYPE, NODE, NYM
from plenum.common.constants import CLIENT_STACK_SUFFIX
from plenum.common.exceptions import BlowUp
from plenum.common.signer_simple import SimpleSigner
from plenum.common.util import randomString
from plenum.test import waits
from plenum.test.test_node import checkNodesConnected, ensureElectionsDone
from plenum.test.conftest import txnPoolNodeSet, patchPluginManager, tdirWithNodeKeepInited
from stp_core.loop.eventually import eventually
from stp_core.common.log import getlogger
from plenum.test.conftest import tdirWithPoolTxns, tdirWithDomainTxns
from indy_client.cli.helper import USAGE_TEXT, NEXT_COMMANDS_TO_TRY_TEXT
from indy_client.test.helper import
|
createNym, buildStewardClient
from indy_common.constants import ENDPOINT, TRUST_ANCHOR
from indy_common.roles import Roles
from indy_common.test.conftest import poolTxnTrusteeNames
from indy_common.test.conftest import domainTxnOrderedFields
from indy_node.test.helper import TestNode
from plenum.common.keygen_utils import initNodeKeysForBothStacks
# plenum.common.util.loggingConfigured = False
from stp_core.loop.looper import Looper
from plenum
|
.test.cli.helper import newKeyPair, doByCtx
from indy_client.test.cli.helper import ensureNodesCreated, get_connection_request, \
getPoolTxnData, newCLI, getCliBuilder, P, prompt_is, addAgent, doSendNodeCmd, addNym
from indy_client.test.agent.conftest import faberIsRunning as runningFaber, \
acmeIsRunning as runningAcme, thriftIsRunning as runningThrift, emptyLooper,\
faberWallet, acmeWallet, thriftWallet, agentIpAddress, \
faberAgentPort, acmeAgentPort, thriftAgentPort, faberAgent, acmeAgent, \
thriftAgent, faberBootstrap, acmeBootstrap
from indy_client.test.cli.helper import connect_and_check_output
from indy_common.config_helper import ConfigHelper
from stp_core.crypto.util import randomSeed
@pytest.fixture("module")
def ledger_base_dir(tconf):
return tconf.CLI_NETWORK_DIR
@pytest.yield_fixture(scope="session")
def cliTempLogger():
file_name = "indy_cli_test.log"
file_path = os.path.join(tempfile.tempdir, file_name)
with open(file_path, 'w'):
pass
return file_path
@pytest.yield_fixture(scope="module")
def looper():
with Looper(debug=False) as l:
yield l
@pytest.fixture("module")
def cli(looper, client_tdir):
return newCLI(looper, client_tdir)
@pytest.fixture(scope="module")
def newKeyPairCreated(cli):
return newKeyPair(cli)
@pytest.fixture(scope="module")
def CliBuilder(tdir, tdirWithPoolTxns, tdirWithDomainTxnsUpdated,
txnPoolNodesLooper, tconf, cliTempLogger):
return getCliBuilder(
tdir,
tconf,
tdirWithPoolTxns,
tdirWithDomainTxnsUpdated,
logFileName=cliTempLogger,
def_looper=txnPoolNodesLooper)
def getDefaultUserMap(name):
return {
'wallet-name': name,
}
@pytest.fixture(scope="module")
def aliceMap():
return getDefaultUserMap("Alice")
@pytest.fixture(scope="module")
def earlMap():
return getDefaultUserMap("Earl")
@pytest.fixture(scope="module")
def bobMap():
return getDefaultUserMap("Bob")
@pytest.fixture(scope="module")
def susanMap():
return getDefaultUserMap("Susan")
@pytest.fixture(scope="module")
def faberMap(agentIpAddress, faberAgentPort):
ha = "{}:{}".format(agentIpAddress, faberAgentPort)
return {'inviter': 'Faber College',
'invite': "sample/faber-request.indy",
'invite-not-exists': "sample/faber-request.indy.not.exists",
'inviter-not-exists': "non-existing-inviter",
'seed': FABER_SEED.decode(),
"remote": FABER_ID,
"remote-verkey": FABER_VERKEY,
"nonce": "b1134a647eb818069c089e7694f63e6d",
ENDPOINT: ha,
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': ' 127.0.0.1:11'}}),
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"claims": "Transcript",
"claim-to-show": "Transcript",
"proof-req-to-match": "Transcript",
'wallet-name': 'Faber'}
@pytest.fixture(scope="module") # noqa
def acmeMap(agentIpAddress, acmeAgentPort):
ha = "{}:{}".format(agentIpAddress, acmeAgentPort)
return {'inviter': 'Acme Corp',
ENDPOINT: ha,
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': '127.0.0.1: 11'}}),
'invite': 'sample/acme-job-application.indy',
'invite-no-pr': 'sample/acme-job-application-no-pr.indy',
'invite-not-exists': 'sample/acme-job-application.indy.not.exists',
'inviter-not-exists': 'non-existing-inviter',
'seed': ACME_SEED.decode(),
"remote": ACME_ID,
"remote-verkey": ACME_VERKEY,
'nonce': '57fbf9dc8c8e6acde33de98c6d747b28c',
'proof-requests': 'Job-Application',
'proof-request-to-show': 'Job-Application',
'claim-ver-req-to-show': '0.2',
'proof-req-to-match': 'Job-Application',
'claims': '<claim-name>',
'rcvd-claim-transcript-provider': 'Faber College',
'rcvd-claim-transcript-name': 'Transcript',
'rcvd-claim-transcript-version': '1.2',
'send-proof-target': 'Alice',
'pr-name': 'Job-Application',
'pr-schema-version': '0.2',
'wallet-name': 'Acme'}
@pytest.fixture(scope="module") # noqa
def thriftMap(agentIpAddress, thriftAgentPort):
ha = "{}:{}".format(agentIpAddress, thriftAgentPort)
return {'inviter': 'Thrift Bank',
'invite': "sample/thrift-loan-application.indy",
'invite-not-exists': "sample/thrift-loan-application.indy.not.exists",
'inviter-not-exists': "non-existing-inviter",
'seed': THRIFT_SEED.decode(),
"remote": THRIFT_ID,
"remote-verkey": THRIFT_VERKEY,
"nonce": "77fbf9dc8c8e6acde33de98c6d747b28c",
ENDPOINT: ha,
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': '127.0.0.1:4A78'}}),
"proof-requests": "Loan-Application-Basic, Loan-Application-KYC",
"rcvd-claim-job-certificate-name": "Job-Certificate",
"rcvd-claim-job-certificate-version": "0.2",
"rcvd-claim-job-certificate-provider": "Acme Corp",
"claim-ver-req-to-show": "0.1",
'wallet-name': 'Thrift'}
@pytest.fixture(scope="module")
def loadInviteOut(nextCommandsToTryUsageLine):
return ["1 connection request found for {inviter}.",
"Creating connection for {inviter}.",
''] + \
nextCommandsToTryUsageLine + \
[' show connection "{inviter}"',
' accept request from "{inviter}"',
'',
'']
@pytest.fixture(scope="module")
def fileNotExists():
return ["Given file does not exist"]
@pytest.fixture(scope="module")
def canNotSyncMsg():
return ["Cannot sync because not connected"]
@pytest.fixture(scope="module")
def syncWhenNotConnected(canNotSyncMsg, connectUsage):
return canNotSyncMsg + connectUsage
@pytest.fixture(scope="module")
def canNotAcceptMsg():
return ["Cannot accept because not connected"]
@pytest.fixture(scope
|
Lorquas/subscription-manager
|
src/rhsmlib/dbus/util.py
|
Python
|
gpl-2.0
| 2,488
| 0.001608
|
from __future__ import print_function, division, absolute_import
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
import logging
import sys
import six
import decorator
import dbus.service
import json
import re
from rhsmlib.dbus import exceptions
log = logging.getLogger(__name__)
__all__ = [
'dbus_handle_exceptions',
'dbus_service_method',
'dbus_service_signal'
]
@decorator.decorator
def dbus_handle_exceptions(func, *args, **kwargs):
"""Decorator to handle exceptions, log them, and wrap them if necessary"""
try:
ret = func(*args, **kwargs)
return ret
except Exception as err:
log.exception(err)
trace = sys.exc_info()[2]
severity = "error"
# Remove "HTTP error (...): " string from the messages:
pattern = '^HTTP error \x28.*\x29: '
err_msg = re.sub(pattern, '', str(err))
# Modify severity of some exception here
if "Ignoring request to auto-attach. It is disabled for org" in err_msg:
|
severity = "warning"
if hasattr(err, 'severity'):
severity = err.severity
# Raise exception string as JSON string. Thus it can be parsed and printed properly.
error_msg = json.dumps(
{
"exception": type(er
|
r).__name__,
"severity": severity,
"message": err_msg
}
)
six.reraise(exceptions.RHSM1DBusException, exceptions.RHSM1DBusException(error_msg), trace)
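# Illustrative note added for clarity (example values, not from the upstream
# file): the re-raised RHSM1DBusException carries a JSON string so D-Bus
# clients can parse it, e.g.
#   {"exception": "ValueError", "severity": "error", "message": "bad input"}
# with "severity" downgraded to "warning" for the auto-attach-disabled case
# handled above.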
def dbus_service_method(*args, **kwargs):
# Tell python-dbus that "sender" will be the keyword to use for the sender unless otherwise
# defined.
kwargs.setdefault("sender_keyword", "sender")
return dbus.service.method(*args, **kwargs)
def dbus_service_signal(*args, **kwargs):
"""
Decorator used for signal
:param args:
:param kwargs:
:return:
"""
return dbus.service.signal(*args, **kwargs)
|
DistributedSystemsGroup/zoe
|
zoe_master/metrics/kairosdb.py
|
Python
|
apache-2.0
| 4,088
| 0.001223
|
# Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF A
|
NY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves metrics about services from KairosDB."""
|
from datetime import datetime, timedelta
import logging
import requests
from zoe_lib.config import get_conf
log = logging.getLogger(__name__)
class KairosDBInMetrics:
"""KairosDB metrics."""
def __init__(self):
self.base_url = get_conf().kairosdb_url
self.tags_url = self.base_url + '/api/v1/datapoints/query/tags'
self.metrics_url = self.base_url + '/api/v1/datapoints/query'
self.list_metrics_url = self.base_url + '/api/v1/metricnames'
def _prepare_query(self):
query = {
'time_zone': 'UTC',
'metrics': []
}
self._add_time_range(query)
return query
def _add_time_range(self, query, minutes_from_now=10):
end = datetime.now()
start = end - timedelta(minutes=minutes_from_now)
query['start_absolute'] = int(start.timestamp() * 1000)
query['end_absolute'] = int(end.timestamp() * 1000)
def _add_metric(self, query, metric_name: str, tags, aggregators, limit: int):
metric = {
'name': metric_name,
}
if tags is not None:
metric['tags'] = tags
if aggregators is not None:
metric['aggregators'] = aggregators
if limit > 0:
metric['limit'] = limit
query['metrics'].append(metric)
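    # Illustrative sketch added for clarity (example values, not from the
    # upstream file): after _prepare_query() and one _add_metric() call, the
    # query document posted to KairosDB looks roughly like
    #   {"time_zone": "UTC",
    #    "start_absolute": 1514764200000, "end_absolute": 1514764800000,
    #    "metrics": [{"name": "docker_container_cpu",
    #                 "tags": {...}, "aggregators": [...]}]}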
def get_service_usage(self, service_id):
"""Query the DB for the current usage metrics."""
query = self._prepare_query()
tags_cpu = {
"field": ["usage_percent"],
"zoe_service_id": service_id
}
aggregators_cpu = [
{"name": "scale", "factor": "0.01"},
{"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
]
self._add_metric(query, "docker_container_cpu", tags_cpu, aggregators_cpu, limit=0)
tags_memory = {
"field": ["usage"],
"zoe_service_id": service_id
}
aggregators_memory = [
{"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
]
self._add_metric(query, "docker_container_mem", tags_memory, aggregators_memory, limit=0)
try:
req = requests.post(self.metrics_url, json=query)
except requests.exceptions.ConnectionError:
return None
return self._extract_data(req)
def _extract_data(self, response):
if response is None:
return None
if response.status_code != 200:
error_msg = ''
for error in response.json()['errors']:
error_msg += ' {}'.format(error)
log.error('kairosdb query error: {}'.format(error_msg))
return None
else:
data = response.json()
cpu_results = data['queries'][0]
mem_results = data['queries'][1]
if cpu_results['sample_size'] > 0:
assert len(cpu_results['results']) == 1
cpu_usage = cpu_results['results'][0]['values'][-1][1]
else:
cpu_usage = 0
if mem_results['sample_size'] > 0:
assert len(mem_results['results']) == 1
mem_usage = mem_results['results'][0]['values'][-1][1]
else:
mem_usage = 0
return {
'cpu_usage': cpu_usage,
'mem_usage': mem_usage
}
|
lyoniionly/django-cobra
|
src/cobra/apps/summary/admin.py
|
Python
|
apache-2.0
| 76
| 0.013158
|
from d
|
jango.contrib import admin
from cobra.core.loading import get_model
| |
mablae/weblate
|
weblate/trans/tests/test_suggestions.py
|
Python
|
gpl-3.0
| 7,857
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for suggestion views.
"""
from weblate.trans.models.unitdata import Suggestion
from weblate.trans.tests.test_views import ViewTestCase
class SuggestionsTest(ViewTestCase):
def add_suggestion_1(self):
return self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
suggest='yes'
)
def add_suggestion_2(self):
return self.edit_unit(
'Hello, world!\n',
'Ahoj svete!\n',
suggest='yes'
)
def test_add(self):
translate_url = self.get_translation().get_translate_url()
# Try empty suggestion (should not be added)
response = self.edit_unit(
'Hello, world!\n',
'',
suggest='yes'
)
# We should stay on same message
self.assertRedirectsOffset(response, translate_url, 0)
# Add first suggestion
response = self.add_suggestion_1()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Add second suggestion
response = self.add_suggestion_2()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 2)
def test_delete(self):
translate_url = self.get_translation().get_translate_url()
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Delete one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
delete=suggestions[0],
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept(self):
translate_url = self.get_translation().get_translate_url()
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[1],
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
# Unit should be tr
|
anslated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Ahoj svete!\n')
self.assertBackend(1)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept_anonymous(self):
translate_url = self.get_translation().get_translate_url()
self.client.logout()
# Create suggestions
self.add_su
|
ggestion_1()
self.client.login(username='testuser', password='testpassword')
# Get ids of created suggestion
suggestions = list(self.get_unit().suggestions())
self.assertEqual(len(suggestions), 1)
self.assertIsNone(suggestions[0].user)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[0].pk,
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(unit.target, 'Nazdar svete!\n')
def test_vote(self):
translate_url = self.get_translation().get_translate_url()
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 0
self.subproject.save()
self.add_suggestion_1()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
1
)
response = self.edit_unit(
'Hello, world!\n',
'',
downvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
-1
)
def test_vote_autoaccept(self):
self.add_suggestion_1()
translate_url = self.get_translation().get_translate_url()
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 1
self.subproject.save()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertBackend(1)
|
srgblnch/LinacGUI
|
ctli/widgets/actionform.py
|
Python
|
gpl-3.0
| 2,316
| 0.000864
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"
import os
import sys
from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.util.ui import UILoadable
import traceback
@UILoadable(with_ui="_ui")
class ActionForm(TaurusWidget):
def __init__(self, parent=None, name=None, designMode=False):
try:
self.__name = name.__name__
except:
self.__name = "ActionForm"
super(ActionForm, self).__init__(parent, designMode=designMode)
try:
self.debug("[%s]__init__()" % (self.__name))
basePath = os.path.dirname(__file__)
if len(basePath) == 0:
basePath = '.'
self.loadUi(filename="actionWidget.ui",
path=basePath+"/ui")
except Exception as e:
self.warning("[%s]__init__(): Widget exception! %s"
% (self.__name, e))
traceback.print_exc()
self.traceback()
@classmethod
def getQtDesignerPluginInfo(cls):
ret = TaurusWidget.getQtDesignerPluginInfo()
ret['module'] = 'actionform'
ret['gr
|
oup'] = 'Taurus Linac Widge
|
ts'
        ret['icon'] = ':/designer/dialogbuttonbox.png'
ret['container'] = False
return ret
def main():
app = Qt.QApplication(sys.argv)
w = ActionForm()
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
Conan-Kudo/bodhi
|
bodhi/tests/server/views/__init__.py
|
Python
|
gpl-2.0
| 817
| 0.001225
|
# Copyright © 2018 Red Hat, Inc.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribut
|
e it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or
|
FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Test the bodhi.server.views package."""
|
zxsted/Passage
|
passage/inits.py
|
Python
|
mit
| 701
| 0.007133
|
import numpy as np
import theano
import theano.tensor as T
from theano_utils import sharedX, floatX, intX
def uniform(shape, scale=0.05):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def normal
|
(shape, scale=0.05):
return sharedX(np.random.randn(*shape) * scale)
def o
|
rthogonal(shape, scale=1.1):
""" benanne lasagne ortho init (faster than qr approach)"""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(scale * q[:shape[0], :shape[1]])
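# Illustrative usage sketch added for clarity (shapes are example values, not
# from the upstream file); sharedX, imported above, is assumed to wrap the
# generated numpy array in a theano shared variable:
#   W_in  = uniform((500, 256))      # flat init in [-0.05, 0.05]
#   W_rec = orthogonal((256, 256))   # orthonormal recurrent weights, scaled by 1.1
#   W_out = normal((256, 10))        # gaussian init with std 0.05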
|
mrozo/PyNas
|
PyNAS.py
|
Python
|
bsd-3-clause
| 839
| 0.001192
|
try:
from DevelopmentConfig import NasConf
print("Loaded DevelopementConf file")
except ImportError:
from Config import NasConf
print("Loaded Conf file")
from ConfigParser import config_parser_class_tests, ConfigParser
from Partition import partition_class_tests
from Disk import disk_class_tests
__author_
|
_ = 'm'
# todo .gitignore
# todo learn to write proper unit tests
def py_nas_tests():
try:
config = ConfigParser(NasConf)
except Exception as E:
assert False, 'Failed to parse NasConfig\n' + str(E)
assert partition_class_tests(), 'Partition class tests have failed.'
assert disk_class_tests(), 'Disk class tests have failed.'
assert config_parser_class_tests(), 'Config parser tests have failed'
# todo parted tests
# todo hdparm tests
py_nas_tests()
# todo blkid wrapper
|
|
bigmassa/django_mail_save
|
mail_save/urls.py
|
Python
|
mit
| 238
| 0.004202
|
fr
|
om django.conf.urls import patterns, url
from .views import EmailAlternativeView
urlpatterns = patterns(
'',
url(r'^email_alternative/(?P<pk>\d+)/$',
EmailAlternativeView.as_v
|
iew(),
name='email_alternative'),
)
|
numericube/twistranet
|
twistranet/twistapp/lib/slugify.py
|
Python
|
agpl-3.0
| 1,520
| 0.006579
|
import re
import unicodedata
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
# List o
|
f words you're not allowed to use as a slu
|
g
RESERVED_KEYWORDS = [
"account",
"add_to_network",
"cache",
"configuration",
"content",
"comment",
"create",
"delete",
"download",
"id",
"invitations",
"join",
"media",
"media_resource",
"menu_builder",
"new",
"resource",
"remove_from_network",
"search",
"static",
"twistranet",
"twistable",
]
rsvd_kw = "$|".join(RESERVED_KEYWORDS)
SLUG_REGEX = r"(?!%s$)[a-zA-Z_][a-zA-Z0-9_\-\.]*" % rsvd_kw # XXX TODO: The . must not be last character in the slug
FULL_SLUG_REGEX = "^%s$" % SLUG_REGEX
def slugify(value):
"""
    Transform a string value into a 50-character slug
"""
if not isinstance(value, unicode):
# Double-check against invalid encodings
value = unicode(value, errors = 'ignore')
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('\s+', '_', value))
value = unicode(re.sub('[.@]', '_', value))
value = unicode(re.sub('[^\w\s_-]', '', value).strip().lower())
# If value starts with numbers, prefix it
if re.match(r"[0-9]", value):
value = u"_%s" % value
# Double-check if we've slugified this correctly
if not re.search(FULL_SLUG_REGEX, value):
return slugify(u"%s0" % value)
return value[:50]
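# Illustrative examples added for clarity (hand-traced, not from the upstream
# file), following the substitutions above:
#   slugify(u"Hello, World!")       -> u"hello_world"
#   slugify(u"[email protected]")   -> u"my_name_example_org"
#   slugify(u"42 answers")          -> u"_42_answers"   # digit-leading slugs get a prefix
#   slugify(u"account")             -> u"account0"      # reserved keywords are suffixed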
|
google-research/augmix
|
imagenet.py
|
Python
|
apache-2.0
| 15,187
| 0.008692
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on ImageNet.
Currently only supports ResNet-50 training.
Example usage:
`python imagenet.py <path/to/ImageNet> <path/to/ImageNet-C>`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import models
from torchvision import transforms
augmentations.IMAGE_SIZE = 224
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__') and
callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Trains an ImageNet Classifier')
parser.add_argument(
'clean_data', metavar='DIR', help='path to clean ImageNet dataset')
parser.add_argument(
'corrupted_data', metavar='DIR_C', help='path to ImageNet-C dataset')
parser.add_argument(
'--model',
'-m',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=90, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=256, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0001,
help='Weight decay (L2 penalty).')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=1,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--aug-prob-coeff',
default=1.,
type=float,
help='Probability distribution coefficients')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=10,
help='Training loss print frequency (batches).')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
# Raw AlexNet errors taken from https://github.com/hendrycks/robustness
ALEXNET_ERR = [
0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,
0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,
0.606500
]
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs."""
b = args.batch_size / 256.
k = args.epochs // 3
if epoch < k:
m = 1
elif epoc
|
h < 2 * k:
m = 0.1
else:
m = 0.01
lr = args.learning_rate * m * b
for param_group in optimizer.param_groups:
param_group['lr'] = lr
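# Worked example added for clarity (uses the declared defaults, not output from
# a real run): with --epochs 90, --batch-size 256 and --learning-rate 0.1,
# k = 30 and b = 1.0, so the schedule is
#   epochs  0-29: lr = 0.1 * 1    * 1.0 = 0.1
#   epochs 30-59: lr = 0.1 * 0.1  * 1.0 = 0.01
#   epochs 60-89: lr = 0.1 * 0.01 * 1.0 = 0.001
# A batch size of 512 would double each value (b = 2.0).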
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k."""
|
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def compute_mce(corruption_accs):
"""Compute mCE (mean Corruption Error) normalized by AlexNet performance."""
mce = 0.
for i in range(len(CORRUPTIONS)):
avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])
ce = 100 * avg_err / ALEXNET_ERR[i]
mce += ce / 15
return mce
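# Worked example added for clarity (hypothetical numbers, not from a real run):
# if the model's average error on 'gaussian_noise' is 0.60 while AlexNet's is
# 0.886428, that corruption contributes 100 * 0.60 / 0.886428 / 15 ~= 4.51
# points to the mCE; summing the analogous term over all 15 corruptions gives
# the reported score.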
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(
np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width))
m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
def train(net, train_loader, optimizer):
"""Train for one epoch."""
net.train()
data_ema = 0.
batch_ema = 0.
loss_ema = 0.
acc1_ema = 0.
acc5_ema = 0.
end = time.time()
for i, (images, targets) in enumerate(train_loader):
# Compute data loading time
data_time = time.time() - end
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_
|
asvetlov/europython2015
|
timerfd.py
|
Python
|
apache-2.0
| 2,878
| 0.001042
|
import asyncio
import ctypes
import os
import time
import unittest
import sys
clib = ctypes.CDLL('libc.so.6', use_errno=True)
class timespec(ctypes.Structure):
_fields_ = [('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long)]
class itimerspec(ctypes.Structure):
_fields_ = [('it_interval', timespec),
('it_value', timespec)]
timerfd_create = clib.timerfd_create
timerfd_create.argtypes = [ctypes.c_int, ctypes.c_int]
|
timerfd_settime = clib.timerfd_settime
timerfd_settime.argtypes = [ctypes.c_int, ctypes.c_int,
ctypes.POINTER(itimerspec),
ctypes.POINTER(itimerspec)]
TFD_NON
|
BLOCK = os.O_NONBLOCK
CLOCK_MONOTONIC = time.CLOCK_MONOTONIC
class Timer:
def __init__(self, *, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._fileno = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK)
self._loop = loop
loop.add_reader(self._fileno, self._reader)
self._waiter = None
def close(self):
self._loop.remove_reader(self._fileno)
os.close(self._fileno)
def start(self, timeout):
assert self._waiter is None, self._waiter
secs = int(timeout)
nsecs = int((timeout - secs) * 1000000)
param = itimerspec()
param.it_value.tv_sec = secs
param.it_value.tv_nsec = nsecs
param.it_interval.tv_sec = 0
param.it_interval.tv_nsec = 0
timerfd_settime(self._fileno, 0, ctypes.byref(param), None)
self._waiter = asyncio.Future(loop=self._loop)
def _reader(self):
try:
data = os.read(self._fileno, 8)
except BlockingIOError:
return
else:
if self._waiter.done():
return
else:
self._waiter.set_result(int.from_bytes(data, sys.byteorder))
@asyncio.coroutine
def wait(self):
assert self._waiter is not None
try:
ret = yield from self._waiter
return ret
finally:
self._waiter = None
class TestTimer(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_ctor(self):
timer = Timer(loop=self.loop)
self.assertIs(self.loop, timer._loop)
timer.close()
def test_wait(self):
timer = Timer(loop=self.loop)
@asyncio.coroutine
def go():
timer.start(0.5)
t0 = self.loop.time()
ret = yield from timer.wait()
t1 = self.loop.time()
self.assertGreater(0.5, t1-t0)
self.assertEqual(1, ret)
self.loop.run_until_complete(go())
timer.close()
if __name__ == '__main__':
unittest.main()
|
chunfengh/seq2seq
|
bin/tools/generate_toy_data.py
|
Python
|
apache-2.0
| 5,150
| 0.00913
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to generate various toy datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import numpy as np
import io
PARSER = argparse.ArgumentParser(description="Generates toy datasets.")
PARSER.add_argument(
"--vocab_size", type=int, default=100, help="size of the vocabulary")
PARSER.add_argument(
"--num_examples", type=int, default=10000, help="number of examples")
PARSER.add_argument(
"--min_len", type=int, default=5, help="minimum sequence length")
PARSER.add_argument(
"--max_len", type=int, default=40, help="maximum sequence length")
PARSER.add_argument(
"--type",
type=str,
default="copy",
choices=["copy", "reverse"],
help="Type of dataet to generate. One of \"copy\" or \"reverse\"")
PARSER.add_argument(
"--output_dir",
type=str,
help="path to the output directory",
required=True)
ARGS = PARSER.parse_args()
VOCABULARY = li
|
st([str(x) for x in range(ARGS.vocab_size - 1)])
# VOCABULARY += ["笑"]
def get_target_token(source_tokens):
num_odd = 0
num_even = 0
for token in source_tokens:
if int(token) % 2 == 0:
num_even += 1
else:
num_odd += 1
if num_even == num_odd:
return "EQUAL"
elif num_even > num_
|
odd:
return "EVEN"
else:
return "ODD"
def make_copy(num_examples, min_len, max_len):
"""
  Generates a dataset where the target is a parity label ("EQUAL", "EVEN" or
  "ODD") for the even/odd token counts of the source; the original copy
  behavior (target equal to the source) is kept commented out below.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
### Backup for old copy data generation
# for _ in range(num_examples):
# turn_length = np.random.choice(np.arange(min_len, max_len + 1))
# source_tokens = np.random.choice(
# list(VOCABULARY), size=turn_length, replace=True)
# target_tokens = source_tokens
# yield " ".join(source_tokens), " ".join(target_tokens)
#
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_token = get_target_token(source_tokens)
yield " ".join(source_tokens), target_token
def make_reverse(num_examples, min_len, max_len):
"""
Generates a dataset where the target is equal to the source reversed.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens[::-1]
yield " ".join(source_tokens), " ".join(target_tokens)
def write_parallel_text(sources, targets, output_prefix):
"""
Writes two files where each line corresponds to one example
- [output_prefix].sources.txt
- [output_prefix].targets.txt
Args:
sources: Iterator of source strings
targets: Iterator of target strings
output_prefix: Prefix for the output file
"""
source_filename = os.path.abspath(os.path.join(output_prefix, "sources.txt"))
target_filename = os.path.abspath(os.path.join(output_prefix, "targets.txt"))
with io.open(source_filename, "w", encoding='utf8') as source_file:
for record in sources:
source_file.write(record + "\n")
print("Wrote {}".format(source_filename))
with io.open(target_filename, "w", encoding='utf8') as target_file:
for record in targets:
target_file.write(record + "\n")
print("Wrote {}".format(target_filename))
def main():
"""Main function"""
if ARGS.type == "copy":
generate_fn = make_copy
elif ARGS.type == "reverse":
generate_fn = make_reverse
# Generate dataset
examples = list(generate_fn(ARGS.num_examples, ARGS.min_len, ARGS.max_len))
try:
os.makedirs(ARGS.output_dir)
except OSError:
if not os.path.isdir(ARGS.output_dir):
raise
# Write train data
train_sources, train_targets = zip(*examples)
write_parallel_text(train_sources, train_targets, ARGS.output_dir)
if __name__ == "__main__":
main()
|
ezotrank/wheezy.template
|
src/wheezy/template/tests/test_engine.py
|
Python
|
mit
| 1,081
| 0
|
""" Unit tests for ``wheezy.templates.engine.Engine``.
"""
import unittest
class EngineTestCase(unittest.TestCase):
""" Test the ``Engine``.
"""
def setUp(self):
from wheezy.template.engine import Engine
from wheezy.template.loader import DictLoader
self.engine = Engine(
loader=DictLoader(templates={}),
extensions=[])
|
def test_template_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.get_template('x'))
def tes
|
t_import_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.import_name('x'))
def test_remove_unknown_name(self):
""" Invalidate name that is not known to engine.
"""
self.engine.remove('x')
def test_remove_name(self):
""" Invalidate name that is known to engine.
"""
self.engine.templates['x'] = 'x'
self.engine.renders['x'] = 'x'
self.engine.modules['x'] = 'x'
self.engine.remove('x')
|
pinard/TweeTabs
|
TweeTabs/Tab.py
|
Python
|
gpl-2.0
| 14,849
| 0.001617
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2009 Progiciels Bourbeau-Pinard inc.
# François Pinard <[email protected]>, 2009.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY
|
; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a co
|
py of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
"""\
A Twitter reader and personal manager - Tab structures.
"""
__metaclass__ = type
import atexit, gtk, re, sys
import Common, Scheduler, Strip
class Error(Common.Error):
pass
## Base types.
class Tab:
ordinal = 0
registry = {}
name_base = None
name = None
strip_type = None
frozen = False
hidden = False
# Values are False, True and 2 (another True for complement sets)
selected = False
def __init__(self, *inputs):
Tab.ordinal += 1
self.ordinal = Tab.ordinal
Tab.registry[self.ordinal] = self
self.inputs = []
self.added = set()
self.deleted = set()
self.outputs = set()
self.strips = set()
self.visible_strip = {}
self.create_widget()
if self.name_base is not None:
self.set_name(self.name_base)
for input in inputs:
self.add_input(input)
self.goto()
# Not sure why this is still needed here.
self.refresh()
def __str__(self):
return type(self).__name__ + ' ' + (self.name or str(self.ordinal))
def set_name(self, name):
if self.name is None:
del Tab.registry[self.ordinal]
else:
del Tab.registry[self.name]
del self.name
if name is None:
Tab.registry[self.ordinal] = self
else:
if name in Tab.registry:
match = re.match('(.*)([0-9]+)$', name)
if match:
name_base = match.group(1)
counter = int(match.group(2))
else:
name_base = name
counter = 1
counter += 1
name = name_base + str(counter)
while name in Tab.registry:
counter += 1
name = name_base + str(counter)
self.name = name
Tab.registry[name] = self
self.name = name
self.update_tab_label()
def close(self):
for input in self.inputs:
input.outputs.discard(self)
self.inputs = []
for output in list(self.outputs):
self.discard_output(output)
self.strips = set()
def goto(self):
page = Common.gui.notebook_widget.page_num(self.widget)
if page >= 0:
Common.gui.notebook_widget.set_current_page(page)
def select(self, complement=False):
if complement:
wanted = 2
else:
wanted = True
if self.selected != wanted:
self.selected = wanted
if self.hidden:
self.unhide()
else:
self.update_tab_label()
def unselect(self):
if self.selected:
self.selected = False
self.update_tab_label()
def freeze(self):
if not self.frozen:
self.frozen = True
self.update_tab_label()
def unfreeze(self):
if self.frozen:
self.frozen = False
self.refresh()
self.update_tab_label()
def hide(self):
if not self.hidden:
page = Common.gui.notebook_widget.page_num(self.widget)
assert page >= 0, self
Common.gui.notebook_widget.remove_page(page)
self.undisplay_strips(self.strips)
self.hidden = True
def unhide(self):
if self.hidden:
Common.gui.notebook_widget.append_page(self.widget, gtk.Label())
Common.gui.notebook_widget.set_tab_reorderable(self.widget, True)
self.display_strips(self.strips)
self.hidden = False
def add_input(self, tab):
if self.strip_type is None:
self.strip_type = tab.strip_type
elif not issubclass(tab.strip_type, self.strip_type):
raise Error("%s is not made of %s strips"
% (tab, self.strip_type.__name__))
tab.add_output(self)
def discard_input(self, tab):
tab.discard_output(self)
def add_output(self, tab):
self.outputs.add(tab)
if self not in tab.inputs:
tab.inputs.append(self)
if not tab.frozen:
tab.refresh()
def discard_output(self, tab):
self.outputs.discard(tab)
if self in tab.inputs:
tab.inputs.remove(self)
if not tab.frozen:
tab.refresh()
def refresh(self):
strips = (self.recomputed_strips() | self.added) - self.deleted
self.discard_strips(self.strips - strips)
self.add_strips(strips)
def recomputed_strips(self):
# Shall be defined in derived classes.
raise NotImplementedError
def allowable_strips(self, strips):
# Shall be defined in derived classes.
raise NotImplementedError
def add_strips(self, strips):
strips = self.allowable_strips(strips) - self.strips
self.strips |= strips
for output in self.outputs:
if not output.frozen:
output.add_strips(strips)
if not self.hidden:
self.display_strips(strips)
return strips
def discard_strips(self, strips):
strips = strips & self.strips
self.strips -= strips
for output in self.outputs:
if not output.frozen:
output.discard_strips(strips)
if not self.hidden:
self.undisplay_strips(strips)
return strips
def display_strips(self, strips):
Scheduler.Thread(self.display_strips_thread(strips), self)
def display_strips_thread(self, strips):
for counter, strip in enumerate(sorted(strips)):
if counter % 10 == 0 and counter:
self.update_tab_label()
yield 0
visible_strip = strip.visible_maker(self, strip)
self.visible_strip[strip] = visible_strip
self.tab_vbox.pack_start(visible_strip.widget, False, False)
self.update_tab_label()
def undisplay_strips(self, strips):
Scheduler.Thread(self.undisplay_strips_thread(strips), self)
def undisplay_strips_thread(self, strips):
for counter, strip in enumerate(reversed(sorted(strips))):
if counter % 10 == 0 and counter:
self.update_tab_label()
yield 0
self.tab_vbox.remove(self.visible_strip[strip].widget)
del self.visible_strip[strip]
self.update_tab_label()
def create_widget(self):
window = gtk.ScrolledWindow()
window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
vbox = self.tab_vbox = gtk.VBox(False, Common.gui.spacing)
window.add_with_viewport(vbox)
window.show_all()
Common.gui.notebook_widget.append_page(window, gtk.Label())
Common.gui.notebook_widget.set_tab_reorderable(window, True)
self.widget = window
def update_tab_label(self):
text = '<span'
if self.selected:
if self.selected == 2:
text += ' foreground="' + Common.gui.select2_color + '"'
else:
text += ' foreground="' + Common.gui.select_color + '"'
if self.name is None:
name = '%d' % self.ordinal
text += ' style
|
tomer8007/kik-bot-api-unofficial
|
kik_unofficial/protobuf/messagepath/v1/visibility_rules_pb2.py
|
Python
|
mit
| 5,997
| 0.00617
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messagepath/v1/visibility_rules.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
from kik_unofficial.protobuf.common.v1 import model_pb2 as common_dot_v1_dot_model__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messagepath/v1/visibility_rules.proto',
package='common.messagepath.v1',
syntax='proto3',
serialized_pb=_b('\n%messagepath/v1/visibility_rules.proto\x12\x15\x63ommon.messagepath.v1\x1a\x12\x63ommon_model.proto\x1a\x15\x63ommon/v1/model.proto\x1a\x19protobuf_validation.proto\"\xbd\x02\n\x19VisibilityRulesAttachment\x12\x32\n\tinitiator\x18\x01 \x01(\x0b\x32\x15.common.XiBareUserJidB\x08\x18\x01\xca\x9d%\x02\x08\x00\x12\x38\n\x0cinitiator_v2\x18\x04 \x01(\x0b\x32\".common.v1.XiBareUserJidOrAliasJid\x12$\n\x1c\x64rop_if_initiator_not_friend\x18\x02 \x01(\x08\x12\x43\n\x04rule\x18\x03 \x01(\x0e\x32\x35.common.messagepath.v1.VisibilityRulesAttachment.Rule\"G\n\x04Rule\x12\x1d\n\x19USE_SENDER_FOR_VISIBILITY\x10\x00\x12 \n\x1cUSE_INITIATOR_FOR_VISIBILITY\x10\x01\x42z\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x04MPTHb\x06proto3')
,
dependencies=[common__model__pb2.DESCRIPTOR,common_dot_v1_dot_model__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_VISIBILITYRULESATTACHMENT_RULE = _descriptor.EnumDescriptor(
name='Rule',
full_name='common.messagepath.v1.VisibilityRulesAttachment.Rule',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USE_SENDER_FOR_VISIBILITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USE_INITIATOR_FOR_VISIBILITY', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=381,
serialized_end=452,
)
_sym_db.RegisterEnumDescriptor(_VISIBILITYRULESATTACHMENT_RULE)
_VISIBILITYRULESATTACHMENT = _descriptor.Descriptor(
name='VisibilityRulesAttachment',
full_name='common.messagepath.v1.VisibilityRulesAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initiator', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))),
_descriptor.FieldDescriptor(
name='initiator_v2', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator_v2', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop_if_initiator_not_friend', full_name='common.messagepath.v1.VisibilityRulesAttachment.drop_if_initiator_not_friend', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rule', full_name='common.messagepath.v1.VisibilityRulesAttachment.rule', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_VISIBILITYRULESATTACHMENT_RULE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=452,
)
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].message_type = common__model__pb2._XIBAREUSERJID
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator_v2'].message_type = common_dot_v1_dot_model__pb2._XIBAREUSERJIDORALIASJID
_VISIBILITYRULESATTACHMENT.fields_by_name['rule'].enum_type = _VISIBILITYRULESATTACHMENT_RULE
_VISIBILITYRULESATTACHMENT_RULE.containing_type = _VISIBILITYRULESATTACHMENT
DESCRIPTOR.message_types_by_name['VisibilityRulesAttachment'] = _VISIBILITYRULESATTACHMENT
VisibilityRulesAttachment = _reflection.GeneratedProtocolMessageType('VisibilityRulesAttachment', (_message.Message,), dict(
DESCRIPTOR = _VISIBILITYRULESATTACHMENT,
__module__ = 'messagepath.v1.visibility_rules_pb2'
# @@protoc_insertion_point(class_scope:common.messagepath.v1.VisibilityRulesAttachment)
))
_sym_db.RegisterMessage(VisibilityRulesAttachment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\004MPTH'))
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].has_options = True
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))
# @@protoc_insertion_point(module_scope)
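# Usage sketch (not part of the generated file; relies only on the standard
# Python protobuf message API for the message defined above):
#
#   att = VisibilityRulesAttachment()
#   att.drop_if_initiator_not_friend = True
#   att.rule = VisibilityRulesAttachment.USE_INITIATOR_FOR_VISIBILITY
#   payload = att.SerializeToString()
#   decoded = VisibilityRulesAttachment.FromString(payload)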
|
Azure/azure-sdk-for-python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_15/models/_web_site_management_client_enums.py
|
Python
|
mit
| 26,652
| 0.005516
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AppServiceCertificateOrderPatchResourcePropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServiceCertificateOrderPropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServicePlanRestrictions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""App Service plans this offer is restricted to.
"""
NONE = "None"
FREE = "Free"
SHARED = "Shared"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
class AutoHealActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Predefined action to be taken.
"""
RECYCLE = "Recycle"
LOG_EVENT = "LogEvent"
CUSTOM_ACTION = "CustomAction"
class AzureResourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the Azure resource the hostname is assigned to.
"""
WEBSITE = "Website"
TRAFFIC_MANAGER = "TrafficManager"
class AzureStorageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State of the storage account.
"""
OK = "Ok"
INVALID_CREDENTIALS = "InvalidCredentials"
INVALID_SHARE = "InvalidShare"
NOT_VALIDATED = "NotValidated"
class AzureStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of storage.
"""
AZURE_FILES = "AzureFiles"
AZURE_BLOB = "AzureBlob"
class BackupItemStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Backup status.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
SKIPPED = "Skipped"
PARTIALLY_SUCCEEDED = "PartiallySucceeded"
DELETE_IN_PROGRESS = "DeleteInProgress"
DELETE_FAILED = "DeleteFailed"
DELETED = "Deleted"
class BackupRestoreOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Operation type.
"""
DEFAULT = "Default"
CLONE = "Clone"
RELOCATION = "Relocation"
SNAPSHOT = "Snapshot"
CLOUD_FS = "CloudFS"
class BuildStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the static site build.
"""
WAITING_FOR_DEPLOYMENT = "WaitingForDeployment"
UPLOADING = "Uploading"
DEPLOYING = "Deploying"
READY = "Ready"
FAILED = "Failed"
DELETING = "Deleting"
DETACHED = "Detached"
class BuiltInAuthenticationProvider(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory"
FACEBOOK = "Facebook"
GOOGLE = "Google"
MICROSOFT_ACCOUNT = "MicrosoftAccount"
TWITTER = "Twitter"
GITHUB = "Github"
class CertificateOrderActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Action type.
"""
CERTIFICATE_ISSUED = "CertificateIssued"
CERTIFICATE_ORDER_CANCELED = "CertificateOrderCanceled"
CERTIFICATE_ORDER_CREATED = "CertificateOrderCreated"
CERTIFICATE_REVOKED = "CertificateRevoked"
DOMAIN_VALIDATION_COMPLETE = "DomainValidationComplete"
FRAUD_DETECTED = "FraudDetected"
ORG_NAME_CHANGE = "OrgNameChange"
ORG_VALIDATION_COMPLETE = "OrgValidationComplete"
SAN_DROP = "SanDrop"
FRAUD_CLEARED = "FraudCleared"
CERTIFICATE_EXPIRED = "CertificateExpired"
CERTIFICATE_EXPIRATION_WARNING = "CertificateExpirationWarning"
FRAUD_DOCUMENTATION_REQUIRED = "FraudDocumentationRequired"
UNKNOWN = "Unknown"
class CertificateOrderStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Current order status.
"""
PENDINGISSUANCE = "Pendingissuance"
ISSUED = "Issued"
REVOKED = "Revoked"
CANCELED = "Canceled"
DENIED = "Denied"
PENDINGREVOCATION = "Pendingrevocation"
PENDING_REKEY = "PendingRekey"
UNUSED = "Unused"
EXPIRED = "Expired"
NOT_SUBMITTED = "NotSubmitted"
class CertificateProductType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Certificate product type.
"""
STANDARD_DOMAIN_VALIDATED_SSL = "StandardDomainValidatedSsl"
STANDARD_DOMAIN_VALIDATED_WILD_CARD_SSL = "StandardDo
|
mainValidatedWildCardSsl"
class Channels(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""List of channels that this recommendation can apply.
"""
NOTIFICATION = "Notification"
API = "Api"
EMAIL = "Email"
WEBHOOK = "Webhook"
ALL = "All"
class CheckNameResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Resource type used for verification.
"""
SITE = "Site"
SLOT = "Slot"
HOSTING_ENVIRONMENT = "HostingEnvironment"
PUBLISHING_USER = "PublishingUser"
MICROSOFT_WEB_SITES = "Microsoft.Web/sites"
MICROSOFT_WEB_SITES_SLOTS = "Microsoft.Web/sites/slots"
MICROSOFT_WEB_HOSTING_ENVIRONMENTS = "Microsoft.Web/hostingEnvironments"
MICROSOFT_WEB_PUBLISHING_USERS = "Microsoft.Web/publishingUsers"
class ClientCertMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""This composes with ClientCertEnabled setting.
* ClientCertEnabled: false means ClientCert is ignored.
* ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
* ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or
accepted.
"""
REQUIRED = "Required"
OPTIONAL = "Optional"
OPTIONAL_INTERACTIVE_USER = "OptionalInteractiveUser"
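# A small usage sketch (not from the generated file): these enums mix in str and
# use azure.core's CaseInsensitiveEnumMeta, so members compare equal to their
# string values and name lookups are intended to be case-insensitive, e.g.:
#
#   ClientCertMode.REQUIRED == "Required"   # True, members are plain strings
#   ClientCertMode["optional"]              # resolves to ClientCertMode.OPTIONAL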
class CloneAbilityResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of app.
"""
CLONEABLE = "Cloneable"
PARTIALLY_CLONEABLE = "PartiallyCloneable"
NOT_CLONEABLE = "NotCloneable"
class ComputeModeOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Shared/dedicated workers.
"""
SHARED = "Shared"
DEDICATED = "Dedicated"
DYNAMIC = "Dynamic"
class ConnectionStringType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of database.
"""
MY_SQL = "MySql"
SQL_SERVER = "SQLServer"
SQL_AZURE = "SQLAzure"
CUSTOM = "Custom"
NOTIFICATION_HUB = "NotificationHub"
SERVICE_BUS = "ServiceBus"
EVENT_HUB = "EventHub"
API_HUB = "ApiHub"
DOC_DB = "DocDb"
REDIS_CACHE = "RedisCache"
POSTGRE_SQL = "PostgreSQL"
class ContinuousWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job status.
"""
INITIALIZING = "Initializing"
STARTING = "Starting"
RUNNING = "Running"
PENDING_RESTART = "PendingRestart"
STOPPED = "Stopped"
class CookieExpirationConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The convention used when determining the session cookie's expiration.
"""
FIXED_TIME = "FixedTime"
IDENTITY_PROVIDER_DERIVED = "IdentityProviderDerived"
class CustomDomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the custom domain
"""
RETRIEVIN
|
jakeret/tf_unet
|
scripts/ultrasound_launcher.py
|
Python
|
gpl-3.0
| 3,744
| 0.005342
|
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 28, 2016
author: jakeret
Trains a tf_unet network to segment nerves in the Ultrasound Kaggle Dataset.
Requires the Kaggle dataset.
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import click
import numpy as np
from PIL import Image
from tf_unet import unet
from tf_unet import util
from tf_unet.image_util import ImageDataProvider
IMG_SIZE = (290, 210)
@click.command()
@click.option('--data_root', default="../../ultrasound/train")
@click.option('--output_path', default="./unet_trained_ultrasound")
@click.option('--training_iters', default=20)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=3)
@click.option('--features_root', default=32)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
print("Using data from: %s"%data_root)
if not os.path.exists(data_root):
raise IOError("Kaggle Ultrasound Dataset not found")
data_provider = DataProvider(search_path=data_root + "/*.tif",
mean=100,
std=56)
net = unet.Unet(channels=data_provider.channels,
n_class=data_provider.n_class,
layers=layers,
features_root=features_root,
#cost="dice_coefficient",
)
path = output_path if restore else util.create_training_path(output_path)
trainer = unet.Trainer(net, batch_size=1, norm_grads=False, optimizer="adam")
path = trainer.train(data_provider, path,
training_iters=training_iters,
epochs=epochs,
dropout=0.5,
display_step=2,
restore=restore)
x_test, y_test = data_provider(1)
prediction = net.predict(path, x_test)
print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
class DataProvider(ImageDataProvider):
"""
Extends the default ImageDataProvider to randomly select the next
image and ensures that only data sets are used where the mask is not empty.
The data then gets mean and std adjusted
"""
def __init__(self, mean, std, *args, **kwargs):
super(DataProvider, self).__init__(*args, **kwargs)
self.mean = mean
self.std = std
def _next_data(self):
data, mask = super(DataProvider, self)._next_data()
while mask.sum() == 0:
self._cylce_file()
data, mask = super(DataProvider, self)._next_data()
return data, mask
def _process_data(self, data):
data -= self.mean
data /= self.std
return data
def _load_file(self, path, dtype=np.float32):
image = Image.open(path)
return np.array(image.resize(IMG_SIZE), dtype)
def _cylce_file(self):
self.file_idx = np.random.choice(len(self.data_files))
if __name__ == '__main__':
launch()
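# Example invocation (a sketch; the paths are placeholders and the options are
# the click options declared above):
#
#   python ultrasound_launcher.py --data_root ../../ultrasound/train \
#       --output_path ./unet_trained_ultrasound --layers 3 --features_root 32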
|
flavour/eden
|
controllers/msg.py
|
Python
|
mit
| 87,816
| 0.011604
|
# -*- coding: utf-8 -*-
"""
Messaging Module - Controllers
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
# -----------------------------------------------------------------------------
def basestation():
""" RESTful CRUD controller for Base Stations """
# Pre-processor
def prep(r):
# Function to call for all Site Instance Types
from s3db.org import org_site_prep
org_site_prep(r)
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def compose():
""" Compose a Message which can be sent to a pentity via a number of different communications channels """
return msg.compose()
# =============================================================================
def message():
"""
RESTful CRUD controller for the master message log
"""
tablename = "msg_message"
table = s3db.msg_message
table.instance_type.readable = True
table.instance_type.label = T("Channel")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Message Log"),
label_list_button = T("View Message Log"),
msg_list_empty = T("No Messages currently in the Message Log"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons
s3.actions += [{"label": s3_str(T("Mark Sender")),
"url": URL(f = "mark_sender",
args = ["[id]"],
),
"_class": "action-btn",
},
]
return output
s3.postp = postp
s3db.configure(tablename,
deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def contact():
"""
RESTful CRUD controller for the Contact Form
"""
def prep(r):
if not auth.s3_has_role("ADMIN"):
r.method = "create"
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def mark_sender():
"""
Assign priority to the given sender
"""
try:
mid = request.args[0]
except:
raise SyntaxError
mtable = s3db.msg_message
stable = s3db.msg_sender
# @ToDo: Replace 2 queries with Join
srecord = db(mtable.id == mid).select(mtable.from_address,
limitby = (0, 1),
).first()
sender = srecord.from_address
record = db(stable.sender == sender).select(stable.id,
limitby = (0, 1),
).first()
if record:
args = "update"
else:
args = "create"
redirect(URL(f = "sender",
args = args,
vars = {"sender": sender},
))
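# A possible single-query form of the two selects above (sketch only, untested;
# uses the DAL's left join so a missing msg_sender row still yields the address):
#
#   row = db(mtable.id == mid).select(mtable.from_address,
#                                     stable.id,
#                                     left = stable.on(stable.sender == mtable.from_address),
#                                     limitby = (0, 1),
#                                     ).first()
#   sender = row.msg_message.from_address
#   record = row.msg_sender.id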
# =============================================================================
def outbox():
""" View the contents of the Outbox """
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
from s3db.pr import pr_PersonEntityRepresent
tablename = "msg_outbox"
table = s3db[tablename]
table.message_id.label = T("Message")
table.message_id.writable = False
table.message_id.readable = True
table.pe_id.readable = True
table.pe_id.label = T("Recipient")
table.message_id.represent = s3db.msg_message_represent
table.pe_id.represent = pr_PersonEntityRepresent(default_label = "")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Outbox"),
label_list_button = T("View Outbox"),
label_delete_button = T("Delete Message"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_outbox():
"""
RESTful CRUD controller for the Email Outbox
- all Outbound Email Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_email"
table = s3db.msg_email
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Details"),
title_list = T("Sent Emails"),
label_list_button = T("View Sent Emails"),
label_delete_button = T("Delete Email"),
msg_record_deleted = T("Email deleted"),
msg_list_empty = T("No Emails currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"subject",
"body",
],
)
return s3_rest_controller(c, "email")
# -----------------------------------------------------------------------------
def facebook_outbox():
"""
RESTful CRUD controller for the Facebook Outbox
- all Outbound Facebook Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_facebook"
table = s3db.msg_facebook
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Post Details"),
title_list = T("Sent Posts"),
label_list_button = T("View Sent Posts"),
label_delete_button = T("Delete Post"),
msg_record_deleted = T("Post deleted"),
msg_list_empty = T("No Posts currently in Outbox"),
)
#def postp(r, output):
# if isinstance(output, dict):
# add_btn = A(T("Compose"),
# _class="action-btn",
# _href=URL(f="compose")
# )
# output["rheader"]
|
Pica4x6/megaradrp
|
megaradrp/recipes/calibration/weights.py
|
Python
|
gpl-3.0
| 15,527
| 0.002061
|
from __future__ import division
import copy
import new
import math
import multiprocessing as mp
import time
import timeit
import shutil
import json as ujson
from tempfile import mkdtemp
import copy_reg
import numpy as np
import os
import os.path
from megaradrp.core.recipe import MegaraBaseRecipe
from numina.core import Product, Requirement
from megaradrp.requirements import MasterFiberFlatFrameRequirement, MasterSlitFlatRequirement, MasterBiasRequirement, MasterDarkRequirement
from megaradrp.products import TraceMap
from megaradrp.products import MasterWeights
# matplotlib.use('agg', warn=True)
from numina.core.requirements import ObservationResultRequirement
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.models import custom_model_1d
from scipy.stats import norm
from astropy.modeling.models import custom_model
import logging
_logger = logging.getLogger('numina.recipes.megara')
##############################################################################
def make_instancemethod(inst, methodname):
return getattr(inst, methodname)
def pickle_instancemethod(method):
return make_instancemethod, (method.im_self, method.im_func.__name__)
copy_reg.pickle(new.instancemethod, pickle_instancemethod, make_instancemethod)
##############################################################################
M_SQRT_2_PI = math.sqrt(2 * math.pi)
class WeightsRecipe(MegaraBaseRecipe):
# Requirements
master_bias = MasterBiasRequirement()
master_dark = MasterDarkRequirement()
master_slitflat = MasterSlitFlatRequirement()
master_fiberflat_frame = MasterFiberFlatFrameRequirement()
tracemap = Requirement(TraceMap, 'Trace information of the Apertures')
# Products
master_weights = Product(MasterWeights)
def __init__(self, size=4096, fibers=623, rows=4112):
self.SIZE = size
self.ROWS = rows
self.FIBERS = fibers
self.procesos = mp.cpu_count() - 2
super(WeightsRecipe, self).__init__(version="0.1.0")
def _add_file_to_tar(self, file_name, tar):
'''
:param file_name: <str> Name of the *.fits files
:param path: <str> Path where fits files are located
:param tar: <tarfile> descriptor of the tarfile object
:return:
'''
tar.add(file_name, arcname=os.path.basename(file_name))
def _check_directory(self, path):
'''
:param path: <str> Path where fits files are stored. If exists then will be erased
:return: None
'''
import shutil
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def compress(self, path, tar_name='files'):
'''
:param path: <str> Path where the files to be archived are located
:param tar_name: <str> name of the tar file
:return: None
'''
import tarfile
import glob
try:
os.remove("%s.tar" % tar_name)
except OSError:
pass
tar = tarfile.open("%s.tar" % tar_name, "w")
files = glob.glob('%s/*.*' % path)
for file in files:
self._add_file_to_tar(file, tar)
tar.close()
def decompress(self, tar_name='files'):
'''
:param tar_name: <str> name of the tar file
:return: None
'''
import tarfile
tar = tarfile.open("%s.tar" % tar_name, 'r')
aux = tar.extractall()
try:
return tar.getnames()[0].split('/')[0]
except:
return ''
def extract_w(self, img, mlist=[]):
'''
:param img: <fits> original fiber flat fits file
:param mlist: <list> list of csr_matrix
:return: <ndarray> result of lsqr
'''
from scipy.sparse.linalg import lsqr
result = np.zeros((self.FIBERS, self.SIZE))
for col in range(self.SIZE):
wes_csr = mlist[col]
p = img[:, col]
x = lsqr(wes_csr, p)
result[:, col] = x[0]
return result
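# (Each detector column is modelled as p = W x, where W is the sparse matrix of
#  per-fiber profile weights for that column and x the fiber fluxes; lsqr solves
#  the least-squares system column by column, giving one flux per fiber.)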
def _load_files_paralell(self, col, path):
'''
:param col: <str,int> name of the fits file. It is a counter
:param path: <str> path where *.npz are
:return: csr_matrix
'''
from scipy.sparse import csr_matrix
filename = '%s/%s.npz' % (path, col)
loader = np.load(filename)
return csr_matrix(
(loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
def load_files_from_directory(self, path, tar_file=None):
'''
:param path: <str> path to load *.fits files
:param tar_file: <str> if it is given, *.fits files are extracted
:return: list of csr_matrix
'''
if tar_file:
path = self.decompress()
pool = mp.Pool(processes=self.procesos)
results = [pool.apply_async(self._load_files_paralell,
args=(ite, path)) for ite in
range(self.SIZE)]
results = [p.get() for p in results]
return results
def pixcont(self, i, x0, sig, hpix=0.5):
'''Integrate a gaussian profile.'''
z = (i - x0) / sig
hpixs = hpix / sig
z2 = z + hpixs
z1 = z - hpixs
return norm.cdf(z2) - norm.cdf(z1)
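# (pixcont returns the fraction of a unit-area Gaussian centred on x0 with
#  width sig that falls inside the pixel [i - hpix, i + hpix].)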
def g_profile(self, xl, l, s):
'''A gaussian profile.'''
z = (xl - l) / s
return np.exp(-0.5 * z ** 2)
def fit1d_profile(self, xl, yl, init0, N, nloop=10, S=3):
"""Iterative fitting"""
init = copy.deepcopy(init0)
changes_a = np.zeros((N, nloop))
changes_m = np.zeros((N, nloop))
changes_s = np.zeros((N, nloop))
for il in range(nloop):
values = np.random.permutation(N)
for val in values:
m1 = max(0, int(init[val]['mean']) - 6 * S)
m2 = int(init[val]['mean']) + 6 * S
y = yl[m1:m2].copy()
xt = xl[m1:m2]
for peakid in range(max(0, val - S), min(N, val + S + 1)):
if peakid == val:
continue
y -= gauss_box_model(xt, **init[peakid])
model = GaussBox(**init[val])
model.mean.min = model.mean.value - 0.5
model.mean.max = model.mean.value + 0.5
# model.mean.fixed = True
model.stddev.min = 1.0
model.stddev.max = 2.0
model.hpix.fixed = True
fitter = fitting.LevMarLSQFitter()
model_fitted = fitter(model, xt, y)
na = model_fitted.amplitude.value
nm = model_fitted.mean.value
ns = model_fitted.stddev.value
changes_a[val, il] = na - init[val]['amplitude']
changes_m[val, il] = nm - init[val]['mean']
changes_s[val, il] = ns - init[val]['stddev']
init[val]['amplitude'] = na
init[val]['mean'] = nm
init[val]['stddev'] = ns
return init, (changes_a, changes_m, changes_s)
def calc_sparse_matrix(self, final, nrows, cut=1.0e-6, extra=10):
from scipy.sparse import lil_matrix
idxs = range(len(final))
# g_ampl = np.array([final[i]['amplitude'] for i in idxs])
g_mean = np.array([final[i]['mean'] for i in idxs])
g_std = np.array([final[i]['stddev'] for i in idxs])
# calc w
begpix = np.ceil(g_mean - 0.5).astype('int')
steps = np.arange(-extra, extra)
ref = begpix + steps[:, np.newaxis]
rr = gauss_box_model(ref, mean=g_mean, stddev=g_std)
rrb = begpix - extra
# Filter values below 'cut'
rr[rr < cut] = 0.0
# Calc Ws matrix
block, nfib = rr.shape
w_init = lil_matrix((nrows, nfib))
for i in range(nfib):
w_init[rrb[i]:rrb[i] + block, i] = rr[:, i, np.newaxis]
# Convert to CSR matrix
wcol = w_init.tocsr()
return wcol
def calc_profile(self, data1, pols, col, sigma, start=0, doplot
|
rmyers/clouddb-rpc
|
dashboard/stashboard/content/databases/views.py
|
Python
|
apache-2.0
| 19,542
| 0
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing database instances.
"""
from collections import OrderedDict
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import forms as horizon_forms
from horizon import tables as horizon_tables
from horizon import tabs as horizon_tabs
from horizon.utils import memoized
from horizon import workflows as horizon_workflows
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from stashboard import api
from stashboard.content.databases import forms
from stashboard.content.databases import tables
from stashboard.content.databases import tabs
from stashboard.content.databases import workflows
LOG = logging.getLogger(__name__)
class IndexView(horizon_tables.DataTableView):
table_class = tables.InstancesTable
template_name = 'project/databases/index.html'
page_title = _("Instances")
def has_more_data(self, table):
return self._more
@memoized.memoized_method
def get_flavors(self):
try:
flavors = api.trove.flavor_list(self.request)
except Exception:
flavors = []
msg = _('Unable to retrieve database size information.')
exceptions.handle(self.request, msg)
return OrderedDict((six.text_type(flavor.id), flavor)
for flavor in flavors)
def _extra_data(self, instance):
flavor = self.get_flavors().get(instance.flavor["id"])
if flavor is not None:
instance.full_flavor = flavor
instance.host = tables.get_host(instance)
return instance
def get_data(self):
marker = self.request.GET.get(
tables.InstancesTable._meta.pagination_param)
# Gather our instances
try:
instances = api.trove.instance_list(self.request, marker=marker)
self._more = instances.next or False
except Exception:
self._more = False
instances = []
msg = _('Unable to retrieve database instances.')
exceptions.handle(self.request, msg)
map(self._extra_data, instances)
return instances
class LaunchInstanceView(horizon_workflows.WorkflowView):
workflow_class = workflows.LaunchInstance
template_name = "project/databases/launch.html"
page_title = _("Launch Database")
def get_initial(self):
initial = super(LaunchInstanceView, self).get_initial()
initial['project_id'] = self.request.user.project_id
initial['user_id'] = self.request.user.id
return initial
class DBAccess(object):
def __init__(self, name, access):
self.name = name
self.access = access
class CreateUserView(horizon_forms.ModalFormView):
form_class = forms.CreateUserForm
form_id = "create_user_form"
modal_header = _("Create User")
modal_id = "create_user_modal"
template_name = 'project/databases/create_user.html'
submit_label = "Create User"
submit_url = 'horizon:project:databases:create_user'
success_url = 'horizon:project:databases:detail'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['instance_id'],))
def get_context_data(self, **kwargs):
context = super(CreateUserView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance_id = self.kwargs['instance_id']
return {'instance_id': instance_id}
class EditUserView(horizon_forms.ModalFormView):
form_class = forms.EditUserForm
form_id = "edit_user_form"
modal_header = _("Edit User")
modal_id = "edit_user_modal"
template_name = 'project/databases/edit_user.html'
submit_label = "Apply Changes"
submit_url = 'horizon:project:databases:edit_user'
success_url = 'horizon:project:databases:detail'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['instance_id'],))
def get_context_data(self, **kwargs):
context = super(EditUserView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
context['user_name'] = self.kwargs['user_name']
args = (self.kwargs['instance_id'], self.kwargs['user_name'])
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance_id = self.kwargs['instance_id']
user_name = self.kwargs['user_name']
host = tables.parse_host_param(self.request)
return {'instance_id': instance_id, 'user_name': user_name,
'host': host}
class AccessDetailView(horizon_tables.DataTableView):
table_class = tables.AccessTable
template_name = 'project/databases/access_detail.html'
page_title = _("Database Access for: {{ user_name }}")
@memoized.memoized_method
def get_data(self):
instance_id = self.kwargs['instance_id']
user_name = self.kwargs['user_name']
try:
databases = api.trove.database_list(self.request, instance_id)
except Exception:
databases = []
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to retrieve databases.'),
redirect=redirect)
try:
granted = api.trove.user_list_access(
self.request, instance_id, user_name)
except Exception:
granted = []
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to retrieve accessible databases.'),
redirect=redirect)
db_access_list = []
for database in databases:
if database in granted:
access = True
else:
access = False
db_access = DBAccess(database.name, access)
db_access_list.append(db_access)
return sorted(db_access_list, key=lambda data: (data.name))
def get_context_data(self, **kwargs):
context = super(AccessDetailView, self).get_context_data(**kwargs)
context["db_access"] = self.get_data()
return context
class AttachConfigurationView(horizon_forms.ModalFormView):
form_class = forms.AttachConfigurationForm
form_id = "attach_config_form"
modal_header = _("Attach Configuration Group")
modal_id = "attach_config_modal"
template_name = "project/databases/attach_config.html"
submit_label = "Attach Configuration"
submit_url = 'horizon:project:databases:attach_config'
success_url = reverse_lazy('horizon:project:databases:index')
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.trove.instance_get(self.request, instance_id)
except Exception:
msg = _('Unable to retrieve instance details.')
redirect = reverse('horizon:project:databases:index')
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/src/CADAssembler/ExtractACM-XMLfromCreoModels/testExtractACM.py
|
Python
|
mit
| 4,969
| 0.00322
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
#!/usr/bin/python
###
# This module provides the 'make CF card' function to OM
# The two functions "update_fs" and "prepare_fs" should
# be provided to the user of the OM so that the user
# can perform either one at any time
#
# At the bottom there is an example of running the function.
# In the OM you'll need to have an option for the user to select
# which device enumeration to use (or input it through text)
###
import copy, os, shutil, subprocess, string, glob, fnmatch, shlex
import threading
import time
import sys
def scan_for_CAD_files(mypath):
print "Starting test script for ExtractACM-XMLfromCASModules.exe"
from os import listdir
from os.path import isfile, join, getsize
matches = []
for root, dirs, files in os.walk(mypath):
for filename in fnmatch.filter(files, '*.prt*') + fnmatch.filter(files, '*.asm*'):
if not filename.endswith('.xml'):
matches.append(os.path.join(root, filename))
max_threads = 1
threads = []
for fn in matches:
while count_alive_threads(threads) >= max_threads:
time.sleep(1)
newThread = threading.Thread(target=run_the_extractor, kwargs={"filename": fn})
newThread.start()
threads.append(newThread)
def count_alive_threads(thread_array):
count = 0
for t in thread_array:
if t.isAlive():
count += 1
return count
def run_the_extractor(filename):
print "converting " + filename
outfilename = filename + '.xml'
exe_path = os.getenv("PROE_ISIS_EXTENSIONS") + 'bin\ExtractACM-XMLfromCreoModels.exe'
arguments = ' -c "'+filename+'" -x "' + outfilename + '"'
command = exe_path + arguments
return_code = subprocess.call(command)
if return_code:
print " Error on converting file "+ filename + " (return code " + str(return_code) + ")"
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Syntax: testExtractACM <PathtoScan>"
exit()
mypath = sys.argv[1]
scan_for_CAD_files(mypath)
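# Example invocation (sketch; the argument is a placeholder for a directory tree
# containing Creo .prt/.asm models):
#
#   python testExtractACM.py C:\CAD\models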
|
pczhaoyun/wolf
|
wolf/spiders/wolves/henbt.py
|
Python
|
apache-2.0
| 1,085
| 0.004608
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import urlparse
from scrapy import log
from scrapy.http import Request
from base.base_wolf import Base_Wolf
class Wolf(Base_Wolf):
def __init__(self, *args, **kwargs):
super(Wolf, self).__init__(*args, **kwargs)
self.name = 'henbt'
self.seed_urls = [
'http://henbt.com/',
]
self.base_url = 'http://henbt.com/'
self.rule['follow'] = re.compile(r'show-')
self.anchor['desc'] = "//*[@class='intro']"
def get_resource(self, item, response, tree):
item = super(Wolf, self).get_resource(item, response, tree)
resource = tree.xpath("//*[@class='original download']//a/@href")
downloads = [urlparse.urljoin(self.base_url, r) for r in resource if re.match(r'down.php', r)]
if len(downloads):
return self.download_bt(item, [Request(d, cookies=self.cookiejar._cookies,) for d in downloads])
else:
self.log("No Resource DropItem %s" % item['source'], level=log.WARNING)
return None
|
momomoxiaoxi/security
|
Scripts/Check.py
|
Python
|
apache-2.0
| 574
| 0.019164
|
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
def read(filename):
dic=[]
with open(filename,'r') as fp:
while True:
lines = fp.readlines(10000)
if not lines :
break
for line in lines:
#line = line.strip('\n')
dic.append(line)
return dic
def Write(file,dic):
with open(file,'w') as fp:
for i in dic:
fp.write(i)
if __name__=='__main__':
test = read('output.txt')
test += read("dire.txt")
print test
Write('output.txt',set(test))
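# (The script expects 'output.txt' and 'dire.txt' next to it; their lines are
#  merged, de-duplicated via set(), and written back to 'output.txt'.)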
|
superdesk/superdesk-core
|
content_api/packages/__init__.py
|
Python
|
agpl-3.0
| 721
| 0
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from .service import PackagesService
from .resource import PackagesResource
def init_app(app) -> None:
"""Initialize the `packages` API endpoint.
:param app: the API application object
:type app: `Eve`
"""
endpoint_name = "packages
|
"
service = PackagesService(endpoint_name, backend=superdesk.get_backend())
PackagesResource(endpoint_name, app=app, service=service)
|
pixelrebel/st2
|
st2actions/tests/unit/test_paramiko_remote_script_runner.py
|
Python
|
apache-2.0
| 5,199
| 0.0025
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
from mock import patch, Mock, MagicMock
import unittest2
# XXX: There is an import dependency. Config needs to setup
# before importing remote_script_runner classes.
import st2tests.config as tests_config
tests_config.parse_args()
from st2common.util import jsonify
from remote_script_runner import ParamikoRemoteScriptRunner
from st2common.runners.parallel_ssh import ParallelSSHClient
from st2common.exceptions.ssh import InvalidCredentialsException
from st2common.exceptions.ssh import NoHostsConnectedToException
from st2common.models.system.paramiko_script_action import ParamikoRemoteScriptAction
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2tests.fixturesloader import FixturesLoader
__all__ = [
'ParamikoScriptRunnerTestCase'
]
FIXTURES_PACK = 'generic'
TEST_MODELS = {
'actions': ['a1.yaml']
}
MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']
class ParamikoScriptRunnerTestCase(unittest2.TestCase):
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(jsonify, 'json_loads', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_cwd_used_correctly(self):
remote_action = ParamikoRemoteScriptAction(
'foo-script', bson.ObjectId(),
script_local_path_abs='/home/stanley/shiz_storm.py',
script_local_libs_path_abs=None,
named_args={}, positional_args=['blank space'], env_vars={},
on_behalf_user='svetlana', user='stanley',
private_key='---SOME RSA KEY---',
remote_dir='/tmp', hosts=['127.0.0.1'], cwd='/test/cwd/'
)
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner._parallel_ssh_client = ParallelSSHClient(['127.0.0.1'], 'stanley')
paramiko_runner._run_script_on_remote_host(remote_action)
exp_cmd = "cd /test/cwd/ && /tmp/shiz_storm.py 'blank space'"
ParallelSSHClient.run.assert_called_with(exp_cmd,
timeout=None)
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_username_only_ssh(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {'username': 'test_user', 'hosts': '127.0.0.1'}
self.assertRaises(InvalidCredentialsException, paramiko_runner.pre_run)
def test_username_invalid_private_key(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': '127.0.0.1',
'private_key': 'invalid private key',
}
paramiko_runner.context = {}
self.assertRaises(NoHostsConnectedToException, paramiko_runner.pre_run)
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_top_level_error_is_correctly_reported(self):
# Verify that a top-level error doesn't cause an exception to be thrown.
# In a top-level error case, result dict doesn't contain entry per host
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': '127.0.0.1'
}
paramiko_runner.action = ACTION_1
paramiko_runner.liveaction_id = 'foo'
paramiko_runner.entry_point = 'foo'
paramiko_runner.context = {}
paramiko_runner._cwd = '/tmp'
paramiko_runner._copy_artifacts = Mock(side_effect=Exception('fail!'))
status, result, _ = paramiko_runner.run(action_parameters={})
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertEqual(result['failed'], True)
self.assertEqual(result['succeeded'], False)
self.assertTrue('Failed copying content to remote boxes' in result['error'])
|
jck/myhdl
|
myhdl/_instance.py
|
Python
|
lgpl-2.1
| 3,952
| 0.000253
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always function. """
import inspect
from types import FunctionType
from myhdl import InstanceError
from myhdl._util import _isGenFunc, _makeAST
from myhdl._Waiter import _inferWaiter
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
class _error:
pass
_error.NrOfArgs = "decorated generator function should
|
not have arguments"
_error.ArgType = "decorated object should be a generator function"
class _CallInfo(object):
def __init__(self, name, modctxt, symdict):
self.name = name
self.modctxt = modctxt
self.symdict = symdict
def _getCallInfo():
"""Get info on the caller of an Instantiator.
An Instantiator should be used in a block context.
This function gets the required info about the caller.
It uses the frame stack:
0: this function
1: the instantiator decorator
2: the block function that defines instances
3: the caller of the block function, e.g. the BlockInstance.
"""
from myhdl import _block
funcrec = inspect.stack()[2]
name = funcrec[3]
frame = funcrec[0]
symdict = dict(frame.f_globals)
symdict.update(frame.f_locals)
modctxt = False
callerrec = inspect.stack()[3]
f_locals = callerrec[0].f_locals
if 'self' in f_locals:
modctxt = isinstance(f_locals['self'], _block._Block)
return _CallInfo(name, modctxt, symdict)
def instance(genfunc):
callinfo = _getCallInfo()
if not isinstance(genfunc, FunctionType):
raise InstanceError(_error.ArgType)
if not _isGenFunc(genfunc):
raise InstanceError(_error.ArgType)
if genfunc.__code__.co_argcount > 0:
raise InstanceError(_error.NrOfArgs)
return _Instantiator(genfunc, callinfo=callinfo)
class _Instantiator(object):
def __init__(self, genfunc, callinfo):
self.callinfo = callinfo
self.callername = callinfo.name
self.modctxt = callinfo.modctxt
self.genfunc = genfunc
self.gen = genfunc()
# infer symdict
f = self.funcobj
varnames = f.__code__.co_varnames
symdict = {}
for n, v in callinfo.symdict.items():
if n not in varnames:
symdict[n] = v
self.symdict = symdict
# print modname, genfunc.__name__
tree = self.ast
# print ast.dump(tree)
v = _AttrRefTransformer(self)
v.visit(tree)
v = _SigNameVisitor(self.symdict)
v.visit(tree)
self.inputs = v.inputs
self.outputs = v.outputs
self.inouts = v.inouts
self.embedded_func = v.embedded_func
self.sigdict = v.sigdict
self.losdict = v.losdict
@property
def name(self):
return self.funcobj.__name__
@property
def funcobj(self):
return self.genfunc
@property
def waiter(self):
return self._waiter()(self.gen)
def _waiter(self):
return _inferWaiter
@property
def ast(self):
return _makeAST(self.funcobj)
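# Typical use (a sketch, not part of this module): inside a block function,
# decorate a zero-argument generator function to turn it into an instance;
# 'clk' and 'delay' below are assumed to exist in the enclosing block:
#
#   @instance
#   def stimulus():
#       clk.next = 0
#       while True:
#           yield delay(10)
#           clk.next = not clk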
|
whoi-acomms/pyacomms
|
bin/console_logger.py
|
Python
|
lgpl-3.0
| 941
| 0.014878
|
#!/usr/bin/env python
#__author__ = 'Andrew'
from acomms import micromodem, unifiedlog
import logging
from time import sleep
import argparse
if __name__ == '__main__':
ap = argparse.ArgumentParser(description ='Connect to a MM for testing purposes')
ap.add_argument("logpath", help="Location of Log File", default="/home/acomms/")
ap.add_argument("-C","--COM", help='C
|
OM Port to connect', default="/dev/ttyO1")
ap.add_argument("-BR","--Baudrate", help="COM Port Baud Rate", default=19200)
args = ap.parse_args()
unified_log = unifiedlog.UnifiedLog(log_path=args.logpath, console_log_level=logging.INFO)
um1 = micromodem.Micromodem(name='Micromodem2',unified_log=unified_log)
um1.connect_serial(args.COM, args.Baudrate)
try:
while True:
sleep(1)
finally:
um1.disconnect()
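# Example invocation (sketch; the serial device and log directory are placeholders):
#
#   python console_logger.py /home/acomms/log/ --COM /dev/ttyO1 --Baudrate 19200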
|
CptLemming/django-json-patch
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,191
| 0.007447
|
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import json_patch
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-json-patch'
copyright = u'2015, Ashley Wilson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = json_patch.__version__
# The full version, including alpha/beta/rc tags.
release = json_patch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is
|
served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-json-patchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper si
|
ze ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-json-patch.tex', u'django-json-patch Documentation',
u'Ashley Wilson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-json-patch', u'django-json-patch Documentation',
[u'Ashley Wilson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-json-patch', u'django-json-patch Documentation',
u'Ashley Wilson', 'django-json-patch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
ktbyers/pynet-ons-mar17
|
threads_procs/threads_show_ver.py
|
Python
|
apache-2.0
| 1,188
| 0.003367
|
#!/usr/bin/env python
'''
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
'''
import threading
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list as devices
def show_version(a_device):
'''Execute show version command using Netmiko.'''
remote_conn = ConnectHandler(**a_device)
print
print '#' * 8
|
0
print remote_conn.send_command_expect("show version")
print '#' * 80
print
def main():
'''
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on e
|
ach device. Record the amount of time required to do this.
'''
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
main_thread = threading.currentThread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print some_thread
some_thread.join()
print "\nElapsed time: " + str(datetime.now() - start_time)
if __name__ == "__main__":
main()
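A minimal alternative sketch (not from the course repo) of the same fan-out using concurrent.futures; it assumes the same my_devices.device_list, uses Netmiko's send_command, and is written with Python 3 print functions.
#!/usr/bin/env python
'''Thread-pool variant of the 'show version' collector sketched above.'''
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list as devices

def show_version(a_device):
    '''Return the 'show version' output for a single device.'''
    remote_conn = ConnectHandler(**a_device)
    output = remote_conn.send_command("show version")
    remote_conn.disconnect()
    return output

def main():
    start_time = datetime.now()
    # pool.map blocks until every worker finishes, replacing the explicit join loop
    with ThreadPoolExecutor(max_workers=8) as pool:
        for output in pool.map(show_version, devices):
            print("#" * 80)
            print(output)
    print("\nElapsed time: " + str(datetime.now() - start_time))

if __name__ == "__main__":
    main()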
|
faunalia/rt_geosisma_offline
|
Utils.py
|
Python
|
gpl-3.0
| 8,122
| 0.034105
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Omero RT
Description : Omero plugin
Date : August 15, 2010
copyright : (C) 2010 by Giuseppe Sucameli (Faunalia)
email : [email protected]
***************************************************************************/
 This code has been extracted and adapted from the rt_omero plugin to be reused
in rt_geosisma_offline plugin
Works done from Faunalia (http://www.faunalia.it) with funding from Regione
Toscana - Servizio Sismico (http://www.rete.toscana.it/sett/pta/sismica/)
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis.gui
class MapTool(QObject):
canvas = None
registeredToolStatusMsg = {}
def __init__(self, mapToolClass, canvas=None):
QObject.__init__(self)
if canvas == None:
if MapTool.canvas == None:
raise Exception( "MapTool.canvas is None" )
else:
self.canvas = MapTool.canvas
else:
self.canvas = canvas
if MapTool.canvas == None:
MapTool.canvas = canvas
self.tool = mapToolClass( self.canvas )
QObject.connect(self.tool, SIGNAL( "geometryDrawingEnded" ), self.onEnd)
def deleteLater(self):
self.unregisterStatusMsg()
self.stopCapture()
self.tool.deleteLater()
del self.tool
return QObject.deleteLater(self)
def registerStatusMsg(self, statusMessage):
MapTool.registeredToolStatusMsg[self] = statusMessage
def unregisterStatusMsg(self):
if not MapTool.registeredToolStatusMsg.has_key( self ):
return
del MapTool.registeredToolStatusMsg[self]
def onEnd(self, geometry):
self.stopCapture()
if geometry == None:
return
self.emit( SIGNAL( "geometryEmitted" ), geometry )
def isActive(self):
return self.canvas != None and self.canvas.mapTool() == self.tool
def startCapture(self):
self.canvas.setMapTool( self.tool )
def stopCapture(self):
self.canvas.unsetMapTool( self.tool )
class Drawer(qgis.gui.QgsMapToolEmitPoint):
def __init__(self, canvas, isPolygon=False):
self.canvas = canvas
self.isPolygon = isPolygon
qgis.gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = qgis.gui.QgsRubberBand( self.canvas, self.isPolygon )
self.rubberBand.setColor( Qt.red )
self.rubberBand.setBrushStyle(Qt.DiagCrossPattern)
self.rubberBand.setWidth( 1 )
        # set snapping to "to vertex" with a tolerance of 0.3 map units
customSnapOptions = { 'mode' : "to vertex", 'tolerance' : 0.3, 'unit' : 0 }
self.oldSnapOptions = self.customizeSnapping( customSnapOptions )
self.snapper = qgis.gui.QgsMapCanvasSnapper( self.canvas )
self.isEmittingPoints = False
def __del__(self):
if self.oldSnapOptions:
self.customizeSnapping( self.oldSnapOptions )
del self.rubberBand
del self.snapper
self.deleteLater()
def reset(self):
self.isEmittingPoints = False
self.rubberBand.reset( self.isPolygon )
def customizeSnapping(self, option):
oldSnap = {}
settings = QSettings()
oldSnap['mode'] = settings.value( "/Qgis/digitizing/default_snap_mode", "to vertex", type=str)
oldSnap['tolerance'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance", 0, type=float)
oldSnap['unit'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance_unit", 1, type=int )
settings.setValue( "/Qgis/digitizing/default_snap_mode", option['mode'] )
settings.setValue( "/Qgis/digitizing/default_snapping_tolerance", option['tolerance'] )
settings.setValue( "/Qgis/digitizing/default_snapping_tolerance_unit", option['unit'] )
return oldSnap
def canvasPressEvent(self, e):
if e.button() == Qt.RightButton:
self.isEmittingPoints = False
self.emit( SIGNAL("geometryDrawingEnded"), self.geometry() )
return
if e.button() == Qt.LeftButton:
self.isEmittingPoints = True
else:
return
point = self.toMapCoordinates( e.pos() )
self.rubberBand.addPoint( point, True ) # true to update canvas
self.rubberBand.show()
def canvasMoveEvent(self, e):
if not self.isEmittingPoints:
return
retval, snapResults = self.snapper.snapToBackgroundLayers( e.pos() )
if retval == 0 and len(snapResults) > 0:
point = snapResults[0].snappedVertex
else:
point = self.toMapCoordinates( e.pos() )
self.rubberBand.movePoint( point )
def isValid(self):
return self.rubberBand.numberOfVertices() > 0
def geometry(self):
if not self.isValid():
return None
geom = self.rubberBand.asGeometry()
if geom == None:
return
return QgsGeometry.fromWkt( geom.exportToWkt() )
def deactivate(self):
qgis.gui.QgsMapTool.deactivate(self)
self.reset()
self.emit(SIGNAL("deactivated()"))
class FeatureFinder(MapTool):
def __init__(self, canvas=None):
MapTool.__init__(self, qgis.gui.QgsMapToolEmitPoint, canvas=canvas)
QObject.connect(self.tool, SIGNAL( "canvasClicked(const QgsPoint &, Qt::MouseButton)" ), self.onEnd)
def onEnd(self, point, button):
self.stopCapture()
self.emit( SIGNAL("pointEmitted"), point, button )
@classmethod
def findAtPoint(self, layer, point, onlyTheClosestOne=True, onlyIds=False):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
try:
point = MapTool.canvas.mapSettings().mapToLayerCoordinates(layer, point)
except:
point = MapTool.canvas.mapRenderer().mapToLayerCoordinates(layer, point)
        # retrieve the search radius setting
settings = QSettings()
radius = settings.value( "/Map/identifyRadius", QGis.DEFAULT_IDENTIFY_RADIUS, float )
if radius <= 0:
# XXX: in QGis 1.8 QGis.DEFAULT_IDENTIFY_RADIUS is 0,
            # this causes the rectangle to be empty and the select
            # to return all the features...
radius = 0.5 # it means 0.50% of the canvas extent
radius = MapTool.canvas.extent().width() * radius/100.0
        # build the rectangle used for the search
rect = QgsRectangle()
rect.setXMinimum(point.x() - radius)
rect.setXMaximum(point.x() + radius)
rect.setYMinimum(point.y() - radius)
rect.setYMaximum(point.y() + radius)
        # retrieve the features that intersect the rectangle
#layer.select([], rect, True, True)
layer.select( rect, True )
ret = None
if onlyTheClosestOne:
minDist = -1
featureId = None
rect2 = QgsGeometry.fromRect(rect)
for f in layer.getFeatures(QgsFeatureRequest(rect)):
if onlyTheClosestOne:
geom = f.geometry()
distance = geom.distance(rect2)
if minDist < 0 or distance < minDist:
minDist = distance
featureId = f.id()
if onlyIds:
ret = featureId
elif featureId != None:
f
|
= layer.getFeatur
|
es(QgsFeatureRequest().setFilterFid( featureId ))
ret = f.next()
else:
IDs = [f.id() for f in layer.getFeatures(QgsFeatureRequest(rect))]
if onlyIds:
ret = IDs
else:
ret = []
for featureId in IDs:
f = layer.getFeatures(QgsFeatureRequest().setFilterFid( featureId ))
ret.append( f )
QApplication.restoreOverrideCursor()
return ret
class PolygonDrawer(MapTool):
class PolygonDrawer(MapTool.Drawer):
def __init__(self, canvas):
MapTool.Drawer.__init__(self, canvas, QGis.Polygon)
def __init__(self, canvas=None):
MapTool.__init__(self, self.PolygonDrawer, canvas)
class LineDrawer(MapTool):
class LineDrawer(MapTool.Drawer):
def __init__(self, canvas):
MapTool.Drawer.__init__(self, canvas, QGis.Line)
def __init__(self, canvas=None):
MapTool.__init__(self, self.LineDrawer, canvas)
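A minimal usage sketch (assuming a QGIS 2.x / PyQt4 environment in which QGIS hands the plugin an `iface` QgisInterface object and `layer` is the vector layer to query; neither name comes from the file above):
from PyQt4.QtCore import QObject, SIGNAL

def onPointClicked(point, button):
    # look up the feature closest to the clicked point (None if nothing is nearby)
    feature = FeatureFinder.findAtPoint(layer, point)
    if feature is not None:
        print feature.id()

MapTool.canvas = iface.mapCanvas()   # every MapTool instance shares this canvas
finder = FeatureFinder()             # emits "pointEmitted" on canvas clicks
QObject.connect(finder, SIGNAL("pointEmitted"), onPointClicked)
finder.startCapture()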
|
epitron/youtube-dl
|
youtube_dl/extractor/huajiao.py
|
Python
|
unlicense
| 1,850
| 0
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from
|
..utils import (
parse_duration,
parse_iso8601,
)
class HuajiaoIE(InfoExtractor):
IE_DESC = '花椒直播'
_VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
_TEST = {
|
'url': 'http://www.huajiao.com/l/38941232',
'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
'info_dict': {
'id': '38941232',
'ext': 'mp4',
'title': '#新人求关注#',
'description': 're:.*',
'duration': 2424.0,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1475866459,
'upload_date': '20161007',
'uploader': 'Penny_余姿昀',
'uploader_id': '75206005',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
feed_json = self._search_regex(
r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
feed = self._parse_json(feed_json, video_id)
description = self._html_search_meta(
'description', webpage, 'description', fatal=False)
def get(section, field):
return feed.get(section, {}).get(field)
return {
'id': video_id,
'title': feed['feed']['formated_title'],
'description': description,
'duration': parse_duration(get('feed', 'duration')),
'thumbnail': get('feed', 'image'),
'timestamp': parse_iso8601(feed.get('creatime'), ' '),
'uploader': get('author', 'nickname'),
'uploader_id': get('author', 'uid'),
'formats': self._extract_m3u8_formats(
feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
}
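A short assumed-usage sketch, exercising the extractor through youtube-dl's public YoutubeDL API with the URL from the _TEST block above (the option dict is illustrative):
import youtube_dl

# metadata-only run; URL matching dispatches to HuajiaoIE
ydl = youtube_dl.YoutubeDL({"quiet": True, "skip_download": True})
info = ydl.extract_info("http://www.huajiao.com/l/38941232", download=False)
print(info.get("title"), info.get("uploader"))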
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/carpet/_a0.py
|
Python
|
mit
| 380
| 0
|
import _plotly_utils.basevalidators
class A0Validator(_plotl
|
y_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="a0", parent_name="carpet", **kwargs):
super(A0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
|
)
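For context, a hedged sketch of where this generated validator is exercised: assigning the carpet trace's a0 property through the graph_objects API routes the value through A0Validator (a NumberValidator):
import plotly.graph_objects as go

# a non-numeric a0 would be rejected by the validator at assignment time
trace = go.Carpet(a0=0.5, da=1.0)
print(trace.a0)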
|
danhuss/faker
|
faker/providers/ssn/en_PH/__init__.py
|
Python
|
mit
| 2,596
| 0.005008
|
from ... import BaseProvider
class Provider(BaseProvider):
"""
Provider for Philippine IDs that are related to social security
There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
social programs and IDs that, when put together, serves as an analogue of other countries' social security program.
The government agencies responsible for these programs have relatively poor/outdated information and documentation
    on their respective websites, so the sources section includes third-party "unofficial" information.
- Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
- Government Service Insurance System (GSIS) - Social insurance program for government employees
- Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
- Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
- Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to
the four previous programs and was planned to supersede the previous IDs, but
its future is now uncertain because of the upcoming national ID system
Sources:
- https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
- https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
- https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
- https://filipiknow.net/is-umid-and-sss-id-the-same/
- https://filipiknow.net/philhealth-number/
- https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
"""
sss_formats = ('##-#######-#',)
gsis_formats = ('###########',)
philhealth_formats = ('##-#########-#',)
pagibig_formats = ('####-####-####',)
umid_formats = ('####-#######-#',)
def sss(self):
return self.numerify(self.random_element(self.sss_formats))
def gsis(self):
return self.numerify(self.random_element(self.gsis_formats))
def pagibig(self):
return self.numerify(self.random_element(self.pagibig_formats))
def philhealth(self):
return self.numerify(self.random_element(self.philhealth_formats))
def umid(self):
return self.numerify(self.rand
|
om_element(self.umid_formats))
def ssn(self):
# Use UMID as SSN in the interim till its deprecatio
|
n
return self.umid()
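A brief usage sketch, assuming a recent Faker release where the en_PH locale exposes these provider methods directly on the Faker instance:
from faker import Faker

fake = Faker("en_PH")
print(fake.sss())         # digits filled into the ##-#######-# format
print(fake.philhealth())
print(fake.pagibig())
print(fake.umid())
print(fake.ssn())         # currently an alias for umid(), per the provider above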
|
jorisroovers/gitlint
|
gitlint-core/gitlint/tests/samples/user_rules/parent_package/__init__.py
|
Python
|
mit
| 336
| 0.002985
|
# -*- coding: utf-8 -*-
# This file is
|
meant to test that we can also load rules from __init__.py files; this was an issue with pypy before.
from gitlint.rules import CommitRule
class InitFileRule(CommitRule):
name = "my-init-cömmit-rule"
id = "UC1"
options_spec = []
def validate(s
|
elf, _commit):
return []
|
thinkernel/buck
|
src/com/facebook/buck/command/intellij.py
|
Python
|
apache-2.0
| 15,347
| 0.008927
|
import errno
import fnmatch
import json
import os
import re
import subprocess
import sys
MODULE_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<module type="%(type)s" version="4">"""
MODULE_XML_END = """
</module>
"""
ANDROID_FACET = """
<component name="FacetManager">
<facet type="android" name="Android">
<configuration>
<option name="GEN_FOLDER_RELATIVE_PATH_APT" value="%(module_gen_path)s" />
<option name="GEN_FOLDER_RELATIVE_PATH_AIDL" value="%(module_gen_path)s" />
<option name="MANIFEST_FILE_RELATIVE_PATH" value="%(android_manifest)s" />
<option name="RES_FOLDER_RELATIVE_PATH" value="%(res)s" />
<option name="ASSETS_FOLDER_RELATIVE_PATH" value="/assets" />
<option name="LIBS_FOLDER_RELATIVE_PATH" value="%(libs_path)s" />
<option name="USE_CUSTOM_APK_RESOURCE_FOLDER" value="false" />
<option name="CUSTOM_APK_RESOURCE_FOLDER" value="" />
<option name="USE_CUSTOM_COMPILER_MANIFEST" value="false" />
<option name="CUSTOM_COMPILER_MANIFEST" value="" />
<option name="APK_PATH" value="" />
<option name="LIBRARY_PROJECT" value="%(is_android_library_project)s" />
<option name="RUN_PROCESS_RESOURCES_MAVEN_TASK" value="true" />
<option name="GENERATE_UNSIGNED_APK" value="false" />
<option name="CUSTOM_DEBUG_KEYSTORE_PATH" value="%(keystore)s" />
<option name="PACK_TEST_CODE" value="false" />
<option name="RUN_PROGUARD" value="%(run_proguard)s" />
<option name="PROGUARD_CFG_PATH" value="%(proguard_config)s" />
<resOverlayFolders />
<includeSystemProguardFile>false</includeSystemProguardFile>
<includeAssetsFromLibraries>true</includeAssetsFromLibraries>
<additionalNativeLibs />
</configuration>
</facet>
</component>"""
ALL_MODULES_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>"""
ALL_MODULES_XML_END = """
</modules>
</component>
</project>
"""
LIBRARY_XML_START = """<component name="libraryTable">
<library name="%(name)s">
<CLASSES>
<root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" />
</CLASSES>"""
LIBRARY_XML_WITH_JAVADOC = """
<JAVADOC>
<root url="%(javadoc_url)s" />
</JAVADOC>"""
LIBRARY_XML_NO_JAVADOC = """
<JAVADOC />"""
LIBRARY_XML_WITH_SOURCES = """
<SOURCES>
<root url="jar://$PROJECT_DIR$/%(source_jar)s!/" />
</SOURCES>"""
LIBRARY_XML_NO_SOURCES = """
<SOURCES />"""
LIBRARY_XML_END = """
</library>
</component>
"""
RUN_CONFIG_XML_START = """<component name="ProjectRunConfigurationManager">"""
RUN_CONFIG_XML_END = "</component>"
REMOTE_RUN_CONFIG_XML = """
<configuration default="false" name="%(name)s" type="Remote" factoryName="Remote">
<option name="USE_SOCKET_TRANSPORT" value="true" />
<option name="SERVER_MODE" value="false" />
<option name="SHMEM_ADDRESS" value="javadebug" />
<option name="HOST" value="localhost" />
<option name="PORT" value="5005" />
<RunnerSettings RunnerId="Debug">
<option name="DEBUG_PORT" value="5005" />
<option name="TRANSPORT" value="0" />
<option name="LOCAL" value="false" />
</RunnerSettings>
<ConfigurationWrapper RunnerId="Debug" />
<method />
</configuration>
"""
# Files that were written by this script.
# If `buck project` is working properly, most of the time it will be a no-op
# and no files will need to be written.
MODIFIED_FILES = []
# Files that are part of the project being run. We will delete all .iml files
# that are not checked in and not in this set.
PROJECT_FILES = set()
def write_modules(modules):
"""Writes one XML file for each module."""
for module in modules:
# Build up the XML.
module_type = 'JAVA_MODULE'
if 'isIntelliJPlugin' in module and module['isIn
|
telliJPlugin']:
module_type = 'PLUGIN_MODULE'
xml = MODULE_XML_START % {
'type': module_type,
}
# Android facet, if appropriate.
if module.get('hasAndroidFacet') == True:
if 'keystorePath' in module:
keystore = 'file://$MODULE_DIR$/%s' % module['keystorePath']
else:
keystore = ''
if 'androidManifest' in module:
|
android_manifest = module['androidManifest']
else:
android_manifest = '/AndroidManifest.xml'
is_library_project = module['isAndroidLibraryProject']
android_params = {
'android_manifest': android_manifest,
'res': '/res',
'is_android_library_project': str(is_library_project).lower(),
'run_proguard': 'false',
'module_gen_path': module['moduleGenPath'],
'proguard_config': '/proguard.cfg',
'keystore': keystore,
'libs_path' : '/%s' % module.get('nativeLibs', 'libs'),
}
xml += ANDROID_FACET % android_params
# Source code and libraries component.
xml += '\n <component name="NewModuleRootManager" inherit-compiler-output="true">'
# Empirically, if there are multiple source folders, then the <content> element for the
# buck-out/android/gen folder should be listed before the other source folders.
num_source_folders = len(module['sourceFolders'])
if num_source_folders > 1:
xml = add_buck_android_source_folder(xml, module)
# Source folders.
xml += '\n <content url="file://$MODULE_DIR$">'
for source_folder in module['sourceFolders']:
if 'packagePrefix' in source_folder:
package_prefix = 'packagePrefix="%s" ' % source_folder['packagePrefix']
else:
package_prefix = ''
xml += '\n <sourceFolder url="%(url)s" isTestSource="%(is_test_source)s" %(package_prefix)s/>' % {
'url': source_folder['url'],
'is_test_source': str(source_folder['isTestSource']).lower(),
'package_prefix': package_prefix
}
for exclude_folder in module['excludeFolders']:
xml += '\n <excludeFolder url="%s" />' % exclude_folder['url']
xml += '\n </content>'
xml = add_annotation_generated_source_folder(xml, module)
# Empirically, if there is one source folder, then the <content> element for the
# buck-out/android/gen folder should be listed after the other source folders.
if num_source_folders <= 1:
xml = add_buck_android_source_folder(xml, module)
# Dependencies.
dependencies = module['dependencies']
module_name = module['name']
# We need to filter out some of the modules in the dependency list:
# (1) The module may list itself as a dependency with scope="TEST", which is bad.
# (2) The module may list another module as a dependency with both COMPILE and TEST scopes, in
# which case the COMPILE scope should win.
# compile_dependencies will be the set of names of dependent modules that do not have scope="TEST"
compile_dependencies = filter(lambda dep: dep['type'] == 'module' and
((not ('scope' in dep)) or dep['scope'] != 'TEST'),
dependencies)
compile_dependencies = map(lambda dep: dep['moduleName'], compile_dependencies)
compile_dependencies = set(compile_dependencies)
# Filter dependencies to satisfy (1) and (2) defined above.
filtered_dependencies = []
for dep in dependencies:
if dep['type'] != 'module':
# Non-module dependencies should still be included.
filtered_dependencies.append(dep)
else:
# dep must be a module
dep_module_name = dep['moduleName']
if dep_module_name == module_name:
# Exclude self-references!
continue
elif 'scope' in dep and dep['scope'] == 'TEST':
# If this is a scope="TEST" module and the module is going to be included as
# a scope="COMPILE" module, then exclude it.
if not (dep_module_name in compile_dependencies):
filtered_dependencies.append(dep)
else:
# Non-test modules should still be included.
filtered_dependencies.append(dep)
# Now that we have filtered the dependencies, we can convert the remaining ones directly into
|
largetalk/tenbagger
|
capital/reactor/cc/migrations/0004_auto_20180313_1052.py
|
Python
|
mit
| 386
| 0
|
# Generated by Django 2.0.2 on 2018-03-13 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cc', '
|
0003_auto_20180228_1145'),
]
operations = [
migrations.AlterF
|
ield(
model_name='creditcard',
name='tail_no',
field=models.CharField(max_length=10),
),
]
|
cc-archive/jtoolkit
|
jToolkit/localize.py
|
Python
|
gpl-2.0
| 6,307
| 0.015538
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""extends the standard Python gettext classes
allows multiple simultaneous domains... (makes multiple sessions with different languages easier too)"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gettext
import locale
import os.path
from errno import ENOENT
from jToolkit import languagenames
class ManyTranslations(gettext.NullTranslations):
"""this proxies to many translations"""
def __init__(self, translations=None):
"""Takes an optional sequence of translations."""
gettext.NullTranslations.__init__(self)
if translations is None:
self.translations = []
else:
self.translations = translations
def gettext(self, message):
"""gets the translation of the message by searching through all the domains"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
if tmsg is not None:
return tmsg
return message
def ngettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
      pluralfunc = translation.plural
      tmsg = translation._catalog.get((singular, pluralfunc(n)), None)
if tmsg is not None:
return tmsg
if n == 1:
return singular
else:
return plural
def ugettext(self, message):
"""gets the translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
return unicode(message)
def ungettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
      pluralfunc = translation.plural
      tmsg = translation._catalog.get((singular, pluralfunc(n)), None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
if n == 1:
return unicode(singular)
else:
return unicode(plural)
def getinstalledlanguages(localedir):
"""looks in localedir and returns a list of languages installed there"""
languages = []
def visit(arg, dirname, names):
if 'LC_MESSAGES' in names:
languages.append(os.path.basename(dirname))
os.path.walk(localedir, visit, None)
return languages
def getlanguagenames(languagecodes):
"""return a dictionary mapping the language code to the language name..."""
return dict([(code, languagenames.languagenames.get(code, code)) for code in languagecodes])
def findmany(domains, localedir=None, languages
|
=None):
"""same as gettext.find, but handles many domains, returns many mofiles (not just one)"""
mofiles = []
if languages is None:
languages = getin
|
stalledlanguages(localedir)
for domain in domains:
mofile = gettext.find(domain, localedir, languages)
mofiles.append(mofile)
return mofiles
def translation(domains, localedir=None, languages=None, class_=None):
"""same as gettext.translation, but handles many domains, returns a ManyTranslations object"""
if class_ is None:
class_ = gettext.GNUTranslations
mofiles = findmany(domains, localedir, languages)
  # we'll just use null translations where domains are missing; the commented-out code below would raise instead:
# if None in mofiles:
# missingindex = mofiles.index(None)
# raise IOError(ENOENT, 'No translation file found for domain', domains[missingindex])
translations = []
for mofile in mofiles:
if mofile is None:
t = gettext.NullTranslations()
t._catalog = {}
else:
key = os.path.abspath(mofile)
t = gettext._translations.get(key)
if t is None:
t = gettext._translations.setdefault(key, class_(open(mofile, 'rb')))
translations.append(t)
return ManyTranslations(translations)
def getdefaultlanguage(languagelist):
"""tries to work out the default language from a list"""
def reducelocale(locale):
pos = locale.find('_')
if pos == -1:
return locale
else:
return locale[:pos]
currentlocale, currentencoding = locale.getlocale()
try:
defaultlocale, defaultencoding = locale.getdefaultlocale()
except ValueError:
defaultlocale, defaultencoding = None, None
if len(languagelist) > 0:
if currentlocale is not None:
if currentlocale in languagelist:
return currentlocale
elif reducelocale(currentlocale) in languagelist:
return reducelocale(currentlocale)
if defaultlocale is not None:
if defaultlocale in languagelist:
return defaultlocale
elif reducelocale(defaultlocale) in languagelist:
return reducelocale(defaultlocale)
return languagelist[0]
else:
# if our language list is empty, we'll just ignore it
if currentlocale is not None:
return currentlocale
elif defaultlocale is not None:
return defaultlocale
return None
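A brief usage sketch (Python 2, matching the module above; the locale directory and domain names are illustrative, not taken from jToolkit):
from jToolkit import localize

localedir = "/usr/share/locale"                      # illustrative path
languages = localize.getinstalledlanguages(localedir)
lang = localize.getdefaultlanguage(languages)
# one ManyTranslations object that searches both domains in order
t = localize.translation(["myapp", "mylib"], localedir=localedir,
                         languages=[lang] if lang else None)
print t.ugettext("Hello")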
|
capital-boss/plate-recognition
|
msp.py
|
Python
|
apache-2.0
| 1,393
| 0.002872
|
import sys
import cv2
import helper as hp
class MSP():
name = "MSP"
def __init__(self):
self.__patterns_num = []
self.__patterns_sym = []
self.__labels_num = []
self.__labels_sym = []
msp_num, msp_sym = "msp/num", "msp/sym"
self.__load_num_patterns(msp_num)
self.__load_sym_patterns(msp_sym)
print 'loading MSP...'
def __load_num_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_num = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_num = [hp.get_test(path, "num")[0] for path in paths]
def __load_sym_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_sym = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_sym = [hp.get_test(path, "sym")[0] for path in paths]
def __get_mode(self, mode):
if mode == "num":
return self.__labels_num,
|
self.__patterns
|
_num
elif mode == "sym":
return self.__labels_sym, self.__patterns_sym
def rec(self, img, mode):
tmp_max, tmp, rec = sys.maxint, 0, 0
labels, patterns = self.__get_mode(mode)
for pattern, label in zip(patterns, labels):
tmp = cv2.countNonZero(pattern - img)
if tmp < tmp_max: tmp_max, rec = tmp, label
return rec
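A brief usage sketch (the sample path is hypothetical; the input glyph must already be cropped, binarised and sized like the stored patterns):
import cv2
from msp import MSP

recognizer = MSP()                                   # loads patterns from msp/num and msp/sym
glyph = cv2.imread("plates/char_01.png", cv2.IMREAD_GRAYSCALE)
print recognizer.rec(glyph, "num")                   # label with the smallest pixel difference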
|