| column | type | range |
|---|---|---|
| repo_name | string | length 5-100 |
| path | string | length 4-231 |
| language | string | 1 class (Python) |
| license | string | 15 classes |
| size | int64 | 6-947k |
| score | float64 | 0-0.34 |
| prefix | string | length 0-8.16k |
| middle | string | length 3-512 |
| suffix | string | length 0-8.17k |

Each row below lists repo_name | path | language | license | size | score, followed by the sampled file content (prefix, middle, and suffix shown as one continuous excerpt).

mvanveen/cargo | cargo/image.py | Python | mit | 1,022 | 0.019569
from cargo.base import CargoBase, lowercase, make_id_dict
class Image(CargoBase):
"""Python wrapper class encapsulating the metadata for a Docker Image"""
def __init__(self, *args, **kw):
super(Image, self).__init__(*args, **kw)
@property
def config(self):
image = make_id_dict(self._dock._images).get(self._config.get('id'))
if image:
self._config = lowercase(image)
return self._config
@property
def image(self):
return self.config.get('image')
@property
def size(self):
return self.config.get('size')
@property
def vsize(self):
return self.config.get('virtualsize')
@property
def image_id(self):
return self.config.get('id')
@property
def repository(self):
return self.config.get('repository')
@property
def tag(self):
return self.config.get('tag')
def __repr__(self):
if self.repository:
return '<Image [%s:%s]>' % (self.repository, self.image_id[:12])
return '<Image [%s]>' % (self.image_id[:12],)
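
A brief usage sketch for the wrapper above. Only `cargo.base` is referenced in this excerpt, so the `dock` client object below and the shape of its image listing are assumptions made purely for illustration:

```python
# Hypothetical sketch: `dock` stands in for whatever client populates
# self._dock with the daemon's parsed `docker images` output; the real
# constructor signature lives in CargoBase, which is not shown here.
for img in dock.images():           # assumed call returning Image objects
    print(img.repository, img.tag, img.size)
    print(repr(img))                # e.g. <Image [8dbd9e392a96]>
```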

jnez71/lqRRT | demos/lqrrt_ros/behaviors/car.py | Python | mit | 3,231 | 0.005262
"""
Constructs a planner that is good for being kinda like a car-boat thing!
"""
from __future__ import division
import numpy as np
import numpy.linalg as npl
from params import *
import lqrrt
################################################# DYNAMICS
magic_rudder = 6000
def dynamics(x, u, dt):
"""
Returns next state given last state x, wrench u, and timestep dt.
"""
# Rotation matrix (orientation, converts body to world)
R = np.array([
[np.cos(x[2]), -np.sin(x[2]), 0],
[np.sin(x[2]), np.cos(x[2]), 0],
[ 0, 0, 1]
])
# Construct drag coefficients based on our motion signs
D = np.copy(D_neg)
for i, v in enumerate(x[3:]):
if v >= 0:
D[i] = D_pos[i]
# Heading controller trying to keep us car-like
vw = R[:2, :2].dot(x[3:5])
ang = np.arctan2(vw[1], vw[0])
c = np.cos(x[2])
s = np.sin(x[2])
cg = np.cos(ang)
sg = np.sin(ang)
u[2] = magic_rudder*np.arctan2(sg*c - cg*s, cg*c + sg*s)
# Actuator saturation
u = B.dot(np.clip(invB.dot(u), -thrust_max, thrust_max))
# M*vdot + D*v = u and pdot = R*v
xdot = np.concatenate((R.dot(x[3:]), invM*(u - D*x[3:])))
# First-order integrate
xnext = x + xdot*dt
# Impose not driving backwards
if xnext[3] < 0:
xnext[3] = abs(x[3])
# # Impose not turning in place
# xnext[5] = np.clip(np.abs(xnext[3]/velmax_pos[0]), 0, 1) * xnext[5]
return xnext
################################################# POLICY
kp = np.diag([150, 150, 0])
kd = np.diag([150, 5, 0])
S = np.diag([1, 1, 1, 0, 0, 0])
def lqr(x, u):
"""
Returns cost-to-go matrix S and policy matrix K given local state x and effort u.
"""
R = np.array([
[np.cos(x[2]), -np.sin(x[2]), 0],
[np.sin(x[2]), np.cos(x[2]), 0],
[ 0, 0, 1]
])
K = np.hstack((kp.dot(R.T), kd))
return (S, K)
################################################# HEURISTICS
goal_buffer = [0.5*free_radius, 0.5*free_radius, np.inf, np.inf, np.inf, np.inf]
error_tol = np.copy(goal_buffer)/10
def gen_ss(seed, goal, buff=[ss_start]*4):
"""
Returns a sample space given a seed state, goal state, and buffer.
"""
return [(min([seed[0], goal[0]]) - buff[0], max([seed[0], goal[0]]) + buff[1]),
(min([seed[1], goal[1]]) - buff[2], max([seed[1], goal[1]]) + buff[3]),
(-np.pi, np.pi),
(0.9*velmax_pos[0], velmax_pos[0]),
(-abs(velmax_neg[1]), velmax_pos[1]),
(-abs(velmax_neg[2]), velmax_pos[2])]
################################################# MAIN ATTRIBUTES
constraints = lqrrt.Constraints(nstates=nstates, ncontrols=ncontrols,
goal_buffer=goal_buffer, is_feasible=unset)
planner = lqrrt.Planner(dynamics, lqr, constraints,
horizon=horizon, dt=dt, FPR=FPR,
error_tol=error_tol, erf=unset,
min_time=basic_duration, max_time=basic_duration, max_nodes=max_nodes,
sys_time=unset, printing=False)
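
A hedged sketch of driving this planner, following the pattern of lqrrt's other demos; `set_goal`, `update_plan`, `get_state`, and the `T` attribute are assumed from that API, and the `unset` hooks above must be rebound to real callables first:

```python
# Sketch only: assumes lqrrt.Planner exposes set_goal/update_plan/get_state
# as in the package's demos, and that is_feasible/erf/sys_time were rebound.
import numpy as np

x0 = np.zeros(nstates)                          # start at rest at the origin
goal = np.array([40.0, 40.0, np.deg2rad(90), 0, 0, 0])

planner.set_goal(goal)
planner.update_plan(x0, sample_space=gen_ss(x0, goal))
print(planner.get_state(planner.T))             # planned state at final time
```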

nive/nive | nive/utils/dataPool2/tests/performance_test.py | Python | gpl-3.0 | 12,761 | 0.014262
import time
import copy
from nive.utils.dataPool2.mysql.tests import test_MySql
try:
from nive.utils.dataPool2.mysql.mySqlPool import *
except ImportError:
pass
from . import test_db
from nive.utils.dataPool2.sqlite.sqlite3Pool import *
mode = "mysql"
printed = [""]
def print_(*kw):
if type(kw)!=type(""):
v = ""
for a in kw:
v += " "+str(a)
else:
v = kw
if v == "":
print(".",)
printed.append("")
else:
printed[-1] += v
def getConnection():
if mode == "mysql":
c = MySqlConn(test_MySql.conn, 0)
print_( "MySQL -")
elif mode == "mysqlinno":
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
c = MySqlConn(c, 0)
print_( "MySQL InnoDB -")
else:
c = Sqlite3Conn(test_db.conn, 0)
print_( "Sqlite 3 -")
return c
def getPool():
if mode == "mysql":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print_( "MySQL -")
elif mode == "mysqlinno":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
pool.CreateConnection(c)
print_( "MySQL InnoDB -")
else:
pool = Sqlite3(test_db.conf)
pool.SetStdMeta(copy.copy(test_db.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_db.struct)
pool.CreateConnection(test_db.conn)
print_( "Sqlite 3 -")
return pool
def empty():
#if mode == "mysql":
# test_MySql.emptypool()
#elif mode == "mysqlinno":
# test_MySql.emptypool()
#else:
# t_db.emptypool()
pass
def connects(n):
c = getConnection()
print_( "Connection: ")
t = time.time()
for i in range(0,n):
c.connect()
c.Close()
t2 = time.time()
print_( n, " connects in ", t2-t, "secs. ", (t2-t)/n, " per connect")
print_()
def cursors(n):
c = getConnection()
c.connect()
print_( "Cursor: ")
t = time.time()
for i in range(0,n):
cu = c.cursor()
cu.close()
t2 = time.time()
c.Close()
print_( n, " cursors in ", t2-t, "secs. ", (t2-t)/n, " per cursor")
print_()
def createsql(n):
pool = getPool()
print_( "Create SQL: ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "", "fnumber": 3},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "<>", "fnumber": ">"},
start=1,
max=123)
t2 = time.time()
pool.Close()
print_( n, " sql statements in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery1(n, start):
pool = getPool()
print_( "SQL Query data+meta (join no index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "123", "fnumber": i+start},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "LIKE", "fnumber": "!="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery2(n, start):
pool = getPool()
print_( "SQL Query data+meta=id (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"id": i+start},
sort = "title",
ascending = 0,
dataTable = "data1",
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery3(n, start):
pool = getPool()
print_( "SQL Query meta=id (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery4(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1 (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1"},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery5(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1+data.funit (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1", "funit": 35},
sort = "id",
ascending = 0,
dataTable = "data1",
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery6(n):
pool = getPool()
print_( "SQL Query filename (text index): ")
t = time.time()
for i in range(0,n):
files = pool.SearchFilename("file1xxx.txt")
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def createentries(n):
pool = getPool()
print_( "Create entries (nodb): ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def checkentries(n):
pool = getPool()
print_( "Create entries (nodb) and check exists: ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
e.Exists()
t2 = time.time()
pool.Close()
print_( n, " checks in ", t2-t, "secs. ", (t2-t)/n, " per check")
print_()
def createentries2(n):
pool = getPool()
print_( "Create entries (nodata): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
#e.data.update(data1_1)
#e.meta.update(meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def createentries3(n):
pool = getPool()
print_( "Create entries (data+meta): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def createentries4(n):
pool = getPool()
print_( "Create entries (data+meta+file): ")
t = time.time()
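
The excerpt cuts off inside `createentries4`. Below is a minimal, hypothetical driver for the benchmarks above; the truncated tail of the file may already define one:

```python
# Hypothetical driver: any mode other than "mysql"/"mysqlinno" falls through
# to the sqlite3 branch in getConnection()/getPool().
if __name__ == "__main__":
    mode = "sqlite3"
    connects(100)
    cursors(100)
    createsql(100)
    for line in printed:
        print(line)
```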

ashang/calibre | src/odf/opendocument.py | Python | gpl-3.0 | 26,274 | 0.004948
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
__doc__="""Use OpenDocument to generate your documents."""
import zipfile, time, sys, mimetypes, copy
from cStringIO import StringIO
from namespaces import *
import manifest, meta
from office import *
import element
from attrconverters import make_NCName
from xml.sax.xmlreader import InputSource
from odfmanifest import manifestlist
__version__= TOOLSVERSION
_XMLPROLOGUE = u"<?xml version='1.0' encoding='UTF-8'?>\n"
UNIXPERMS = 0100644 << 16L # -rw-r--r--
IS_FILENAME = 0
IS_IMAGE = 1
# We need at least Python 2.2
assert sys.version_info[0]>=2 and sys.version_info[1] >= 2
#sys.setrecursionlimit(100)
#The recursion limit is set conservative so mistakes like
# s=content() s.addElement(s) won't eat up too much processor time.
odmimetypes = {
'application/vnd.oasis.opendocument.text': '.odt',
'application/vnd.oasis.opendocument.text-template': '.ott',
'application/vnd.oasis.opendocument.graphics': '.odg',
'application/vnd.oasis.opendocument.graphics-template': '.otg',
'application/vnd.oasis.opendocument.presentation': '.odp',
'application/vnd.oasis.opendocument.presentation-template': '.otp',
'application/vnd.oasis.opendocument.spreadsheet': '.ods',
'application/vnd.oasis.opendocument.spreadsheet-template': '.ots',
'application/vnd.oasis.opendocument.chart': '.odc',
'application/vnd.oasis.opendocument.chart-template': '.otc',
'application/vnd.oasis.opendocument.image': '.odi',
'application/vnd.oasis.opendocument.image-template': '.oti',
'application/vnd.oasis.opendocument.formula': '.odf',
'application/vnd.oasis.opendocument.formula-template': '.otf',
'application/vnd.oasis.opendocument.text-master': '.odm',
'application/vnd.oasis.opendocument.text-web': '.oth',
}
class OpaqueObject:
def __init__(self, filename, mediatype, content=None):
self.mediatype = mediatype
self.filename = filename
self.content = content
class OpenDocument:
""" A class to hold the content of an OpenDocument document
Use the xml method to write the XML
source to the screen or to a file
d = OpenDocument(mimetype)
fd.write(d.xml())
"""
thumbnail = None
def __init__(self, mimetype, add_generator=True):
self.mimetype = mimetype
self.childobjects = []
self._extra = []
self.folder = "" # Always empty for toplevel documents
self.topnode = Document(mimetype=self.mimetype)
self.topnode.ownerDocument = self
self.clear_caches()
self.Pictures = {}
self.meta = Meta()
self.topnode.addElement(self.meta)
if add_generator:
self.meta.addElement(meta.Generator(text=TOOLSVERSION))
self.scripts = Scripts()
self.topnode.addElement(self.scripts)
self.fontfacedecls = FontFaceDecls()
self.topnode.addElement(self.fontfacedecls)
self.settings = Settings()
self.topnode.addElement(self.settings)
self.styles = Styles()
self.topnode.addElement(self.styles)
self.automaticstyles = AutomaticStyles()
self.topnode.addElement(self.automaticstyles)
self.masterstyles = MasterStyles()
self.topnode.addElement(self.masterstyles)
self.body = Body()
self.topnode.addElement(self.body)
def rebuild_caches(self, node=None):
if node is None: node = self.topnode
self.build_caches(node)
for e in node.childNodes:
if e.nodeType == element.Node.ELEMENT_NODE:
self.rebuild_caches(e)
def clear_caches(self):
self.element_dict = {}
self._styles_dict = {}
self._styles_ooo_fix = {}
def build_caches(self, element):
""" Called from element.py
"""
if not self.element_dict.has_key(element.qname):
self.element_dict[element.qname] = []
self.element_dict[element.qname].append(element)
if element.qname == (STYLENS, u'style'):
self.__register_stylename(element) # Add to style dictionary
styleref = element.getAttrNS(TEXTNS,u'style-name')
if styleref is not None and self._styles_ooo_fix.has_key(styleref):
element.setAttrNS(TEXTNS,u'style-name', self._styles_ooo_fix[styleref])
def __register_stylename(self, element):
''' Register a style. But there are three style dictionaries:
office:styles, office:automatic-styles and office:master-styles
Chapter 14
'''
name = element.getAttrNS(STYLENS, u'name')
if name is None:
return
if element.parentNode.qname in ((OFFICENS,u'styles'), (OFFICENS,u'automatic-styles')):
if self._styles_dict.has_key(name):
newname = 'M'+name # Rename style
self._styles_ooo_fix[name] = newname
# From here on all references to the old name will refer to the new one
name = newname
element.setAttrNS(STYLENS, u'name', name)
self._styles_dict[name] = element
def toXml(self, filename=''):
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.body.toXml(0, xml)
if not filename:
return xml.getvalue()
else:
f=file(filename,'w')
f.write(xml.getvalue())
f.close()
def xml(self):
""" Generates the full document as an XML file
Always written as a bytestream in UTF-8 encoding
"""
self.__replaceGenerator()
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.topnode.toXml(0, xml)
return xml.getvalue()
def contentxml(self):
""" Generates the content.xml file
Always written as a bytestream in UTF-8 encoding
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
x = DocumentContent()
x.write_open_tag(0, xml)
if self.scripts.hasChildNodes():
self.scripts.toXml(1, xml)
if self.fontfacedecls.hasChildNodes():
self.fontfacedecls.toXml(1, xml)
a = AutomaticStyles()
stylelist = self._used_auto_styles([self.styles, self.automaticstyles, self.body])
if len(stylelist) > 0:
a.write_open_tag(1, xml)
for s in stylelist:
s.toXml(2, xml)
a.write_close_tag(1, xml)
else:
a.toXml(1, xml)
self.body.toXml(1, xml)
x.write_close_tag(0, xml)
return xml.getvalue()
def __manifestxml(self):
""" Generates the manifest.xml file
The self.manifest isn't avaible unless the document is being saved
"""
xml=StringIO()
xml.write(_XMLPROLOGUE)
self.manifest.toXml(0,xml)
return xml.getvalue()
def metaxml(self):
""" Generates the meta.xml file """
self.__replaceGenerator()
x = DocumentMeta()
x.addElement(self.meta)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
def settingsxml(self):
""" Generates the settings.xml file """
x = DocumentSettings()
x.addElement(self.settings)
xml=StringIO()
xml.write(_XMLPROLOGUE)
x.toXml(0,xml)
return xml.getvalue()
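
A usage sketch following the class's own docstring (`d = OpenDocument(mimetype); fd.write(d.xml())`). `OpenDocumentText` is odfpy's conventional convenience subclass; it sits in the truncated remainder of this module, so treating it as available here is an assumption:

```python
# Minimal odfpy-style usage sketch (Python 2 era, matching this module).
from odf.opendocument import OpenDocumentText
from odf.text import P

doc = OpenDocumentText()
doc.text.addElement(P(text=u"Hello OpenDocument"))
doc.save("hello.odt")
```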

andresailer/DIRAC | DataManagementSystem/Client/FTS3Client.py | Python | gpl-3.0 | 3,920 | 0.008673
import json
from DIRAC.Core.Base.Client import Client
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder
class FTS3Client(Client):
""" Client code to the FTS3 service
"""
def __init__(self, url=None, **kwargs):
""" Constructor function.
"""
Client.__init__(self, **kwargs)
self.setServer('DataManagement/FTS3Manager')
if url:
self.setServer(url)
def persistOperation(self, opObj, **kwargs):
""" Persist (insert/update) an FTS3Operation object into the db
:param opObj: instance of FTS3Operation
"""
# In case someone manually set sourceSEs as a list:
if isinstance(opObj.sourceSEs, list):
opObj.sourceSEs = ','.join(opObj.sourceSEs)
opJSON = opObj.toJSON()
return self._getRPC(**kwargs).persistOperation(opJSON)
def getOperation(self, operationID, **kwargs):
""" Get the FTS3Operation from the database
:param operationID: id of the operation
:return: FTS3Operation object
"""
res = self._getRPC(**kwargs).getOperation(operationID)
if not res['OK']:
return res
opJSON = res['Value']
try:
opObj = json.loads(opJSON, cls=FTS3JSONDecoder)
return S_OK(opObj)
except Exception as e:
return S_ERROR("Exception when decoding the FTS3Operation object %s" % e)
def getActiveJobs(self, limit=20, lastMonitor=None, jobAssignmentTag='Assigned', ** kwargs):
""" Get all the FTSJobs that are not in a final state
:param limit: max number of jobs to retrieve
:return: list of FTS3Jobs
"""
res = self._getRPC(**kwargs).getActiveJobs(limit, lastMonitor, jobAssignmentTag)
if not res['OK']:
return res
activeJobsJSON = res['Value']
try:
activeJobs = json.loads(activeJobsJSON, cls=FTS3JSONDecoder)
return S_OK(activeJobs)
except Exception as e:
return S_ERROR("Exception when decoding the active jobs json %s" % e)
def updateFileStatus(self, fileStatusDict, ftsGUID=None, **kwargs):
""" Update the file ftsStatus and error
:param fileStatusDict : { fileID : { status , error, ftsGUID } }
:param ftsGUID: if specified, only update the files having a matching ftsGUID
"""
return self._getRPC(**kwargs).updateFileStatus(fileStatusDict, ftsGUID)
def updateJobStatus(self, jobStatusDict, **kwargs):
""" Update the job Status and error
:param jobStatusDict : { jobID : { status , error } }
"""
return self._getRPC(**kwargs).updateJobStatus(jobStatusDict)
def getNonFinishedOperations(self, limit=20, operationAssignmentTag="Assigned", **kwargs):
""" Get all the FTS3Operations that have files in New or Failed state
(reminder: Failed is NOT terminal for files. Failed is when fts failed, but we
can retry)
:param limit: max number of jobs to retrieve
:return: json list of FTS3Operation
"""
res = self._getRPC(**kwargs).getNonFinishedOperations(limit, operationAssignmentTag)
if not res['OK']:
return res
operationsJSON = res['Value']
try:
operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
return S_OK(operations)
except Exception as e:
return S_ERROR(0, "Exception when decoding the non finis
|
hed operations json %s" % e)
def getOperationsFromRMSOpID(self, rmsOpID, **kwargs):
""" Get the FTS3Operations matching a given RMS Operation
:param rmsOpID: id of the operation in the RMS
:return: list of FTS3Operation objects
"""
res = self._getRPC(**kwargs).getOperationsFromRMSOpID(rmsOpID)
if not res['OK']:
return res
operationsJSON = res['Value']
try:
operations = json.loads(operationsJSON, cls=FTS3JSONDecoder)
return S_OK(operations)
except Exception as e:
return S_ERROR(0, "Exception when decoding the operations json %s" % e)

aronsky/home-assistant | homeassistant/components/cloud/google_config.py | Python | apache-2.0 | 8,528 | 0.000938
"""Google config for Cloud."""
import asyncio
from http import HTTPStatus
import logging
from hass_nabucasa import Cloud, cloud_api
from hass_nabucasa.google_report_state import ErrorResponse
from homeassistant.components.google_assistant.const import DOMAIN as GOOGLE_DOMAIN
from homeassistant.components.google_assistant.helpers import AbstractConfig
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES,
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
from homeassistant.core import CoreState, split_entity_id
from homeassistant.helpers import entity_registry as er, start
from homeassistant.setup import async_setup_component
from .const import (
CONF_ENTITY_CONFIG,
DEFAULT_DISABLE_2FA,
PREF_DISABLE_2FA,
PREF_SHOULD_EXPOSE,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
class CloudGoogleConfig(AbstractConfig):
"""HA Cloud Configur
|
ation for Google Assistant."""
def __init__(
self, hass, config, cloud_user: str, prefs: CloudPreferences, cloud: Cloud
):
"""Initialize the Google config."""
super().__init__(hass)
self._config = config
self._user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._cur_entity_prefs = self._prefs.google_entity_configs
self._cur_default_expose = self._prefs.google_default_expose
self._sync_entities_lock = asyncio.Lock()
self._sync_on_started = False
@property
def enabled(self):
"""Return if Google is enabled."""
return (
self._cloud.is_logged_in
and not self._cloud.subscription_expired
and self._prefs.google_enabled
)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._prefs.google_secure_devices_pin
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self.enabled and self._prefs.google_report_state
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook.
Return None to disable the local SDK.
"""
return self._prefs.google_local_webhook_id
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
return self._user
@property
def cloud_user(self):
"""Return Cloud User account."""
return self._user
async def async_initialize(self):
"""Perform async initialization of config."""
await super().async_initialize()
async def hass_started(hass):
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
start.async_at_start(self.hass, hass_started)
# Remove old/wrong user agent ids
remove_agent_user_ids = []
for agent_user_id in self._store.agent_user_ids:
if agent_user_id != self.agent_user_id:
remove_agent_user_ids.append(agent_user_id)
for agent_user_id in remove_agent_user_ids:
await self.async_disconnect_agent_user(agent_user_id)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
er.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def should_expose(self, state):
"""If a state object should be exposed."""
return self._should_expose_entity_id(state.entity_id)
def _should_expose_entity_id(self, entity_id):
"""If an entity ID should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config["filter"].empty_filter:
return self._config["filter"](entity_id)
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
entity_registry = er.async_get(self.hass)
if registry_entry := entity_registry.async_get(entity_id):
auxiliary_entity = registry_entry.entity_category in (
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
else:
auxiliary_entity = False
default_expose = self._prefs.google_default_expose
# Backwards compat
if default_expose is None:
return not auxiliary_entity
return not auxiliary_entity and split_entity_id(entity_id)[0] in default_expose
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return self._cloud.username
@property
def has_registered_user_agent(self):
"""Return if we have a Agent User Id registered."""
return len(self._store.agent_user_ids) > 0
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return self.agent_user_id
def should_2fa(self, state):
"""If an entity should be checked for 2FA."""
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(state.entity_id, {})
return not entity_config.get(PREF_DISABLE_2FA, DEFAULT_DISABLE_2FA)
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
try:
await self._cloud.google_report_state.async_send_message(message)
except ErrorResponse as err:
_LOGGER.warning("Error reporting state - %s: %s", err.code, err.message)
async def _async_request_sync_devices(self, agent_user_id: str):
"""Trigger a sync with Google."""
if self._sync_entities_lock.locked():
return HTTPStatus.OK
async with self._sync_entities_lock:
resp = await cloud_api.async_google_actions_request_sync(self._cloud)
return resp.status
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if not self._cloud.is_logged_in:
if self.is_reporting_state:
self.async_disable_report_state()
if self.is_local_sdk_active:
self.async_disable_local_sdk()
return
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
if self.should_report_state != self.is_reporting_state:
if self.should_report_state:
self.async_enable_report_state()
else:
self.async_disable_report_state()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities_all()
# If entity prefs are the same or we have filter in config.yaml,
# don't sync.
elif (
self._cur_entity_prefs is not prefs.google_entity_configs
or self._cur_default_expose is not prefs.google_default_expose
) and self._config["filter"].empty_filter:
self.async_schedule_google_sync_all()
if self.enabled and not self.is_local_sdk_active:
self.async_enable_local_sdk()
elif not self.enabled and self.is_local_sdk_active:
self.async_disable_local_sdk()
self._cur_entity_prefs = prefs.google_entity_configs
self._cur_default_expose = prefs.google_default_expose
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
# Only consider entity registry updates if info relevant for Google has changed
if event.data["action"] == "update" and not

googleinterns/betel | betel/app_page_scraper.py | Python | apache-2.0 | 5,385 | 0.001486
import pathlib
import urllib.error
import urllib.request
import logging
import bs4
import parmap
import pandas as pd
from betel import utils
from betel import info_files_helpers
from betel import betel_errors
class PlayAppPageScraper:
"""A class for scraping the icons and categories from Google Play Store
apps' web pages."""
_ICON_CLASS = "T75of sHb2Xb" # icon's tag's class
_APP_CATEGORY_ITEMPROP = "genre" # app's category's tag's itemprop
def __init__(self, base_url: str, storage_dir: pathlib.Path, category_filter: [str] = None):
"""Constructor.
:param base_url: base url of the apps store.
:param storage_dir: main storage directory for retrieved info.
:param category_filter: a list of categories whose apps are stored
(instead of the whole input)
"""
self._base_url = base_url
self._storage_dir = storage_dir
self._storage_dir.mkdir(exist_ok=True, parents=True)
self._info_file = storage_dir / utils.SCRAPER_INFO_FILE_NAME
self._log_file = storage_dir / utils.SCRAPER_LOG_FILE_NAME
logging.basicConfig(filename=self._log_file, filemode="a+")
self._category_filter = category_filter
def _build_app_page_url(self, app_id: str) -> str:
return self._base_url + "/details?id=" + app_id
def _get_app_page(self, app_id: str) -> bs4.BeautifulSoup:
url = self._build_app_page_url(app_id)
return _get_html(url)
def get_app_icon(self, app_id: str, subdir: pathlib.Path = "") -> None:
"""Scrapes the app icon URL from the app's Play Store details page,
downloads the corresponding app icon and saves it to
_storage_dir / subdir / icon_{app_id}.
:param app_id: the id of the app.
:param subdir: icon storage subdirectory inside _storage_dir base
directory.
"""
html = self._get_app_page(app_id)
src = self._scrape_icon_url(html)
self._download_icon(app_id, src, subdir)
def _scrape_icon_url(self, html: bs4.BeautifulSoup) -> str:
icon = html.find(class_=self._ICON_CLASS)
if icon is None:
raise betel_errors.PlayScrapingError("Icon class not found in html.")
return icon["src"]
def _download_icon(self, app_id: str, source: str, directory: pathlib.Path) -> None:
location = self._storage_dir / directory
location.mkdir(exist_ok=True, parents=True)
try:
urllib.request.urlretrieve(source, location / utils.get_app_icon_name(app_id))
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not retrieve icon.", exception)
def get_app_category(self, app_id: str) -> str:
"""Scrapes the app category from the app's Play Store details page.
:param app_id: the id of the app.
:return: the category of the app in str format
"""
html = self._get_app_page(app_id)
return self._scrape_category(html).lower()
def _scrape_category(self, html: bs4.BeautifulSoup) -> str:
category = html.find(itemprop=self._APP_CATEGORY_ITEMPROP)
if category is None:
raise betel_errors.PlayScrapingError("Category itemprop not found in html.")
return category.get_text()
def store_app_info(self, app_id: str) -> None:
"""Adds an app to the data set by retrieving all the info
needed and appending it to the list of apps (kept in _info_file).
The app is only stored in the case that its category is in the
_category_filter list.
:param app_id: the id of the app.
"""
search_data_frame = utils.get_app_search_data_frame(app_id)
part_of_data_set = (
info_files_helpers.part_of_data_set(self._info_file, search_data_frame)
)
try:
if not part_of_data_set:
category = self.get_app_category(app_id)
if self._category_filter is None or category in self._category_filter:
self.get_app_icon(app_id)
self._write_app_info(app_id, category)
except betel_errors.BetelError as exception:
info = f"{app_id}, {getattr(exception, 'message', repr(exception))}"
logging.warning(info)
def _write_app_info(self, app_id: str, category: str) -> None:
app_info = _build_app_info_data_frame(app_id, category)
info_files_helpers.add_to_data(self._info_file, app_info)
def store_apps_info(self, app_ids: [str]) -> None:
"""Adds the specified apps to the data set by retrieving all the info
needed and appending them to the list of apps (kept in _info_file).
:param app_ids: array of app ids.
"""
app_ids = set(app_ids)
parmap.map(self.store_app_info, app_ids)
def _get_html(url: str) -> bs4.BeautifulSoup:
try:
page = urllib.request.urlopen(url)
soup = bs4.BeautifulSoup(page, 'html.parser')
return soup
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not open URL.", exception)
def _build_app_info_data_frame(app_id: str, category: str) -> pd.DataFrame:
dictionary = {"app_id": app_id, "category": category}
return pd.DataFrame([dictionary])
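
A usage sketch for the scraper; the base URL shown is the obvious Play Store value but is an assumption here, since the original caller supplies it:

```python
# Hypothetical invocation: scrape two app ids, keeping only two categories.
import pathlib

scraper = PlayAppPageScraper(
    base_url="https://play.google.com/store/apps",
    storage_dir=pathlib.Path("playstore_data"),
    category_filter=["education", "tools"],
)
scraper.store_apps_info(["com.example.alpha", "com.example.beta"])
```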

switchonproject/sip-html5-protocol-tool | protocoltool/migrations/0024_auto_20161212_1645.py | Python | lgpl-3.0 | 399 | 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('protocoltool', '0023_auto_20161208_1723'),
]
operations = [
migrations.RenameField(
model_name='basicdataset',
old_name='checked',
new_name='hidden',
),
]

RemiFr82/ck_addons | ck_equipment/models/eqpt_paddler.py | Python | gpl-3.0 | 551 | 0.005445
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from .eqpt_equipment import EQPT_TYPES
class Paddler(models.Model):
_name = 'eqpt.paddler'
_description = "Paddler Cycle Equipment"
_description = "Cycle paddler equipment"
eqpt_type = fields.Selection(selection=EQPT_TYPES, string="")
eqpt_id = fields.Reference(selection='_get_eqpt_models', string="Equipment")
cycle_id = fields.Many2one(comodel_name='pac.cycle', string="Cycle")
member_id = fields.Many2one(comodel_name='adm.asso.member', string="Member")
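
`eqpt_id` points at a selection method `_get_eqpt_models` that this excerpt never defines. A hypothetical sketch of what such an Odoo method conventionally returns, a list of `(model, label)` pairs, to be added inside the class:

```python
# Hypothetical: not part of the original excerpt. An Odoo Reference
# selection method returns [(model_name, display_label), ...].
@api.model
def _get_eqpt_models(self):
    eqpt_models = self.env['ir.model'].search([('model', '=like', 'eqpt.%')])
    return [(m.model, m.name) for m in eqpt_models]
```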

zhuww/planetclient | gti.py | Python | apache-2.0 | 6,453 | 0.017666
#!/homes/janeway/zhuww/bin/py
import numpy
import pyfits
from pylab import *
#from Pgplot import *
#from ppgplot import *
#import ppgplot
from numpy import *
class Cursor:
badtimestart=[]
badtimeend=[]
lines = []
def __init__(self, ax):
self.ax = ax
#self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
# text location in axes coords
#self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
self.Flag = True
def mouse_move(self, event):
if not event.inaxes: return
x, y = event.xdata, event.ydata
# update the line positions
#self.lx.set_ydata(y )
self.ly.set_xdata(x )
#self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
draw()
def click(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
event.button, event.x, event.y, event.xdata, event.ydata)
self.xdata = event.xdata
self.ydata = event.ydata
if '%d' % (event.button) == '1':
if self.Flag:
self.badtimestart.append(self.xdata)
self.Flag = False
self.lines.append(self.ax.axvline(color='r'))
self.lines[-1].set_xdata(event.xdata)
else:
self.badtimeend.append(self.xdata)
self.lines.append(self.ax.axvline(color='k'))
self.lines[-1].set_xdata(event.xdata)
self.Flag = True
elif '%d' % (event.button) == '3':
if self.Flag:
#self.ax.axvline(color='w').set_xdata(self.badtimeend[-1])
self.lines[-1].remove()
self.lines = self.lines[:-1]
self.badtimeend = self.badtimeend[:-1]
self.Flag = False
#self.ax.lines.pop(0)
else:
#self.ax.axvline(color='w').set_xdata(self.badtimestart[-1])
self.lines[-1].remove()
self.lines = self.lines[:-1]
self.badtimestart = self.badtimestart[:-1]
self.Flag = True
#self.ax.lines.pop(0)
else:
print 'event.button: %d' % (event.button)
draw()
def endedit(self, event):
print 'Quit editing bad time intervals.'
#disconnect('button_press_event', cursor.click)
class SnaptoCursor:
"""
Like Cursor but the crosshair snaps to the nearest x,y point
For simplicity, I'm assuming x is sorted
"""
def __init__(self, ax, x, y):
self.ax = ax
self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
self.x = x
self.y = y
# text location in axes coords
#self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes: return
x, y = event.xdata, event.ydata
indx = searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
# update the line positions
self.lx.set_ydata(y )
self.ly.set_xdata(x)
#self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
#print 'x=%1.2f, y=%1.2f'%(x,y)
draw()
hdulist=pyfits.open('histo.fits')
tabdata=hdulist[1].data
cols=hdulist[1].columns
#names=cols.names
#print names
counts=array(tabdata.field('COUNTS'))
time=array(tabdata.field('TIME'))
#starttime=time[0]
#time=time#-starttime
#plotxy(counts,time)
ax = subplot(111)
ax.plot(time, counts)
cursor = Cursor(ax)
#cursor = SnaptoCursor(ax, t, s)
connect('motion_notify_event', cursor.mouse_move)
connect('button_press_event', cursor.click)
duration = max(time) - min(time)
ax.set_xlim((min(time)-0.1*duration, max(time)+0.1*duration))
show()
#while not ppgplot.pgband(0)[2]=="X":
#print "click on the daigram twice to define a bad time interval:"
#badtimestart.append(ppgplot.pgband(6)[0])
#badtimeend.append(ppgplot.pgband(6)[0])
#closeplot()
badtimestart=numpy.array(cursor.badtimestart)#+starttime
badtimeend=numpy.array(cursor.badtimeend)#+starttime
print badtimestart
print badtimeend
#plot(time,counts)
#print tabdata[0]
#check gti
hdulist=pyfits.open('gti.fits')
tabdata=hdulist[1].data
#cols=hdulist[1].columns
start=tabdata.field('START')
stop=tabdata.field('STOP')
print len(start),len(stop)
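# For each user-marked bad interval, clip the overlapping GTI rows:
# rows entirely inside the interval are collected in badlist and deleted,
# rows straddling one edge are trimmed, and a row that contains the whole
# interval is split into two rows around it.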
for j in range(len(badtimestart)):
badlist=[]
if badtimestart[j] >= badtimeend[j]:
print "invalid bad time interval: abandon."
else:
print len(start),len(stop)
for i in range(len(start)):
if start[i] < badtimestart[j]:
if stop[i] <= badtimestart[j]:
continue
else:
if stop[i] <= badtimeend[j]:
stop[i]=badtimestart[j]
else:
start=insert(start,i+1,badtimeend[j])
stop=insert(stop,i+1,stop[i])
stop[i]=badtimestart[j]
break
else:
if start[i] < badtimeend[j]:
if stop[i] <= badtimeend[j]:
badlist.append(i)
else:
start[i]=badtimeend[j]
else:
break
start=delete(start,badlist)
stop=delete(stop,badlist)
errbar=0.5*(stop-start)
center=array(start+errbar)#-starttime
#array=0.*start+10.
array=array(0.*start+max(counts)/2)
#plotxy(array,center,symbol=1,line=None,errx=errbar,setup=0)
#closeplot()
print sum(stop-start)
col1=pyfits.Column(name="START",format = 'D',unit = 's',array=start)
col2=pyfits.Column(name="STOP",format = 'D',unit = 's',array=stop)
cols=pyfits.ColDefs([col1,col2])
tbhdu=pyfits.new_table(cols)
hdulist.append(tbhdu)
hdulist[2].header=hdulist[1].header
#print hdulist[2].header['ONTIME'],hdulist[2].header['TSTART'],hdulist[2].header['TSTOP']
hdulist[2].header['ONTIME']=sum(stop-start)
hdulist[2].header['TSTART']=start[0]
hdulist[2].header['TSTOP']=stop[len(stop)-1]
#print hdulist[2].header['ONTIME'],hdulist[2].header['TSTART'],hdulist[2].header['TSTOP']
hdulist.remove(hdulist[1])
hdulist.writeto('newgti.fits')
hdulist.close()
#plotxy(counts,time,device="gti.ps/PS")
#plotxy(array,center,symbol=1,line=None,errx=errbar,setup=0)
#closeplot()
plot(time, counts)
errorbar(center, array, xerr=errbar)
show()

Mathew/psychoanalysis | psychoanalysis/apps/pa/migrations/0002_auto__del_participant.py | Python | mit | 7,476 | 0.007089
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Participant'
db.delete_table(u'pa_participant')
# Removing M2M table for field user on 'Participant'
db.delete_table('pa_participant_user')
# Adding M2M table for field user on 'ReportingPeriod'
db.create_table(u'pa_reportingperiod_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])
def backwards(self, orm):
# Adding model 'Participant'
db.create_table(u'pa_participant', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
))
db.send_create_signal(u'pa', ['Participant'])
# Adding M2M table for field user on 'Participant'
db.create_table(u'pa_participant_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])
# Removing M2M table for field user on 'ReportingPeriod'
db.delete_table('pa_reportingperiod_user')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pa.activity': {
'Meta': {'object_name': 'Activity'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pa.activityentry': {
'Meta': {'object_name': 'ActivityEntry'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
},
u'pa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
},
u'pa.profession': {
'Meta': {'object_name': 'Profession'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'pa.reportingperiod': {
'Meta': {'object_name': 'ReportingPeriod'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
},
u'pa.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pa']

CommonAccord/Cmacc-Org | Doc/G/NW-NDA/99/WiP/Schedule/String.py | Python | mit | 2,950 | 0.008136
import re
thisDict = {
"path": "thisDict",
"2": "2",
"3": "3",
"5": "5",
"7": "7",
"2 x 2": "{2} x {2}",
"3 x 2": "{3} x {2}",
"4": "{2 x 2}",
"6": "{3 x 2}",
"8": "{2 x 2} x {2}",
"16": "{2 x 2} x {2 x 2}",
"96": "{16} x {6}",
"thisModel.Root": "thisModel.Root: {96} bottles of {Favored Beverage} on the {House Element}{period}",
"Model.Root": "{thisModel.Root}",
"period": "." ,
"Favored Beverage": "b{i}r",
"House Element": "{wl}",
"i": "ee",
"wl": "wall"
}
thatDict = {
"path": "thatDict",
"13": "13",
"13 x 13": "{13} x {13}",
"169": "qqq {16}",
"thatModel.Root": "thatModel.Root: {169} books on the {House Element}-mounted {Furniture}.",
"Model.Root": "{thatModel.Root}",
"Furniture": "b{u}kshelves",
"u": "oo"
}
anotherDict = {
"path": "anotherDict",
"11": "11",
"11 x 11": "{11} x {11}",
"121": "{11 x 11}",
"anotherModel.Root": "anotherModel.Root: {121} bottles of b{i}r and {Favored Beverage} on the {Furniture}.",
"Favored Beverage": "whiskey",
"megaModel.Root" : "{thisModel.Root} ... {thatModel.Root} ... {anotherModel.Root}",
"Model.Root": "{anotherModel.Root}"
}
directory = [thisDict, thatDict, anotherDict]
#When the same key exists in more than one dictionary: the current dict takes priority for now -> Later how?
#Limitation: we only go through dictionaries in the order given in directory; the first key we find is always used
#Had to add a "path" field to every dictionary to keep track of its name -> there was no way to print a dictionary's name without using globals() = bad practice
def find_value(dictionary, key):
#Case when the key does not exist in this dictionary
if key not in dictionary:
for x in range(len(directory)):
if directory[x] != dictionary and key in directory[x]:
return find_value(directory[x], key)
#When the key does not exist in any of dictionaries
raise KeyError ("Key not found in directory")
#Case when the key does exist in this dictionary
else:
value = dictionary[key]
search = [x.group() for x in re.finditer(r'{(.*?)}', value)]
path = "{" + key + "}" + " from: " + dictionary["path"] + "\n"
# End case
if len(search) == 0:
return value, path
else:
for i in range(len(search)):
new_val, new_path = find_value(dictionary, search[i][1:-1])
value = value.replace(search[i], new_val, 1)
path += new_path
return value, path
value, path = find_value(thisDict, "megaModel.Root")
print("Value: ", value)
print("Path:\n", path
|
)
#Called inside find_value when there is no such key in the given dictionary input
#Find the key from other dictionaries, keep log of which path; return
# def fetch_value(dictionary, key):

christiangalsterer/httpbeat | vendor/github.com/elastic/beats/packetbeat/tests/system/test_0029_http_gap.py | Python | apache-2.0 | 659 | 0
from packetbeat import BaseTest
"""
Tests for HTTP messages with gaps (packet loss) in them.
"""
class Test(BaseTest):
def test_gap_in_large_file(self):
"""
Should recover well from losing a packet in a large
file download.
"""
self.render_config_template(
http_ports=[8000],
)
self.run_packetbeat(pcap="gap_in_stream.pcap")
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["status"] == "OK"
print(o["notes"])
assert len(o["notes"]) == 1
assert o["notes"][0] == "Packet loss while capturing the response"

davidvilla/python-doublex | doublex/safeunicode.py | Python | gpl-3.0 | 333 | 0
# -*- coding: utf-8 -*-
import sys
def __if_number_get_string(number):
converted_str = number
if isinstance(number, (int, float)):
converted_str = str(number)
return converted_str
def get_string(strOrUnicode, encoding='utf-8'):
strOrUnicode = __if_number_get_string(strOrUnicode)
return strOrUnicode
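
A tiny usage sketch of the helpers above, matching the behavior of the code as shown in this excerpt:

```python
# Numbers are coerced to str; strings pass through unchanged.
assert get_string(42) == "42"
assert get_string(3.5) == "3.5"
assert get_string("héllo") == "héllo"
```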

murrayrm/python-control | control/frdata.py | Python | bsd-3-clause | 26,326 | 0.000076
# Copyright (c) 2010 by California Institute of Technology
# Copyright (c) 2012 by Delft University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of the California Institute of Technology nor
# the Delft University of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: M.M. (Rene) van Paassen (using xferfcn.py as basis)
# Date: 02 Oct 12
from __future__ import division
"""
Frequency response data representation and functions.
This module contains the FRD class and also functions that operate on
FRD data.
"""
# External function declarations
from warnings import warn
import numpy as np
from numpy import angle, array, empty, ones, \
real, imag, absolute, eye, linalg, where, dot, sort
from scipy.interpolate import splprep, splev
from .lti import LTI, _process_frequency_response
from . import config
__all__ = ['FrequencyResponseData', 'FRD', 'frd']
class FrequencyResponseData(LTI):
"""FrequencyResponseData(d, w[, smooth])
A class for models defined by frequency response data (FRD).
The FrequencyResponseData (FRD) class is used to represent systems in
frequency response data form.
Parameters
----------
d : 1D or 3D complex array_like
The frequency response at each frequency point. If 1D, the system is
assumed to be SISO. If 3D, the system is MIMO, with the first
dimension corresponding to the output index of the FRD, the second
dimension corresponding to the input index, and the 3rd dimension
corresponding to the frequency points in omega
w : iterable of real frequencies
List of frequency points for which data are available.
smooth : bool, optional
If ``True``, create an interpolation function that allows the
frequency response to be computed at any frequency within the range of
frequencies give in ``w``. If ``False`` (default), frequency response
can only be obtained at the frequencies specified in ``w``.
Attributes
----------
ninputs, noutputs : int
Number of input and output variables.
omega : 1D array
Frequency points of the response.
fresp : 3D array
Frequency response, indexed by output index, input index, and
frequency point.
Notes
-----
The main data members are 'omega' and 'fresp', where 'omega' is a 1D array
of frequency points and 'fresp' is a 3D array of frequency responses,
with the first dimension corresponding to the output index of the FRD, the
second dimension corresponding to the input index, and the 3rd dimension
corresponding to the frequency points in omega. For example,
>>> frdata[2,5,:] = numpy.array([1., 0.8-0.2j, 0.2-0.8j])
means that the frequency response from the 6th input to the 3rd output at
the frequencies defined in omega is set to the array above, i.e. the rows
represent the outputs and the columns represent the inputs.
A frequency response data object is callable and returns the value of the
transfer function evaluated at a point in the complex plane (must be on
the imaginary axis). See :meth:`~control.FrequencyResponseData.__call__`
for a more detailed description.
"""
# Allow NDarray * StateSpace to give StateSpace._rmul_() priority
# https://docs.scipy.org/doc/numpy/reference/arrays.classes.html
__array_priority__ = 11 # override ndarray and matrix types
#
# Class attributes
#
# These attributes are defined as class attributes so that they are
# documented properly. They are "overwritten" in __init__.
#
#: Number of system inputs.
#:
#: :meta hide-value:
ninputs = 1
#: Number of system outputs.
#:
#: :meta hide-value:
noutputs = 1
_epsw = 1e-8 #: Bound for exact frequency match
def __init__(self, *args, **kwargs):
"""Construct an FRD object.
The default constructor is FRD(d, w), where w is an iterable of
frequency points, and d is the matching frequency data.
If d is a single list, 1d array, or tuple, a SISO system description
is assumed. d can also be
To call the copy constructor, call FRD(sys), where sys is a
FRD object.
To construct frequency response data for an existing LTI
object, other than an FRD, call FRD(sys, omega)
"""
# TODO: discrete-time FRD systems?
smooth = kwargs.get('smooth', False)
if len(args) == 2:
if not isinstance(args[0], FRD) and isinstance(args[0], LTI):
# not an FRD, but still a system, second argument should be
# the frequency range
otherlti = args[0]
self.omega = sort(np.asarray(args[1], dtype=float))
# calculate frequency response at my points
if otherlti.isctime():
s = 1j * self.omega
self.fresp = otherlti(s, squeeze=False)
else:
z = np.exp(1j * self.omega * otherlti.dt)
self.fresp = otherlti(z, squeeze=False)
else:
# The user provided a response and a freq vector
self.fresp = array(args[0], dtype=complex)
if len(self.fresp.shape) == 1:
self.fresp = self.fresp.reshape(1, 1, len(args[0]))
self.omega = array(args[1], dtype=float)
if len(self.fresp.shape) != 3 or \
self.fresp.shape[-1] != self.omega.shape[-1] or \
len(self.omega.shape) != 1:
raise TypeError(
"The frequency data constructor needs a 1-d or 3-d"
" response data array and a matching frequency vector"
" size")
elif len(args) == 1:
# Use the copy constructor.
if not isinstance(args[0], FRD):
raise TypeError(
"The one-argument constructor can only take in"
" an FRD object. Received %s." % type(args[0]))
self.omega = args[0].omega
self.fresp = args[0].fresp
else:
raise ValueError(
"Needs 1 or 2 arguments; received %i." % len(args))
# create interpolation functions
if smooth:
self.ifunc = empty((self.fresp.shape[0], self.fresp.shape[1]),
dtype=tuple)
for i in range(self.fresp.shape[0]):
for j in range(self.fresp.shape[1]):
self.ifunc[i, j], u = splprep(
u=self.omega, x=[real(self.fresp[i, j, :]),

iulian787/spack | var/spack/repos/builtin/packages/xmessage/package.py | Python | lgpl-2.1 | 845 | 0.001183
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xmessage(AutotoolsPackage, XorgPackage):
"""xmessage displays a message or query in a window. The user can click
on an "okay" button to dismiss it
|
or can select one of several buttons
to answer a question. xmessage can also exit after a specified time."""
homepage = "http://cgit.freedesktop.org/xorg/app/xmessage"
xorg_mirror_path = "app/xmessage-1.0.4.tar.gz"
    version('1.0.4', sha256='883099c3952c8cace5bd11d3df2e9ca143fc07375997435d5ff4f2d50353acca')
depends_on('libxaw')
depends_on('libxt')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
richgieg/RichEmu86
|
main.py
|
Python
|
mit
| 98
| 0
|
import system
# Create the computer system and power it up.
sys = system.System()
sys.power_on()
|
daien/daco
|
distances_rkhs.py
|
Python
|
mit
| 24,666
| 0.000568
|
"""
Pairwise distance functions between time series in a RKHS
=========================================================
They all have the following prototype:
function(K, T1, T2, **kwargs)
"""
import numpy as np
from scipy.linalg import solve, eigvals, inv
from scipy.signal import correlate2d
# mean-element-based ----------------------------------------------------------
def distance_mean_elements(K, T1, T2):
""" Compute the squared distance between mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
Returns
-------
dme2: double,
squared distance between the mean-elements in RKHS
"""
dme2 = K[:T1, :T1].mean()
dme2 += K[T1:, T1:].mean()
dme2 += -2.0 * K[:T1, T1:].mean()
# # normalization vector
# m = np.zeros((T1+T2, 1), dtype=np.double)
# m[:T1,:] = -1./T1
# m[T1:,:] = 1./T2
# # return the distance
# dme2 = np.dot(m.T, np.dot(K, m))[0,0]
return dme2
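# Usage sketch (added for illustration; the RBF kernel, toy series sizes and
# bandwidth below are arbitrary choices, not part of the original module):
def _demo_distance_mean_elements():
    from scipy.spatial.distance import cdist
    rng = np.random.RandomState(0)
    X1 = rng.randn(50, 3)  # series 1: T1=50 frames of 3-d features
    X2 = rng.randn(60, 3)  # series 2: T2=60 frames
    X = np.vstack([X1, X2])
    # between-frames RBF kernel matrix of shape (T1+T2) x (T1+T2)
    K = np.exp(-0.5 * cdist(X, X, 'sqeuclidean'))
    return distance_mean_elements(K, len(X1), len(X2))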
def distance_me_squared(K, T1, T2):
""" Compute the squared distance between the squared mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
Returns
-------
dme2: double,
squared HS distance between the mean-elements squared
"""
dme2 = (K[:T1, :T1].mean()) ** 2
dme2 += (K[T1:, T1:].mean()) ** 2
dme2 += -2.0 * (K[:T1, T1:].mean()) ** 2
return dme2
def distance_mahalanobis(K, T1, T2, regul=1e-3):
""" Compute the squared distance between mean elements of two time series
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
regul: double, optional, default: 1e-3,
regularization parameter
Returns
-------
dmpc2: double,
squared Mahalanobis distance between time-series in RKHS
"""
# normalization vector
n = T1 + T2
m = np.zeros((n, 1), dtype=np.double)
m[:T1, :] = -1.0 / T1
m[T1:, :] = 1.0 / T2
# centering matrix
PiT1 = np.eye(T1, dtype=np.double) - 1.0 / T1
PiT2 = np.eye(T2, dtype=np.double) - 1.0 / T2
N = np.vstack([np.hstack([PiT1, np.zeros((T1, T2), dtype=np.double)]),
np.hstack([np.zeros((T2, T1), dtype=np.double), PiT2])])
# compute the distance
mTK = np.dot(m.T, K)
me = np.dot(mTK, m) # difference between mean elements
mTKN = np.dot(mTK, N)
NTK = np.dot(N.T, K)
A = regul * np.eye(n) + 1.0 / n * np.dot(NTK, N)
AinvNTK = solve(A, NTK, overwrite_a=True) # A^{-1} N.T K
AinvNTKm = np.dot(AinvNTK, m)
dmpc2 = 1.0 / regul * (me - 1.0 / n * np.dot(mTKN, AinvNTKm))
return dmpc2[0, 0]
# alignment-based -------------------------------------------------------------
def distance_aligned_frames_truncated(K, T1, T2, tau=0):
""" Compute the squared distance between aligned frames
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: int, optional, default: 0,
temporal shift (in frames) to apply to time series 2 before computing
alignment, using "cyclic" padding
Returns
-------
dme2: double,
squared distance between aligned frames in the RKHS
Notes
-----
    Truncated version (equivalent to zero padding)
dme2 = K[0,0] - 1/(T2-tau) * sum_{t=0}^{T2-tau} K[x1_t, x2_{t+tau}]
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# constant base kernel value k(x,x)
c = K[0, 0]
# matrix of k(x,y)
Kxy = K[:T, T:]
# return the distance
return c - np.mean(np.diag(Kxy, k=tau))
def distance_aligned_frames_cyclic(K, T1, T2, tau=0):
""" Compute the squared distance between aligned frames
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
tau: positive int, optional, default: 0,
temporal shift (in frames) to apply to time series 2 before computing
alignment, using "cyclic" padding
Returns
-------
dme2: double,
squared distance between aligned frames in the RKHS
Notes
-----
    Cyclic version
dme2 = K[0,0] - 1/T2 * sum_{t=0}^{T2} K[x1_t, x2_{(t+tau) % T2}]
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# constant base kernel value k(x,x)
c = K[0, 0]
# matrix of k(x,y)
Kxy = K[:T, T:]
# return the distance
if tau:
tr = Kxy.trace(offset=tau) + Kxy.trace(offset=tau - T)
else:
tr = Kxy.trace()
return c - tr / float(T)
# auto-covariance-based -------------------------------------------------------
def distance_hsac_truncated(K, T1, T2, tau=1):
""" Compute the squared HS distance between the autocovariance operators of
two time series
|| \\scov^{(y)}_{\\tau} - \\scov^{(x)}_{\\tau} ||_{HS}^2 =
1/T**2 ( Tr(K_1 x K_1^\\tau) + Tr(K_2 x K_2^\\tau) - 2 Tr(K_{1,2} x K_{2,1}^\\tau ) )
    Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
    tau: int, optional, default: 1,
        lag, i.e. time shift used in the auto-covariance computation
Returns
-------
dhsac: double,
squared Hilbert-Schmidt norm of the difference between the
auto-covariance operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Truncated version between X[:-tau] and X[tau:] (equivalent to zero padding).
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the truncated matrices of the non-shifted series
K1 = K[:T1 - tau, :T1 - tau]
K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
K12 = K[:T1 - tau, T1:T1 + T2 - tau]
# define the truncated matrices of the shifted series
K1tau = K[tau:T1, tau:T1]
K2tau = K[T1 + tau:, T1 + tau:]
K12tau = K[tau:T1, T1 + tau:]
# compute the different traces using Hadamard products (and sym of K)
tr1 = np.mean(K1 * K1tau)
tr2 = np.mean(K2 * K2tau)
tr12 = np.mean(K12 * K12tau) # no transpose (K21tau.T == K12tau)
# return dhsac
return tr1 + tr2 - 2 * tr12
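# Sanity-check sketch (added for illustration): stacking a series with an
# exact copy of itself makes K1 == K2 == K12, so the HSAC distance is zero.
def _demo_distance_hsac_truncated():
    from scipy.spatial.distance import cdist
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    Xc = np.vstack([X, X])  # the series paired with itself (T1 = T2 = 40)
    K = np.exp(-cdist(Xc, Xc, 'sqeuclidean'))
    return distance_hsac_truncated(K, 40, 40, tau=2)  # 0.0 up to rounding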
def distance_hsac_cyclic(K, T1, T2, tau=1):
""" Compute the squared HS distance between the autocovariance operators of
two time series
|| \\scov^{(y)}_{\\tau} - \\scov^{(x)}_{\\tau} ||_{HS}^2 =
1/T**2 ( Tr(K_1 x K_1^\\tau) + Tr(K_2 x K_2^\\tau) - 2 Tr(K_{1,2} x K_{2,1}^\\tau ) )
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
    tau: int, optional, default: 1,
        lag, i.e. time shift used in the auto-covariance computation
Returns
-------
dhsac: double,
squared Hilbert-Schmidt norm of the difference between the
auto-covariance operators, in the RKHS induced by 'frame_kern', of
the two time series
Notes
-----
Cyclic version between X and [ X[tau:], X[:tau] ].
    Artefacts may arise if the two series are not synchronized or do not
    comprise the same number of periods.
"""
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the (non-truncated) matrices of the non-shifted series
K1 = K[:T1, :T1]
K2 = K[T1:, T1:]
K12 = K[:T1, T1:]
# circular permutation of tau frames
idxs1 = np.arange(tau, T1 + tau) % T1
idxs2 = np.arange(tau, T2 + tau) % T2
    # Note: no need for copy as we only read from the permuted views.
    # (Reconstructed tail, following the truncated variant above: apply the
    # cyclic shift, then take the same Hadamard-product traces.)
    K1tau = K1[np.ix_(idxs1, idxs1)]
    K2tau = K2[np.ix_(idxs2, idxs2)]
    K12tau = K12[np.ix_(idxs1, idxs2)]
    tr1 = np.mean(K1 * K1tau)
    tr2 = np.mean(K2 * K2tau)
    tr12 = np.mean(K12 * K12tau)  # no transpose (K21tau.T == K12tau)
    # return dhsac
    return tr1 + tr2 - 2 * tr12
|
nikitakurylev/TuxemonX
|
tuxemon/core/states/combat/combat.py
|
Python
|
gpl-3.0
| 27,084
| 0.001625
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# Benjamin Bean <[email protected]>
# Leif Theden <[email protected]>
#
#
# core.states.combat Combat Start module
#
#
from __future__ import division
import logging
from collections import namedtuple, defaultdict
from functools import partial
from itertools import chain
from operator import attrgetter
import pygame
from core import tools, state
from core.components.locale import translator
from core.components.pyganim import PygAnimation
from core.components.sprite import Sprite
from core.components.technique import Technique
from core.components.ui.draw import GraphicBox
from core.components.ui.text import TextArea
from .combat_animations import CombatAnimations
trans = translator.translate
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
EnqueuedAction = namedtuple("EnqueuedAction", "user technique target")
faint = Technique("status_faint")
def check_status(monster, status_name):
return any(t for t in monster.status if t.slug == status_name)
def fainted(monster):
return check_status(monster, "status_faint")
def get_awake_monsters(player):
""" Iterate all non-fainted monsters in party
:param player:
:return:
"""
for monster in player.monsters:
if not fainted(monster):
yield monster
def fainted_party(party):
return all(map(fainted, party))
def defeated(player):
return fainted_party(player.monsters)
class WaitForInputState(state.State):
""" Just wait for input blocking everything
"""
def process_event(self, event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.game.pop_state(self)
class CombatState(CombatAnimations):
""" The state-menu responsible for all combat related tasks and functions.
.. image:: images/combat/monster_drawing01.png
General description of this class:
* implements a simple state machine
* various phases are executed using a queue of actions
* "decision queue" is used to queue player interactions/menus
* this class holds mostly logic, though some graphical functions exist
* most graphical functions are contained in "CombatAnimations" class
Currently, status icons are implemented as follows:
each round, all status icons are destroyed
status icons are created for each status on each monster
obvs, not ideal, maybe someday make it better? (see transition_phase)
"""
background_filename = "gfx/ui/combat/battle_bg03.png"
draw_borders = False
escape_key_exits = False
def startup(self, **kwargs):
        self.max_positions = 1  # TODO: make dependent on match type
self.phase = None
self.monsters_in_play = defaultdict(list)
self._damage_map = defaultdict(set) # track damage so experience can be awarded later
self._technique_cache = dict() # cache for technique animations
self._decision_queue = list() # queue for monsters that need decisions
self._position_queue = list() # queue for asking players to add a monster into play (subject to change)
self._action_queue = list() # queue for techniques, items, and status effects
self._status_icons = list() # list of sprites that are status icons
self._monster_sprite_map = dict() # monster => sprite
self._hp_bars = dict() # monster => hp bar
self._layout = dict() # player => home areas on screen
self._animation_in_progress = False # if true, delay phase change
self._winner = None # when set, combat ends
self._round = 0
super(CombatState, self).startup(**kwargs)
self.players = list(self.players)
self.show_combat_dialog()
self.transition_phase("begin")
self.task(partial(setattr, self, "phase", "ready"), 3)
def update(self, time_delta):
""" Update the combat state. State machine is checked.
General operation:
* determine what phase to execute
* if new phase, then run transition into new one
* update the new phase, or the current one
"""
super(CombatState, self).update(time_delta)
if not self._animation_in_progress:
new_phase = self.determine_phase(self.phase)
if new_phase:
self.phase = new_phase
self.transition_phase(new_phase)
self.update_phase()
def draw(self, surface):
super(CombatState, self).draw(surface)
self.draw_hp_bars()
def draw_hp_bars(self):
""" Go through the HP bars and redraw them
:returns: None
"""
for monster, hud in self.hud.items():
rect = pygame.Rect(0, 0, tools.scale(70), tools.scale(8))
rect.right = hud.image.get_width() - tools.scale(8)
rect.top += tools.scale(12)
self._hp_bars[monster].draw(hud.image, rect)
def determine_phase(self, phase):
""" Determine the next phase and set it
Part of state machine
Only test and set new phase.
* Do not execute phase actions
* Try not to modify any values
* Return a phase name and phase will change
* Return None and phase will not change
:returns: None or String
"""
if phase == "ready":
return "housekeeping phase"
elif phase == "housekeeping phase":
# this will wait for players to fill battleground positions
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
return
return "decision phase"
elif phase == "decision phase":
# assume each monster executes one action
# if number of actions == monsters, then all monsters are ready
if len(self._action_queue) == len(self.active_monsters):
return "pre action phase"
# TODO: change check so that it doesn't change state
# (state is changed because check_match_status will modify _winner)
# if a player runs, it will be known here
self.determine_winner()
if self._winner:
return "ran away"
elif phase == "pre action phase":
return "action phase"
if phase == "action phase":
if not self._action_queue:
return "post action phase"
elif phase == "post action phase":
if not self._action_queue:
return "resolve match"
elif phase == "ran away":
return "end combat"
elif phase == "has winner":
return "end combat"
elif phase == "resolve match":
if self._winner:
return "has winner"
else:
return "housekeeping phase"
def transition_phase(self, phase):
""" Change from one phase from another.
Part of state machine
* Will be run just -once- when phase changes.
* Do not change phase.
        * Execute code only for the new phase
|
skosukhin/spack
|
var/spack/repos/builtin/packages/liblbxutil/package.py
|
Python
|
lgpl-2.1
| 2,028
| 0.000986
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Liblbxutil(AutotoolsPackage):
"""liblbxutil - Low Bandwith X extension (LBX) utility routines."""
homepage = "http://cgit.freedesktop.org/xorg/lib/liblbxutil"
url = "https://www.x.org/archive/individual/lib/liblbxutil-1.1.0.tar.gz"
version('1.1.0', '2735cd23625d4cc870ec4eb7ca272788')
depends_on('[email protected]:', type='build')
depends_on('xproto', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
# There is a bug in the library that causes the following messages:
# undefined symbol: Xfree
# undefined symbol: Xalloc
# See https://bugs.freedesktop.org/show_bug.cgi?id=8421
# Adding a dependency on libxdmcp and adding LIBS=-lXdmcp did not fix it
|
PyBossa/pybossa-locust
|
mainandprojects.py
|
Python
|
agpl-3.0
| 600
| 0.011667
|
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
# do a login here
# self.client.post("/login", {"username":"ellen_key", "password":"education"})
pass
@task(2)
def index(self):
self.client.get("/")
@task(1)
def project1(self):
        self.client.get("/app/category/featured/")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait=5000
max_wait=9000
|
mylokin/redisext
|
redisext/backend/rmredis.py
|
Python
|
mit
| 326
| 0
|
from __future__ import absolute_import
import redisext.backend.abc
import rm.rmredis
class Client(redisext.backend.abc.IClient):
def __init__(self, database=None, role=None):
self._redis = rm.rmredis.RmRedis.get_instance(database, role)
class Connection(redisext.backend.abc.IConnection):
CLIENT = Client
|
s40523116/2016fallcp_hw
|
w4.py
|
Python
|
agpl-3.0
| 66
| 0.045455
|
mystring = "40523116"
mystring = mystring + " test"
print(mystring)
|
neversun/sailfish-hackernews
|
src/main.py
|
Python
|
mit
| 3,028
| 0.000661
|
import pyotherside
from firebase import firebase
firebase = firebase.FirebaseApplication('https://hacker-news.firebaseio.com', None)
responses = []
getItemsCount = 0
eventCount = 0
itemIDs = []
def getCommentsForItem(itemID):
    itemID = str(int(itemID))  # normalise to a canonical integer string
item = firebase.get('/v0/item/'+itemID, None)
# print(item)
# print(item['kids'])
for commentID in item['kids']:
# print("inside loop. kid:", commentID)
commentID = str(commentID)
comment = firebase.get_async('/v0/item/'+commentID, None, callback=cbNewComment)
def getItems(items, startID=None, count=None):
if items is None and startID is None and count is None:
return
currentlyDownloading(True)
global getItemsCount
global itemIDs
items = str(items)
if count is None:
getItemsCount = 30
else:
getItemsCount = count
if startID is None:
itemIDs = firebase.get('/v0/'+items, None)
if len(itemIDs) < getItemsCount:
getItemsCount = len(itemIDs)
else:
allIDs = firebase.get('/v0/'+items, None)
startIDFound = False
for i in allIDs:
if i == startID:
startIDFound = True
continue
if startIDFound is False:
continue
itemIDs.append(i)
if len(itemIDs) >= getItemsCount:
break
if len(itemIDs) == 0:
resetDownloader()
currentlyDownloading(False)
return
if len(itemIDs) < getItemsCount:
getItemsCount = len(itemIDs)
itemID = None
i = 0
for itemID in itemIDs:
itemID = str(itemID)
item = firebase.get_async('/v0/item/'+itemID, None, callback=cbNewItem)
i += 1
if i >= getItemsCount:
break
def cbNewItem(response):
global eventCount
eventCount += 1
pyotherside.send('item-downloaded')
bufferResponse(response)
def cbNewComment(response):
# print("cbNewComment", response)
pyotherside.send('comment-downloaded', response)
def bufferResponse(response):
global getItemsCount
global eventCount
global itemIDs
global responses
responses.append(response)
# print(eventCount, getItemsCount)
if eventCount == getItemsCount:
orderedResponses = []
# print(itemIDs)
        for r in responses:
            try:
                index = itemIDs.index(r['id'])
            except ValueError:
                # list.index raises ValueError; it never returns None
                continue
            orderedResponses.insert(index, r)
sendResponses(orderedResponses)
def sendResponses(responses):
for r in responses:
pyotherside.send('new-item', r)
resetDownloader()
currentlyDownloading(False)
def resetDownloader():
global eventCount
global itemIDs
global responses
global getItemsCount
eventCount = 0
itemIDs[:] = []
responses[:] = []
getItemsCount = 0
def currentlyDownloading(b):
pyotherside.send('items-currently-downloading', b)
|
WMD-group/SMACT
|
smact/__init__.py
|
Python
|
mit
| 14,679
| 0.011309
|
"""
Semiconducting Materials from Analogy and Chemical Theory
A collection of fast screening tools from elemental data
"""
# get correct path for datafiles when called from another directory
from builtins import filter
from builtins import map
from builtins import range
from builtins import object
from os import path
module_directory = path.abspath(path.dirname(__file__))
data_directory = path.join(module_directory, 'data')
import itertools
from math import gcd
from operator import mul as multiply
from smact import data_loader
class Element(object):
"""Collection of standard elemental properties for given element.
Data is drawn from "data/element.txt", part of the Open Babel
package.
Atoms with a defined oxidation state draw properties from the
"Species" class.
Attributes:
Element.symbol (string) : Elemental symbol used to retrieve data
Element.name (string) : Full name of element
Element.number (int) : Proton number of element
Element.pauling_eneg (float) : Pauling electronegativity (0.0 if unknown)
Element.ionpot (float) : Ionisation potential in eV (0.0 if unknown)
Element.e_affinity (float) : Electron affinity in eV (0.0 if unknown)
Element.dipol (float) : Static dipole polarizability in 1.6488e-41 C m^2 / V (0.0 if unknown)
Element.eig (float) : Electron eigenvalue (units unknown) N.B. For Cu, Au and Ag this defaults to d-orbital
Element.eig_s (float) : Eigenvalue of s-orbital
Element.SSE (float) : Solid State Energy
Element.SSEPauling (float) : SSE based on regression fit with Pauling electronegativity
Element.oxidation_states (list) : Default list of allowed oxidation states for use in SMACT
        Element.oxidation_states_sp (list) : List of oxidation states recognised by the Pymatgen Structure Predictor
        Element.oxidation_states_icsd (list) : List of oxidation states that appear in the ICSD
        Element.coord_envs (list): The allowed coordination environments for the ion
Element.covalent_radius (float) : Covalent radius of the element
Element.mass (float) : Molar mass of the element
        Element.crustal_abundance (float) : Crustal abundance in the earth's crust (mg/kg), taken from CRC
        Element.HHI_p (float) : Herfindahl-Hirschman Index for elemental production
        Element.HHI_r (float) : Herfindahl-Hirschman Index for elemental reserves
Raises:
NameError: Element not found in element.txt
Warning: Element not found in Eigenvalues.csv
"""
def __init__(self, symbol):
"""Initialise Element class
Args:
symbol (str): Chemical element symbol (e.g. 'Fe')
"""
dataset = data_loader.lookup_element_data(symbol, copy=False)
        if dataset is None:
raise NameError("Elemental data for {0} not found.".format(symbol))
# Set coordination-environment data from the Shannon-radius data.
# As above, it is safe to use copy = False with this Get* function.
shannon_data = data_loader.lookup_element_shannon_radius_data(symbol, copy=False)
        if shannon_data is not None:
coord_envs = [row['coordination'] for row in shannon_data]
else:
coord_envs = None
HHI_scores = data_loader.lookup_element_hhis(symbol)
        if HHI_scores is None:
HHI_scores = (None, None)
sse_data = data_loader.lookup_element_sse_data(symbol)
if sse_data:
sse = sse_data['SolidStateEnergy']
else:
sse = None
sse_Pauling_data = data_loader.lookup_element_sse_pauling_data(symbol)
if sse_Pauling_data:
sse_Pauling = sse_Pauling_data['SolidStateEnergyPauling']
else:
sse_Pauling = None
for attribute, value in (
('coord_envs', coord_envs),
('covalent_radius', dataset['r_cov']),
('crustal_abundance', dataset['Abundance']),
('e_affinity', dataset['e_affinity']),
('eig', dataset['p_eig']),
('eig_s', dataset['s_eig']),
('HHI_p', HHI_scores[0]),
('HHI_r', HHI_scores[1]),
('ionpot', dataset['ion_pot']),
('mass', dataset['Mass']),
('name', dataset['Name']),
('number', dataset['Z']),
('oxidation_states',
data_loader.lookup_element_oxidation_states(symbol)),
('oxidation_states_icsd',
data_loader.lookup_element_oxidation_states_icsd(symbol)),
('oxidation_states_sp',
data_loader.lookup_element_oxidation_states_sp(symbol)),
('dipol', dataset['dipol']),
('pauling_eneg', dataset['el_neg']),
('SSE', sse),
('SSEPauling', sse_Pauling),
('symbol', symbol),
#('vdw_radius', dataset['RVdW']),
):
setattr(self, attribute, value)
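# Usage sketch (added for illustration; requires the packaged data files):
#
#     >>> fe = Element('Fe')
#     >>> fe.name, fe.number
#     ('Iron', 26)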
class Species(Element):
"""
Class providing data for elements in a given chemical environment
In addition to the standard properties from the periodic table
(inherited from the Element class), Species objects use the
oxidation state and coordination environment to provide further
properties.
Attributes:
Species.symbol: Elemental symbol used to retrieve data
Species.name: Full name of element
Species.oxidation: Oxidation state of species (signed integer)
Species.coordination: Coordination number of species (integer)
Species.pauling_eneg: Pauling electronegativity (0.0 if unknown)
Species.ionpot: Ionisation potential in eV (0.0 if unknown)
Species.e_affinity: Electron affinity in eV (0.0 if unknown)
Species.eig: Electron eigenvalue (units unknown)
N.B. For Cu, Au and Ag this defaults to d-orbital.
Species.shannon_radius: Shannon radius of Species.
Species.ionic_radius: Ionic radius of Species.
Raises:
NameError: Element not found in element.txt
Warning: Element not found in Eigenvalues.csv
"""
    def __init__(self, symbol, oxidation, coordination=4, radii_source="shannon"):
        Element.__init__(self, symbol)
self.oxidation = oxidation
self.coordination = coordination
# Get shannon radius for the oxidation state and coordination.
        self.shannon_radius = None
        if radii_source == "shannon":
            shannon_data = data_loader.lookup_element_shannon_radius_data(symbol)
elif radii_source == "extended":
shannon_data = data_loader.lookup_element_shannon_radius_data_extendedML(symbol)
        else:
            # guard against an unbound name when the source is unrecognised
            shannon_data = None
            print("Data source not recognised. Please select 'shannon' or 'extended'.")
        if shannon_data:
            for dataset in shannon_data:
                if dataset['charge'] == oxidation and str(coordination) == dataset['coordination'].split('_')[0]:
                    self.shannon_radius = dataset['crystal_radius']
        # Get ionic radius
        self.ionic_radius = None
        if shannon_data:
            for dataset in shannon_data:
                if dataset['charge'] == oxidation and str(coordination) == dataset['coordination'].split('_')[0]:
                    self.ionic_radius = dataset['ionic_radius']
        # Get SSE_2015 (revised) for the oxidation state.
        self.SSE_2015 = None
        sse_2015_data = data_loader.lookup_element_sse2015_data(symbol)
if sse_2015_data:
for dataset in sse_2015_data:
if dataset['OxidationState'] == oxidation:
self.SSE_2015 = dataset['SolidStateEnergy2015']
else:
self.SSE_2015 = None
def ordered_elements(x,y):
"""
Return a list of element symbols, ordered by proton number in the range x -> y
Args:
|
Cadasta/cadasta-platform
|
deployment/scripts/ami.py
|
Python
|
agpl-3.0
| 1,182
| 0.001692
|
#!/usr/bin/env python
import sys
from os.path import normpath, join, dirname, abspath
machine_file = normpath(join(dirname(abspath(__file__)),
'../files/machine-images.csv'))
def read_machine_file():
amis = {}
with open(machine_file) as fp:
for l in fp:
type, region, ami = l[:-1].split(',')
amis[type + ':' + region] = ami
return amis
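# Each line of machine-images.csv is "type,region,ami", e.g. (hypothetical):
#   app,us-east-1,ami-0123456789abcdef0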
def write_machine_file(amis):
with open(machine_file, 'w') as fp:
for k in sorted(amis.keys()):
type, region = k.split(':')
print('{},{},{}'.format(type, region, amis[k]), file=fp)
def get_ami(type, region):
return read_machine_file().get(type + ':' + region)
def set_ami(type, region, ami):
amis = read_machine_file()
amis[type + ':' + region] = ami
    write_machine_file(amis)
def main(argv):
if len(argv) == 3:
print(get_ami(argv[1], argv[2]))
elif len(argv) == 4:
set_ami(argv[1], argv[2], argv[3])
else:
print("""
Usage:
Get AMI ami.py <type> <region>
Save AMI ami.py <type> <region> <ami>
""")
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
|
rg3/youtube-dl
|
youtube_dl/extractor/stv.py
|
Python
|
unlicense
| 3,447
| 0.002031
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_str,
float_or_none,
int_or_none,
smuggle_url,
str_or_none,
try_get,
)
class STVPlayerIE(InfoExtractor):
IE_NAME = 'stv:player'
_VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
_TESTS = [{
# shortform
'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
'md5': '5adf9439c31d554f8be0707c7abe7e0a',
'info_dict': {
'id': '5333973339001',
'ext': 'mp4',
'upload_date': '20170301',
'title': '60 seconds on set with Laura Norton',
'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let\'s find out!",
'timestamp': 1488388054,
'uploader_id': '1486976045',
},
        'skip': 'this resource is unavailable outside of the UK',
}, {
# episodes
'url': 'https://player.stv.tv/episode/4125/jennifer-saunders-memory-lane',
'only_matching': True,
}]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
_PTYPE_MAP = {
'episode': 'episodes',
'video': 'shortform',
}
def _real_extract(self, url):
ptype, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id, fatal=False) or ''
props = (self._parse_json(self._search_regex(
r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
webpage, 'next data', default='{}'), video_id,
fatal=False) or {}).get('props') or {}
player_api_cache = try_get(
props, lambda x: x['initialReduxState']['playerApiCache']) or {}
api_path, resp = None, {}
for k, v in player_api_cache.items():
if k.startswith('/episodes/') or k.startswith('/shortform/'):
api_path, resp = k, v
break
else:
episode_id = str_or_none(try_get(
props, lambda x: x['pageProps']['episodeId']))
api_path = '/%s/%s' % (self._PTYPE_MAP[ptype], episode_id or video_id)
result = resp.get('results')
if not result:
resp = self._download_json(
'https://player.api.stv.tv/v1' + api_path, video_id)
result = resp['results']
video = result['video']
video_id = compat_str(video['id'])
subtitles = {}
_subtitles = result.get('_subtitles') or {}
for ext, sub_url in _subtitles.items():
subtitles.setdefault('en', []).append({
'ext': 'vtt' if ext == 'webvtt' else ext,
'url': sub_url,
})
programme = result.get('programme') or {}
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['GB']}),
'description': result.get('summary'),
'duration': float_or_none(video.get('length'), 1000),
'subtitles': subtitles,
'view_count': int_or_none(result.get('views')),
'series': programme.get('name') or programme.get('shortName'),
'ie_key': 'BrightcoveNew',
}
|
sharestack/sharestack-api
|
sharestackapi/techs/test_models.py
|
Python
|
mit
| 8,113
| 0
|
import random
from django.test import TestCase
from .models import TechType, Tech, Component
# shared data across the tests
types = [
{
"name": "framework",
"description": "to do web stuff"
},
{
"name": "database",
"description": "to store stuff"
},
{
"name": "application",
"description": "to show stuff"
},
{
"name": "balancer",
"description": "to balance stuff"
},
{
"name": "web server",
"description": "to serve stuff"
},
{
"name": "programming language",
"description": "to programm stuff"
},
]
techs = [
{
"name": "django",
"description": "The best web framework",
"url": "https://www.djangoproject.com/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/django/django.git",
},
{
"name": "python",
"description": "The programming language",
"url": "http://www.python.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "http://hg.python.org/cpython/",
},
{
"name": "golang",
"description": "The other programming language",
"url": "http://www.golang.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://code.google.com/p/go",
},
{
"name": "postgresql",
"description": "The best relational database",
"url": "http://www.postgresql.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/postgres/postgres",
},
{
"name": "nginx",
"description": "The best http server",
"url": "http://nginx.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "http://hg.nginx.org/nginx",
},
{
"name": "sharestack",
"description": "The best app",
"url": "http://sharestack.org/",
"logo": "http://someimage.com/some.png",
"open_source": True,
"repo": "https://github.com/sharestack/sharestack",
},
]
components = [
{
"name": "sharestack",
"version": "1.0",
"config": '{"json-config": "yeah"}',
"description": "This is a description of a versioned app",
},
{
"name": "nginx",
"version": "1.5.11",
"config": 'nginx config big string',
"description": "This is a description of a versioned nginx",
},
{
"name": "postgresql",
"version": "9.3",
"config": 'postgres config',
"description": "This is a description of a versioned postgres",
},
{
"name": "python",
"version": "2.7.6",
"config": 'python config',
"description": "This is a description of a versioned python",
},
]
class TechTypeTests(TestCase):
def setUp(self):
pass
def test_save(self):
for i in types:
t = TechType(**i)
t.save()
        self.assertEqual(len(TechType.objects.all()), len(types))
def test_retrieval(self):
for i in types:
t = TechType(**i)
t.save()
t2 = TechType.objects.get(id=t.id)
self.assertEqual(t, t2)
def test_filter(self):
for i in types:
t = TechType(**i)
t.save()
tech_type = types[random.randrange(len(types))]
        t = TechType.objects.filter(name=tech_type["name"])[0]
self.assertEqual(t.description, tech_type["description"])
def test_str(self):
for i in types:
t = TechType(**i)
self.assertEqual(str(t), i["name"])
class TechTests(TestCase):
def test_save(self):
for i in techs:
t = Tech(**i)
t.save()
self.assertEqual(len(Tech.objects.all()), len(techs))
def test_retrieval(self):
for i in techs:
t = Tech(**i)
t.save()
t2 = Tech.objects.get(id=t.id)
self.assertEqual(t, t2)
def test_filter(self):
for i in techs:
t = Tech(**i)
t.save()
tech = techs[random.randrange(len(techs))]
t = Tech.objects.filter(name=tech["name"])[0]
self.assertEqual(t.url, tech["url"])
def test_related_fields(self):
# Create types
programming_lang = TechType(**types[5]) # Is the 6th
app = TechType(**types[2]) # Is the 3rd
framework = TechType(**types[0]) # Is the 1st
database = TechType(**types[1]) # Is the 2nd
# Create techs
python = Tech(**techs[1]) # Is the 2nd
go = Tech(**techs[2]) # Is the 3rd
django = Tech(**techs[0]) # Is the 1st
sharestack = Tech(**techs[-1]) # Is the last
postgres = Tech(**techs[3]) # Is the 4th
# Save before adding m2m fields
programming_lang.save()
app.save()
framework.save()
database.save()
# save remaining
python.save()
go.save()
django.save()
sharestack.save()
postgres.save()
# add types
python.types.add(programming_lang)
go.types.add(programming_lang)
django.types.add(framework)
sharestack.types.add(app)
postgres.types.add(database)
        # Check types are ok on both sides for programming languages
python2 = Tech.objects.get(name=python.name)
go2 = Tech.objects.get(name=go.name)
programming_lang2 = TechType.objects.get(name=programming_lang.name)
self.assertEqual(python2.types.all()[0], programming_lang2)
self.assertEqual(go2.types.all()[0], programming_lang2)
self.assertEqual(len(programming_lang2.tech_set.all()), 2)
        # Check tech components are ok on one side with sharestack
django.tech_components.add(python)
sharestack.tech_components.add(python, django, postgres)
self.assertEqual(len(sharestack.tech_components.all()), 3)
# Isn't symmetrical so postgres shouldn't have sharestack
self.assertEqual(len(postgres.tech_components.all()), 0)
def test_str(self):
for i in techs:
t = Tech(**i)
self.assertEqual(str(t), i["name"])
class ComponentTests(TestCase):
def setUp(self):
# Save first all the types
self.tech_objects = {}
for i in techs:
t = Tech(**i)
t.save()
self.tech_objects[i["name"]] = t
def test_save(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
self.assertEqual(len(Component.objects.all()), len(components))
def test_retrieval(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
c2 = Component.objects.get(id=c.id)
self.assertEqual(c, c2)
def test_filter(self):
for i in components:
c = Component(**i)
c.tech = self.tech_objects[c.name]
c.save()
component = components[random.randrange(len(components))]
c = Component.objects.filter(name=component["name"])[0]
self.assertEqual(c.version, component["version"])
def test_related_fields(self):
# Create 2 components and add the same tech to each.
django_tech = Tech.objects.get(name="django")
sharestack = Component(**components[0])
sharestack.tech = django_tech
nginx = Component(**components[1])
nginx.tech = django_tech
sharestack.save()
nginx.save()
# Check the tech has the 2 components
t = Tech.objects.get(name=django_tech.name)
self.assertEqual(len(t.component_set.all()), 2)
def test_str(self):
for i in components:
t = Component(**i)
self.assertEqual(str(t), i["name"])
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/e414d894f3f4079b3e5897dd9c691af7/plot_morph_surface_stc.py
|
Python
|
bsd-3-clause
| 5,938
| 0
|
"""
.. _ex-morph-surface:
=============================
Morph surface source estimate
=============================
This example demonstrates how to morph an individual subject's
:class:`mne.SourceEstimate` to a common reference space. We achieve this using
:class:`mne.SourceMorph`. Pre-computed data will be morphed based on
a spherical representation of the cortex computed using the spherical
registration of :ref:`FreeSurfer <tut-freesurfer>`
(https://surfer.nmr.mgh.harvard.edu/fswiki/SurfaceRegAndTemplates) [1]_. This
transform will be used to morph the surface vertices of the subject towards the
reference vertices. Here we will use 'fsaverage' as a reference space (see
https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage).
The transformation will be applied to the surface source estimate. A plot
depicting the successful morph will be created for the spherical and inflated
surface representation of ``'fsaverage'``, overlaid with the morphed surface
source estimate.
References
----------
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
.. note:: For background information about morphing see :ref:`ch_morph`.
"""
# Author: Tommy Clausner <[email protected]>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
print(__doc__)
###############################################################################
# Setup paths
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')
fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')
###############################################################################
# Load example data
# Read stc from file
stc = mne.read_source_estimate(fname_stc, subject='sample')
###############################################################################
# Setting up SourceMorph for SourceEstimate
# -----------------------------------------
#
# In MNE surface source estimates represent the source space simply as
# lists of vertices (see
# :ref:`tut-source-estimate-class`).
# This list can either be obtained from
# :class:`mne.SourceSpaces` (src) or from the ``stc`` itself.
#
# Since the default ``spacing`` (resolution of surface mesh) is ``5`` and
# ``subject_to`` is set to 'fsaverage', :class:`mne.SourceMorph` will use
# default ico-5 ``fsaverage`` vertices to morph, which are the special
# values ``[np.arange(10242)] * 2``.
#
# .. note:: This is not generally true for other subjects! The set of vertices
# used for ``fsaverage`` with ico-5 spacing was designed to be
# special. ico-5 spacings for other subjects (or other spacings
# for fsaverage) must be calculated and will not be consecutive
# integers.
#
# If src was not defined, the morph will not actually be precomputed, because
# we lack the vertices *from* which we want to compute it. Instead the morph
# will be set up and, when applying it, the actual transformation will be
# computed on the fly.
#
# Initialize SourceMorph for SourceEstimate
morph = mne.compute_source_morph(stc, subject_from='sample',
subject_to='fsaverage',
subjects_dir=subjects_dir)
###############################################################################
# Apply morph to (Vector) SourceEstimate
# --------------------------------------
#
# The morph will be applied to the source estimate data, by giving it as the
# first argument to the morph we computed above.
stc_fsaverage = morph.apply(stc)
###############################################################################
# Plot results
# ------------
# Define plotting parameters
surfer_kwargs = dict(
hemi='lh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=0.09, time_unit='s', size=(800, 800),
smoothing_steps=5)
# As spherical surface
brain = stc_fsaverage.plot(surface='sphere', **surfer_kwargs)
# Add title
brain.add_text(0.1, 0.9, 'Morphed to fsaverage (spherical)', 'title',
font_size=16)
###############################################################################
# As inflated surface
brain_inf = stc_fsaverage.plot(surface='inflated', **surfer_kwargs)
# Add title
brain_inf.add_text(0.1, 0.9, 'Morphed to fsaverage (inflated)', 'title',
font_size=16)
###############################################################################
# Reading and writing SourceMorph from and to disk
# ------------------------------------------------
#
# An instance of SourceMorph can be saved, by calling
# :meth:`morph.save <mne.SourceMorph.save>`.
#
# This method allows for specification of a filename under which the ``morph``
# will be save in ".h5" format. If no file extension is provided, "-morph.h5"
# will be appended to the respective defined filename::
#
# >>> morph.save('my-file-name')
#
# Reading a saved source morph can be achieved by using
# :func:`mne.read_source_morph`::
#
# >>> morph = mne.read_source_morph('my-file-name-morph.h5')
#
# Once the environment is set up correctly, no information such as
# ``subject_from`` or ``subjects_dir`` needs to be provided, since it can be
# inferred from the data; morphing is to 'fsaverage' by default. SourceMorph
# can further be used without creating an instance and assigning it to a
# variable. Instead, :func:`mne.compute_source_morph` and
# :meth:`mne.SourceMorph.apply` can be
# chained into a handy one-liner. Taken together, the shortest
# possible way to morph data directly would be:
stc_fsaverage = mne.compute_source_morph(stc,
subjects_dir=subjects_dir).apply(stc)
|
bacher09/Gentoolkit
|
pym/gentoolkit/equery/check.py
|
Python
|
gpl-2.0
| 7,056
| 0.029337
|
# Copyright(c) 2009, Gentoo Foundation
#
# Licensed under the GNU General Public License, v2
#
# $Header: $
"""Checks timestamps and MD5 sums for files owned by a given installed package"""
from __future__ import print_function
__docformat__ = 'epytext'
# =======
# Imports
# =======
import os
import sys
from functools import partial
from getopt import gnu_getopt, GetoptError
import portage.checksum as checksum
import gentoolkit.pprinter as pp
from gentoolkit import errors
from gentoolkit.equery import format_options, mod_usage, CONFIG
from gentoolkit.query import Query
# =======
# Globals
# =======
QUERY_OPTS = {
"in_installed": True,
"in_porttree": False,
"in_overlay": False,
"check_MD5sum": True,
"check_timestamp" : True,
"is_regex": False,
"only_failures": False,
"show_progress": False,
}
# =======
# Classes
# =======
class VerifyContents(object):
"""Verify installed packages' CONTENTS files.
The CONTENTS file contains timestamps and MD5 sums for each file owned
by a package.
"""
def __init__(self, printer_fn=None):
"""Create a VerifyObjects instance.
@type printer_fn: callable
@param printer_fn: if defined, will be applied to each result as found
"""
self.check_sums = True
self.check_timestamps = True
self.printer_fn = printer_fn
self.is_regex = False
def __call__(
self,
pkgs,
is_regex=False,
check_sums=True,
check_timestamps=True
):
self.is_regex = is_regex
self.check_sums = check_sums
self.check_timestamps = check_timestamps
result = {}
for pkg in pkgs:
# _run_checks returns tuple(n_passed, n_checked, err)
check_results = self._run_checks(pkg.parsed_contents())
result[pkg.cpv] = check_results
if self.printer_fn is not None:
self.printer_fn(pkg.cpv, check_results)
return result
def _run_checks(self, files):
"""Run some basic sanity checks on a package's contents.
If the file type (ftype) is not a directory or symlink, optionally
verify MD5 sums or mtimes via L{self._verify_obj}.
@see: gentoolkit.packages.get_contents()
@type files: dict
@param files: in form {'PATH': ['TYPE', 'TIMESTAMP', 'MD5SUM']}
@rtype: tuple
@return:
n_passed (int): number of files that passed all checks
n_checked (int): number of files checked
errs (list): check errors' descriptions
"""
n_checked = 0
n_passed = 0
errs = []
for cfile in files:
n_checked += 1
ftype = files[cfile][0]
if not os.path.exists(cfile):
errs.append("%s does not exist" % cfile)
continue
elif ftype == "dir":
if not os.path.isdir(cfile):
err = "%(cfile)s exists, but is not a directory"
errs.append(err % locals())
continue
elif ftype == "obj":
obj_errs = self._verify_obj(files, cfile, errs)
if len(obj_errs) > len(errs):
errs = obj_errs[:]
continue
elif ftype == "sym":
target = files[cfile][2].strip()
if not os.path.islink(cfile):
err = "%(cfile)s exists, but is not a symlink"
errs.append(err % locals())
continue
tgt = os.readlink(cfile)
if tgt != target:
err = "%(cfile)s does not point to %(target)s"
errs.append(err % locals())
continue
else:
err = "%(cfile)s has unknown type %(ftype)s"
errs.append(err % locals())
continue
n_passed += 1
return n_passed, n_checked, errs
def _verify_obj(self, files, cfile, errs):
"""Verify the MD5 sum and/or mtime and return any errors."""
obj_errs = errs[:]
if self.check_sums:
md5sum = files[cfile][2]
try:
cur_checksum = checksum.perform_md5(cfile, calc_prelink=1)
except IOError:
err = "Insufficient permissions to read %(cfile)s"
obj_errs.append(err % locals())
return obj_errs
if cur_checksum != md5sum:
err = "%(cfile)s has incorrect MD5sum"
obj_errs.append(err % locals())
return obj_errs
if self.check_timestamps:
mtime = int(files[cfile][1])
st_mtime = int(os.lstat(cfile).st_mtime)
if st_mtime != mtime:
err = (
"%(cfile)s has wrong mtime (is %(st_mtime)d, should be "
"%(mtime)d)"
)
                obj_errs.append(err % locals())
                return obj_errs
        return obj_errs
# =========
# Functions
# =========
def print_help(with_description=True):
"""Print description, usage and a detailed help message.
@type with_description: bool
@param with_description: if true, print module's __doc__ string
"""
if with_description:
print(__doc__.strip())
print()
    # Deprecation warning added by djanderson, 12/2008
depwarning = (
"Default action for this module has changed in Gentoolkit 0.3.",
"Use globbing to simulate the old behavior (see man equery).",
"Use '*' to check all installed packages.",
"Use 'foo-bar/*' to filter by category."
)
for line in depwarning:
sys.stderr.write(pp.warn(line))
print()
print(mod_usage(mod_name="check"))
print()
print(pp.command("options"))
print(format_options((
(" -h, --help", "display this help message"),
(" -f, --full-regex", "query is a regular expression"),
(" -o, --only-failures", "only display packages that do not pass"),
)))
def checks_printer(cpv, data, verbose=True, only_failures=False):
"""Output formatted results of pkg file(s) checks"""
seen = []
n_passed, n_checked, errs = data
n_failed = n_checked - n_passed
if only_failures and not n_failed:
return
else:
if verbose:
            if cpv not in seen:
pp.uprint("* Checking %s ..." % (pp.emph(str(cpv))))
seen.append(cpv)
else:
pp.uprint("%s:" % cpv, end=' ')
if verbose:
for err in errs:
sys.stderr.write(pp.error(err))
if verbose:
n_passed = pp.number(str(n_passed))
n_checked = pp.number(str(n_checked))
info = " %(n_passed)s out of %(n_checked)s files passed"
print(info % locals())
else:
print("failed(%s)" % n_failed)
def parse_module_options(module_opts):
"""Parse module options and update QUERY_OPTS"""
opts = (x[0] for x in module_opts)
for opt in opts:
if opt in ('-h', '--help'):
print_help()
sys.exit(0)
elif opt in ('-f', '--full-regex'):
QUERY_OPTS['is_regex'] = True
elif opt in ('-o', '--only-failures'):
QUERY_OPTS['only_failures'] = True
def main(input_args):
"""Parse input and run the program"""
short_opts = "hof"
long_opts = ('help', 'only-failures', 'full-regex')
try:
module_opts, queries = gnu_getopt(input_args, short_opts, long_opts)
except GetoptError as err:
sys.stderr.write(pp.error("Module %s" % err))
print()
print_help(with_description=False)
sys.exit(2)
parse_module_options(module_opts)
if not queries:
print_help()
sys.exit(2)
first_run = True
for query in (Query(x, QUERY_OPTS['is_regex']) for x in queries):
if not first_run:
print()
matches = query.smart_find(**QUERY_OPTS)
if not matches:
raise errors.GentoolkitNoMatches(query, in_installed=True)
matches.sort()
printer = partial(
checks_printer,
verbose=CONFIG['verbose'],
only_failures=QUERY_OPTS['only_failures']
)
check = VerifyContents(printer_fn=printer)
check(matches)
first_run = False
# vim: set ts=4 sw=4 tw=79:
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_module.py
|
Python
|
epl-1.0
| 1,896
| 0.004747
|
# Test the module type
from test.test_support import verify, vereq, verbose, TestFailed
from types import ModuleType as module
# An uninitialized module has no __dict__ or __name__, and __doc__ is None
foo = module.__new__(module)
verify(foo.__dict__ is None)
try:
s = foo.__name__
except AttributeError:
pass
else:
raise TestFailed, "__name__ = %s" % repr(s)
# __doc__ is None by default in CPython but not in Jython.
# We're not worrying about that now.
#vereq(foo.__doc__, module.__doc__)
try:
foo_dir = dir(foo)
except TypeError:
pass
else:
raise TestFailed, "__dict__ = %s" % repr(foo_dir)
try:
del foo.somename
except AttributeError:
pass
else:
raise TestFailed, "del foo.somename"
try:
del foo.__dict__
except TypeError:
pass
else:
raise TestFailed, "del foo.__dict__"
try:
foo.__dict__ = {}
except TypeError:
pass
else:
raise TestFailed, "foo.__dict__ = {}"
verify(foo.__dict__ is None)
# Regularly initialized module, no docstring
foo = module("foo")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, None)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": None})
# ASCII docstring
foo = module("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc"})
# Unicode docstring
foo = module("foo", u"foodoc\u1234")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, u"foodoc\u1234")
vereq(foo.__dict__, {"__name__": "
|
foo", "__package__": None, "__doc__": u"foodoc\u1234"})
# Reinitialization should not replace the __dict__
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.bar, 42)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc", "bar": 42})
verify(foo.__dict__ is d)
if verbose:
print "All OK"
|
qedsoftware/commcare-hq
|
corehq/apps/cloudcare/views.py
|
Python
|
bsd-3-clause
| 29,447
| 0.002241
|
import HTMLParser
import json
from xml.etree import ElementTree
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest, Http404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.cache import cache_page
from django.views.generic import View
from couchdbkit import ResourceConflict
from casexml.apps.case.models import CASE_STATUS_OPEN
from casexml.apps.case.xml import V2
from casexml.apps.phone.fixtures import generator
from corehq.form_processor.utils import should_use_sql_backend
from corehq.form_processor.utils.general import use_sqlite_backend
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import string_to_boolean
from dimagi.utils.web import json_response, get_url_base, json_handler
from touchforms.formplayer.api import DjangoAuth, get_raw_instance, sync_db
from touchforms.formplayer.models import EntrySession
from xml2json.lib import xml2json
from corehq import toggles, privileges
from corehq.apps.accounting.decorators import requires_privilege_for_commcare_user, requires_privilege_with_fallback
from corehq.apps.app_manager.dbaccessors import (
get_latest_build_doc,
get_brief_apps_in_domain,
get_latest_released_app_doc,
get_app_ids_in_domain,
get_current_app,
wrap_app,
)
from corehq.apps.app_manager.exceptions import FormNotFoundException, ModuleNotFoundException
from corehq.apps.app_manager.models import Application, ApplicationBase, RemoteApp
from corehq.apps.app_manager.suite_xml.sections.details import get_instances_for_module
from corehq.apps.app_manager.suite_xml.sections.entries import EntriesHelper
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.api import (
api_closed_to_status,
CaseAPIResult,
get_app_json,
get_filtered_cases,
get_filters_from_request_params,
get_open_form_sessions,
look_up_app_json,
)
from corehq.apps.cloudcare.dbaccessors import get_cloudcare_apps
from corehq.apps.cloudcare.decorators import require_cloudcare_access
from corehq.apps.cloudcare.exceptions import RemoteAppError
from corehq.apps.cloudcare.models import ApplicationAccess
from corehq.apps.cloudcare.touchforms_api import BaseSessionDataHelper, CaseSessionDataHelper
from corehq.apps.domain.decorators import login_and_domain_required, login_or_digest_ex, domain_admin_required
from corehq.apps.groups.models import Group
from corehq.apps.reports.formdetails import readable
from corehq.apps.style.decorators import (
use_datatables,
use_jquery_ui,
)
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.users.views import BaseUserSettingsView
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors, LedgerAccessors
from corehq.form_processor.exceptions import XFormNotFound, CaseNotFound
from corehq.util.quickcache import skippable_quickcache
from corehq.util.xml_utils import indent_xml
from corehq.apps.analytics.tasks import track_clicked_preview_on_hubspot
from corehq.apps.analytics.utils import get_meta
@require_cloudcare_access
def default(request, domain):
return HttpResponseRedirect(reverse('cloudcare_main', args=[domain, '']))
def insufficient_privilege(request, domain, *args, **kwargs):
context = {
'domain': domain,
}
return render(request, "cloudcare/insufficient_privilege.html", context)
class CloudcareMain(View):
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(CloudcareMain, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, urlPath):
try:
preview = string_to_boolean(request.GET.get("preview", "false"))
except ValueError:
# this is typically only set at all if it's intended to be true so this
# is a reasonable default for "something went wrong"
preview = True
app_access = ApplicationAccess.get_by_domain(domain)
accessor = CaseAccessors(domain)
if not preview:
apps = get_cloudcare_apps(domain)
if request.project.use_cloudcare_releases:
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(request.couch_user.username)):
get_cloudcare_app = get_latest_build_doc
else:
get_cloudcare_app = get_latest_released_app_doc
apps = map(
lambda app: get_cloudcare_app(domain, app['_id']),
apps,
)
apps = filter(None, apps)
apps = map(wrap_app, apps)
# convert to json
apps = [get_app_json(app) for app in apps]
else:
# legacy functionality - use the latest build regardless of stars
apps = [get_latest_build_doc(domain, app['_id']) for app in apps]
apps = [get_app_json(ApplicationBase.wrap(app)) for app in apps if app]
else:
# big TODO: write a new apps view for Formplayer, can likely cut most out now
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
apps = get_cloudcare_apps(domain)
else:
apps = get_brief_apps_in_domain(domain)
apps = [get_app_json(app) for app in apps if app and (
isinstance(app, RemoteApp) or app.application_version == V2)]
meta = get_meta(request)
track_clicked_preview_on_hubspot(request.couch_user, request.COOKIES, meta)
# trim out empty apps
apps = filter(lambda app: app, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
def _default_lang():
if apps:
# unfortunately we have to go back to the DB to find this
return Application.get(apps[0]["_id"]).default_language
else:
return "en"
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
def _url_context():
# given a url path, returns potentially the app, parent, and case, if
# they're selected. the front end optimizes with these to avoid excess
# server calls
# there's an annoying dependency between this logic and backbone's
# url routing that seems hard to solve well. this needs to be synced
# with apps.js if anything changes
# for apps anything with "view/app/" works
# for cases it will be:
# "view/:app/:module/:form/case/:case/"
# if there are parent cases, it will be:
# "view/:app/:module/:form/parent/:parent/case/:case/
# could use regex here but this is actually simpler with the potential
# absence of a trailing slash
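            # worked example of the routing above (ids made up, not from the
            # original comments): "view/abc123/0/1/parent/p1/case/c1" splits
            # so that app_id == "abc123", parent_id == "p1", case_id == "c1",
            # while "view/abc123/0/1/case/c1" yields parent_id None, case_id "c1"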
            split = urlPath.split('/')
app_id = split[1] if len(split) >= 2 else None
if len(split) >= 5 and split[4] == "parent":
parent_id = split[5]
case_id = split[7] if len(split) >= 7 else None
else:
parent_id = None
case_id = split[5] if len(split) >= 6 else None
app = None
if app_id:
if app_id in [a['_id'] for a in apps]:
app = look_up_app_json(domain, app_id)
else:
messages.info(request, _("That app is
|
mscuthbert/abjad
|
abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeNode_duration.py
|
Python
|
gpl-3.0
| 1,469
| 0.000681
|
# -*- encoding: utf-8 -*-
from abjad.tools.durationtools import Duration
from abjad.tools.rhythmtreetools import RhythmTreeContainer, RhythmTreeLeaf
def test_rhythmtreetools_RhythmTreeNode_duration_01():
tree = RhythmTreeContainer(preprolated_duration=1, children=[
RhythmTreeLeaf(preprolated_duration=1),
RhythmTreeContainer(preprolated_duration=2, children=[
RhythmTreeLeaf(preprolated_duration=3),
RhythmTreeLeaf(preprolated_duration=2)
]),
RhythmTreeLeaf(preprolated_duration=2)
])
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 5)
assert tree[1].duration == Duration(2, 5)
assert tree[1][0].duration == Duration(6, 25)
assert tree[1][1].duration == Duration(4, 25)
assert tree[2].duration == Duration(2, 5)
    tree[1].append(tree.pop())
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 3)
assert tree[1].duration == Duration(2, 3)
assert tree[1][0].duration == Duration(2, 7)
assert tree[1][1].duration == Duration(4, 21)
assert tree[1][2].duration == Duration(4, 21)
tree.preprolated_duration = 19
assert tree.duration == Duration(19)
assert tree[0].duration == Duration(19, 3)
assert tree[1].duration == Duration(38, 3)
assert tree[1][0].duration == Duration(38, 7)
assert tree[1][1].duration == Duration(76, 21)
assert tree[1][2].duration == Duration(76, 21)
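# Worked arithmetic behind the asserts above (explanatory comment, not part of
# the original test): after the append, tree[1] spans Duration(2, 3) and its
# children have preprolated durations 3, 2, 2 (total 7), so the first child
# measures (2/3) * (3/7) == Duration(2, 7) and each of the other two measures
# (2/3) * (2/7) == Duration(4, 21).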
|
dannybrowne86/django-timepiece
|
timepiece/tests/factories.py
|
Python
|
mit
| 6,805
| 0.000882
|
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
import factory
from factory.fuzzy import FuzzyDate, FuzzyInteger
import random
from django.contrib.auth import models as auth
from django.contrib.auth.hashers import make_password
from timepiece.contracts import models as contracts
from timepiece.crm import models as crm
from timepiece.entries import models as entries
from timepiece import utils
class User(factory.DjangoModelFactory):
FACTORY_FOR = auth.User
# FIXME: Some tests depend on first_name/last_name being unique.
first_name = factory.Sequence(lambda n: 'Sam{0}'.format(n))
last_name = factory.Sequence(lambda n: 'Blue{0}'.format(n))
username = factory.Sequence(lambda n: 'user{0}'.format(n))
email = factory.Sequence(lambda n: 'user{0}@example.com'.format(n))
password = factory.LazyAttribute(lambda n: make_password('password'))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if create and extracted:
for perm in extracted:
if isinstance(perm, basestring):
app_label, codename = perm.split('.')
perm = auth.Permission.objects.get(
content_type__app_label=app_label,
codename=codename,
)
self.user_permissions.add(perm)
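    # Hypothetical usage of the post_generation hook above (the permission
    # name is made up): each "app_label.codename" string is resolved to an
    # auth.Permission and attached to the new user, e.g.
    #
    #   user = User(permissions=['crm.view_project'])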
class Superuser(User):
is_superuser = True
is_staff = True
class Group(factory.DjangoModelFactory):
FACTORY_FOR = auth.Group
name = factory.Sequence(lambda n: 'group{0}'.format(n))
class ProjectContract(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ProjectContract
name = factory.Sequence(lambda n: 'contract{0}'.format(n))
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
    status = contracts.ProjectContract.STATUS_CURRENT
type = contracts.ProjectContract.PROJECT_PRE_PAID_HOURLY
@factory.post_generation
def contract_hours(self, create, extracted, **kwargs):
if create:
num_hours = extracted or random.randint(10, 400)
for i in range(2):
ContractHour(contract=self,
hours=Decimal(str(num_hours/2.0)))
@factory.post_generation
def projects(self, create, extracted, **kwargs):
if create and extracted:
self.projects.add(*extracted)
class ContractHour(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ContractHour
date_requested = datetime.date.today()
status = contracts.ContractHour.APPROVED_STATUS
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
class ContractAssignment(factory.DjangoModelFactory):
FACTORY_FOR = contracts.ContractAssignment
user = factory.SubFactory('timepiece.tests.factories.User')
contract = factory.SubFactory('timepiece.tests.factories.ProjectContract')
start_date = datetime.date.today()
end_date = datetime.date.today() + relativedelta(weeks=2)
class HourGroup(factory.DjangoModelFactory):
FACTORY_FOR = contracts.HourGroup
name = factory.Sequence(lambda n: 'hourgroup{0}'.format(n))
class EntryGroup(factory.DjangoModelFactory):
FACTORY_FOR = contracts.EntryGroup
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
end = FuzzyDate(datetime.date.today() - relativedelta(months=1))
class TypeAttribute(factory.DjangoModelFactory):
FACTORY_FOR = crm.Attribute
label = factory.Sequence(lambda n: 'type{0}'.format(n))
type = crm.Attribute.PROJECT_TYPE
class StatusAttribute(factory.DjangoModelFactory):
FACTORY_FOR = crm.Attribute
label = factory.Sequence(lambda n: 'status{0}'.format(n))
type = crm.Attribute.PROJECT_STATUS
class Business(factory.DjangoModelFactory):
FACTORY_FOR = crm.Business
name = factory.Sequence(lambda n: 'business{0}'.format(n))
class Project(factory.DjangoModelFactory):
FACTORY_FOR = crm.Project
name = factory.Sequence(lambda n: 'project{0}'.format(n))
business = factory.SubFactory('timepiece.tests.factories.Business')
point_person = factory.SubFactory('timepiece.tests.factories.User')
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute')
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute')
class BillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=True)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=True)
class NonbillableProject(Project):
type = factory.SubFactory('timepiece.tests.factories.TypeAttribute', billable=False)
status = factory.SubFactory('timepiece.tests.factories.StatusAttribute', billable=False)
class RelationshipType(factory.DjangoModelFactory):
FACTORY_FOR = crm.RelationshipType
name = factory.Sequence(lambda n: 'reltype{0}'.format(n))
class ProjectRelationship(facto
|
ry.DjangoModelFactory):
FACTORY_FOR = crm.ProjectRelationship
user = factory.SubFactory('timepiece.tests.factories.User')
project = factory.SubFactory('timepiece.tests.factories.Project')
class UserProfile(factory.DjangoModelFactory):
FACTORY_FOR = crm.UserProfile
user = factory.SubFactory('timepiece.tests.factories.User')
class Activity(factory.DjangoModelFactory):
FACTORY_FOR = entries.Activity
    code = factory.Sequence(lambda n: 'a{0}'.format(n))
name = factory.Sequence(lambda n: 'activity{0}'.format(n))
class BillableActivityFactory(Activity):
billable = True
class NonbillableActivityFactory(Activity):
billable = False
class ActivityGroup(factory.DjangoModelFactory):
FACTORY_FOR = entries.ActivityGroup
name = factory.Sequence(lambda n: 'activitygroup{0}'.format(n))
class Location(factory.DjangoModelFactory):
FACTORY_FOR = entries.Location
name = factory.Sequence(lambda n: 'location{0}'.format(n))
slug = factory.Sequence(lambda n: 'location{0}'.format(n))
class Entry(factory.DjangoModelFactory):
FACTORY_FOR = entries.Entry
status = entries.Entry.UNVERIFIED
user = factory.SubFactory('timepiece.tests.factories.User')
activity = factory.SubFactory('timepiece.tests.factories.Activity')
project = factory.SubFactory('timepiece.tests.factories.Project')
location = factory.SubFactory('timepiece.tests.factories.Location')
class ProjectHours(factory.DjangoModelFactory):
FACTORY_FOR = entries.ProjectHours
week_start = utils.get_week_start()
project = factory.SubFactory('timepiece.tests.factories.Project')
user = factory.SubFactory('timepiece.tests.factories.User')
hours = FuzzyInteger(0, 20)
|
cpennington/edx-platform
|
cms/djangoapps/contentstore/api/tests/base.py
|
Python
|
agpl-3.0
| 2,906
| 0.001376
|
"""
Base test case for the course API views.
"""
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from lms.djangoapps.courseware.tests.factories import StaffFactory
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
# pylint: disable=unused-variable
class BaseCourseViewTest(SharedModuleStoreTestCase, APITestCase):
"""
Base test class for course data views.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    view_name = None  # The name of the view to use in reverse() call in self.get_url()
@classmethod
def setUpClass(cls):
        super(BaseCourseViewTest, cls).setUpClass()
cls.course = CourseFactory.create(display_name='test course', run="Testing_course")
cls.course_key = cls.course.id
cls.password = 'test'
cls.student = UserFactory(username='dummy', password=cls.password)
cls.staff = StaffFactory(course_key=cls.course.id, password=cls.password)
cls.initialize_course(cls.course)
@classmethod
def initialize_course(cls, course):
"""
Sets up the structure of the test course.
"""
course.self_paced = True
cls.store.update_item(course, cls.staff.id)
cls.section = ItemFactory.create(
parent_location=course.location,
category="chapter",
)
cls.subsection1 = ItemFactory.create(
parent_location=cls.section.location,
category="sequential",
)
unit1 = ItemFactory.create(
parent_location=cls.subsection1.location,
category="vertical",
)
ItemFactory.create(
parent_location=unit1.location,
category="video",
)
ItemFactory.create(
parent_location=unit1.location,
category="problem",
)
cls.subsection2 = ItemFactory.create(
parent_location=cls.section.location,
category="sequential",
)
unit2 = ItemFactory.create(
parent_location=cls.subsection2.location,
category="vertical",
)
unit3 = ItemFactory.create(
parent_location=cls.subsection2.location,
category="vertical",
)
ItemFactory.create(
parent_location=unit3.location,
category="video",
)
ItemFactory.create(
parent_location=unit3.location,
category="video",
)
def get_url(self, course_id):
"""
Helper function to create the url
"""
return reverse(
self.view_name,
kwargs={
'course_id': course_id
}
)
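# Illustrative subclass of the base class above (assumed, not part of this
# module; the URL name is hypothetical):
#
#   class CourseDetailViewTest(BaseCourseViewTest):
#       view_name = 'courses_api:course_detail'
#
#       def test_staff_access(self):
#           self.client.login(username=self.staff.username, password=self.password)
#           response = self.client.get(self.get_url(self.course_key))
#           self.assertEqual(response.status_code, 200)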
|
osgcc/ryzom
|
nel/tools/build_gamedata/processes/rbank/2_build.py
|
Python
|
agpl-3.0
| 11,551
| 0.013159
|
#!/usr/bin/python
#
# \file 2_build.py
# \brief Build rbank
# \date 2009-03-10-22-43-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Build rbank
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Build rbank")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Find tools
BuildIgBoxes = findTool(log, ToolDirectories, BuildIgBoxesTool, ToolSuffix)
ExecTimeout = findTool(log, ToolDirectories, ExecTimeoutTool, ToolSuffix)
BuildRbank = findTool(log, ToolDirectories, BuildRbankTool, ToolSuffix)
GetNeighbors = findTool(log, ToolDirectories, GetNeighborsTool, ToolSuffix)
BuildIndoorRbank = findTool(log, ToolDirectories, BuildIndoorRbankTool, ToolSuffix)
# AiBuildWmap = findTool(log, ToolDirectories, AiBuildWmapTool, ToolSuffix)
printLog(log, "")
# Build rbank bbox
printLog(log, ">>> Build rbank bbox <<<")
if BuildIgBoxes == "":
toolLogFail(log, BuildIgBoxesTool, ToolSuffix)
else:
mkPath(log, ExportBuildDirectory + "/" + RbankBboxBuildDirectory)
cf = open("build_ig_boxes.cfg", "w")
cf.write("\n")
cf.write("Pathes = {\n")
for dir in IgLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
for dir in ShapeLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("IGs = {\n")
for dir in IgLookupDirectories:
        files = findFiles(log, ExportBuildDirectory + "/" + dir, "", ".ig")
for file in files:
cf.write("\t\"" + os.path.basename(file)[0:-len(".ig")] + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("Output = \"" + ExportBuildDirectory + "/" + RbankBboxBuildDirectory + "/temp.bbox\";\n")
cf.write("\n")
cf.close()
subprocess.call([ BuildIgBoxes ])
os.remove("build_ig_boxes.cfg")
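# For reference, the build_ig_boxes.cfg written above looks roughly like this
# (illustrative, paths abbreviated; derived from the cf.write calls):
#   Pathes = { "<export>/<lookup dir>", ... };
#   IGs = { "<ig name>", ... };
#   Output = "<export>/<rbank bbox dir>/temp.bbox";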
printLog(log, "")
printLog(log, ">>> Build rbank build config <<<")
cf = open("build_rbank.cfg", "w")
cf.write("\n")
cf.write("// Rbank settings\n")
cf.write("\n")
cf.write("Verbose = " + str(RBankVerbose) + ";\n")
cf.write("CheckConsistency = " + str(RBankConsistencyCheck) + ";\n")
mkPath(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory)
cf.write("ZonePath = \"" + ExportBuildDirectory + "/" + ZoneWeldBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + SmallbankExportDirectory)
cf.write("BanksPath = \"" + ExportBuildDirectory + "/" + SmallbankExportDirectory + "/\";\n")
cf.write("Bank = \"" + ExportBuildDirectory + "/" + SmallbankExportDirectory + "/" + BankTileBankName + ".smallbank\";\n")
cf.write("ZoneExt = \".zonew\";\n")
cf.write("ZoneNHExt = \".zonenhw\";\n")
cf.write("IGBoxes = \"" + ExportBuildDirectory + "/" + RbankBboxBuildDirectory + "/temp.bbox\";\n")
mkPath(log, LeveldesignWorldDirectory)
cf.write("LevelDesignWorldPath = \"" + LeveldesignWorldDirectory + "\";\n")
mkPath(log, ExportBuildDirectory + "/" + IgLandBuildDirectory)
cf.write("IgLandPath = \"" + ExportBuildDirectory + "/" + IgLandBuildDirectory + "\";\n")
mkPath(log, ExportBuildDirectory + "/" + IgOtherBuildDirectory)
cf.write("IgVillagePath = \"" + ExportBuildDirectory + "/" + IgOtherBuildDirectory + "\";\n")
cf.write("\n")
mkPath(log, ExportBuildDirectory + "/" + RbankTessellationBuildDirectory)
cf.write("TessellationPath = \"" + ExportBuildDirectory + "/" + RbankTessellationBuildDirectory + "/\";\n")
cf.write("TessellateLevel = " + str(BuildQuality) + ";\n") # BuildQuality
cf.write("\n")
cf.write("WaterThreshold = 1.0;\n")
cf.write("\n")
cf.write("OutputRootPath = \"" + ExportBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + RbankSmoothBuildDirectory)
cf.write("SmoothDirectory = \"" + RbankSmoothBuildDirectory + "/\";\n")
mkPath(log, ExportBuildDirectory + "/" + RbankRawBuildDirectory)
cf.write("RawDirectory = \"" + RbankRawBuildDirectory + "/\";\n")
cf.write("\n")
cf.write("ReduceSurfaces = " + str(RbankReduceSurfaces) + ";\n")
cf.write("SmoothBorders = " + str(RbankSmoothBorders) + ";\n")
cf.write("\n")
cf.write("ComputeElevation = " + str(RbankComputeElevation) + ";\n")
cf.write("ComputeLevels = " + str(RbankComputeLevels) + ";\n")
cf.write("\n")
cf.write("LinkElements = " + str(RbankLinkElements) + ";\n")
cf.write("\n")
cf.write("CutEdges = " + str(RbankCutEdges) + ";\n")
cf.write("\n")
cf.write("UseZoneSquare = " + str(RbankUseZoneSquare) + ";\n")
cf.write("\n")
cf.write("// The whole landscape\n")
cf.write("ZoneUL = \"" + RbankZoneUl + "\";\n")
cf.write("ZoneDR = \"" + RbankZoneDr + "\";\n")
cf.write("\n")
mkPath(log, ExportBuildDirectory + "/" + RbankPreprocBuildDirectory)
cf.write("PreprocessDirectory = \"" + ExportBuildDirectory + "/" + RbankPreprocBuildDirectory + "/\";\n")
cf.write("\n")
cf.write("// The global retriever processing settings\n")
cf.write("GlobalRetriever = \"temp.gr\";\n")
cf.write("RetrieverBank = \"temp.rbank\";\n")
cf.write("\n")
cf.write("GlobalUL = \"" + RbankZoneUl + "\";\n")
cf.write("GlobalDR = \"" + RbankZoneDr + "\";\n")
cf.write("\n")
cf.write("// Which kind of stuff to do\n")
cf.write("TessellateZones = 0;\n")
cf.write("MoulineZones = 0;\n")
cf.write("ProcessRetrievers = 0;\n")
cf.write("ProcessGlobal = 0;\n")
cf.write("\n")
cf.write("Zones = {\n")
mkPath(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory)
files = findFiles(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory, "", ".zonew")
for file in files:
cf.write("\t\"" + os.path.basename(file) + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.write("Pathes = {\n")
for dir in IgLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
for dir in ShapeLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
cf.write("\t\"" + ExportBuildDirectory + "/" + dir + "\", \n")
cf.write("};\n")
cf.write("\n")
cf.close()
printLog(log, "")
printLog(log, ">>> Build rbank check prims <<<")
if BuildRbank == "":
toolLogFail(log, BuildRbankTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
subprocess.call([ ExecTimeout, str(RbankBuildTesselTimeout), BuildRbank, "-C", "-p", "-g" ])
printLog(log, "")
printLog(log, ">>> Build rbank process all passes <<<")
if BuildRbank == "":
toolLogFail(log, BuildRbankTool, ToolSuffix)
if GetNeighbors == "":
toolLogFail(log, GetNeighborsTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
zonefiles = findFiles(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory, "", ".zonew")
for zonefile in zonefiles:
zone = os.path.basename(zonefile)[0:-len(".zonew")]
lr1 = ExportBuildDirectory + "/" + RbankSmoothBuildDirectory + "/" + zone + ".lr"
nearzones = subprocess.Popen([ GetNeighbors, zone ], stdout = subprocess.PIPE).communicate()[0].strip().split(" ")
printLog(log, "ZONE " + zone + ": " + str(nearzones))
zone_to_build = 0
for nearzone in nearzones:
sourcePath = ExportBuildDirectory + "/" + ZoneWeldBuildDirectory + "/" + nearzone + ".zonew"
if (os.path.isfile(sourceP
|
mbohlool/client-python
|
kubernetes/client/models/v1alpha1_role_list.py
|
Python
|
apache-2.0
| 6,307
| 0.001586
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
""
|
"
from pprint import pformat
from six import iteritems
import re
class V1alpha1RoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"
|
""
swagger_types = {
'api_version': 'str',
'items': 'list[V1alpha1Role]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1alpha1RoleList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1alpha1RoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1alpha1RoleList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1alpha1RoleList.
Items is a list of Roles
:return: The items of this V1alpha1RoleList.
:rtype: list[V1alpha1Role]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1alpha1RoleList.
Items is a list of Roles
:param items: The items of this V1alpha1RoleList.
:type: list[V1alpha1Role]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1alpha1RoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1alpha1RoleList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:return: The metadata of this V1alpha1RoleList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:param metadata: The metadata of this V1alpha1RoleList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1RoleList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
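# Minimal usage sketch (assumed; this generated module ships no examples):
#
#   role_list = V1alpha1RoleList(items=[])
#   print(role_list.to_str())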
|
python-hyper/priority
|
src/priority/__init__.py
|
Python
|
mit
| 346
| 0
|
# -*- coding: utf-8 -*-
"
|
""
priority: HTTP/2 priority implementation for Python
"""
from .priority import ( # noqa
Stream,
PriorityTree,
DeadlockError,
PriorityLoop,
PriorityError,
DuplicateStreamError,
MissingStreamError,
TooManyStreamsError,
BadWeightError,
PseudoStreamError,
)
__version__ = "2.0.0"
|
hammerlab/mhctools
|
mhctools/netmhc_cons.py
|
Python
|
apache-2.0
| 1,609
| 0.000622
|
# Copyright (c) 2014-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhccons_stdout
class NetMHCcons(BaseCommandlinePredictor):
def __init__(
self,
alleles,
program_name="netMHCcons",
process_limit=0,
default_peptide_lengths=[9]):
BaseCommandlinePredictor.__init__(
self,
program_name=program_name,
alleles=alleles,
parse_output_fn=parse_netmhccons_stdout,
# netMHCcons does not have a supported allele flag
supported_alleles_flag=None,
length_flag="-length",
input_file_flag="-f",
allele_flag="-a",
peptide_mode_flags=["-inptype", "1"],
tempdir_flag="-tdir",
process_limit=process_limit,
default_peptide_lengths=default_peptide_lengths,
group_peptides_by_length=True)
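# Hedged usage sketch (the allele and peptide are made up, and the
# predict_peptides method name is assumed from the mhctools base class;
# requires the netMHCcons binary on PATH):
#
#   predictor = NetMHCcons(alleles=["HLA-A*02:01"])
#   predictions = predictor.predict_peptides(["SIINFEKLL"])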
|
isanwong/cctools
|
weaver/src/examples/batch.py
|
Python
|
gpl-2.0
| 410
| 0.004878
|
uname = ParseFunction('uname -a > {OUT}')
for group in ('disc', 'ccl', 'gh'):
batch_options = 'requirements = MachineGroup == "{0}"'.format(group)
uname(outputs='uname.{0}'.format(group), environment={'BATCH_OPTIONS': batch_options})
#for group in ('disc', 'ccl', 'gh'):
#    with Options(batch='requirements = MachineGroup == "{0}"'.format(group)):
#        uname(outputs='uname.{0}'.format(group))
|
FederatedAI/FATE
|
examples/pipeline/upload/pipeline-upload-extend-sid.py
|
Python
|
apache-2.0
| 2,416
| 0.003311
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pipeline.backend.pipeline import PipeLine
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
data_base = config.data_base_dir
# partition for data storage
partition = 4
# table name and namespace, used in FATE job configuration
dense_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
tag_data = {"name": "tag_value_1", "namespace": f"experiment{namespace}"}
pipeline_upload = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)
# add upload data info
# path to csv file(s) to be uploaded
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/breast_hetero_guest.csv"),
table_name=dense_data["name"], # table name
namespace=dense_data["namespace"], # namespace
head=1, partition=partition, # data info
id_delimiter=",",
extend_sid=True)
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/tag_value_1000_140.csv"),
table_name=tag_data["name"],
namespace=tag_data["namespace"],
head=0, partition=partition,
id_delimiter=",",
extend_sid=True)
# upload both data
pipeline_upload.upload(drop=1)
if __name__ == "__main__":
main()
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/surface/hoverlabel/font/_color.py
|
Python
|
mit
| 470
| 0.002128
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="surface.hoverlabel.font", **kwargs
):
        super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
vascotenner/holoviews
|
holoviews/core/util.py
|
Python
|
bsd-3-clause
| 31,475
| 0.004829
|
import os, sys, warnings, operator
import numbers
import itertools
import string, fnmatch
import unicodedata
from collections import defaultdict
import numpy as np
import param
try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
try:
import pandas as pd # noqa (optional import)
except ImportError:
pd = None
try:
import dask.dataframe as dd
except ImportError:
dd = None
# Python3 compatibility
import types
if sys.version_info.major == 3:
basestring = str
unicode = str
generator_types = (zip, range, types.GeneratorType)
else:
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if isinstance(key, np.ndarray) and key.dtype.kind == 'b':
return key
wrapped_key = wrap_tuple(key)
if wrapped_key.count(Ellipsis)== 0:
return key
if wrapped_key.count(Ellipsis)!=1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
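# Illustrative behaviour of process_ellipses (assuming obj has three
# dimensions; not a doctest from the original module):
#
#   process_ellipses(obj, (0, Ellipsis))   # -> (0, slice(None), slice(None))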
def safe_unicode(value):
if sys.version_info.major == 3 or not isinstance(value, str): return value
else: return unicode(value.decode('utf-8'))
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class Aliases(object):
"""
Helper class useful for defining a set of alias tuples on a single object.
For instance, when defining a group or label with an alias, instead
of setting tuples in the constructor, you could use
``aliases.water`` if you first define:
>>> aliases = Aliases(water='H_2O', glucose='C_6H_{12}O_6')
>>> aliases.water
('water', 'H_2O')
This may be used to conveniently define aliases for groups, labels
or dimension names.
"""
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, (k,v))
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
    appropriate for Python 2 (no unicode in identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
    allows filtering, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
        The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
        order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
        unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
        disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
|
0ED/UnitX
|
doc/conf.py
|
Python
|
mit
| 6,844
| 0.005552
|
# -*- coding: utf-8 -*-
#
# UnitX documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 17 05:31:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'UnitX'
copyright = u'2016, Tasuku TAKAHASHI'
author = u'Tasuku TAKAHASHI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.7.0'
# The full version, including alpha/beta/rc tags.
release = u'0.7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
#html_theme = 'default' #Good!
#html_theme = 'sphinx_rtd_theme' #Good!
#html_theme = 'agogo' #Good!
#html_theme = 'nature' #Pretty Good!
# Options for HTML output
# -----------------------
# Use our custom theme.
html_theme = 'pydoctheme'
html_theme_path = ['tools']
html_theme_options = {'collapsiblesidebar': True}
# Short title used e.g. for <title> HTML tags.
html_short_title = '%s Documentation' % release
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Path to find HTML templates.
templates_path = ['tools/templates']
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://supertask.github.io/unitx/'
# Additional static files.
html_static_path = ['tools/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'UnitX' + release.replace('.', '')
# Split the index
html_split_index = True
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'UnitX.tex', u'UnitX Documentation',
u'Tasuku TAKAHASHI', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'unitx', u'UnitX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'UnitX', u'UnitX Documentation',
author, 'UnitX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
tejesh95/Zubio.in
|
zubio/allauth/socialaccount/providers/flickr/tests.py
|
Python
|
mit
| 1,692
| 0
|
# -*- coding: utf-8 -*-
from allauth.socialaccount.tests import create_oauth_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import FlickrProvider
class FlickrTests(create_oauth_tests(registry.by_id(FlickrProvider.id))):
def get_mocked_response(self):
#
return [
MockedResponse(200, r"""
{"stat": "ok", "user": {"username": {"_content": "pennersr"}, "id": "12345678@N00"}}
"""), # noqa
MockedResponse(200, r"""
{"person": {"username": {"_content": "pennersr"}, "photosurl": {"_content": "http://www.flickr.com/photos/12345678@N00/"}, "nsid": "12345678@N00", "path_alias": null, "photos": {"count": {"_content": 0}, "firstdate
|
taken": {"_content": null}, "views": {"_content": "28"}, "firstdate": {"_content": null}}, "iconserver": "0", "description": {"_content": ""}, "mobileurl": {"_content": "http://m.flickr.com/photostream.gne?id=6294613"}, "profileurl": {"_content": "http://www.flickr.com/people/12345678@N00/"}, "mbox_sha1sum": {"_content": "5e5b359c123e54f95236209c8808d607a5cdd21e"}, "ispro": 0, "l
|
ocation": {"_content": ""}, "id": "12345678@N00", "realname": {"_content": "raymond penners"}, "iconfarm": 0}, "stat": "ok"}
""")] # noqa
def test_login(self):
account = super(FlickrTests, self).test_login()
f_account = account.get_provider_account()
self.assertEqual(account.user.first_name,
'raymond')
self.assertEqual(account.user.last_name,
'penners')
self.assertEqual(f_account.get_profile_url(),
'http://www.flickr.com/people/12345678@N00/')
|
softak/webfaction_demo
|
vendor-local/lib/python/kombu/serialization.py
|
Python
|
bsd-3-clause
| 11,002
| 0.000273
|
"""
kombu.serialization
===================
Serialization utilities.
:copyright: (c) 2009 - 2011 by Ask Solem
:license: BSD, see LICENSE for more details.
"""
import codecs
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError:
cpickle = None # noqa
from kombu.utils.encoding import str_to_bytes
if sys.platform.startswith("java"):
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
if sys.version_info < (2, 6): # pragma: no cover
# cPickle is broken in Python <= 2.5.
# It unsafely and incorrectly uses relative instead of absolute
# imports,
# so e.g.:
# exceptions.KeyError
# becomes:
# kombu.exceptions.KeyError
#
# Your best choice is to upgrade to Python 2.6,
# as while the pure pickle version has worse performance,
# it is the only safe option for older Python versions.
pickle = pypickle
else:
pickle = cpickle or pypickle
bytes_type = str
if sys.version_info >= (3, 0):
bytes_type = bytes
class SerializerNotInstalled(StandardError):
"""Support for the requested serialization type is not installed"""
pass
class SerializerRegistry(object):
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
if encoder:
self._encoders[name] = (content_type, content_encoding, encoder)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
def disable(self, name):
if '/' not in name:
name = self.type_to_name[name]
self._disabled_content_types.add(name)
def unregister(self, name):
try:
content_type = self._encoders[name][0]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
except KeyError:
raise SerializerNotInstalled(
"No encoder/decoder installed for %s" % name)
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
        :param name: The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
        try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
"No encoder installed for %s" % name)
def encode(self, data, serializer=None):
if serializer == "raw":
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
"No encoder installed for %s" % serializer)
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
        # set of 'binary' will encompass both, even if not ideal).
if not serializer and isinstance(data, bytes_type):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return "application/data", "binary", data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, unicode):
payload = data.encode("utf-8")
return "text/plain", "utf-8", payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
payload = encoder(data)
return content_type, content_encoding, payload
def decode(self, data, content_type, content_encoding, force=False):
if content_type in self._disabled_content_types:
raise SerializerNotInstalled(
"Content-type %r has been disabled." % (content_type, ))
content_type = content_type or 'application/data'
content_encoding = (content_encoding or 'utf-8').lower()
# Don't decode 8-bit strings or Unicode objects
if content_encoding not in ('binary', 'ascii-8bit') and \
not isinstance(data, unicode):
data = _decode(data, content_encoding)
try:
decoder = self._decoders[content_type]
except KeyError:
return data
if not data:
return data
return decoder(data)
"""
.. data:: registry
Global registry of serializers/deserializers.
"""
registry = SerializerRegistry()
"""
.. function:: encode(data, serializer=default_serializer)
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., `application/json`), content encoding, (e.g.,
`utf-8`) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
encode = registry.encode
"""
.. function:: decode(data, content_type, content_encoding):
Deserialize a data stream as serialized using `encode`
based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., `application/json`).
:param content_encoding: The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
:returns: The unserialized data.
"""
decode = registry.decode
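# Round-trip sketch for the two helpers above (assumes the json serializer
# has already been registered with the global registry):
#
#   content_type, encoding, payload = encode({"hello": "world"},
#                                            serializer="json")
#   assert decode(payload, content_type, encoding) == {"hello": "world"}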
"""
.. function:: register(name, encoder, decoder, content_type,
content_encoding="utf-8"):
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If :const:`None`, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If :const:`None`, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the `decoder` method will be returning. Will usually be
        `utf-8`, `us-ascii`, or `binary`.
"""
register = registry.register
"""
.. function:: unregister(name):
Unregister registered encoder/decoder.
:param name: Registered serialization method name.
"""
unregister = registry.unregister
|
ekivemark/BlueButtonDev
|
appmgmt/utils.py
|
Python
|
apache-2.0
| 1,004
| 0.002988
|
# -*- coding: utf-8 -*-
"""
BlueButtonDev.appmgmt
FILE: utils
Created: 12/2/15 8:09 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django.contrib import messages
from .models import DEVELOPER_ROLE_CHOICES
from django.conf import settings
def Choice_Display(role):
"""
Receive a string of the current role
Lookup in DEVELOPER_ROLE_CHOICES
Return the String
:param role:
:return:
"""
result = dict(DEVELOPER_ROLE_CHOICES).get(role)
if role == "None":
return
else:
return result
class MessageMixin(object):
"""
    Make it easy to display notification messages when using Class Based Views.
"""
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(MessageMixin, self).delete(request, *args, **kwargs)
def form_valid(self, form):
messages.success(self.request, self.success_message)
return super(MessageMixin, self).form_valid(form)
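# Hypothetical use of the mixin with a generic class-based view (the view,
# model, and message are invented; DeleteView would come from
# django.views.generic):
#
#   class ApplicationDeleteView(MessageMixin, DeleteView):
#       model = Application
#       success_message = "Application removed."
#       success_url = "/"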
|
DeeDee22/nelliepi
|
src/ch/fluxkompensator/nelliepi/IPAddressFinder.py
|
Python
|
gpl-2.0
| 283
| 0.021201
|
'''
Created on Jun 15, 2014
@author: geraldine
'''
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(),0x8915,struct.pack('256s', ifname[:15]))[20:24])
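# Example call (the interface name is machine-specific; 'eth0' is assumed):
#
#   print(get_ip_address('eth0'))   # e.g. '192.168.1.42'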
|
cliffano/swaggy-jenkins
|
clients/python-blueplanet/generated/app/openapi_server/models/github_repositorypermissions.py
|
Python
|
mit
| 3,754
| 0.002397
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from openapi_server import util
class GithubRepositorypermissions(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, admin: bool=None, push: bool=None, pull: bool=None, _class: str=None):  # noqa: E501
"""GithubRepositorypermissions - a model defined in Swagger
:param admin: The admin of this GithubRepositorypermissions. # noqa: E501
:type admin: bool
:param push: The push of this GithubRepositorypermissions. # noqa: E501
:type push: bool
:param pull: The pull of this GithubRepositorypermissions. # noqa: E501
:type pull: bool
:param _class: The _class of this GithubRepositorypermissions. # noqa: E501
:type _class: str
"""
self.swagger_types = {
'admin': bool,
'push': bool,
'pull': bool,
'_class': str
}
self.attribute_map = {
'admin': 'admin',
'push': 'push',
'pull': 'pull',
'_class': '_class'
}
self._admin = admin
self._push = push
self._pull = pull
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'GithubRepositorypermissions':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The GithubRepositorypermissions of this GithubRepositorypermissions. # noqa: E501
:rtype: GithubRepositorypermissions
"""
return util.deserialize_model(dikt, cls)
@property
def admin(self) -> bool:
"""Gets the admin of this GithubRepositorypermissions.
:return: The admin of this GithubRepositorypermissions.
:rtype: bool
"""
return self._admin
@admin.setter
def admin(self, admin: bool):
"""Sets the admin of this GithubRepositorypermissions.
:param admin: The admin of this GithubRepositorypermissions.
:type admin: bool
"""
self._admin = admin
@property
def push(self) -> bool:
"""Gets the push of this GithubRepositorypermissions.
:return: The push of this GithubRepositorypermissions.
:rtype: bool
"""
return self._push
@push.setter
def push(self, push: bool):
"""Sets the push of this GithubRepositorypermissions.
:param push: The push of this GithubRepositorypermissions.
:type push: bool
"""
self._push = push
@property
def pull(self) -> bool:
"""Gets the pull of this GithubRepositorypermissions.
:return: The pull of this GithubRepositorypermissions.
:rtype: bool
"""
return self._pull
@pull.setter
def pull(self, pull: bool):
"""Sets the pull of this GithubRepositorypermissions.
:param pull: The pull of this GithubRepositorypermissions.
:type pull: bool
"""
self._pull = pull
@property
def _class(self) -> str:
"""Gets the _class of this GithubRepositorypermissions.
:return: The _class of this GithubRepositorypermissions.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this GithubRepositorypermissions.
:param _class: The _class of this GithubRepositorypermissions.
:type _class: str
"""
self.__class = _class
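A hedged round-trip sketch, assuming the stock generator's `util.deserialize_model` behavior (field values are made up):
perms = GithubRepositorypermissions.from_dict(
    {'admin': True, 'push': True, 'pull': False, '_class': 'GithubRepositorypermissions'})
print(perms.admin, perms.push, perms.pull)  # True True False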
|
USTB-LETTers/judger
|
exceptions.py
|
Python
|
mit
| 256
| 0
|
# -*- coding: utf-8 -*-
class CrazyBoxError(Exception):
"""
The base class for custom exceptions raised by crazybox.
"""
pass
class DockerError(Exception):
"""
    An error occurred with the underlying docker system.
"""
pass
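A minimal usage sketch (the message text is made up); note that DockerError subclasses Exception directly, so a handler for CrazyBoxError will not catch it:
try:
    raise DockerError('container failed to start')
except CrazyBoxError:
    pass  # not reached: DockerError is not a CrazyBoxError
except DockerError as e:
    print(e)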
|
cfc603/django-twilio-sms-models
|
tests/test_models.py
|
Python
|
bsd-3-clause
| 25,815
| 0
|
import datetime
from django.test import override_settings, TestCase
from django_twilio.models import Caller
from mock import Mock, patch, PropertyMock
from model_mommy import mommy
from twilio.rest.exceptions import TwilioRestException
from .mommy_recipes import caller_recipe, message_recipe, phone_number_recipe
from django_twilio_sms.models import (
Account,
Action,
ApiVersion,
Currency,
Error,
Message,
MessagingService,
PhoneNumber,
Response
)
class CommonTestCase(TestCase):
def string_test(self, model, test_value, **kwargs):
obj = mommy.make('django_twilio_sms.'+model, **kwargs)
self.assertEqual(test_value, obj.__str__())
class AccountModelTest(CommonTestCase):
def mock_account(self, owner_account_sid='test'):
return Mock(
friendly_name='test',
type='Full',
status='active',
sid='test',
owner_account_sid=owner_account_sid,
)
def test_unicode(self):
self.string_test('Account', 'abc', **{'sid': 'abc'})
def test_get_account_type_choice_account_type_display_equal_choice(self):
self.assertEqual(0, Account.get_account_type_choice('Trial'))
def test_get_account_type_choice_account_type_display_not_equal_choice(
self):
self.assertEqual(None, Account.get_account_type_choice('test'))
def test_get_status_choice_status_display_equal_choice(self):
self.assertEqual(0, Account.get_status_choice('active'))
def test_get_status_choice_status_display_not_equal_choice(self):
self.assertEqual(None, Account.get_status_choice('test'))
def test_get_or_create_if_not_account_sid_no_exception(self):
account_1 = mommy.make(Account, sid='test')
account_2 = Account.get_or_create(account=self.mock_account())
self.assertEqual(1, Account.objects.all().count())
self.assertEqual(account_1, account_2)
def test_get_or_create_if_account_sid_no_exception(self):
account_1 = mommy.make(Account, sid='test')
account_2 = Account.get_or_create(account_sid='test')
self.assertEqual(1, Account.objects.all().count())
self.assertEqual(account_1, account_2)
def test_get_or_create_if_not_account_sid_with_exception(self):
Account.get_or_create(account=self.mock_account())
self.assertEqual(1, Account.objects.all().count())
self.assertEqual('test', Account.objects.first().sid)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_get_or_create_if_account_sid_with_exception(self, twilio_account):
twilio_account.return_value = self.mock_account()
Account.get_or_create(account_sid='test')
self.assertEqual(1, Account.objects.all().count())
self.assertEqual('test', Account.objects.first().sid)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_twilio_account(self, twilio_account):
mock_account = self.mock_account()
twilio_account.return_value = mock_account
self.assertEqual(mock_account, Account.twilio_account)
@patch(
'django_twilio_sms.models.Account.twilio_account',
new_callable=PropertyMock
)
def test_sync_twilio_account_if_not_account_sids_not_equal(
self, twilio_account):
twilio_account.return_value = self.mock_account()
account = mommy.make(Account, sid='test')
account.sync_twilio_account()
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(None, account.owner_account_sid)
def test_sync_twilio_account_if_account_sids_not_equal(self):
account = mommy.make(Account, sid='test')
account.sync_twilio_account(self.mock_account())
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(None, account.owner_account_sid)
def test_sync_twilio_account_if_account_sids_equal(self):
owner_account = mommy.make(Account, sid='ownertest')
account = mommy.make(Account, sid='test')
account.sync_twilio_account(self.mock_account('ownertest'))
self.assertEqual('test', account.friendly_name)
self.assertEqual(Account.FULL, account.account_type)
self.assertEqual(Account.ACTIVE, account.status)
self.assertEqual(owner_account, account.owner_account_sid)
class ApiVersionModelTest(CommonTestCase):
def test_unicode(self):
        api_version = mommy.make(ApiVersion)
self.assertEqual(
'{}'.format(api_version.date), api_version.__str__()
)
def test_get_or_create_created_false(self):
api_version = mommy.make(ApiVersion)
self.assertEqual(
api_version, ApiVersion.get_or_create(api_version.date)
)
self.assertEqual(1, ApiVersion.objects.all().count())
def test_get_or_create_created_true(self):
date = datetime.date(2016, 1, 1)
        api_version = ApiVersion.get_or_create(date)
self.assertEqual(date, api_version.date)
self.assertEqual(1, ApiVersion.objects.all().count())
class CurrencyModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Currency', 'abc', **{'code': 'abc'})
def test_get_or_create_created_false(self):
currency = mommy.make(Currency)
self.assertEqual(currency, Currency.get_or_create(currency.code))
self.assertEqual(1, Currency.objects.all().count())
def test_get_or_create_created_true(self):
currency_code = 'USD'
currency = Currency.get_or_create(currency_code)
self.assertEqual(currency_code, currency.code)
self.assertEqual(1, Currency.objects.all().count())
class ErrorModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('Error', 'abc', **{'code': 'abc'})
def test_get_or_create_created_false(self):
error = mommy.make(Error)
self.assertEqual(error, Error.get_or_create(error.code, error.message))
self.assertEqual(1, Error.objects.all().count())
    def test_get_or_create_created_true(self):
error_code = '10015'
error_message = 'test'
error = Error.get_or_create(error_code, error_message)
self.assertEqual(error_code, error.code)
self.assertEqual(error_message, error.message)
self.assertEqual(1, Error.objects.all().count())
class MessageServiceModelTest(CommonTestCase):
def test_unicode(self):
self.string_test('MessagingService', 'abc', **{'sid': 'abc'})
def test_get_or_create_created_false(self):
messaging_service = mommy.make(MessagingService)
self.assertEqual(
messaging_service, MessagingService.get_or_create(
messaging_service.sid
)
)
self.assertEqual(1, MessagingService.objects.all().count())
def test_get_or_create_created_true(self):
sid = 'test'
messaging_service = MessagingService.get_or_create(sid)
self.assertEqual(sid, messaging_service.sid)
self.assertEqual(1, MessagingService.objects.all().count())
class PhoneNumberModelTest(CommonTestCase):
def test_unicode(self):
caller = caller_recipe.make()
self.string_test(
'PhoneNumber', '+19999999991', **{'caller': caller}
)
def test_get_or_create_is_instance(self):
phone_number = phone_number_recipe.make()
self.assertEqual(phone_number, PhoneNumber.get_or_create(phone_number))
def test_get_or_create_caller_created_false_phone_number_created_false(
self):
caller = caller_recipe.make()
phone_number = phone_number_recipe.make(caller=caller)
self.assertEqual(
phone_number, PhoneNumber.get_or_create(caller.phone_number)
)
self.assertEq
|
netoaraujjo/hal
|
clustering/agglomerative_clustering.py
|
Python
|
mit
| 1,946
| 0.023124
|
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering
from sklearn.externals.joblib import Memory
from .clustering import Clustering
class AgglomerativeClustering(Clustering):
"""docstring for AgglomerativeClustering."""
def __init__(self, data, n_clusters = 2, affinity = 'euclidean',
memory = Memory(cachedir = None), connectivity = None,
compute_full_tree = 'auto', linkage = 'ward',
pooling_func = np.mean):
super(AgglomerativeClustering, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.affinity = affinity
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.pooling_func = pooling_func
def execute(self):
"""Constroi o modelo de clusterizacao."""
        self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,
                                   affinity = self.affinity,
                                   memory = self.memory,
                                   connectivity = self.connectivity,
                                   compute_full_tree = self.compute_full_tree,
linkage = self.linkage,
pooling_func = self.pooling_func).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model
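A hedged usage sketch with toy data (`make_clusters` comes from the `Clustering` base class, which is not shown here):
import numpy as np
data = np.array([[0.0, 0.0], [0.1, 0.1], [5.0, 5.0], [5.1, 5.0]])
ac = AgglomerativeClustering(data, n_clusters=2)
ac.execute()
print(ac.labels_)    # e.g. array([0, 0, 1, 1])
print(ac.clusters_)  # dict keyed by cluster index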
|
toladata/TolaTables
|
silo/migrations/0029_auto_20170915_0810.py
|
Python
|
gpl-2.0
| 1,233
| 0.003244
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 15:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('silo', '0028_auto_20170913_0206'),
]
operations = [
migrations.AlterField(
model_name='silo',
name='workflowlevel1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
            model_name='tolauser',
name='workflowlevel1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
model_name='workflowlevel2',
name='activity_id',
field=models.IntegerField(blank=True, null=True, verbose_name=b'ID'),
),
migrations.AlterField(
model_name='workflowlevel2',
name='workflowlevel1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
]
|
damonmcminn/rosalind
|
boilerplate.py
|
Python
|
gpl-2.0
| 183
| 0
|
from os import path
rosalind_id = path.basename(__file__).split('.').pop(0)
dataset = "../datasets/rosalind_{}.txt".format(rosalind_id)
data = open(dataset, 'r').read().splitlines()
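For illustration: if this boilerplate lived in a file named revc.py (a hypothetical problem id), the names would resolve as:
# rosalind_id == 'revc'
# dataset     == '../datasets/rosalind_revc.txt'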
|
shawnadelic/shuup
|
shuup/xtheme/plugins/category_links.py
|
Python
|
agpl-3.0
| 2,994
| 0.001002
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Category
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
class CategoryLinksConfigForm(GenericPluginForm):
"""
A configuration form for the CategoryLinksPlugin
"""
def populate(self):
"""
A custom populate method to display category choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
self.fields["categories"] = forms.ModelMultipleChoiceField(
queryset=Category.objects.all_visible(customer=None),
required=False,
initial=self.plugin.config.get("categories", None),
)
def clean(self):
"""
A custom clean method to save category configuration information in a serializable form
"""
cleaned_data = super(CategoryLinksConfigForm, self).clean()
categories = cleaned_data.get("categories", [])
cleaned_data["categories"] = [category.pk for category in categories if hasattr(category, "pk")]
return cleaned_data
class CategoryLinksPlugin(TemplatedPlugin):
"""
A plugin for displaying links to visible categories on the shop front
"""
identifier = "category_links"
name = _("Category Links")
template_name = "shuup/xtheme/plugins/category_links.jinja"
editor_form_class = CategoryLinksConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("show_all_categories", forms.BooleanField(
label=_("Show all categories"),
required=False,
initial=True,
help_text=_("All categories are shown, even if not selected"),
)),
"categories",
]
def get_context_data(self, context):
"""
        A custom get_context_data method to return only the categories
        visible to the request customer.
"""
selected_categories = self.config.get("categories", [])
show_all_categories = self.config.get("show_all_categories", True)
request = context.get("request")
categories = Category.objects.all_visible(
customer=getattr(request, "customer"),
shop=getattr(request, "shop")
)
if not show_all_categories:
categories = categories.filter(id__in=selected_categories)
return {
"title": self.get_translated_value("title"),
"categories": categories,
}
|
jpetto/olympia
|
src/olympia/amo/views.py
|
Python
|
bsd-3-clause
| 5,726
| 0
|
import json
import os
import re
from django import http
from django.conf import settings
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import commonware.log
import waffle
from django_statsd.clients import statsd
from olympia import amo, api
from olympia.amo.utils import log_cef
from . import monitors
log = commonware.log.getLogger('z.amo')
monitor_log = commonware.log.getLogger('z.monitor')
jp_log = commonware.log.getLogger('z.jp.repack')
flash_re = re.compile(r'^(Win|(PPC|Intel) Mac OS X|Linux.+i\d86)|SunOs',
re.IGNORECASE)
quicktime_re = re.compile(
r'^(application/(sdp|x-(mpeg|rtsp|sdp))|audio/(3gpp(2)?|AMR|aiff|basic|'
r'mid(i)?|mp4|mpeg|vnd\.qcelp|wav|x-(aiff|m4(a|b|p)|midi|mpeg|wav))|'
r'image/(pict|png|tiff|x-(macpaint|pict|png|quicktime|sgi|targa|tiff))|'
r'video/(3gpp(2)?|flc|mp4|mpeg|quicktime|sd-video|x-mpeg))$')
java_re = re.compile(
r'^application/x-java-((applet|bean)(;jpi-version=1\.5|;'
r'version=(1\.(1(\.[1-3])?|(2|4)(\.[1-2])?|3(\.1)?|5)))?|vm)$')
wmp_re = re.compile(
r'^(application/(asx|x-(mplayer2|ms-wmp))|video/x-ms-(asf(-plugin)?|'
r'wm(p|v|x)?|wvx)|audio/x-ms-w(ax|ma))$')
@never_cache
@non_atomic_requests
def monitor(request, format=None):
# For each check, a boolean pass/fail status to show in the template
status_summary = {}
results = {}
checks = ['memcache', 'libraries', 'elastic', 'path',
'redis']
for check in checks:
with statsd.timer('monitor.%s' % check) as timer:
status, result = getattr(monitors, check)()
# state is a string. If it is empty, that means everything is fine.
status_summary[check] = {'state': not status,
'status': status}
results['%s_results' % check] = result
results['%s_timer' % check] = timer.ms
# If anything broke, send HTTP 500.
status_code = 200 if all(a['state']
for a in status_summary.values()) else 500
if format == '.json':
return http.HttpResponse(json.dumps(status_summary),
status=status_code)
ctx = {}
ctx.update(results)
ctx['status_summary'] = status_summary
return render(request, 'services/monitor.html', ctx, status=status_code)
@non_atomic_requests
def robots(request):
"""Generate a robots.txt"""
_service = (request.META['SERVER_NAME'] == settings.SERVICES_DOMAIN)
if _service or not settings.ENGAGE_ROBOTS:
template = "User-agent: *\nDisallow: /"
else:
template = render(request, 'amo/robots.html', {'apps': amo.APP_USAGE})
return HttpResponse(template, content_type="text/plain")
@non_atomic_requests
def contribute(request):
path = os.path.join(settings.ROOT, 'contribute.json')
return HttpResponse(open(path, 'rb'), content_type='application/json')
@non_atomic_requests
def handler403(request):
if request.path_info.startswith('/api/'):
# Pass over to handler403 view in api if api was targeted.
return api.views.handler403(request)
else:
return render(request, 'amo/403.html', status=403)
@non_atomic_requests
def handler404(request):
if request.path_info.startswith('/api/'):
# Pass over to handler404 view in api if api was targeted.
return api.views.handler404(request)
else:
return render(request, 'amo/404.html', status=404)
@non_atomic_requests
def handler500(request):
if request.path_info.startswith('/api/'):
# Pass over to handler500 view in api if api was targeted.
return api.views.handler500(request)
else:
return render(request, 'amo/500.html', status=500)
@non_atomic_requests
def csrf_failure(request, reason=''):
return render(request, 'amo/403.html',
{'because_csrf': 'CSRF' in reason}, status=403)
@non_atomic_requests
def loaded(request):
return http.HttpResponse('%s' % request.META['wsgi.loaded'],
content_type='text/plain')
@csrf_exempt
@require_POST
@non_atomic_requests
def cspreport(request):
"""Accept CSP reports and log them."""
report = ('blocked-uri', 'violated-directive', 'original-policy')
if not waffle.sample_is_active('csp-store-reports'):
return HttpResponse()
try:
v = json.loads(request.body)['csp-report']
# If possible, alter the PATH_INFO to contain the request of the page
# the error occurred on, spec: http://mzl.la/P82R5y
meta = request.META.copy()
meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
v = [(k, v[k]) for k in report if k in v]
log_cef('CSPViolation', 5, meta, username=request.user,
signature='CSPREPORT',
msg='A client reported a CSP violation',
cs6=v, cs6Label='ContentPolicy')
except (KeyError, ValueError), e:
log.debug('Exception in CSP report: %s' % e, exc_info=True)
return HttpResponseBadRequest()
return HttpResponse()
@non_atomic_requests
def version(request):
path = os.path.join(settings.ROOT, 'version.json')
return HttpResponse(open(path, 'rb'), content_type='application/json')
@non_atomic_requests
def plugin_check_redirect(request):
return http.HttpResponseRedirect('%s?%s' % (
settings.PFS_URL, iri_to_uri(request.META.get('QUERY_STRING', ''))))
|
MaxTyutyunnikov/lino
|
lino/utils/config.py
|
Python
|
gpl-3.0
| 11,277
| 0.018003
|
# -*- coding: UTF-8 -*-
## Copyright 2009-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
This creates a list `config_dirs` of all
configuration directories by looping through :setting:`INSTALLED_APPS`
and taking those whose source directory has a :xfile:`config` subdir.
..
DO NOT import this module at the global level of a models module
because importing it will fill the config dirs, i.e. will try to import
every installed `models` module.
The mechanism in this module emulates the behaviour of Django's
(or Jinja's) template loaders.
It was written before I discovered Jinja and has seen less use since.
But we still need it to find the `.odt` files for
:class:`AppyBuildMethod <lino.mixins.printable.AppyBuildMethod>`.
This task cannot be done using Jinja because
Jinja's `get_template` method returns a `Template`,
and Jinja templates don't know their filename,
which is the only thing needed by
:class:`AppyBuildMethod <lino.mixins.printable.AppyBuildMethod>`.
One possibility might be to write a special Jinja Template class...
The order in :setting:`INSTALLED_APPS` should be: first
`django.contrib.*`, then ``lino``, then `lino.modlib.*`
and then `lino.projects.pcsw`.
That is, from the general to the specific. The config dirs are
traversed in the reverse order of this list (and the search stops
at the first match): first the optional local `config_dir`,
then `lino.projects.pcsw`, then the various `lino.modlib.*` and so on.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import os
from os.path import join, abspath, dirname, normpath, isdir
import sys
import codecs
import glob
from fnmatch import fnmatch
from django.utils.importlib import import_module
from django.conf import settings
from lino import ad
from lino.utils import iif
SUBDIR_NAME = 'config' # we might change this to "templates"
class ConfigDir:
"""
A configuration directory is a directory that may contain configuration files.
"""
def __init__(self,name,writeable):
self.name = os.path.abspath(name)
self.writeable = writeable
def __repr__(self):
return "ConfigDir %s" % self.name + iif(self.writeable," (writeable)","")
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
config_dirs = []
for pth in settings.SITE.get_settings_subdirs(SUBDIR_NAME):
config_dirs.append(ConfigDir(pth.decode(fs_encoding),False))
def add_config_dir(name,mod):
pth = join(dirname(mod.__file__),SUBDIR_NAME)
if isdir(pth):
config_dirs.append(ConfigDir(pth.decode(fs_encoding),False))
settings.SITE.for_each_app(add_config_dir)
#~ for app in settings.INSTALLED_APPS:
#~ app_mod = import_module(app)
#~ app = getattr(app_mod,'App',None)
#~ if isinstance(app,ad.App) and app.extends:
#~ parent = import_module(app.extends)
#~ add_config_dir(parent)
#~ add_config_dir(app_mod)
LOCAL_CONFIG_DIR = None
#~ if settings.SITE.project_dir != settings.SITE.source_dir:
if settings.SITE.is_local_project_dir:
p = join(settings.SITE.project_dir,SUBDIR_NAME)
if isdir(p):
LOCAL_CONFIG_DIR = ConfigDir(p,True)
config_dirs.append(LOCAL_CONFIG_DIR)
config_dirs.reverse()
config_dirs = tuple(config_dirs)
#~ logger.debug('config_dirs:\n%s', '\n'.join([repr(cd) for cd in config_dirs]))
#~ for app_name in settings.INSTALLED_APPS:
#~ app = import_module(app_name)
#~ fn = getattr(app,'__file__',None)
#~ if fn is not None:
#~ pth = join(dirname(fn),'config')
#~ if isdir(pth):
#~ config_dirs.append(ConfigDir(pth,False))
#~ LOCAL_CONFIG_DIR = ConfigDir(join(settings.PROJECT_DIR,'config'),True)
#~ config_dirs.append(LOCAL_CONFIG_DIR)
def find_config_file(fn,group=''):
if os.path.isabs(fn):
return fn
if group:
prefix = join(*(group.split('/')))
else:
prefix = ''
for cd in config_dirs:
ffn = join(cd.name,prefix,fn)
if os.path.exists(ffn):
return ffn
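A hedged usage sketch (the filename and group are made up):
# Full path of the file in the first config dir that contains it, else None.
ffn = find_config_file('Default.odt', group='appy')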
def find_config_files(pattern,group=''):
"""
Returns a dict of filename -> config_dir entries for
each config file on this site that matches the pattern.
Loops through `config_dirs` and collects matching files.
When more than one file of the same name exists in different
applications it gets overridden by later apps.
`group` is e.g. '','foo', 'foo/bar',...
"""
if group:
prefix = os.path.sep + join(*(group.split('/')))
#~ if not group.endswith('/'):
#~ group += '/'
else:
prefix = ''
files = {}
for cd in config_dirs:
pth = cd.name + prefix
#~ print 'find_config_files() discover', pth, pattern
if isdir(pth):
for fn in os.listdir(pth):
if fnmatch(fn,pattern):
files.setdefault(fn,cd)
#~ if not files.has_key(fn):
#~ files[fn] = cd
#~ else:
#~ print 'find_config_files() not a directory:', pth
return files
def find_template_config_files(template_ext,templates_group):
"""
find_config_files and ignore babel variants:
e.g. ignore "foo_fr.html" if "foo.html" exists
but don't ignore "my_template.html"
"""
files = find_config_files('*' + template_ext,templates_group)
l = []
for name in files.keys():
basename = name[:-len(template_ext)]
chunks = basename.split('_')
if len(chunks) > 1:
basename = '_'.join(chunks[:-1])
if files.has_key(basename + template_ext):
continue
l.append(name)
l.sort()
if not l:
logger.warning("email_template_choices() : no matches for (%r,%r)",
'*' + template_ext,templates_group)
return l
def load_config_files(loader,pattern,group=''):
"""
Naming conventions for :xfile:`*.dtl` files are:
- the first detail is called appname.Model.dtl
- If there are more Details, then they are called
appname.Model.2.dtl, appname.Model.3.dtl etc.
The `sort()` below must remove the filename extension (".dtl")
    because otherwise the first Detail would come last.
"""
    files = find_config_files(pattern,group).items()
def fcmp(a,b):
return cmp(a[0][:-4],b[0][:-4])
files.sort(fcmp)
prefix = group.replace("/",os.sep)
for filename,cd in files:
filename = join(prefix,filename)
ffn = join(cd.name,filename)
logger.debug("Loading %s...",ffn)
s = codecs.open(ffn,encoding='utf-8').read()
loader(s,cd,filename)
class Configured(object):
#~ filename = None
#~ cd = None # ConfigDir
def __init__(self,filename=None,cd=None):
if filename is not None:
assert not os.pardir in filename
#~ assert not os.sep in filename
if cd is None:
cd = LOCAL_CONFIG_DIR
self.filename = filename
self.cd = cd
self.messages = set()
def save_config(self):
if not self.filename:
raise IOError('Cannot save unnamed %s' % self)
        if self.cd is None:
|
hrroon/literoticapi
|
test/author_test.py
|
Python
|
gpl-3.0
| 290
| 0.006897
|
import unittest
from literoticapi.author import *
class testStory(unittest.TestCase):
def setUp(self):
        self.author = Author(868670)
def testGetSeriesAndNonSeries(self):
assert len(self.author.get_stories()) >= 132
if __name__ == "__main__":
unittest.main()
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/testfixtures/tests/test_wrap.py
|
Python
|
agpl-3.0
| 5,995
| 0.012677
|
# Copyright (c) 2008 Simplistix Ltd
# See license.txt for license details.
from mock import Mock
from testfixtures import wrap,compare
from unittest import TestCase,TestSuite,makeSuite
class TestWrap(TestCase):
def test_wrapping(self):
m = Mock()
@wrap(m.before,m.after)
def test_function(r):
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (), {}),
('test', (), {}),
('after', (), {})
])
def test_wrapping_only_before(self):
before = Mock()
@wrap(before)
def test_function():
return 'something'
self.assertFalse(before.called)
compare(test_function(),'something')
compare(before.call_count,1)
def test_wrapping_wants_return(self):
m = Mock()
m.before.return_value = 'something'
@wrap(m.before,m.after)
def test_function(r):
m.test(r)
return 'r:'+r
compare(m.method_calls,[])
compare(test_function(),'r:something')
compare(m.method_calls,[
('before', (), {}),
('test', ('something',), {}),
('after', (), {})
])
def test_wrapping_wants_arguments(self):
# This only works in python 2.5+, for
# earlier versions, you'll have to come
# up with your own `partial` class...
from functools import partial
m = Mock()
@wrap(partial(m.before,1,x=2),partial(m.after,3,y=4))
def test_function(r):
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (1,), {'x':2}),
('test', (), {}),
('after', (3,), {'y':4})
])
def test_multiple_wrappers(self):
m = Mock()
@wrap(m.before2,m.after2)
@wrap(m.before1,m.after1)
def test_function():
m.test_function()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (), {}),
('after2', (), {}),
('after1', (), {}),
])
def test_multiple_wrappers_wants_return(self):
m = Mock()
m.before1.return_value=1
m.before2.return_value=2
@wrap(m.before2,m.after2)
        @wrap(m.before1,m.after1)
def test_function(r1,r2):
m.test_function(r1,r2)
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (1,2), {}),
('after2', (), {}),
            ('after1', (), {}),
])
def test_multiple_wrappers_only_want_first_return(self):
m = Mock()
m.before1.return_value=1
@wrap(m.before2,m.after2)
@wrap(m.before1,m.after1)
def test_function(r1):
m.test_function(r1)
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before1', (), {}),
('before2', (), {}),
('test_function', (1,), {}),
('after2', (), {}),
('after1', (), {}),
])
def test_wrap_method(self):
m = Mock()
class T:
@wrap(m.before,m.after)
def method(self):
m.method()
T().method()
compare(m.method_calls,[
('before', (), {}),
('method', (), {}),
('after', (), {})
])
def test_wrap_method_wants_return(self):
m = Mock()
m.before.return_value = 'return'
class T:
@wrap(m.before,m.after)
def method(self,r):
m.method(r)
T().method()
compare(m.method_calls,[
('before', (), {}),
('method', ('return',), {}),
('after', (), {})
])
def test_wrapping_different_functions(self):
m = Mock()
@wrap(m.before1,m.after1)
def test_function1():
m.something1()
return 'something1'
@wrap(m.before2,m.after2)
def test_function2():
m.something2()
return 'something2'
compare(m.method_calls,[])
compare(test_function1(),'something1')
compare(m.method_calls,[
('before1', (), {}),
('something1', (), {}),
('after1', (), {})
])
compare(test_function2(),'something2')
compare(m.method_calls,[
('before1', (), {}),
('something1', (), {}),
('after1', (), {}),
('before2', (), {}),
('something2', (), {}),
('after2', (), {})
])
def test_wrapping_local_vars(self):
m = Mock()
@wrap(m.before,m.after)
def test_function():
something = 1
from testfixtures.tests import sample2
m.test()
return 'something'
compare(m.method_calls,[])
compare(test_function(),'something')
compare(m.method_calls,[
('before', (), {}),
('test', (), {}),
('after', (), {})
])
def test_wrapping__name__(self):
m = Mock()
@wrap(m.before,m.after)
def test_function():
pass # pragma: no cover
compare(test_function.__name__,'test_function')
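Outside a test suite the decorator reads like this; a minimal sketch, assuming only the `wrap` import (the setup/teardown functions are made up):
from testfixtures import wrap
def open_db():
    return 'db-handle'   # hypothetical setup; wrap passes its return value in
def close_db():
    pass                 # hypothetical teardown
@wrap(open_db, close_db)
def do_work(db):
    print(db)
do_work()  # runs open_db, then the body with db='db-handle', then close_db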
|
arizvisa/syringe
|
lib/ia32/_optable.py
|
Python
|
bsd-2-clause
| 2,339
| 0
|
OperandLookupTable = b''.join([
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x81\xbd\x81\xbd\x41\x7d\x00\x00\x81\xbd\x81\xbd\x41\x7d\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\xbf\x82\x00\x00\x00\x00\x7d\xfd\x41\xc1\x00\x00\x00\x00'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41'
b'\xc1\xfd\xc1\xc1\x81\xbd\x81\xbd\x81\xbd\x81\xbd\x82\x88\x82\xbd'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7a\x00\x00\x00\x00\x00'
b'\x41\x7d\x41\x7d\x00\x00\x00\x00\x41\x7d\x00\x00\x00\x00\x00\x00'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d'
b'\xc1\xc1\x42\x00\xba\xba\xc1\xfd\x41\x00\x42\x00\x00\x41\x00\x00'
b'\x81\xbd\x81\xbd\x41\x41\x00\x00\x84\x84\x84\x84\x84\x84\x82\x82'
b'\x41\x41\x41\x41\x41\x41\x41\x41\x7d\x7d\x7a\x41\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xc1\xfd\x00\x00\x00\x00\x00\x00\x81\xbd'
    b'\x82\x84\x82\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbd\x00\xc1'
b'\x84\x84\x88\x88\x88\x88\x88\x88\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd'
b'\x84\x84\x84\x84\x84\x00\x84\x00\x84\x84\x88\x84\x84\x84\x84\x84'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd\xbd'
b'\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x90\x84\x84\x84\x84'
b'\x84\x84\x84\x84\x84\x84\x84\x88\x88\x88\x88\x88\x90\x90\x84\x88'
    b'\xc1\xc1\xc1\xc1\x88\x88\x88\x00\x84\x84\x00\x00\x84\x84\x88\x88'
b'\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d\x7d'
b'\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81\x81'
b'\x00\x00\x00\xbd\xc1\xbd\x00\x00\x00\x00\x00\xbd\xc1\xbd\x84\xbd'
b'\x81\xbd\xba\xbd\xba\xba\x81\x82\x00\x00\xc1\xbd\xbd\xbd\x81\x82'
b'\x81\xbd\xc1\xbc\xc1\xc1\xc1\x88\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88\x88\x84\x88\x88\x88\x88'
b'\x88\x88\x88\x88\x88\x88\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88'
b'\x90\x88\x88\x88\x88\x84\x88\x88\x88\x88\x88\x88\x88\x88\x88\x00'
])
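A hedged indexing sketch (assuming, as the name suggests, the table is indexed by opcode byte; the meaning of each entry byte is defined elsewhere in this package):
info = OperandLookupTable[0x0f]  # hypothetical opcode; yields one metadata byte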
|
BD777/WindPythonToy
|
comm/network.py
|
Python
|
apache-2.0
| 7,827
| 0.004999
|
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup, UnicodeDammit
import time
import os
import re
import log
import tools
class Get(object):
# timeout, retry_interval -> seconds
def __init__(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath='.'):
self.log = log.Log()
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
self.proxies = proxies
self.headers = headers
if download_file is None:
self.download_file = False
else:
self.download_file = download_file
self.savepath = savepath
self.download_result = None
self.__run()
def config(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath=''):
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
if len(proxies) > 0:
self.proxies = proxies
if not download_file is None:
self.download_file = download_file
if len(headers) > 0:
self.headers = headers
if savepath != '':
self.savepath = savepath
self.__run()
def __run(self):
if self.url != '':
self.s = requests.Session()
self.__get()
if self.download_file:
self.__download()
else:
self.__soup()
def __get(self):
if self.url == '':
return False
self.log.info('start get [%s]'%self.url)
self.r = None
for i in range(self.retry):
try:
self.r = self.s.get(self.url, timeout=self.timeout, proxies=self.proxies, headers=self.headers)
break
except Exception as e:
self.log.error( '[retry %d] get [%s] fail. except [%s]'%(i+1, self.url, str(e)) )
time.sleep(self.retry_interval)
if self.r is None:
self.log.error('get [%s] fail' % self.url)
return False
self.log.info('end, get [%s]' % self.url)
return True
def __soup(self):
if not self.r:
self.log.error('self.r is None, cannot get soup. url [%s]' % self.url)
return False
if self.download_file:
self.log.info('to download url [%s], should not get soup' % self.url)
return False
self.soup = None
try:
self.soup = BeautifulSoup(self.r.content, 'html.parser')
return True
except Exception as e:
            self.log.error('construct BeautifulSoup fail, url [%s], except [%s]' % (self.url, str(e)))
return False
def __download(self):
self.log.info('start download [%s]' % self.url)
if self.r is None:
self.log.error('self.r is None. download fail. url [%s]' % self.url)
return False
filepath = self.savepath
tools.mkdir(filepath)
r = self.r
url = self.url
        # Get content-length from the response headers
tot_size = 0
try:
tot_size = int( r.headers['content-length'] )
except Exception as e:
self.log.error('cannot get content-length, url [%s], headers [%s]' % (url, str(r.headers)) )
# get file name
filename = self.__get_file_name()
chunk_size = 4096
flag = 3
# retry if size is not right.
for i in range(3):
now_size = 0
try:
#print filename, type(filename)
with open( os.path.join(self.savepath, filename), 'wb' ) as f:
for chunk in r.iter_content(chunk_size):
now_size = now_size + len(chunk)
f.write(chunk)
except Exception as e:
                self.log.error(u'something wrong. url [%s], exception [%s], filename [%s], retry [%d]' % (url, unicode(e), filename, i+1) )
flag = 3
if tot_size == 0:
                self.log.info(u'failed to get file size, cannot verify. received size [%d], filename [%s], url [%s]' % (now_size, filename, url) )
flag = 0
break
if now_size != tot_size:
                self.log.error('file size mismatch. received size [%d], expected size [%d], filename [%s], url [%s], retry [%d]' % (now_size, tot_size, filename.encode('utf8'), url, i+1))
flag = 4
else:
flag = 0
break
time.sleep(1)
self.log.info('end download [%s]' % self.url)
self.download_result = {'errno': flag, 'filename': filename}
return self.download_result
def __get_file_name(self):
        # Derive the file suffix from the content-type header
r = self.r
url = self.url
        suf = ''
        filename = None  # initialize; referenced in the Content-Disposition branch below
try:
ct = r.headers['content-type']
ctl = ct.split(';')
for i in ctl:
try:
                    suf = constant.CONTENT_TYPE_REVERSE[i.strip()]  # NOTE: `constant` is never imported here, so this always falls through to the except
except Exception as e:
pass
except Exception as e:
self.log.error('cannot get suffix, url[%s], headers [%s]' % (url, str(r.headers)))
        # Try Content-Disposition; prefer the filename and suffix it carries
try:
content_disposition = r.headers['Content-Disposition']
fntmp = re.findall(r'filename=[\"\'](.*?)[\"\']', content_disposition)[0]
pos = fntmp.rfind('.')
if pos > -1:
fn = fntmp[:pos]
suf = fntmp[pos:]
else:
fn = fntmp
if filename is None:
filename = fn
dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
filename = dammit.unicode_markup
except Exception as e:
pass
        # Fall back to the filename and suffix from the URL
pos = url.rfind("/") + 1
if pos >= len(url) or pos == -1:
fn = str(time.time()).replace(".", "")
else:
            fn = url[pos:]
pos = fn.rfind('.')
if pos >= len(fn) or pos == -1:
pass
else:
if suf == '':
suf = fn[pos:]
try:
fn = fn[:pos]
except Exception as e:
pass
filename = fn
dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
filename = dammit.unicode_markup
        # Check for an existing file with the same name and de-duplicate
i = 0
while True:
if i == 0:
if not os.path.exists( os.path.join(self.savepath, filename+suf) ):
break
else:
if not os.path.exists( os.path.join(self.savepath, filename+("(%d)"%i)+suf ) ):
filename = filename + ("(%d)"%i)
break
i = i + 1
filename = filename + suf
        # Make sure the filename is legal on Windows
filename = tools.replaceBanCharacter(filename)
return filename
def download(self, url, savepath=''):
self.url = url
self.download_file = True
if savepath != '':
self.savepath = savepath
return self.__download()
def get(self):
return self.r
def soup(self):
return self.soup
def getsoup(self):
return (self.r, self.soup)
def clear_headers(self):
self.headers = {}
def clear_proxies(self):
self.proxies = {}
def stop(self):
self.log.stop()
def __del__(self):
self.stop()
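A hedged usage sketch (the URL is made up; `log` and `tools` are sibling modules not shown here):
g = Get(url='http://example.com', timeout=5, retry=2)
resp, soup = g.getsoup()   # the requests Response and its BeautifulSoup
if soup is not None:
    print(soup.title)
g.stop()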
|
nirmeshk/oh-mainline
|
vendor/packages/django-debug-toolbar/debug_toolbar/panels/profiling.py
|
Python
|
agpl-3.0
| 4,968
| 0.000201
|
from __future__ import absolute_import, division, unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from debug_toolbar.panels import Panel
from debug_toolbar import settings as dt_settings
import cProfile
from pstats import Stats
from colorsys import hsv_to_rgb
import os
class DjangoDebugToolbarStats(Stats):
__root = None
def get_root_func(self):
if self.__root is None:
for func, (cc, nc, tt, ct, callers) in self.stats.items():
if len(callers) == 0:
self.__root = func
break
return self.__root
class FunctionCall(object):
def __init__(self, statobj, func, depth=0, stats=None,
id=0, parent_ids=[], hsv=(0, 0.5, 1)):
self.statobj = statobj
self.func = func
if stats:
self.stats = stats
else:
self.stats = statobj.stats[func][:4]
self.depth = depth
self.id = id
self.parent_ids = parent_ids
self.hsv = hsv
def parent_classes(self):
return self.parent_classes
def background(self):
r, g, b = hsv_to_rgb(*self.hsv)
return 'rgb(%f%%,%f%%,%f%%)' % (r * 100, g * 100, b * 100)
def func_std_string(self): # match what old profile produced
func_name = self.func
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
file_name, line_num, method = self.func
idx = file_name.find('/site-packages/')
if idx > -1:
file_name = file_name[(idx + 14):]
file_path, file_name = file_name.rsplit(os.sep, 1)
return mark_safe(
'<span class="path">{0}/</span>'
'<span class="file">{1}</span>'
' in <span class="func">{3}</span>'
'(<span class="lineno">{2}</span>)'.format(
file_path,
file_name,
line_num,
method))
def subfuncs(self):
i = 0
h, s, v = self.hsv
count = len(self.statobj.all_callees[self.func])
for func, stats in self.statobj.all_callees[self.func].items():
i += 1
h1 = h + (i / count) / (self.depth + 1)
if stats[3] == 0:
s1 = 0
else:
s1 = s * (stats[3] / self.stats[3])
yield FunctionCall(self.statobj,
func,
self.depth + 1,
stats=stats,
id=str(self.id) + '_' + str(i),
parent_ids=self.parent_ids + [self.id],
hsv=(h1, s1, 1))
def count(self):
return self.stats[1]
def tottime(self):
return self.stats[2]
def cumtime(self):
cc, nc, tt, ct = self.stats
return self.stats[3]
def tottime_per_call(self):
cc, nc, tt, ct = self.stats
if nc == 0:
return 0
return tt / nc
def cumtime_per_call(self):
cc, nc, tt, ct = self.stats
if cc == 0:
return 0
return ct / cc
def indent(self):
return 16 * self.depth
class ProfilingPanel(Panel):
"""
Panel that displays profiling information.
"""
title = _("Profiling")
template = 'debug_toolbar/panels/profiling.html'
    def process_view(self, request, view_func, view_args, view_kwargs):
self.profiler = cProfile.Profile()
args = (request,) + view_args
return self.profiler.runcall(view_func, *args, **view_kwargs)
def add_node(self, func_list, func, max_depth, cum_time=0.1):
func_list.append(func)
func.has_subfuncs = False
if func.depth < max_depth:
for subfunc in func.subfuncs():
if subfunc.stats[3] >= cum_time:
func.has_subfuncs = True
self.add_node(func_list, subfunc, max_depth, cum_time=cum_time)
def process_response(self, request, response):
if not hasattr(self, 'profiler'):
return None
# Could be delayed until the panel content is requested (perf. optim.)
self.profiler.create_stats()
self.stats = DjangoDebugToolbarStats(self.profiler)
self.stats.calc_callees()
root = FunctionCall(self.stats, self.stats.get_root_func(), depth=0)
func_list = []
self.add_node(func_list,
root,
dt_settings.CONFIG['PROFILER_MAX_DEPTH'],
root.stats[3] / 8)
self.record_stats({'func_list': func_list})
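For context, a hedged settings sketch for enabling this panel in a project (the list is trimmed to the single entry; normally you would keep the default panels too):
# settings.py (sketch)
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.profiling.ProfilingPanel',
]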
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/pip/_internal/models/scheme.py
|
Python
|
apache-2.0
| 770
| 0
|
"""
For types associated with installation schemes.
For a general overview of available schemes and their context, see
https://docs.python.org/3/install/index.html#alternate-installation.
"""
SCHEME_KEYS = ['platlib', 'purelib', 'headers', 'scripts', 'data']
class Scheme:
"""A Scheme holds paths which are used as the base directories for
artifacts associated with a Python package.
"""
__slots__ = SCHEME_KEYS
def __init__(
self,
        platlib,  # type: str
purelib, # type: str
headers, # type: str
scripts, # type: str
data, # type: str
):
self.platlib = platlib
self.purelib = purelib
self.headers = headers
self.scripts = scripts
self.data = data
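A hedged construction sketch (the paths are made up):
scheme = Scheme(
    platlib='/venv/lib/python3.9/site-packages',
    purelib='/venv/lib/python3.9/site-packages',
    headers='/venv/include/mypkg',
    scripts='/venv/bin',
    data='/venv',
)
print(scheme.scripts)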
|
luckyleap/NLP_Projects
|
MSChallenge Ngrams/MSChallengeNGrams.py
|
Python
|
mit
| 908
| 0.030837
|
# David Tsui 2.9.2016
# Human Languages and Technologies
# Dr. Rebecca Hwa
from Ngrams import *
#TRAIN
train_file = open("tokenized_train.txt","r")
train_str = train_file.read();
tri = Trigram(1)
print "Begin training vocabul
|
ary----------------------"
tri.trainVocabulary(train_str)
#tri.printVocabulary()
#Takes in questions for development
dev_file = open("Holmes.lm_format.questions.txt")
output_file = open("holmes_output.txt","w+")
print "Begin calculating perplexity-------
|
---------------"
for i, line in enumerate(dev_file):
#Clean text by removing all quotations
line = line[:-1]
exclude = set(string.punctuation)
s = ''.join(ch for ch in line if ch not in exclude)
s = s.lower()
#Lambda factors
lu = .3
lb = .3
lt = .4
print "Question %d complete" %(i)
perplexity = tri.getPerWordPerplexityInterpolated(s,lu,lb,lt)
newline = "%s\t%f\n"%(line,perplexity)
output_file.write(newline)
|
lino-framework/lino
|
lino/modlib/system/models.py
|
Python
|
bsd-2-clause
| 4,683
| 0.002776
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.conf import settings
from django.utils.encoding import force_str
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.apps import apps ; get_models = apps.get_models
from lino.api import dd, rt
from lino.core import actions
from lino.core.utils import full_model_name
from lino.core.roles import SiteStaff
from lino.modlib.printing.choicelists import BuildMethods
from lino.modlib.checkdata.choicelists import Checker
# import them here to have them on rt.models.system:
from .choicelists import YesNo, Genders, PeriodEvents
from .mixins import Lockable
class BuildSiteCache(dd.Action):
label = _("Rebuild site cache")
url_action_name = "buildjs"
def run_from_ui(self, ar):
settings.SITE.kernel.default_renderer.build_site_cache(True)
return ar.success(
"""\
Seems that it worked. Refresh your browser.
<br>
Note that other users might experience side effects because
of the unexpected .js update, but there are no known problems so far.
Please report any anomalies.""",
alert=_("Success"))
class SiteConfigManager(models.Manager):
def get(self, *args, **kwargs):
return settings.SITE.site_config
class SiteConfig(dd.Model):
class Meta(object):
abstract = dd.is_abstract_model(__name__, 'SiteConfig')
verbose_name = _("Site configuration")
objects = SiteConfigManager()
real_objects = models.Manager()
default_build_method = BuildMethods.field(
verbose_name=_("Default build method"),
blank=True, null=True)
simulate_today = models.DateField(
_("Simulated date"), blank=True, null=True)
site_company = dd.ForeignKey(
"contacts.Company",
blank=True, null=True,
verbose_name=_("Site owner"),
related_name='site_company_sites')
def __str__(self):
return force_str(_("Site Parameters"))
def update(self, **kw):
"""
Set some field of the SiteConfig object and store it to the
database.
"""
# print("20180502 update({})".format(kw))
for k, v in kw.items():
if not hasattr(self, k):
raise Exception("SiteConfig has no attribute
|
%r" % k)
setattr(self, k, v)
self.full_clean()
self.save()
def save(self, *args, **kw):
# print("20180502 save() {}".format(dd.obj2str(self, True)))
super(SiteConfig, self).save(*args, **kw)
        settings.SITE.clear_site_config()
def my_handler(sender, **kw):
# print("20180502 {} my_handler calls clear_site_config()".format(
# settings.SITE))
settings.SITE.clear_site_config()
#~ kw.update(sender=sender)
# dd.database_connected.send(sender)
#~ dd.database_connected.send(sender,**kw)
from django.test.signals import setting_changed
from lino.core.signals import testcase_setup
setting_changed.connect(my_handler)
testcase_setup.connect(my_handler)
dd.connection_created.connect(my_handler)
models.signals.post_migrate.connect(my_handler)
class SiteConfigs(dd.Table):
model = 'system.SiteConfig'
required_roles = dd.login_required(SiteStaff)
# default_action = actions.ShowDetail()
#~ has_navigator = False
hide_navigator = True
allow_delete = False
# hide_top_toolbar = True
#~ can_delete = perms.never
detail_layout = dd.DetailLayout("""
default_build_method
# lino.ModelsBySite
""", window_size=(60, 'auto'))
@classmethod
def get_default_action(cls):
return cls.detail_action
do_build = BuildSiteCache()
# if settings.SITE.user_model == 'users.User':
# dd.inject_field(settings.SITE.user_model,
# 'user_type', UserTypes.field())
# dd.inject_field(settings.SITE.user_model, 'language', dd.LanguageField())
class BleachChecker(Checker):
verbose_name = _("Find unbleached html content")
model = dd.Model
def get_checkable_models(self):
for m in super(BleachChecker, self).get_checkable_models():
if len(m._bleached_fields):
yield m
def get_checkdata_problems(self, obj, fix=False):
t = tuple(obj.fields_to_bleach())
if len(t):
fldnames = ', '.join([f.name for f, old, new in t])
yield (True, _("Fields {} have unbleached content.").format(fldnames))
if fix:
obj.before_ui_save(None, None)
obj.full_clean()
obj.save()
BleachChecker.activate()
|
FEniCS/ufl
|
demo/ExplicitConvection.py
|
Python
|
lgpl-3.0
| 332
| 0
|
#
# Author: Martin Sandve Alnes
# Date: 2008-10-03
#
from ufl import (Coefficient, TestFunction, TrialFunction, VectorElement, dot,
dx, grad, triangle)
element = VectorElement("Lagrange", triangle, 1)
u = TrialFunction(element)
v = TestFunction(element)
w = Coefficient(element)
a = dot(dot(w, grad(u)), v) * dx
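For reference (a sketch of the math, not part of the original file): the form above is the weak statement of explicit convection with the known velocity w supplied as a Coefficient,
a(u, v) = \int_\Omega ((w \cdot \nabla) u) \cdot v \, dx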
|
hhucn/netsec-uebungssystem
|
netsecus/korrekturserver.py
|
Python
|
mit
| 4,455
| 0.000673
|
from __future__ import unicode_literals
import logging
import os
import tornado.ioloop
import tornado.web
from .webhandler.DownloadHandler import DownloadHandler
from .webhandler.OverviewHandler import OverviewHandler
from .webhandler.GradingPreviewMailsHandler import GradingPreviewMailsHandler
from .webhandler.GradingSendMailsHandler import GradingSendMailsHandler
from .webhandler.SheetCreateHandler import SheetCreateHandler
from .webhandler.SheetDeleteHandler import SheetDeleteHandler
from .webhandler.SheetEditEndHandler import SheetEditEndHandler
from .webhandler.SheetHandler import SheetHandler
from .webhandler.SheetRestoreHandler import SheetRestoreHandler
from .webhandler.SheetsHandler import SheetsHandler
from .webhandler.StudentHandler import StudentHandler
from .webhandler.StudentsHandler import StudentsHandler
from .webhandler.SubmissionAssignHandler import SubmissionAssignHandler
from .webhandler.SubmissionDetailHandler import SubmissionDetailHandler
from .webhandler.SubmissionGradeAllHandler import SubmissionGradeAllHandler
from .webhandler.SubmissionsListAllHandler import SubmissionsListAllHandler
from .webhandler.SubmissionsListCurrentHandler import SubmissionsListCurrentHandler
from .webhandler.SubmissionsListUnfinishedHandler import SubmissionsListUnfinishedHandler
from .webhandler.SubmissionStudentSheetHandler import SubmissionStudentSheetHandler
from .webhandler.TaskCreateHandler import TaskCreateHandler
from .webhandler.TaskDeleteHandler import TaskDeleteHandler
from .webhandler.TaskEditHandler import TaskEditHandler
from .webhandler.UpdateDatabaseHandler import UpdateDatabaseHandler
from .webhandler.contact import (
ContactCraftHandler,
ContactSendHandler,
ContactAllCraftHandler,
ContactAllSendHandler,
)
from .webhandler.merge import (
MergeSelectHandler,
MergePreviewHandler,
MergeHandler,
)
from . import database
class KorrekturApp(tornado.web.Application):
realm = 'netsec Uebungsabgabesystem'
def __init__(self, config, handlers):
super(KorrekturApp, self).__init__(handlers)
for handler in handlers:
handler[1].config = config
self.config = config
@property
def users(self):
return self.config('korrektoren')
def web_main(config):
try:
mainloop(config)
except BaseException as e:
logging.exception(e)
raise
def mainloop(config):
application = KorrekturApp(config, [
(r"/", OverviewHandler),
(r"/sheets", SheetsHandler),
(r"/sheet/create", SheetCreateHandler),
(r"/sheet/([0-9]+)/delete", SheetDeleteHandler),
(r"/sheet/([0-9]+)/editend", SheetEditEndHandler),
(r"/sheet/([0-9]+)/restore", SheetRestoreHandler),
(r"/sheet/([0-9]+)/task/create", TaskCreateHandler),
(r"/task/([0-9]+)/edit", TaskEditHandler),
(r"/task/([0-9]+)/delete", TaskDeleteHandler),
(r"/sheet/.*", SheetHandler),
(r"/students", StudentsHandler),
(r"/student/(.*)", StudentHandler),
(r"/submissions", SubmissionsListCurrentHandler),
(r"/submissions/all", SubmissionsListAllHandler),
(r"/submissions/unfinished", SubmissionsListUnfinishedHandler),
(r"/submission/([0-9]+)", SubmissionDetailHandler),
(r"/submission/([0-9]+)/([0-9]+)", SubmissionStudentSheetHandler),
(r"/submission/([0-9]+)/grade_all", SubmissionGradeAllHandler),
(r"/submission/([0-9]+)/assign", SubmissionAssignHandler),
(r"/grading/mails/preview", GradingPreviewMailsHandler),
(r"/grading/mails/send_all", GradingSendMailsHandler),
(r"/merge/([0-9]+)/select", MergeSelectHandler),
(r"/merge/([0-9]+)/preview", MergePreviewHandler),
(r"/merge/([0-9]+)/merge", MergeHandler),
(r"/contact/([0-9]+)", ContactCraftHandler),
(r"/contact/([0-9]+)/send", ContactSendHandler),
(r"/contact/all", ContactAllCraftHandler),
(r"/contact/all/send", ContactAllSendHandler),
(r"/download/(.*)", DownloadHandler),
(r"/updb", UpdateDatabaseHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler, {
"path": os.path.join(config.module_path, "static")
}),
])
application.db = database.Database(config)
port = config('httpd.port')
application.listen(port)
logging.debug("Web server started on port %i.", port)
tornado.ioloop.IOLoop.instance().start()
|
mgolub2/Breadcrumb
|
ESP8266/chain_simulator.py
|
Python
|
gpl-3.0
| 4,013
| 0.005981
|
#!/usr/local/bin/python3.5
"""
Author: Maximilian Golub
Simulate the chain parsing and replying to a http request to test
the ESP8266
"""
import serial
import socket
import re
import traceback
import sys
import subprocess
import time
PORT = '/dev/cu.usbserial-FTZ29WSV' #OSX
#PORT = 'COM3' # If on windows
BAUD = 115200
def loop():
"""
Loop that runs forever, reading from the
serial connection waiting for special sequences indicating
TCP data.
:return:
"""
with serial.Serial(PORT, BAUD) as serial_socket:
pound_count = 0
data = ''
start = 0
while True:
#Read a character at a time.
#Yes this is awful and terrible
new_data = serial_socket.read(1)
try:
decode_data = new_data.decode('ascii')
#print(decode_data, end="")
if decode_data:
if decode_data == '\a':
pound_count += 1
if pound_count >= 3:
start = 1
print(pound_count)
else:
pound_count = 0
if decode_data == '\b':
print("***Parsing data!!!****")
start = 0
print(data)
parse(data, serial_socket)
pound_count = 0
data = ''
else:
if start:
data += decode_data
except UnicodeDecodeError:
pass
def parse(data, serial_socket):
"""
Parse the data coming over the serial connection. The data should
    be the GET/POST (or whatever) request from the Wifi device attached to the
    ESP8266. Looks for the Host header, tries to get the host+port with regex.
:param data:
:param serial_socket:
:return:
"""
try:
host_match = re.search('Host: (\S+)\\r\\n', data)
if host_match:
host = host_match.group(1)
#print(host)
try:
                host, port = host.split(':')  # fixed: split() on whitespace never separates host:port
except ValueError:
port = 80
if host == "192.168.1.1:8080": # Special case to test basic functionality level.
with open('hackaday.txt', 'r') as d:
data = d.read(100)
serial_socket.write('\b\b\b'.encode('utf-8'))
while data:
serial_socket.write(data.encode('utf-8'))
data = d.read(100)
if chr(27).encode() in data.encode():
print("OH SHIT")
time.sleep(.01)
serial_socket.write(chr(27).encode())
else: #Connect a socket as a client, then return that over the uart.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((host, int(port)))
totalsent = 0
while totalsent < len(data): #Send all of our data
sent = s.send(data[totalsent:].encode())
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
result = s.recv(100) #Recieve data in 100 byte chunks, just like the special case.
if result:
serial_socket.write('\b\b\b'.encode('utf-8')) #Write special start code sequence
while (len(result) > 0):
serial_socket.write(result)
time.sleep(.01) #Keep the ESP8266 from sploding
result = s.recv(100)
serial_socket.write(chr(27).encode())
s.close()
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
loop()
|
siggame/webserver
|
webserver/codemanagement/management/commands/update_repos.py
|
Python
|
bsd-3-clause
| 4,248
| 0.000235
|
from django.core.management.base import BaseCommand
from webserver.codemanagement.models import TeamClient
import os
import re
import tempfile
import subprocess
class Command(BaseCommand):
help = 'Attempts to update all repositories by pulling from bases'
def handle(self, *args, **options):
# A list of tuples: (message, repo_directory, stderr)
errors = []
# A list of tuples: (team name, git-show output)
successes = []
for client in TeamClient.objects.all():
directory = tempfile.mkdtemp(prefix='GRETA_UPDATE')
repo_name = os.path.basename(client.repository.name)
repo_name = repo_name.replace(".git", "")
repo_directory = os.path.join(directory, repo_name)
self.stdout.write("Updating {0}'s repo...\n".format(client.team.name))
####################
# Clone
####################
self.stdout.write("\tCloning...\n")
clone = subprocess.Popen(["git", "clone", client.repository.path],
cwd=directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = clone.communicate()
if clone.returncode != 0:
errors.append(
("Failed to clone {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Pull
####################
self.stdout.write("\tPulling...\n")
# Use default merge-recursive strategy
pull = subprocess.Popen(["git", "pull",
client.base.repository.path],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = pull.communicate()
if pull.returncode != 0:
errors.append(
("Failed to pull into {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Push
####################
self.stdout.write("\tPushing...\n")
push = subprocess.Popen(["git", "push"],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = push.communicate()
if push.returncode != 0:
errors.append(
("Failed to push to {0}'s repo".format(client.team.name),
repo_directory,
out + err)
)
continue
####################
# Show
####################
self.stdout.write("\tGetting show...\n")
show = subprocess.Popen(["git", "show", "--stat"],
cwd=repo_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = show.communicate()
successes.append((client.team.name, out + err))
if successes:
self.stdout.write("\n\nSuccessfully updated some team repos\n")
for name, show in successes:
self.stdout.write("\t - {0}\n".format(name))
for line in show.splitlines():
self.stdout.write("\t\t" + line + "\n")
self.stdout.write("\n\n")
if errors:
self.stdout.write("\n\nUnable to update some team repos\n")
for name, directory, stderr in errors:
self.stdout.write("\t - {0} ({1})\n".format(name, directory))
for line in stderr.splitlines():
self.stdout.write("\t\t" + line + "\n")
self.stdout.write("\n\n")
if not errors and not successes:
self.stdout.write("No team repos to update\n")
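# Typical invocation (an assumption; the standard pattern for Django
# management commands):
#   python manage.py update_repos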
|
vgrem/Office365-REST-Python-Client
|
office365/sharepoint/files/checkedOutFileCollection.py
|
Python
|
mit
| 350
| 0.002857
|
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.files.checkedOutFile import CheckedOutFile
class CheckedOutFileCollection(BaseEntityCollection):
def __init__(self, context, resource_path=None):
super(CheckedOutFileCollection, self).__init__(context, CheckedOutFile, resource_path)
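# Hypothetical usage sketch (the client/credential setup is an assumption,
# not taken from this file; the collection is normally reached through a
# parent object's property rather than constructed directly):
#   ctx = ClientContext(site_url).with_credentials(credentials)
#   checked_out = CheckedOutFileCollection(ctx)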
|
who-emro/meerkat_abacus
|
meerkat_abacus/util/data_types.py
|
Python
|
mit
| 555
| 0.009009
|
import csv
from meerkat_abacus.config import config
def data_types(param_config=config):
with open(param_config.config_directory + param_config.country_config["types_file"],
"r", encoding='utf-8',
              errors="replace") as f:
DATA_TYPES_DICT = [_dict for _dict in csv.DictReader(f)]
return DATA_TYPES_DICT
def data_types_for_form_name(form_name, param_config=config):
    return [data_type for data_type in data_types(param_config=param_config) if form_name == data_type['form']]
DATA_TYPES_DICT = data_types()
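# Hypothetical usage sketch (assumes the types CSV has a 'form' column):
#   demo_types = data_types_for_form_name("demo_case")
#   -> every row (as a dict) whose 'form' field equals "demo_case"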
|
mashedkeyboard/Headlights
|
handlers.py
|
Python
|
gpl-3.0
| 429
| 0.016317
|
import logging
from time import strftime
def closed():
    logging.info('Headlights process stopped')
def criterr(errortext):
    logging.critical('A fatal error occurred :: ' + errortext)
exit()
def err(errortext):
    logging.error('An error occurred :: ' + errortext)
def warn(errortext):
logging.warning(errortext)
def inf(errortext):
logging.info(errortext)
def debug(errortext):
logging.debug(errortext)
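# Minimal usage sketch (assumes the caller has configured the logging module):
#   import handlers
#   handlers.warn("disk nearly full")     # -> WARNING record
#   handlers.criterr("config missing")    # logs CRITICAL, then exits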
|
chapman-cpsc-230/hw1-grazi102
|
sin2_plus_cos2.py
|
Python
|
mit
| 1,198
| 0.004174
|
"""
File: <Sin2_plus_cos2>
Copyright (c) 2016 <Lauren Graziani>
License: MIT
<debugging a program>
"""
"""
# a
from math import sin, cos #need to import pi from math
x = pi/4
1_val = math.sin^2(x) + math.cos^2(x) #can't start a variable with a number, powers are written by **
print 1_VAL
"""
# a debugged
from math import sin, cos, pi
x = pi / 4
val1 = sin(x) ** 2 + cos(x) ** 2
print val1
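# Expected output: 1.0, since sin(x)**2 + cos(x)**2 == 1 for any x.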
"""
# b
v0 = 3 m/s #get rid of m/s
t = 1 s #get rid of s
a = 2 m/s**2 # **2 should come right after 2, get rid of m/s
s = v0.t + 0,5.a.t**2 #v0.t should be v0*t, change comma to period and periods to *
print s
"""
# b debugged
v0 = 3
t = 1
a = 2  # units (m/s**2) dropped; writing "2 ** 2" would wrongly square the value
s = v0*t + 0.5*a*t**2
print s
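# With v0 = 3, t = 1, a = 2 this prints 4.0 (s = 3*1 + 0.5*2*1**2).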
#c
"""
a = 3,3 b = 5,3
a2 = a**2
b2 = b**2
eq1_sum = a2 + 2ab + b2
eq2_sum = a2 - (2ab + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print 'First equation: %g = %g', % (eq1_sum, eq1_pow)
print 'Second equation: %h = %h', % (eq2_pow, eq2_pow)
# c debugged (confused???)
a = 3.3
b = 5.3
a2 = a**2
b2 = b**2
eq1_sum = a2 + (2*a*b) + b2
eq2_sum = a2 - (2*a*b) + b2
eq1_pow = (a+b)**2
eq2_pow = (a-b)**2
print "First equation: %g = %g" % (eq1_sum, eq1_pow)
print "Second equation: %g = %g" % (eq2_sum, eq2_pow)
"""
|
caot/intellij-community
|
python/testData/inspections/PyPropertyDefinitionInspection25/src/prop_test.py
|
Python
|
apache-2.0
| 1,700
| 0.042941
|
class A(object):
def __init__(self, bar):
self._x = 1 ; self._bar = bar
def __getX(self):
return self._x
def __setX(self, x):
self._x = x
def __delX(self):
pass
x1 = property(__getX, __setX, __delX, "doc of x1")
x2 = property(__setX) # should return
x3 = property(__getX, __getX) # should not return
x4 = property(__getX, fdel=__getX) # should not return
x5 = property(__getX, doc=123) # bad doc
x6 = property(lambda self: self._x)
x7 = property(lambda self: self._x, lambda self: self._x) # setter should not return
@property
def foo(self):
return self._x
@foo.setter # ignored in 2.5
def foo(self, x):
self._x = x
@foo.deleter # ignored in 2.5
def foo(self):
pass
@property
def boo(self):
return self._x
@boo.setter
    def boo1(self, x): # ignored in 2.5
self._x = x
@boo.deleter
    def boo2(self): # ignored in 2.5
pass
@property
def moo(self): # should return
pass
@moo.setter
def foo(self, x):
return 1 # ignored in 2.5
@foo.deleter
def foo(self):
return self._x # ignored in 2.5
    @qoo.setter # unknown qoo is reported in ref inspection
def qoo(self, v):
self._x = v
@property
def bar(self):
return None
class Ghostbusters(object):
def __call__(self):
return "Who do you call?"
gb = Ghostbusters()
class B(object):
x = property(gb) # pass
y = property(Ghostbusters()) # pass
z = property(Ghostbusters) # pass
class Eternal(object):
def give(self):
while True:
yield 1
def giveAndTake(self):
x = 1
while True:
x = (yield x)
one = property(give) # should pass
anything = property(giveAndTake) # should pass
|
tornadoalert/kmcoffice
|
venue/migrations/0006_eventcalander.py
|
Python
|
gpl-3.0
| 725
| 0.002759
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-25 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('venue', '0005_auto_20170916_0701'),
]
operations = [
migrations.CreateModel(
name='EventCalander',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='Default Event', max_length=200)),
('calander_id', models.TextField()),
('active', models.BooleanField(default=True)),
],
),
]
|
mikeireland/pyghost
|
pyghost/extract.py
|
Python
|
mit
| 20,005
| 0.024144
|
"""Given (x,wave,matrices, slit_profile), extract the flux from each order. For
readability, we keep this separate from the simulator.... but the simulator is
required in order to run this.
To run, create a simulated fits file (e.g. "test_blue.fits") using ghostsim then:
blue_high = pyghost.extract.Extractor('blue', 'high')
flux,var = blue_high.two_d_extract("test_blue.fits")
plt.plot(blue_high.w_map[0,:], flux[0,:,0])
"""
from __future__ import division, print_function
import ghostsim
import numpy as np
import matplotlib.pyplot as plt
try:
import pyfits
except ImportError:
import astropy.io.fits as pyfits
import pdb
class Extractor():
"""A class for each arm of the spectrograph. The initialisation function takes a
    single string representing the configuration. For GHOST, it can be "red" or "blue".
The extraction is defined by 3 key parameters: an "x_map", which is equivalent to
2dFDR's tramlines and contains a physical x-coordinate for every y (dispersion direction)
coordinate and order, and a "w_map", which is the wavelength corresponding to every y
(dispersion direction) coordinate and order. """
def __init__(self,arm, mode):
self.sim = ghostsim.Arm(arm)
self.x_map,self.w_map,self.blaze,self.matrices = self.sim.spectral_format_with_matrix()
#Fill in the slit dimensions in "simulator pixel"s. based on if we are in the
#high or standard resolution mode.
if mode == 'high':
self.mode = mode
self.lenslet_width = self.sim.lenslet_high_size
self.nl = 28
## Set default profiles - object, sky and reference
fluxes = np.zeros( (self.nl,3) )
fluxes[2:21,0] = 0.37
fluxes[8:15,0] = 0.78
fluxes[11,0] = 1.0
#NB if on the following line, fluxes[2:,1]=1.0 is set, sky will be
#subtracted automatically.
fluxes[2+19:,1]=1.0
fluxes[0,2]=1.0
self.define_profile(fluxes)
elif mode == 'std':
self.mode = mode
self.lenslet_width = self.sim.lenslet_std_size
self.nl = 17
## Set default profiles - object 1, sky and object 2
fluxes = np.zeros( (self.nl,3) )
fluxes[0:7,0] = 1.0
fluxes[7:10,1] = 1.0
fluxes[10:,2] = 1.0
self.define_profile(fluxes)
#Set some default pixel offsets for each lenslet, as used for a square lenslet profile
ny = self.x_map.shape[1]
nm = self.x_map.shape[0]
pix_offset_ix = np.append(np.append([0],np.arange(1,self.nl).repeat(2)),self.nl)
self.square_offsets = np.empty( (2*self.nl,nm) )
# The [0,0] component of "matrices" measures the size of a detector pixel in the
# simulated slit image space. i.e. slitmicrons/detpix.
for i in range(self.nl):
self.square_offsets[:,i] = (pix_offset_ix - self.nl/2.0) * self.lenslet_width / self.matrices[i,self.x_map.shape[1]//2,0,0]
self.sim_offsets = np.empty( (self.sim.im_slit_sz,nm) )
im_slit_pix_in_microns = (np.arange(self.sim.im_slit_sz) - self.sim.im_slit_sz/2.0) * self.sim.microns_pix
for i in range(nm):
self.sim_offsets[:,i] = im_slit_pix_in_microns / self.matrices[i,self.x_map.shape[1]//2,0,0]
#To aid in 2D extraction, let's explicitly compute the y offsets corresponding to these x offsets...
#The "matrices" map pixels back to slit co-ordinates.
self.slit_tilt = np.zeros( (nm,ny) )
for i in range(nm):
for j in range(ny):
invmat = np.linalg.inv( self.matrices[i,j] )
#What happens to the +x direction?
x_dir_map = np.dot(invmat,[1,0])
self.slit_tilt[i,j] = x_dir_map[1]/x_dir_map[0]
def define_profile(self,fluxes):
""" Manually define the slit profile as used in lenslet extraction. As this is
a low-level function, all lenslets must be defined. e.g. by convention, for the
star lenslets of the high resolution mode, lenslets 0,1 and 21 through 27 would
    be zero. """
if fluxes.shape[0] != self.nl:
print("Error: {0:s} resolution mode must have {1:d} lenslets".format(self.mode,self.nl))
else:
self.square_profile = np.empty( (fluxes.shape[0]*2, fluxes.shape[1]) )
self.sim_profile = np.empty( (self.sim.im_slit_sz, fluxes.shape[1]) )
for i in range(fluxes.shape[1]):
self.square_profile[:,i] = np.array(fluxes[:,i]).repeat(2)
im_slit=self.sim.make_lenslets(fluxes=fluxes[:,i], mode=self.mode)
self.sim_profile[:,i] = np.sum(im_slit, axis=0)
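    # Hypothetical profile for the 'std' mode (nl = 17), mirroring the defaults
    # set in __init__: a single object across the first seven lenslets.
    #   fluxes = np.zeros((17, 3))
    #   fluxes[0:7, 0] = 1.0
    #   extractor.define_profile(fluxes)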
def one_d_extract(self, data=[], file='', badpix=[], lenslet_profile='sim', rnoise=3.0):
""" Extract flux by integrating down columns (the "y" direction), using an
optimal extraction method.
Given that some of this code is in common with two_d_extract, the routines could
easily be merged... however that would make one_d_extract less readable.
Parameters
----------
data: numpy array (optional)
Image data, transposed so that dispersion is in the "y" direction. Note that
this is the transpose of a conventional echellogram. Either data or file
must be given
file: string (optional)
A fits file with conventional row/column directions containing the data to be
extracted.
lenslet_profile: 'square' or 'sim'
Shape of the profile of each fiber as used in the extraction. For a final
implementation, 'measured' should be a possibility. 'square' assigns each
pixel uniquely to a single lenslet. For testing only
rnoise: float
The assumed readout noise.
WARNING: Binning not implemented yet"""
if len(data)==0:
if len(file)==0:
print("ERROR: Must input data or file")
else:
#Transpose the data from the start.
data = pyfits.getdata(file).T
ny = self.x_map.shape[1]
nm = self.x_map.shape[0]
nx = self.sim.szx
#Number of "objects"
no = self.square_profile.shape[1]
extracted_flux = np.zeros( (nm,ny,no) )
extracted_var = np.zeros( (nm,ny,no) )
#Assuming that the data are in photo-electrons, construct a simple model for the
#pixel inverse variance.
pixel_inv_var = 1.0/(np.maximum(data,0) + rnoise**2)
pixel_inv_var[badpix]=0.0
#Loop through all orders then through all y pixels.
for i in range(nm):
print("Extracting order: {0:d}".format(i))
#Based on the profile we're using, create the local offsets and profile vectors
if lenslet_profile == 'square':
offsets = self.square_offsets[:,i]
profile = self.square_profile
elif lenslet_profile == 'sim':
offsets = self.sim_offsets[:,i]
profile = self.sim_profile
nx_cutout = 2*int( (np.max(offsets) - np.min(offsets))/2 ) + 2
phi = np.empty( (nx_cutout,no) )
for j in range(ny):
#Check for NaNs
if self.x_map[i,j] != self.x_map[i,j]:
extracted_var[i,j,:] = np.nan
continue
#Create our column cutout for the data and the PSF
x_ix = int(self.x_map[i,j]) - nx_cutout//2 + np.arange(nx_cutout,dtype=int) + nx//2
for k in range(no):
phi[:,k] = np.interp(x_ix - self.x_map[i,j] - nx//2, offsets, profile[:,k])
phi[:,k] /= np.sum(phi[:,k])
#Deal with edge effects...
ww = np.where( (x_ix >= nx) | (x_ix < 0) )[0]
x_ix[ww]=0
phi[ww,:]=0.0
#Cut out our
|
crustymonkey/r53-dyndns
|
libr53dyndns/errors.py
|
Python
|
gpl-2.0
| 133
| 0.015038
|
class IPParseError(Exception):
pass
class ZoneNotFoundError(Exception):
pass
class InvalidInputError(Exception):
pass
|
linkhub-sdk/popbill.cashbill.example.py
|
getChargeInfo.py
|
Python
|
mit
| 1,259
| 0.001747
|
# -*- coding: utf-8 -*-
# code for console encoding differences; don't mind it
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import CashbillService, PopbillException
cashbillService = CashbillService(testValue.LinkID, testValue.SecretKey)
cashbillService.IsTest = testValue.IsTest
cashbillService.IPRestrictOnOff = testValue.IPRestrictOnOff
cashbillService.UseStaticIP = testValue.UseStaticIP
cashbillService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Checks the linked member's cash-receipt API service billing information.
- https://docs.popbill.com/cashbill/python/api#GetChargeInfo
'''
try:
print("=" * 15 + " 과금정보 확인 " + "=" * 15)
# Popbill member business registration number
CorpNum = testValue.testCorpNum
# Popbill member user ID
UserID = testValue.testUserID
response = cashbillService.getChargeInfo(CorpNum, UserID)
print(" unitCost (발행단가) : %s" % response.unitCost)
print(" chargeMethod (과금유형) : %s" % response.chargeMethod)
print(" rateSystem (과금제도) : %s" % response.rateSystem)
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
|
ray-project/ray
|
python/ray/util/sgd/torch/examples/tune_example.py
|
Python
|
apache-2.0
| 5,267
| 0.00019
|
# fmt: off
"""
This file holds code for a Distributed Pytorch + Tune page in the docs.
FIXME: We switched our code formatter from YAPF to Black. Check if we can enable code
formatting on this module and update the paragraph below. See issue #21318.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
import torch
import torch.nn as nn
from ray.tune.utils import merge_dicts
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import ray
from ray import tune
from ray.util.sgd.torch import TorchTrainer, TrainingOperator
from ray.util.sgd.utils import BATCH_SIZE
from ray.util.sgd.torch.examples.train_example import LinearDataset
def model_creator(config):
return nn.Linear(1, 1)
def optimizer_creator(model, config):
"""Returns optimizer."""
return torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-4))
def data_creator(config):
"""Returns training dataloader, validation dataloader."""
train_dataset = LinearDataset(2, 5)
val_dataset = LinearDataset(2, 5, size=400)
train_loader = DataLoader(train_dataset, batch_size=config[BATCH_SIZE])
validation_loader = DataLoader(val_dataset, batch_size=config[BATCH_SIZE])
return train_loader, validation_loader
def scheduler_creator(optimizer, config):
    """Returns scheduler. We are using a ReduceLROnPlateau scheduler."""
scheduler = ReduceLROnPlateau(optimizer, mode="min")
return scheduler
# __torch_tune_example__
def tune_example(operator_cls, num_workers=1, use_gpu=False):
TorchTrainable = TorchTrainer.as_trainable(
training_operator_cls=operator_cls,
num_workers=num_workers,
use_gpu=use_gpu,
config={BATCH_SIZE: 128}
)
analysis = tune.run(
TorchTrainable,
num_samples=3,
config={"lr": tune.grid_search([1e-4, 1e-3])},
stop={"training_iteration": 2},
verbose=1)
return analysis.get_best_config(metric="val_loss", mode="min")
# __end_torch_tune_example__
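# Hypothetical call (assumes ray.init() has already run and a
# TrainingOperator subclass named MyTrainingOperator exists):
#   best_config = tune_example(MyTrainingOperator, num_workers=2)
#   -> the {"lr": ...} setting with the lowest reported val_loss.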
# __torch_tune_manual_lr_example__
def tune_example_manual(operator_cls, num_workers=1, use_gpu=False):
def step(trainer, info: dict):
"""Define a custom training loop for tune.
This is needed because we want to manually update our scheduler.
"""
train_stats = trainer.train(profile=True)
validation_stats = trainer.validate(profile=True)
# Manually update our scheduler with the given metric.
trainer.update_scheduler(metric=validation_stats["val_loss"])
all_stats = merge_dicts(train_stats, validation_stats)
return all_stats
TorchTrainable = TorchTrainer.as_trainable(
override_tune_step=step,
training_operator_cls=operator_cls,
num_workers=num_workers,
use_gpu=use_gpu,
scheduler_step_freq="manual",
config={BATCH_SIZE: 128}
)
analysis = tune.run(
TorchTrainable,
num_samples=3,
config={"lr": tune.grid_search([1e-4, 1e-3])},
stop={"training_iteration": 2},
verbose=1)
return analysis.get_best_config(metric="val_loss", mode="min")
# __end_torch_tune_manual_lr_example__
def get_custom_training_operator(lr_reduce_on_plateau=False):
return TrainingOperator.from_creators(
model_creator=model_creator, optimizer_creator=optimizer_creator,
data_creator=data_creator, loss_creator=nn.MSELoss,
scheduler_creator=scheduler_creator if lr_reduce_on_plateau
else None)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--address",
type=str,
help="the address to use for Ray")
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=1,
help="Sets number of workers for training.")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Enables GPU training")
    parser.add_argument(
        "--lr-reduce-on-plateau",
action="store_true",
default=False,
help="If enabled, use a ReduceLROnPlateau scheduler. If not set, "
"no scheduler is used."
)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=3)
elif args.server_address:
ray.init(f"ray://{args.server_address}")
else:
ray.init(address=args.address)
CustomTrainingOperator = get_custom_training_operator(
args.lr_reduce_on_plateau)
if not args.lr_reduce_on_plateau:
tune_example(CustomTrainingOperator, num_workers=args.num_workers,
use_gpu=args.use_gpu)
else:
tune_example_manual(CustomTrainingOperator,
num_workers=args.num_workers, use_gpu=args.use_gpu)
|
mlperf/training_results_v0.7
|
Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/tf2_common/utils/mlp_log/test_mlp_log.py
|
Python
|
apache-2.0
| 739
| 0.00406
|
"""Test MLPerf logging.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
import pytest
from tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
class TestMLPerfLog(object):
"""Test mlperf log."""
def test_format(self):
msg = mlp_log.mlperf_format('foo_key', {'whiz': 'bang'})
parts = msg.split()
assert parts[0] == ':::MLL'
assert float(parts[1]) > 10
assert parts[2] == 'foo_key:'
j = json.loads(' '.join(parts[3:]))
assert j['value'] == {'whiz': 'bang'}
assert j['metadata']['lineno'] == 21
assert 'test_mlp_log' in j['metadata']['file']
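    # For reference, a matching log line looks roughly like this
    # (the timestamp is illustrative):
    #   :::MLL 1558767599.999 foo_key: {"value": {"whiz": "bang"}, "metadata": {...}}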
if __name__ == '__main__':
sys.exit(pytest.main())
|
ricotabor/opendrop
|
opendrop/app/keyboard.py
|
Python
|
gpl-2.0
| 1,940
| 0
|
# Copyright © 2020, Joseph Berry, Rico Tabor ([email protected])
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from enum import IntEnum
from gi.repository import Gdk
class Key(IntEnum):
UNKNOWN = -1
Up = Gdk.KEY_Up
Right = Gdk.KEY_Right
Down = Gdk.KEY_Down
Left = Gdk.KEY_Left
@classmethod
def from_value(cls, value: int) -> 'Key':
try:
return Key(value)
except ValueError:
return Key.UNKNOWN
class Modifier:
SHIFT = int(Gdk.ModifierType.SHIFT_MASK)
class KeyEvent:
def __init__(self, key: Key, modifier: int):
self.key = key
self.modifier = modifier
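# Minimal usage sketch (the raw event values below are hypothetical):
#   key = Key.from_value(int(Gdk.KEY_Up))        # -> Key.Up
#   event = KeyEvent(key, Modifier.SHIFT)        # shift + up arrow
#   shift_held = bool(event.modifier & Modifier.SHIFT)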
|
qubs/data-centre
|
climate_data/migrations/0020_annotation.py
|
Python
|
apache-2.0
| 1,145
| 0.003493
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-13 03:20
from __future__ import unicode_literals
import django.contrib.postgres.fields.ranges
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0019_auto_20170613_0241'),
]
operations = [
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('time_range', django.contrib.postgres.fields.ranges.DateTimeRangeField()),
('comment', models.TextField()),
('sensor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Sensor')),
('station', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Station')),
],
),
]
|
chenke91/ckPermission
|
settings.py
|
Python
|
mit
| 313
| 0.003195
|
#coding:utf-8
bind = 'unix:/var/run/gunicorn.sock'
workers = 4
# you should change this
user = 'root'
# you may prefer 'error' here
loglevel = 'debug'
errorlog = '-'
logfile = '/var/log/gunicorn/debug.log'
timeout = 300
secure_scheme_headers = {
'X-SCHEME': 'https',
}
x_forwarded_for_header = 'X-FORWARDED-FOR'
|
ikn/o
|
game/engine/entity.py
|
Python
|
gpl-3.0
| 884
| 0.003394
|
"""Entities: things that exist in the world."""
from .gfx import GraphicsGroup
from .util import ir
class Entity (object):
"""A thing that exists in the world.
Entity()
Currently, an entity is just a container of graphics.
"""
def __init__ (self):
#: The :class:`World <engine.game.World>` this entity is in. This is
#: set by the world when the entity is added or removed.
self.world = None
        #: :class:`GraphicsGroup <engine.gfx.container.GraphicsGroup>`
#: containing the entity's graphics, with ``x=0``, ``y=0``.
self.graphics = GraphicsGroup()
def added (self):
"""Called whenever the entity is added to a world.
        This is called after :attr:`world` has been changed to the new world.
"""
pass
    def update (self):
        """Called every frame to make any necessary changes."""
pass
|
supistar/Botnyan
|
plugins/drive.py
|
Python
|
mit
| 603
| 0
|
# -*- encoding:utf8 -*-
from model.parser import Parser
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class Drive(IResponseBase):
    def hear_regex(self, **kwargs):
lists = Parser().get_keyword_list(expand=True)
print("Lists : %r" % lists)
return "^({0})$".format("|".join(lists))
def response(self, **kwargs):
drive_kwargs = {
'document_id': Parser().get_document_id(kwargs.get('text')),
'export_type': 'text/plain'
}
return GoogleDrive().retrieve_content(**drive_kwargs)
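    # Sketch of the generated trigger (the keyword list is hypothetical):
    #   if Parser().get_keyword_list(expand=True) returns ["spec", "design"],
    #   hear_regex() yields "^(spec|design)$", so only exact keyword
    #   messages match.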
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingMedian/cycle_0/ar_/test_artificial_32_Logit_MovingMedian_0__100.py
|
Python
|
bsd-3-clause
| 263
| 0.087452
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 0);
|
dumoulinj/ers
|
ers_backend/ers_backend/celery.py
|
Python
|
mit
| 607
| 0.001647
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ers_backend.settings')
from django.conf import settings
app = Celery('dataset_manager', backend="redis://localhost")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
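# Typical worker start-up (an assumption, not part of this file):
#   celery -A ers_backend worker --loglevel=info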
|
FrostLuma/Mousey
|
mousey/bot/context.py
|
Python
|
mit
| 2,070
| 0
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 - 2018 FrostLuma
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from mousey import commands
from mousey.utils import haste, Timer
class Context(commands.Context):
"""
Provides context while executing commands and utility methods.
Attributes
----------
timer : Timer
A timer which measures how long the current command takes to execute.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.timer = Timer()
async def send(self, content=None, *args, **kwargs):
if content is not None and len(content) > 1999:
link = await haste(content, session=self.bot.session)
content = f'Content too long! <{link}>'
return await super().send(content, *args, **kwargs)
# todo
@property
def red_tick(self):
return '\N{NEGATIVE SQUARED CROSS MARK}'
async def ok(self):
await self.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')
@property
def color(self):
return self.me.color
|
caseywstark/colab
|
colab/apps/object_feeds/templatetags/object_feeds_tags.py
|
Python
|
mit
| 2,666
| 0.007127
|
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
register = template.Library()
### Show an update instance ###
@register.inclusion_tag("feeds/update.html", takes_context=True)
def show_update(context, update):
feed_object = update.feed.feed_object
update_object = update.content_object
icon = object_content = None
feed_object_link = '<a class="update-object" href="%s">%s</a>' % (feed_object.get_absolute_url(), feed_object)
update_object_link = None
if update.action_description.startswith('create'):
icon = 'create'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('edit'):
icon = 'edit'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('added to the discussion'):
icon = 'comment'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('added the paper'):
icon = 'paper'
update_object_link = '<a class="update-object" href="%s">%s</a>' % (update_object.get_absolute_url(), update_object)
update_line = update.action_description % (update_object_link, feed_object_link)
elif update.action_description.startswith('started following'):
icon = 'follow'
update_line = update.action_description % feed_object_link
elif update.action_description.startswith('resolved'):
icon = 'resolve'
update_line = update.action_description % feed_object_link
else:
icon = 'settings'
update_line = update.action_description % feed_object_link
return {'update': update, 'update_user': update.user, 'icon': icon,
'feed_object': feed_object, 'update_object': update_object, 'update_line': update_line,
'update_content': update.update_content, 'STATIC_URL': settings.STATIC_URL}
### Renders the follow button for any object ###
@register.inclusion_tag("feeds/follow_button.html", takes_context=True)
def follow_button(context, content_object, extra_text=None):
user = context['request'].user
feed = content_object.feed
subscription_url = reverse('feeds_subscription', kwargs={'feed_id': feed.id})
if user.is_authenticated():
        subscription = feed.is_user_following(user)
else:
subscription = None
return {'subscription_url': subscription_url, 'subscription': subscription,
'extra_text': extra_text, 'feed': feed,
'STATIC_URL': settings.STATIC_URL}
|
mrchristine/spark-examples-dbc
|
src/main/python/ml/max_abs_scaler_example.py
|
Python
|
apache-2.0
| 1,515
| 0.00066
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MaxAbsScaler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("MaxAbsScalerExample")\
.getOrCreate()
# $example on$
dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")
# Compute summary statistics and generate MaxAbsScalerModel
scalerModel = scaler.fit(dataFrame)
# rescale each feature to range [-1, 1].
    scaledData = scalerModel.transform(dataFrame)
scaledData.show()
# $example off$
spark.stop()
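# Note: MaxAbsScaler divides each feature by its maximum absolute value,
# so every scaled feature lands in [-1, 1] without shifting the data.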
|
eteamin/spell_checker_web_api
|
scwapi/__init__.py
|
Python
|
gpl-3.0
| 49
| 0
|
# -*- coding: utf-8 -*-
"""The scwapi package"""
|
bengosney/rhgd3
|
gardens/migrations/0024_auto_20180215_2316.py
|
Python
|
gpl-3.0
| 759
| 0.001318
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-15 23:16
from __future__ import unicode_literals
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('gardens', '0023_auto_20180215_2314'),
]
operations = [
migrations.RemoveField(
model_name='maintenancephoto',
name='main',
),
migrations.AddField(
model_name='maintenancephoto',
name='large',
field=image_cropping.fields.ImageRatioField('image', '600x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='large'),
),
]
|
wxiang7/airflow
|
airflow/operators/__init__.py
|
Python
|
apache-2.0
| 2,497
| 0.0004
|
# Imports operators dynamically while keeping the package API clean,
# abstracting the underlying modules
from airflow.utils.helpers import import_module_attrs as _import_module_attrs
# These need to be integrated first as other operators depend on them
_import_module_attrs(globals(), {
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
})
_operators = {
'bash_operator': ['BashOperator'],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'pig_operator': ['PigOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dagrun_operator': ['TriggerDagRunOperator'],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'sensors': [
'BaseSensorOperator',
'ExternalTaskSensor',
'HdfsSensor',
'HivePartitionSensor',
'HttpSensor',
'MetastorePartitionSensor',
'S3KeySensor',
'S3PrefixSensor',
'SqlSensor',
'TimeDeltaSensor',
'TimeSensor',
'WebHdfsSensor',
],
    'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
'presto_to_mysql': ['PrestoToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
    'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
'oracle_operator': ['OracleOperator']
}
_import_module_attrs(globals(), _operators)
from airflow.models import BaseOperator
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import operators as _operators
for _operator in _operators:
globals()[_operator.__name__] = _operator
|
fangdingjun/dnsproxy
|
third-part/ldns-1.6.17/contrib/python/examples/python3/ldns-newpkt.py
|
Python
|
gpl-3.0
| 476
| 0.014706
|
#!/usr/bin/python
import ldns
pkt = ldns.ldns_pkt.new_query_frm_str("www.google.com",ldns.LDNS_RR_TYPE_ANY, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_QR | ldns.LDNS_AA)
rra = ldns.ldns_rr.new_frm_str("www.google.com. IN A 192.168.1.1",300)
rrb = ldns.ldns_rr.new_frm_str("www.google.com. IN TXT Some\ Description",300)
list = ldns.ldns_rr_list()
if (rra): list.push_rr(rra)
if (rrb): list.push_rr(rrb)
pkt.push_rr_list(ldns.LDNS_SECTION_ANSWER, list)
print("Packet:")
print(pkt)
|
zcoinofficial/zcoin
|
src/bls-signatures/python-impl/aggregation_info.py
|
Python
|
mit
| 6,831
| 0
|
from util import hash256, hash_pks
from copy import deepcopy
class AggregationInfo:
"""
AggregationInfo represents information of how a tree of aggregate
    signatures was created. Different trees will result in different
signatures, due to exponentiations required for security.
An AggregationInfo is represented as a map from (message_hash, pk)
to exponents. When verifying, a verifier will take the signature,
along with this map, and raise each public key to the correct
exponent, and multiply the pks together, for identical messages.
"""
def __init__(self, tree, message_hashes, public_keys):
self.tree = tree
self.message_hashes = message_hashes
self.public_keys = public_keys
def empty(self):
return not self.tree
def __eq__(self, other):
return not self.__lt__(other) and not other.__lt__(self)
def __lt__(self, other):
"""
Compares two AggregationInfo objects, this is necessary for sorting
them. Comparison is done by comparing (message hash, pk, exponent)
"""
combined = [(self.message_hashes[i], self.public_keys[i],
self.tree[(self.message_hashes[i], self.public_keys[i])])
for i in range(len(self.public_keys))]
combined_other = [(other.message_hashes[i], other.public_keys[i],
other.tree[(other.message_hashes[i],
other.public_keys[i])])
for i in range(len(other.public_keys))]
for i in range(max(len(combined), len(combined_other))):
if i == len(combined):
return True
if i == len(combined_other):
return False
if combined[i] < combined_other[i]:
return True
if combined_other[i] < combined[i]:
return False
return False
def __str__(self):
ret = ""
for key, value in self.tree.items():
ret += ("(" + key[0].hex() + "," + key[1].serialize().hex()
+ "):\n" + hex(value) + "\n")
return ret
def __deepcopy__(self, memo):
new_tree = deepcopy(self.tree, memo)
new_mh = deepcopy(self.message_hashes, memo)
new_pubkeys = deepcopy(self.public_keys, memo)
return AggregationInfo(new_tree, new_mh, new_pubkeys)
@staticmethod
def from_msg_hash(public_key, message_hash):
tree = {}
tree[(message_hash, public_key)] = 1
return AggregationInfo(tree, [message_hash], [public_key])
@staticmethod
def from_msg(pk, message):
return AggregationInfo.from_msg_hash(pk, hash256(message))
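    # Hypothetical usage sketch (keys and messages are placeholders):
    #   info_a = AggregationInfo.from_msg(pk_a, b"hello")
    #   info_b = AggregationInfo.from_msg(pk_b, b"hello")
    #   merged = AggregationInfo.merge_infos([info_a, info_b])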
@staticmethod
def simple_merge_infos(aggregation_infos):
"""
Infos are just merged together with no addition of exponents,
since they are disjoint
"""
new_tree = {}
for info in aggregation_infos:
new_tree.update(info.tree)
mh_pubkeys = [k for k, v in new_tree.items()]
mh_pubkeys.sort()
message_hashes = [message_hash for (message_hash, public_key)
in mh_pubkeys]
public_keys = [public_key for (message_hash, public_key)
in mh_pubkeys]
        return AggregationInfo(new_tree, message_hashes, public_keys)
@staticmethod
def secure_merge_infos(colliding_infos):
"""
Infos are merged together with combination of exponents
"""
# Groups are sorted by message then pk then exponent
        # Each info object (and all of its exponents) will be
# exponentiated by one of the Ts
colliding_infos.sort()
sorted_keys = []
for info in colliding_infos:
for key, value in info.tree.items():
sorted_keys.append(key)
sorted_keys.sort()
sorted_pks = [public_key for (message_hash, public_key)
in sorted_keys]
computed_Ts = hash_pks(len(colliding_infos), sorted_pks)
# Group order, exponents can be reduced mod the order
order = sorted_pks[0].value.ec.n
new_tree = {}
for i in range(len(colliding_infos)):
for key, value in colliding_infos[i].tree.items():
if key not in new_tree:
# This message & pk have not been included yet
new_tree[key] = (value * computed_Ts[i]) % order
else:
# This message and pk are already included, so multiply
addend = value * computed_Ts[i]
new_tree[key] = (new_tree[key] + addend) % order
mh_pubkeys = [k for k, v in new_tree.items()]
mh_pubkeys.sort()
message_hashes = [message_hash for (message_hash, public_key)
in mh_pubkeys]
public_keys = [public_key for (message_hash, public_key)
in mh_pubkeys]
return AggregationInfo(new_tree, message_hashes, public_keys)
@staticmethod
def merge_infos(aggregation_infos):
messages = set()
colliding_messages = set()
for info in aggregation_infos:
messages_local = set()
for key, value in info.tree.items():
if key[0] in messages and key[0] not in messages_local:
colliding_messages.add(key[0])
messages.add(key[0])
messages_local.add(key[0])
if len(colliding_messages) == 0:
return AggregationInfo.simple_merge_infos(aggregation_infos)
colliding_infos = []
non_colliding_infos = []
for info in aggregation_infos:
info_collides = False
for key, value in info.tree.items():
if key[0] in colliding_messages:
info_collides = True
colliding_infos.append(info)
break
if not info_collides:
non_colliding_infos.append(info)
combined = AggregationInfo.secure_merge_infos(colliding_infos)
non_colliding_infos.append(combined)
return AggregationInfo.simple_merge_infos(non_colliding_infos)
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
citrix-openstack-build/python-keystoneclient
|
keystoneclient/middleware/auth_token.py
|
Python
|
apache-2.0
| 50,380
| 0.000099
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TOKEN-BASED AUTH MIDDLEWARE
This WSGI component:
* Verifies that incoming client requests have valid tokens by validating
tokens with the auth service.
* Rejects unauthenticated requests UNLESS it is in 'delay_auth_decision'
mode, which means the final decision is delegated to the downstream WSGI
component (usually the OpenStack service)
* Collects and forwards identity information based on a valid token
  such as user name, tenant, etc.
Refer to: http://keystone.openstack.org/middlewarearchitecture.html
HEADERS
-------
* Headers starting with HTTP\_ are standard http headers
* Headers starting with HTTP_X are extended http headers
Coming in from initial call from client or customer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_AUTH_TOKEN
The client token being passed in.
HTTP_X_STORAGE_TOKEN
The client token being passed in (legacy Rackspace use) to support
swift/cloud files
Used for communication between components
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
WWW-Authenticate
HTTP header returned to a user indicating which endpoint to use
to retrieve a new token
What we add to the request for use by the OpenStack service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_IDENTITY_STATUS
'Confirmed' or 'Invalid'
The underlying service will only see a value of 'Invalid' if the Middleware
is configured to run in 'delay_auth_decision' mode
HTTP_X_DOMAIN_ID
Identity service managed unique identifier, string. Only present if
this is a domain-scoped v3 token.
HTTP_X_DOMAIN_NAME
Unique domain name, string. Only present if this is a domain-scoped
v3 token.
HTTP_X_PROJECT_ID
Identity service managed unique identifier, string. Only present if
this is a project-scoped v3 token, or a tenant-scoped v2 token.
HTTP_X_PROJECT_NAME
Project name, unique within owning domain, string. Only present if
this is a project-scoped v3 token, or a tenant-scoped v2 token.
HTTP_X_PROJECT_DOMAIN_ID
Identity service managed unique identifier of owning domain of
project, string. Only present if this is a project-scoped v3 token. If
this variable is set, this indicates that the PROJECT_NAME can only
be assumed to be unique within this domain.
HTTP_X_PROJECT_DOMAIN_NAME
Name of owning domain of project, string. Only present if this is a
project-scoped v3 token. If this variable is set, this indicates that
the PROJECT_NAME can only be assumed to be unique within this domain.
HTTP_X_USER_ID
Identity-service managed unique identifier, string
HTTP_X_USER_NAME
User identifier, unique within owning domain, string
HTTP_X_USER_DOMAIN_ID
Identity service managed unique identifier of owning domain of
user, string. If this variable is set, this indicates that the USER_NAME
can only be assumed to be unique within this domain.
HTTP_X_USER_DOMAIN_NAME
Name of owning domain of user, string. If this variable is set, this
indicates that the USER_NAME can only be assumed to be unique within
this domain.
HTTP_X_ROLES
Comma delimited list of case-sensitive role names
HTTP_X_SERVICE_CATALOG
json encoded keystone service catalog (optional).
HTTP_X_TENANT_ID
*Deprecated* in favor of HTTP_X_PROJECT_ID
Identity service managed unique identifier, string. For v3 tokens, this
will be set to the same value as HTTP_X_PROJECT_ID
HTTP_X_TENANT_NAME
*Deprecated* in favor of HTTP_X_PROJECT_NAME
Project identifier, unique within owning domain, string. For v3 tokens,
this will be set to the same value as HTTP_X_PROJECT_NAME
HTTP_X_TENANT
*Deprecated* in favor of HTTP_X_TENANT_ID and HTTP_X_TENANT_NAME
Keystone-assigned unique identifier, string. For v3 tokens, this
will be set to the same value as HTTP_X_PROJECT_ID
HTTP_X_USER
*Deprecated* in favor of HTTP_X_USER_ID and HTTP_X_USER_NAME
User name, unique within owning domain, string
HTTP_X_ROLE
*Deprecated* in favor of HTTP_X_ROLES
Will contain the same values as HTTP_X_ROLES.
OTHER ENVIRONMENT VARIABLES
---------------------------
keystone.token_info
Information about the token discovered in the process of
validation. This may include extended information returned by the
Keystone token validation call, as well as basic information about
the tenant and user.
"""
import datetime
import logging
import os
import requests
import stat
import tempfile
import time
import urllib
import netaddr
import six
from keystoneclient.common import cms
from keystoneclient.middleware import memcache_crypt
from keystoneclient.openstack.common import jsonutils
from keystoneclient.openstack.common import memorycache
from keystoneclient.openstack.common import timeutils
from keystoneclient import utils
CONF = None
# to pass gate before oslo-config is deployed everywhere,
# try application copies first
for app in 'nova', 'glance', 'quantum', 'cinder':
try:
cfg = __import__('%s.openstack.common.cfg' % app,
fromlist=['%s.openstack.common' % app])
# test which application middleware is running in
if hasattr(cfg, 'CONF') and 'config_file' in cfg.CONF:
CONF = cfg.CONF
break
except ImportError:
pass
if not CONF:
from oslo.config import cfg
CONF = cfg.CONF
# alternative middleware configuration in the main application's
# configuration file e.g. in nova.conf
# [keystone_authtoken]
# auth_host = 127.0.0.1
# auth_port = 35357
# auth_protocol = http
# admin_tenant_name = admin
# admin_user = admin
# admin_password = badpassword
# when deploy Keystone auth_token middleware with Swift, user may elect
# to use Swift memcache instead of the local Keystone memcache. Swift memcache
# is passed in from the request environment and its identified by the
# 'swift.cache' key. However it could be different, depending on deployment.
# To use Swift memcache, you must set the 'cache' option to the environment
# key where the Swift cache object is stored.
opts = [
cfg.StrOpt('auth_admin_prefix',
default='',
help='Prefix to prepend at the beginning of the path'),
cfg.StrOpt('auth_host',
default='127.0.0.1',
help='Host providing the admin Identity API endpoint'),
cfg.IntOpt('auth_port',
default=35357,
help='Port of the admin Identity API endpoint'),
cfg.StrOpt('auth_protocol',
default='https',
help='Protocol of the admin Identity API endpoint'
'(http or https)'),
cfg.StrOpt('auth_uri',
default=None,
# FIXME(dolph): should be default='http://127.0.0.1:5000/v2.0/',
# or (depending on client support) an unversioned, publicly
# accessible identity endpoint (see bug 1207517)
help='Complete public Identity API endpoint'),
cfg.StrOpt('auth_version',
default=None,
help='API version of the admin Identity API endpoint'),
cfg.BoolOpt('delay_auth_decision',
default=False,
help='Do not handle authorization requests within the'
' middleware, but delegate the authorization decision to'
' downstream WSGI components'),
cfg.BoolOpt('http_connect_timeout',
default=None,
help='Request timeout value for communicating with Identity'
' API server.'),
cfg.
|
jklaiho/django-class-fixtures
|
class_fixtures/tests/tests_dumpdata.py
|
Python
|
bsd-3-clause
| 11,960
| 0.008027
|
import re
from django.core.management import call_command
from django.test import TestCase
from class_fixtures.tests.models import (Band, Musician,
Membership, Roadie, Competency, JobPosting, ComprehensiveModel)
from class_fixtures.utils import string_stdout
class DumpDataTests(TestCase):
def test_encoding_declaration(self):
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class')
self.assertTrue(output.getvalue().startswith('# -*- coding: utf-8 -*-\n'))
def test_correct_imports_in_output(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
fixture_import, model_imports = lines[3], lines[4]
self.assertEqual(fixture_import, "from class_fixtures.models import Fixture")
self.assertEqual(model_imports, "from tests.models import Band, Membership, Musician, Roadie")
def test_correct_fixtures_in_output(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[6], 'tests_band_fixture = Fixture(Band)')
self.assertEqual(lines[7], 'tests_musician_fixture = Fixture(Musician)')
self.assertEqual(lines[8], 'tests_membership_fixture = Fixture(Membership)')
self.assertEqual(lines[9], 'tests_roadie_fixture = Fixture(Roadie)')
def test_correct_fixture_populating(self):
band = Band.objects.create(name="Brutallica")
musician = Musician.objects.create(name="Lars Toorich")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bongos", date_joined="1982-01-01")
roadie = Roadie.objects.create(name="Ciggy Tardust")
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[11], "tests_band_fixture.add(1, **{'name': u'Brutallica'})")
self.assertEqual(lines[12], "tests_musician_fixture.add(1, **{'name': u'Lars Toorich'})")
self.assertEqual(lines[13], "tests_membership_fixture.add(1, **{'band': 1, 'date_joined': datetime.date(1982, 1, 1), 'instrument': u'Bongos', 'musician': 1})")
self.assertEqual(lines[14], "tests_roadie_fixture.add(1, **{'hauls_for': [1], 'name': u'Ciggy Tardust'})")
def test_escaped_characters_in_strings(self):
band = Band.objects.create(name="The Apostrophe's Apostles")
musician = Musician.objects.create(name="Ivan \"The Terrible\" Terrible")
musician2 = Musician.objects.create(name="\\, aka the artist formerly known as Backslash")
membership = Membership.objects.create(band=band, musician=musician, instrument="Bass", date_joined="2000-12-05")
membership = Membership.objects.create(band=band, musician=musician2, instrument="Guitar", date_joined="2000-12-05")
roadie = Roadie.objects.create(name='Simon \'Single Quote\' DeForestation')
roadie.hauls_for.add(band)
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
        lines = output.getvalue().split('\n')
self.assertEqual(lines[11], """tests_band_fixture.add(1, **{'name': u"The Apostrophe's Apostles"})""")
self.assertEqual(lines[12], """tests_musician_fixture.add(1, **{'name': u'Ivan "The Terrible" Terrible'})""")
# Raw string to represent what's actually printed out, would be four backslashes without it
self.assertEqual(lines[13], r"""tests_musician_fixture.add(2, **{'name': u'\\, aka the artist formerly known as Backslash'})""")
self.assertEqual(lines[14], """tests_membership_fixture.add(1, **{'band': 1, 'date_joined': datetime.date(2000, 12, 5), 'instrument': u'Bass', 'musician': 1})""")
self.assertEqual(lines[15], """tests_membership_fixture.add(2, **{'band': 1, 'date_joined': datetime.date(2000, 12, 5), 'instrument': u'Guitar', 'musician': 2})""")
self.assertEqual(lines[16], """tests_roadie_fixture.add(1, **{'hauls_for': [1], 'name': u"Simon 'Single Quote' DeForestation"})""")
def test_complex_model(self):
import datetime
from decimal import Decimal
# https://docs.djangoproject.com/en/dev/ref/models/fields/#bigintegerfield
bigintfield_max = 9223372036854775807
cm = ComprehensiveModel.objects.create(
bigint = bigintfield_max,
boolean = True,
char = 'Hey hey now',
date = datetime.date(2011, 6, 6),
datetime = datetime.datetime(2011, 5, 5, 12, 30, 7),
decimal = Decimal('1234.56'),
floatf = 2345.67,
integer = 345678,
nullboolean = None,
time = datetime.time(14, 45, 30),
text = "Bacon ipsum dolor sit amet ham eiusmod cupidatat, "
"hamburger voluptate non dolor. Pork belly excepteur chuck, shankle ullamco "
"fugiat meatloaf est quis meatball sint dolore. Shank drumstick sint, tri-tip "
"deserunt proident in. Pancetta laboris culpa beef, pork chop venison magna "
"duis tail. Nulla in sirloin, minim bresaola ham cupidatat drumstick spare ribs "
"eiusmod ut. Shankle mollit ut, short ribs pork chop drumstick meatloaf duis "
"""elit reprehenderit. Cillum short loin flank est beef.
And the second paragraph looks like this.""")
with string_stdout() as output:
call_command('dumpdata', 'tests', format='class', exclude=[
'tests.Party', 'tests.Politician'])
lines = output.getvalue().split('\n')
self.assertEqual(lines[4], "from tests.models import ComprehensiveModel")
self.assertEqual(lines[6], "tests_comprehensivemodel_fixture = Fixture(ComprehensiveModel)")
# Write-only code that turns the dumpdata output into a dictionary of
# keys and values to be tested individually
model_fields = dict([(j[0].strip("'"), j[1].strip(" ")) for j in
[i.split(':') for i in re.split(", '|\{|\}\)", lines[8]) if ':' in i]
])
# Depending on the platform where the test is being run, bigintfield_max
# may be an integer or a long, depending on the value of sys.maxint.
# The repr() result on the field will vary accordingly (L suffix or not),
# so assertEqual instead repr()'s the value (like the serializer does)
# because we can't have a single string representation for the value
# that would work across all platforms.
#
# There's one more complication: on some systems, Python sees the
# bigintfield_max value as an integer, but after it comes back from the
# database, it is transformed into a long, presumably due to the SQLite
# configuration. So, we retrieve the object from the database and repr()
# its bigint field instead of the original value.
db_cm = ComprehensiveModel.objects.get(pk=cm.pk)
self.assertEqual(model_fields['bigint'], repr(db_cm.bigint))
self.assertEqual(mo
|
saketkc/ribo-seq-snakemake
|
configs/Oct_10_2016_HuR_Human_rna.py
|
Python
|
bsd-3-clause
| 2,343
| 0.012804
|
## Absolute location where all raw files are
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/rna-seq/Penalva_L_08182016/human'
## Output directory
OUT_DIR = '/staging/as/skchoudh/Oct_10_2016_HuR_Human_Mouse_Liver/RNA-Seq_human'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment i.e stranded/non-stranded
## and hence is not required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
# We don't have these, so just use the CDS bed to get the pipeline running
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of the python2 environment
## The following packages need to be installed in that environment:
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## You can do: conda create -n python2 python=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
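## A quick sanity check, not part of the original config: it walks the
## absolute paths defined above and reports any that do not exist yet
## (OUT_DIR may legitimately be missing before the first run).
if __name__ == '__main__':
    import os
    for _name, _path in sorted(globals().items()):
        if _name.isupper() and isinstance(_path, str) and _path.startswith('/'):
            _status = 'ok' if os.path.exists(_path) else 'MISSING'
            print('%-16s %-7s %s' % (_name, _status, _path))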
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/unittest/suite.py
|
Python
|
unlicense
| 10,084
| 0.00119
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: suite.py
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda : None)
func()
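# For example, _call_if_exists(result, '_setupStdout') invokes
# result._setupStdout() when the result object defines that hook,
# and is a silent no-op otherwise.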
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return '<%s tests=%s>' % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
if not hasattr(test, '__call__'):
raise TypeError('{} is not callable'.format(repr(test)))
if isinstance(test, type) and issubclass(test, (
case.TestCase, TestSuite)):
raise TypeError('TestCases and TestSuites must be instantiated before passing them to addTest()')
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError('tests must be an iterable of tests, not a string')
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False):
continue
if not debug:
test(result)
else:
test.debug()
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
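    # Sketch of the order run() produces for two test classes A and B
    # (hypothetical): setUpModule -> A.setUpClass -> A tests -> A.tearDownClass
    # -> B.setUpClass -> B tests -> B.tearDownClass -> tearDownModule, with the
    # class/module transitions detected by the helpers below.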
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if result._moduleSetUpFailed:
return
if getattr(currentClass, '__unittest_skip__', False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
                        errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
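    # Illustrative class fixture (DBTests and connect() are hypothetical):
    # _handleClassSetUp above invokes DBTests.setUpClass exactly once before
    # the first DBTests test runs, and _tearDownPreviousClass below fires
    # tearDownClass once the suite moves on to a different class.
    #
    #   class DBTests(case.TestCase):
    #       @classmethod
    #       def setUpClass(cls):
    #           cls.conn = connect()   # shared by every test in the class
    #
    #       @classmethod
    #       def tearDownClass(cls):
    #           cls.conn.close()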
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
else:
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
return
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
else:
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
return
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
            if getattr(previousClass, '__unittest_skip__', False):
                return
            tearDownClass = getattr(previousClass, 'tearDownClass', None)
            if tearDownClass is not None:
                _call_if_exists(result, '_setupStdout')
                try:
                    try:
                        tearDownClass()
                    except Exception as e:
                        if isinstance(result, _DebugResult):
                            raise
                        className = util.strclass(previousClass)
                        errorName = 'tearDownClass (%s)' % className
                        self._addClassOrModuleLevelException(result, e, errorName)
                finally:
                    _call_if_exists(result, '_restoreStdout')
            return

class _ErrorHolder(object):
    """
    Placeholder for a TestCase inside a result. As far as a TestResult
    is concerned, this looks exactly like a unit test. Used to insert
    arbitrary errors into a test suite run.
    """
    failureException = None

    def __init__(self, description):
        self.description = description

    def id(self):
        return self.description

    def shortDescription(self):
        return None

    def __repr__(self):
        return '<ErrorHolder description=%r>' % (self.description,)

    def __str__(self):
        return self.id()

    def run(self, result):
        pass

    def __call__(self, result):
        return self.run(result)

    def countTestCases(self):
        return 0

def _isnotsuite(test):
    """A crude way to tell apart testsuites and suites (crude, but ok)"""
    try:
        iter(test)
    except TypeError:
        return True

    return False

class _DebugResult(object):
    """Used by the TestSuite to hold previous class when running in debug."""
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/linalg/cblas.py
|
Python
|
gpl-3.0
| 362
| 0.002762
|
"""
This module is deprecated -- use scipy.linalg.blas instead
"""
from __future__ import division, print_function, absolute_import
try:
from ._cblas import *
except ImportError:
empty_module = True
import numpy as _np
|
@_np.deprecate(old_name="scipy.linalg.cblas", new_name="scipy.linalg.blas")
def _deprecate():
pass
_deprecate()
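# Illustration (warning text approximate): importing this module executes
# _deprecate(), which emits a DeprecationWarning steering users to the new
# module name. To surface it explicitly:
#
#   import warnings
#   with warnings.catch_warnings(record=True) as w:
#       warnings.simplefilter('always')
#       from scipy.linalg import cblas
#       print(w[-1].message)  # ... cblas is deprecated, use scipy.linalg.blas ...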
|