| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| rlefevre1/hpp-rbprm-corba | src/hpp/corbaserver/rbprm/rbprmbuilder.py | Python | lgpl-3.0 | 12,303 | 0.017719 |
#!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Steve Tonneau
#
# This file is part of hpp-rbprm-corba.
# hpp-rbprm-corba is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-rbprm-corba is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-rbprm-corba. If not, see
# <http://www.gnu.org/licenses/>.
from hpp.corbaserver.rbprm import Client as RbprmClient
from hpp.corbaserver import Client as BasicClient
import hpp.gepetto.blender.exportmotion as em
## Corba clients to the various servers
#
class CorbaClient:
"""
Container for corba clients to various interfaces.
"""
def __init__ (self):
self.basic = BasicClient ()
self.rbprm = RbprmClient ()
## Load and handle a RbprmDevice robot for rbprm planning
#
# A RbprmDevice robot is a dual representation of a robot. One robot describes the
# trunk of the robot, and a set of robots describe the range of motion of each limb of the robot.
class Builder (object):
## Constructor
def __init__ (self, load = True):
self.tf_root = "base_link"
self.rootJointType = dict()
self.client = CorbaClient ()
self.load = load
## Virtual function to load the robot model.
#
# \param urdfName urdf description of the robot trunk,
# \param urdfNameroms either a string, or an array of strings, indicating the urdf of the different roms to add.
# \param rootJointType type of root joint among ("freeflyer", "planar",
# "anchor"),
# \param meshPackageName name of the meshpackage from where the robot mesh will be loaded
# \param packageName name of the package from where the robot will be loaded
# \param urdfSuffix optional suffix for the urdf of the robot package
# \param srdfSuffix optional suffix for the srdf of the robot package
def loadModel (self, urdfName, urdfNameroms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix):
if(isinstance(urdfNameroms, list)):
for urdfNamerom in urdfNameroms:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNamerom, rootJointType, packageName, urdfNamerom, urdfSuffix, srdfSuffix)
else:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNameroms, rootJointType, packageName, urdfNameroms, urdfSuffix, srdfSuffix)
self.client.rbprm.rbprm.loadRobotCompleteModel(urdfName, rootJointType, packageName, urdfName, urdfSuffix, srdfSuffix)
self.name = urdfName
self.displayName = urdfName
self.tf_root = "base_link"
self.rootJointType = rootJointType
self.jointNames = self.client.basic.robot.getJointNames ()
self.allJointNames = self.client.basic.robot.getAllJointNames ()
self.client.basic.robot.meshPackageName = meshPackageName
self.meshPackageName = meshPackageName
self.rankInConfiguration = dict ()
self.rankInVelocity = dict ()
self.packageName = packageName
self.urdfName = urdfName
self.urdfSuffix = urdfSuffix
self.srdfSuffix = srdfSuffix
rankInConfiguration = rankInVelocity = 0
for j in self.jointNames:
self.rankInConfiguration [j] = rankInConfiguration
rankInConfiguration += self.client.basic.robot.getJointConfigSize (j)
self.rankInVelocity [j] = rankInVelocity
rankInVelocity += self.client.basic.robot.getJointNumberDof (j)
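# The short usage sketch below is illustrative only and is not part of the
# original file; the package, urdf and rom names are hypothetical placeholders
# chosen to show how loadModel and setFilter might be called together.
#
#   fullBody = Builder ()
#   fullBody.loadModel ("robot_trunk", ["robot_lleg_rom", "robot_rleg_rom"],
#                       "freeflyer", "robot_meshes", "robot_description", "", "")
#   fullBody.setFilter (["robot_lleg_rom", "robot_rleg_rom"])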
## Init RbprmShooter
#
def initshooter (self):
return self.client.rbprm.rbprm.initshooter ()
## Sets limits on robot orientation, described according to Euler's ZYX rotation order
#
# \param bounds 6D vector with the lower and upper bound for each rotation axis in sequence
def boundSO3 (self, bounds):
return self.client.rbprm.rbprm.boundSO3 (bounds)
## Specifies a preferred affordance for a given rom.
# This constrains the planner to accept a rom configuration only if
# it collides with a surface the normal of which has these properties.
#
# \param rom name of the rom,
# \param affordances list of affordance names
def setAffordanceFilter (self, rom, affordances):
return self.client.rbprm.rbprm.setAffordanceFilter (rom, affordances)
## Specifies a rom constraint for the planner.
# A configuration will be valid if and only if the considered rom collides
# with the environment.
#
# \param romFilter array of roms indicated by name, which determine the constraint.
def setFilter (self, romFilter):
return self.client.rbprm.rbprm.setFilter (romFilter)
## Export a computed path for blender
#
# \param problem the problem associated with the path computed for the robot
# \param stepsize increment along the path
# \param pathId id of the considered path
# \param filename name of the output file where to save the output
def exportPath (self, viewer, problem, pathId, stepsize, filename):
em.exportPath(viewer, self.client.basic.robot, problem, pathId, stepsize, filename)
## \name Degrees of freedom
# \{
## Get size of configuration
# \return size of configuration
def getConfigSize (self):
return self.client.basic.robot.getConfigSize ()
# Get size of velocity
# \return size of velocity
def getNumberDof (self):
return self.client.basic.robot.getNumberDof ()
## \}
## \name Joints
#\{
## Get joint names in the same order as in the configuration.
def getJointNames (self):
return self.client.basic.robot.getJointNames ()
## Get joint names in the same order as in the configuration.
def getAllJointNames (self):
return self.client.basic.robot.getAllJointNames ()
## Get joint position.
def getJointPosition (self, jointName):
return self.client.basic.robot.getJointPosition (jointName)
## Set static position of joint in its parent frame
def setJointPosition (self, jointName, position):
return self.client.basic.robot.setJointPosition (jointName, position)
## Get joint number degrees of freedom.
def getJointNumberDof (self, jointName):
return self.client.basic.robot.getJointNumberDof (jointName)
## Get joint number config size.
def getJointConfigSize (self, jointName):
return self.client.basic.robot.getJointConfigSize (jointName)
## set bounds for the joint
def setJointBounds (self, jointName, inJointBound):
return self.client.basic.robot.setJointBounds (jointName, inJointBound)
## Set bounds on the translation part of the freeflyer joint.
#
# Valid only if the robot has a freeflyer joint.
def setTranslationBounds (self, xmin, xmax, ymin, ymax, zmin, zmax):
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_x", [xmin, xmax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_y", [ymin, ymax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_z", [zmin, zmax])
## Get link position in joint frame
#
# Joints are oriented differently than in the urdf standard since
# rotation and uni-dimensional translation joints act around or along
# their x-axis. This method returns the position of the urdf link in
# world frame.
#
# \param jointName name of the joint
# \return position of the link in world frame.
def getLinkPosition (self, jointName):
return self.client.basic.robot.getLinkPosition (jointName)
## Get link name
#
# \param jointName name of the joint,
# \return name of the link.
def getLinkName (self, jointName):
return self.client.basic.robot.getLinkName (jointName)
## \}
## \name Access to current configuration
#\{
## Set current configuration of composite robot
#
# \param q configu
| googleapis/python-aiplatform | samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py | Python | apache-2.0 | 1,552 | 0.000644 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportModel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
from google.cloud import aiplatform_v1
def sample_export_model():
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ExportModelRequest(
name="name_value",
)
# Make the request
operation = client.export_model(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
| vbelakov/h2o | py/testdir_single_jvm/test_GLM2_many_cols_libsvm.py | Python | apache-2.0 | 2,663 | 0.009763 |
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri = r1.randint(0,1)
if ri!=0: # don't include 0's
colNumber = j + 1
rowData.append(str(colNumber) + ":" + str(ri))
ri = r1.randint(0,1)
# output class goes first
rowData.insert(0, str(ri))
rowDataCsv = " ".join(rowData) # already all strings
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=10)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_GLM2_many_cols_libsvm(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 3000, 'cA', 300),
(100, 5000, 'cB', 500),
# too slow!
# (100, 10000, 'cC', 800),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.svm'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random libsvm:", csvPathname
write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
kwargs = {'response': y, 'max_iter': 2, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| crunchmail/munch-core | src/munch/apps/campaigns/migrations/0002_permissions.py | Python | agpl-3.0 | 9,257 | 0.001296 |
# -*- coding: utf-8 -*-
from django.db import migrations
from django.core.management.sql import emit_post_migrate_signal
PERMISSIONS = {
'mailstatus': [
('add_mailstatus', 'Can add mail status'),
('change_mailstatus', 'Can change mail status'),
('change_mine_mailstatus', 'Can change_mine mail status'),
('change_organizations_mailstatus', 'Can change_organizations mail status'),
('delete_mailstatus', 'Can delete mail status'),
('delete_mine_mailstatus', 'Can delete_mine mail status'),
('delete_organizations_mailstatus', 'Can delete_organizations mail status'),
('view_mailstatus', 'Can view mail status'),
('view_mine_mailstatus', 'Can view_mine mail status'),
('view_organizations_mailstatus', 'Can view_organizations mail status'), ],
'mail': [
('add_mail', 'Can add mail'),
('change_mail', 'Can change mail'),
('change_mine_mail', 'Can change_mine mail'),
('change_organizations_mail', 'Can change_organizations mail'),
('delete_mail', 'Can delete mail'),
('delete_mine_mail', 'Can delete_mine mail'),
('delete_organizations_mail', 'Can delete_organizations mail'),
('view_mail', 'Can view mail'),
('view_mine_mail', 'Can view_mine mail'),
('view_organizations_mail', 'Can view_organizations mail'), ],
'message': [
('add_message', 'Can add Message'),
('change_message', 'Can change Message'),
('change_mine_message', 'Can change_mine Message'),
('change_organizations_message', 'Can change_organizations Message'),
('delete_message', 'Can delete Message'),
('delete_mine_message', 'Can delete_mine Message'),
('delete_organizations_message', 'Can delete_organizations Message'),
('previewsend_message', 'Can previewsend Message'),
('previewsend_mine_message', 'Can previewsend_mine Message'),
('previewsend_organizations_message', 'Can previewsend_organizations Message'),
('view_message', 'Can view Message'),
('view_mine_message', 'Can view_mine Message'),
('view_organizations_message', 'Can view_organizations Message'), ],
'messageattachment': [
('add_messageattachment', 'Can add message attachment'),
('change_messageattachment', 'Can change message attachment'),
('change_mine_messageattachment', 'Can change_mine message attachment'),
('change_organizations_messageattachment', 'Can change_organizations message attachment'),
('delete_messageattachment', 'Can delete message attachment'),
('delete_mine_messageattachment', 'Can delete_mine message attachment'),
('delete_organizations_messageattachment', 'Can delete_organizations message attachment'),
('view_messageattachment', 'Can view message attachment'),
('view_mine_messageattachment', 'Can view_mine message attachment'),
('view_organizations_messageattachment', 'Can view_organizations message attachment'), ],
'previewmail': [
('add_previewmail', 'Can add preview mail'),
('change_mine_previewmail', 'Can change_mine preview mail'),
('change_organizations_previewmail', 'Can change_organizations preview mail'),
('change_previewmail', 'Can change preview mail'),
('delete_mine_previewmail', 'Can delete_mine preview mail'),
('delete_organizations_previewmail', 'Can delete_organizations preview mail'),
('delete_previewmail', 'Can delete preview mail'),
('view_mine_previewmail', 'Can view_mine preview mail'),
('view_organizations_previewmail', 'Can view_organizations preview mail'),
('view_previewmail', 'Can view preview mail'), ],
}
GROUP_PERMISSIONS = {
'administrators': {
'mailstatus': [
'view_organizations_mailstatus', ],
'mail': [
'add_mail',
'change_organizations_mail',
'delete_organizations_mail',
'view_organizations_mail', ],
'message': [
'add_message',
'change_organizations_message',
'delete_organizations_message',
'previewsend_organizations_message',
'view_organizations_message', ],
'messageattachment': [
'add_messageattachment',
'change_organizations_messageattachment',
'delete_organizations_messageattachment',
'view_organizations_messageattachment', ],
'previewmail': [
'change_organizations_previewmail',
'delete_organizations_previewmail',
'view_organizations_previewmail', ],
},
'managers': {
'mailstatus': [
'view_organizations_mailstatus', ],
'mail': [
'add_mail',
'change_organizations_mail',
'delete_organizations_mail',
'view_organizations_mail', ],
'message': [
'add_message',
'change_organizations_message',
'delete_organizations_message',
'previewsend_organizations_message',
'view_organizations_message', ],
'messageattachment': [
'add_messageattachment',
'change_organizations_messageattachment',
'delete_organizations_messageattachment',
'view_organizations_messageattachment', ],
'previewmail': [
'change_organizations_previewmail',
'delete_organizations_previewmail',
'view_organizations_previewmail', ],
},
'users': {
'mailstatus': [
'view_mine_mailstatus', ],
'mail': [
'add_mail',
'change_mine_mail',
'delete_mine_mail',
'view_mine_mail', ],
'message': [
'add_message',
'change_mine_message',
'delete_mine_message',
'previewsend_mine_message',
'view_mine_message', ],
'messageattachment': [
'add_messageattachment',
'change_mine_messageattachment',
'delete_mine_messageattachment',
'view_mine_messageattachment', ],
'previewmail': [
'view_mine_previewmail', ],
},
'collaborators': {
'mailstatus': [
'view_organizations_mailstatus', ],
'mail': [
'add_mail',
'change_organizations_mail',
'delete_organizations_mail',
'view_organizations_mail', ],
'message': [
'add_message',
'change_organizations_message',
'delete_organizations_message',
'previewsend_organizations_message',
'view_organizations_message', ],
'messageattachment': [
'add_messageattachment',
'change_organizations_messageattachment',
'delete_organizations_messageattachment',
'view_organizations_messageattachment', ],
'previewmail': [
'change_organizations_previewmail',
'delete_organizations_previewmail',
'view_organizations_previewmail', ],
},
}
def update_content_types(apps, schema_editor):
db_alias = schema_editor.connection.alias
emit_post_migrate_signal(False, 'default', db_alias)
def load_permissions(apps, schema_editor):
Group = apps.get_model('auth', 'group')
Permission = apps.get_model('auth', 'permission')
ContentType = apps.get_model('contenttypes', 'contenttype')
# Delete previous permissions
for model in PERMISSIONS:
content_type = ContentType.objects.get(
app_label='campaigns', model=model)
Permission.objects.filter(content_type=content_type).delete()
# Load permissions
for model_name, permissions in PERMISSIONS.items():
for permission_codename, permission_name in permissions:
content_type = ContentType.objects.get(
app_label='campaigns', model=model_name)
if not Permission.objects.filter(
codename=permission_codename,
content_type=content_type).exists():
P
| avsaj/rtpmidi | rtpmidi/test/test_recovery_journal_chapters.py | Python | gpl-3.0 | 22,784 | 0.01207 |
from twisted.trial import unittest
from rtpmidi.engines.midi.recovery_journal_chapters import *
class TestNote(unittest.TestCase):
def setUp(self):
self.note = Note()
def test_note_on(self):
#simple
note_to_test = self.note.note_on(100, 90)
#Testing type
assert(type(note_to_test)==str), self.fail("Wrong type return")
#length test
assert(len(note_to_test)==2), \
self.fail("len of note On is higher than 2 octet")
#with all args
note_to_test = self.note.note_on(100, 90, 0, 1)
#length test
assert(len(note_to_test)==2), \
self.fail("len of note On is higher than 2 octet")
def test_parse_note_on(self):
#Simple
note_to_test = self.note.note_on(100, 90)
res_n = self.note.parse_note_on(note_to_test)
#Testing content
assert(res_n[1] == 100), self.fail("Note number is not respected")
assert(res_n[3] == 90), self.fail("Note velocity is not respected")
#With all args
note_to_test = self.note.note_on(100, 90, 0, 1)
res_n = self.note.parse_note_on(note_to_test)
#Testing content
assert(res_n[0] == 1), self.fail("S mark is not respected")
assert(res_n[1] == 100), self.fail("Note number is not respected")
assert(res_n[2] == 0), self.fail("Y mark not respected")
assert(res_n[3] == 90), self.fail("Note velocity is not respected")
def test_note_off(self):
#list of notes to test (note from the same midi channel)
plist = [[[128, 57, 100],1000], [[144, 4, 0],1000], \
[[144, 110, 0],1000], [[144, 112, 0],1000]]
#setting low and high like in create_chapter_n
high = 113 / 8
low = 4 / 8
#selecting note off like in create_chapter_n
note_off_list = [ plist[i][0][1] for i in range(len(plist))\
if (plist[i][0][0]&240 == 128) or \
(plist[i][0][2] == 0) ]
res = self.note.note_off(note_off_list, low, high)
#type test
assert(type(res)==str), self.fail("Wrong type return")
#checking size
size_wait = high - low + 1
assert(len(res) == size_wait), \
self.fail("Problem of size with note off creation")
def test_parse_note_off(self):
"""Test parse note off"""
#list of notes to test
#plist = [[[128, 120, 100],1000],[[145, 4, 0],1000],\
# [[145, 110, 0],1000], [[145, 112, 0],1000]]
#setting low and high like in create_chapter_n
note_off_test = [12, 57, 112, 114 ]
high = 115 / 8
low = 12 / 8
res = self.note.note_off(note_off_test, low, high)
#testing the result of parsing
res_parsed = self.note.parse_note_off(res, low, high)
#Testing type
assert(type(res_parsed)==list), self.fail("Wrong type returned")
#res_parsed.sort()
#Testing content
note_off_test = [12, 57, 112, 114 ]
for i in range(len(note_off_test)):
assert(res_parsed[i][1]==note_off_test[i]), \
self.fail("Problem getting the good value for note off encoded")
class TestChapterP(unittest.TestCase):
def setUp(self):
self.chapter_p = ChapterP()
#program change with msb and lsb
self.plist = [[[176, 0, 75], 1000], [[176, 32, 110], 1000], \
[[192, 110, 0], 1000]]
#program change without msb and lsb
self.plist_1 = [[[192, 110, 0], 1000]]
def test_update(self):
"""Testing chapter P creation from a list (with MSB and LSB)"""
self.chapter_p.update(self.plist)
chapter = self.chapter_p.content
#Testing len
assert(len(chapter)==3), \
self.fail("Size of chapter p is not 24 bits!!!")
#Testing type
assert(type(chapter)==str), self.fail("Problem of type")
#Testing content
size, chapter_parse, marker_s, marker_x, marker_b \
= self.chapter_p.parse(chapter)
#Testing content
assert(marker_s==1), \
self.fail("Problem getting right value of S")
assert(chapter_parse[0][1]==110), \
self.fail("Problem getting right value of PROGRAM")
assert(marker_b==1), \
self.fail("Problem getting right value of B")
assert(chapter_parse[1][2]==75), \
self.fail("Problem getting right value of MSB")
assert(marker_x==0), \
self.fail("Problem getting right value of X")
assert(chapter_parse[2][2]==110), \
self.fail("Problem getting right value of LSB")
def test_update_1(self):
"""Testing chapter P creation from a list (without MSB and LSB)"""
self.chapter_p.update(self.plist_1)
chapter = self.chapter_p.content
#Testing len
assert(len(chapter)==3), \
self.fail("Size of chapter p is not 24 bits!!!")
#Testing type
assert(type(chapter)==str), self.fail("Problem of type")
#Testing content
size, chapter_parse, marker_s, marker_x, marker_b \
= self.chapter_p.parse(chapter)
#Testing content
assert(marker_s==1), \
self.fail("Problem getting right value of S")
assert(chapter_parse[0][1]==110), \
self.fail("Problem getting right value of PROGRAM")
assert(marker_b==0), \
self.fail("Problem getting right value of B")
assert(marker_x==0), \
self.fail("Problem getting right value of X")
class TestChapterC(unittest.TestCase):
def setUp(self):
self.chapter_c = ChapterC()
self.plist = []
for i in range(127):
self.plist.append([[176, i, 100],6])
def test_header(self):
"""Test header creation ChapterC"""
#Creating header
header = self.chapter_c.header(10, 1)
#Testing type
assert(type(header)==str), self.fail("Wrong type returned")
#Testing length
assert(len(header)==1), self.fail("Wrong header size")
def test_parse_header(self):
"""Test header parsing ChapterC"""
#Creating header
header = self.chapter_c.header(10, 1)
#Parsing header
header_parsed = self.chapter_c.parse_header(header)
#Testing type
assert(type(header_parsed)==tuple), self.fail("Wrong size returned")
#Testing content
assert(header_parsed[0]==1), self.fail("Wrong marker_s value")
assert(header_parsed[1]==10), self.fail("Wrong length value")
def test_create_log_c(self):
"""Test create log C (individual component from ChapterC"""
res = self.chapter_c.create_log_c(0, 110, 1, 90)
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res)==2), self.fail("Wrong size returned")
def test_parse_log_c(self):
"""Test parsing individual component from chapterC"""
res = self.chapter_c.create_log_c(0, 110, 1, 90)
res_parsed = self.chapter_c.parse_log_c(res)
assert(res_parsed[0]==0), self.fail("Wrong value for marker_s")
assert(res_parsed[1]==110), self.fail("Wrong value for number")
assert(res_parsed[2]==1), self.fail("Wrong value for marker_a")
assert(res_parsed[3]==90), self.fail("Wrong value for value")
def test_update(self):
"""Testing chapter C creation"""
self.chapter_c.update(self.plist)
assert(type(self.chapter_c.content)==str), self.fail("Wrong type returned")
#length calc header == 1 + 2 * length
length_wait = 1 + 2 * len(self.plist)
assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")
def test_update_1(self):
self.plist.append([[176, 42, 100],6])
self.chapter_c.update(self.plist)
length_wait = 1 + 2 * 127
assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")
def test_parse(self):
"""Test chapter C parsing"""
self.chapter_c.update(self.p
| TemplateVoid/mapnik | tests/python_tests/image_filters_test.py | Python | lgpl-2.1 | 2,704 | 0.005547 |
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, run_all
from utilities import side_by_side_image
import os, mapnik
import re
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def replace_style(m, name, style):
m.remove_style(name)
m.append_style(name, style)
def test_append():
s = mapnik.Style()
eq_(s.image_filters,'')
s.image_filters = 'gray'
eq_(s.image_filters,'gray')
s.image_filters = 'sharpen'
eq_(s.image_filters,'sharpen')
if 'shape' in mapnik.DatasourceCache.plugin_names():
def test_style_level_image_filter():
m = mapnik.Map(256, 256)
mapnik.load_map(m, '../data/good_maps/style_level_image_filter.xml')
m.zoom_all()
successes = []
fails = []
for name in ("", "agg-stack-blur(2,2)", "blur",
"edge-detect", "emboss", "gray", "invert",
"sharpen", "sobel", "x-gradient", "y-gradient"):
if name == "":
filename = "none"
else:
filename = re.sub(r"[^-_a-z.0-9]", "", name)
# find_style returns a copy of the style object
style_markers = m.find_style("markers")
style_markers.image_filters = name
style_labels = m.find_style("labels")
style_labels.image_filters = name
# replace the original style with the modified one
replace_style(m, "markers", style_markers)
replace_style(m, "labels", style_labels)
im = mapnik.Image(m.width, m.height)
mapnik.render(m, im)
actual = '/tmp/mapnik-style-image-filter-' + filename + '.png'
expected = 'images/style-image-filter/' + filename + '.png'
im.save(actual,"png32")
if not os.path.exists(expected):
print 'generating expected test image: %s' % expected
im.save(expected,'png32')
expected_im = mapnik.Image.open(expected)
# compare them
if im.tostring('png32') == expected_im.tostring('png32'):
successes.append(name)
else:
fails.append('failed comparing actual (%s) and expected(%s)' % (actual,'tests/python_tests/'+ expected))
fail_im = side_by_side_image(expected_im, im)
fail_im.save('/tmp/mapnik-style-image-filter-' + filename + '.fail.png','png32')
eq_(len(fails), 0, '\n'+'\n'.join(fails))
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| frreiss/tensorflow-fred | tensorflow/python/ops/numpy_ops/np_array_ops.py | Python | apache-2.0 | 60,984 | 0.010675 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import math
import numbers
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.util import nest
newaxis = np_export.np_export_constant(__name__, 'newaxis', np.newaxis)
@np_utils.np_doc('empty')
def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
return zeros(shape, dtype)
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
return zeros_like(a, dtype)
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
dtype = (
np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type())
return array_ops.zeros(shape, dtype=dtype)
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
if dtype is None:
# We need to let np_utils.result_type decide the dtype, not tf.zeros_like
dtype = np_utils.result_type(a)
else:
# TF and numpy have different interpretations of Python types such as
# `float`, so we let `np_utils.result_type` decide.
dtype = np_utils.result_type(dtype)
dtype = dtypes.as_dtype(dtype) # Work around b/149877262
return array_ops.zeros_like(a, dtype)
@np_utils.np_doc('ones')
def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
if dtype:
dtype = np_utils.result_type(dtype)
return array_ops.ones(shape, dtype=dtype)
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
if dtype is None:
dtype = np_utils.result_type(a)
else:
dtype = np_utils.result_type(dtype)
return array_ops.ones_like(a, dtype)
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
if dtype:
dtype = np_utils.result_type(dtype)
if not M:
M = N
# Making sure N, M and k are `int`
N = int(N)
M = int(M)
k = int(k)
if k >= M or -k >= N:
# tf.linalg.diag will raise an error in this case
return zeros([N, M], dtype=dtype)
if k == 0:
return linalg_ops.eye(N, M, dtype=dtype)
# We need the precise length, otherwise tf.linalg.diag will raise an error
diag_len = min(N, M)
if k > 0:
if N >= M:
diag_len -= k
elif N + k > M:
diag_len = M - k
elif k <= 0:
if M >= N:
diag_len += k
elif M - k > N:
diag_len = N + k
diagonal_ = array_ops.ones([diag_len], dtype=dtype)
return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
@np_utils.np_doc('identity')
def identity(n, dtype=float):
return eye(N=n, M=n, dtype=dtype)
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
if not isinstance(shape, np_arrays.ndarray):
shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
shape = atleast_1d(shape)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, shape)
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a)
dtype = dtype or np_utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, array_ops.shape(a))
def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Main implementation of np.array()."""
result_t = val
if not isinstance(result_t, ops.Tensor):
if not dtype:
dtype = np_utils.result_type(result_t)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
# EagerTensor conversion complains about "mixed types" when converting
# tensors with no dtype information. This is because it infers types based
# on one selected item in the list. So e.g. when converting [2., 2j]
# to a tensor, it will select float32 as the inferred type and not be able
# to convert the list to a float 32 tensor.
# Since we have some information about the final dtype we care about, we
# supply that information so that convert_to_tensor will do best-effort
# conversion to that dtype first.
result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
result_t = math_ops.cast(result_t, dtype=dtype)
elif dtype:
result_t = math_ops.cast(result_t, dtype)
if copy:
result_t = array_ops.identity(result_t)
if ndmin == 0:
return result_t
ndims = array_ops.rank(result_t)
def true_fn():
old_shape = array_ops.shape(result_t)
new_shape = array_ops.concat(
[array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
return array_ops.reshape(result_t, new_shape)
result_t = np_utils.cond(
np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
return result_t
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Since Tensors are immutable, a copy is made only if val is placed on a
different device than the current one. Even if `copy` is False, a new Tensor
may need to be built to satisfy `dtype` and `ndim`. This is used only if `val`
is an ndarray or a Tensor.
""" # pylint:disable=g-docstring-missing-newline
if dtype:
dtype = np_utils.result_type(dtype)
return _array_internal(val, dtype, copy, ndmin)
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
if dtype:
dtype = np_utils.result_type(dtype)
if isinstance(a, np_arrays.ndarray) and (
not dtype or dtype == a.dtype.as_numpy_dtype):
return a
return array(a, dtype, copy=False)
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
return asarray(a, dtype)
@np_utils.np_doc('ascontiguousarray')
def a
| JarronL/pynrc | dev_utils/DMS/nircam2ssb.py | Python | mit | 16,914 | 0.018565 |
#! /usr/bin/env python
# This script converts the fits files from the NIRCam CRYO runs
# into ssb-conform fits files.
import sys, os,re,math
import optparse,scipy
from jwst import datamodels as models
from astropy.io import fits as pyfits
import numpy as np
class nircam2ssbclass:
def __init__(self):
self.version = 1.0
self.runID=None
self.outputmodel=None
self.data = None
self.hdr = None
#dictionary to translate between part number and detector/channel/module
self.part2mod = {}
self.modApartIDs = ['16989','17023','17024','17048','17158','C072','C067','C104','C073','C090',481,482,483,484,485]
self.modBpartIDs = ['16991','17005','17011','17047','17161','C045','C043','C101','C044','C084',486,487,488,489,490]
for i in range(len(self.modApartIDs)):
self.part2mod[self.modApartIDs[i]]={}
self.part2mod[self.modBpartIDs[i]]={}
self.part2mod[self.modApartIDs[i]]['module']='A'
self.part2mod[self.modBpartIDs[i]]['module']='B'
if i == 4 or i == 9 or i==14:
self.part2mod[self.modApartIDs[i]]['channel']='LONG'
self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCALONG'
self.part2mod[self.modBpartIDs[i]]['channel']='LONG'
self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCBLONG'
elif i < 4:
self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1)
self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1)
elif i > 4 and i < 9:
self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1-5)
self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1-5)
elif i > 9 and i < 14:
self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1-10)
self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1-10)
def add_options(self, parser=None, usage=None):
if parser == None:
parser = optparse.OptionParser(usage=usage, conflict_handler="resolve")
parser.add_option('-v', '--verbose', action="count", dest="verbose",default=0)
parser.add_option('-o','--outfilebasename' , default='auto' , type="string",
help='file basename of output file. If \'auto\', then basename is input filename with fits removed (default=%default)')
parser.add_option('-d','--outdir' , default=None , type="string",
help='if specified output directory (default=%default)')
parser.add_option('-s','--outsubdir' , default=None , type="string",
help='if specified gets added to output directory (default=%default)')
parser.add_option('--outsuffix' , default=None , type="string",
help='if specified: output suffix, otherwise _uncal.fits (default=%default)')
return(parser)
def copy_comments(self,filename):
incomments = self.hdr['COMMENT']
return
def copy_history(self,filename):
return
def mkoutfilebasename(self,filename, outfilebasename='auto',outdir=None,outsuffix=None,outsubdir=None):
if outfilebasename.lower() == 'auto':
outfilebasename = re.sub('\.fits$','',filename)
if outfilebasename==filename:
raise RuntimeError('BUG!!! %s=%s' % (outfilebasename,filename))
# new outdir?
if outdir!=None:
(d,f)=os.path.split(outfilebasename)
outfilebasename = os.path.join(outdir,f)
# append suffix?
if outsuffix!=None:
outfilebasename += '.'+outsuffix
# add subdir?
if outsubdir!=None:
(d,f)=os.path.split(outfilebasename)
outfilebasename = os.path.join(d,outsubdir,f)
# make sure output dir exists
dirname = os.path.dirname(outfilebasename)
if dirname!='' and not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.isdir(dirname):
raise RuntimeError('ERROR: Cannot create directory %s' % dirname)
return(outfilebasename)
def cryo_update_meta_detector(self,runID=None,filename=None,reffileflag=True):
if runID==None:
runID=self.runID
if runID=='TUCSONNEW':
self.outputmodel.meta.instrument.module = self.hdr['MODULE']
if self.hdr['DETECTOR']=='SW':
self.outputmodel.meta.instrument.channel = 'SHORT'
elif self.hdr['DETECTOR']=='LW':
self.outputmodel.meta.instrument.channel = 'LONG'
else:
raise RuntimeError('wrong DETECTOR=%s' % self.hdr['DETECTOR'])
self.outputmodel.meta.instrument.detector = 'NRC%s%d' % (self.outputmodel.meta.instrument.module,self.hdr['SCA'])
print('TEST!!!',self.outputmodel.meta.instrument.module,self.outputmodel.meta.instrument.channel,self.outputmodel.meta.instrument.detector)
elif runID=='TUCSON_PARTNUM':
idInFilename = filename[0:5]
self.outputmodel.meta.instrument.detector = self.part2mod[idInFilename]['detector']
self.outputmodel.meta.instrument.channel = self.part2mod[idInFilename]['channel']
self.outputmodel.meta.instrument.module = self.part2mod[idInFilename]['module']
elif runID=='CRYO2' or runID=='CRYO3':
detectorname=self.hdr['DETECTOR']
self.outputmodel.meta.instrument.filetype= 'UNCALIBRATED'
if re.search('^NRCA',detectorname):
self.outputmodel.meta.instrument.module = 'A'
elif re.search('^NRCB',detectorname):
self.outputmodel.meta.instrument.module = 'B'
else:
raise RuntimeError('wrong DETECTOR=%s' % detectorname)
if re.search('LONG$',detectorname):
self.outputmodel.meta.instrument.channel = 'LONG'
else:
self.outputmodel.meta.instrument.channel = 'SHORT'
self.outputmodel.meta.instrument.detector = self.hdr['DETECTOR']
print(self.outputmodel.meta.instrument.module)
print(self.outputmodel.meta.instrument.channel)
print(self.outputmodel.meta.instrument.detector)
elif runID=='CV2':
if 'TLDYNEID' in self.hdr:
detectorname=self.hdr['TLDYNEID']
elif 'SCA_ID' in self.hdr:
detectorname=self.hdr['SCA_ID']
else:
print('ERROR! could not get detector!!!')
sys.exit(0)
self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
# Below three lines added
if 'DESCRIP' in self.hdr:
print('DESCRIP already exist')
elif reffileflag:
self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
#if reffileflag:
# self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
# #self.outputmodel.meta.reffile.author = self.hdr['AUTHOR']
elif runID=='CV3':
if 'SCA_ID' in self.hdr:
detectorname=self.hdr['SCA_ID']
else:
print("ERROR! could not get detector!!!")
self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
| drdangersimon/EZgal | examples/convert/convert_basti.py | Python | gpl-2.0 | 3,979 | 0.047751 |
#!/usr/bin/python
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len( sys.argv ) < 2:
print '\nconvert basti SSP models to ez_gal fits format'
print 'Run in directory with SED models for one metallicity'
print 'Usage: convert_basti.py ez_gal.ascii\n'
sys.exit(2)
fileout = sys.argv[1]
# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join( fileout.split( '.' )[:-1] ).split( '_' )
# look for sfh
for (check,val) in zip( ['ssp','exp'], ['SSP','Exponential'] ):
if parts.count( check ):
sfh = val
sfh_index = parts.index( check )
break
# tau?
if sfh:
tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count( 'z' ):
met = parts[ parts.index( 'z' ) + 1 ]
# imf
for (check,val) in zip( ['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier'] ):
if parts.count( check ):
imf = val
break
if parts.count( 'n' ):
n = parts[ parts.index( 'n' ) + 1 ]
ae = False
if parts.count( 'ae' ): ae = True
# does the file with masses exist?
has_masses = False
mass_file = glob.glob( 'MLR*.txt' )
if len( mass_file ):
# read it in!
print 'Loading masses from %s' % mass_file[0]
data = utils.rascii( mass_file[0], silent=True )
masses = data[:,10:14].sum( axis=1 )
has_masses = True
files = glob.glob( 'SPEC*agb*' )
nages = len( files )
ages = []
for (i,file) in enumerate(files):
ls = []
this = []
# extract the age from the filename and convert to years
m = re.search( 't60*(\d+)$', file )
ages.append( int( m.group(1) )*1e6 )
# read in this file
fp = open( file, 'r' )
for line in fp:
parts = line.strip().split()
ls.append( float( parts[0].strip() ) )
this.append( float( parts[1].strip() ) )
if i == 0:
# if this is the first file, generate the data table
nls = len( ls )
seds = np.empty( (nls,nages) )
# convert to ergs/s/angstrom
seds[:,i] = np.array( this )/4.3607e-33/1e10
# convert to numpy
ages = np.array( ages )
ls = np.array( ls )*10.0
# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
# speed of light
c = utils.convert_length( utils.c, incoming='m', outgoing='a' )
# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape( (ls.size,1) )**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length( 10, incoming='pc', outgoing='cm' )**2.0)
# sort in frequency space
sinds = vs.argsort()
# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update( 'units', 'ergs/s/cm^2/Hz' )
primary_hdu.header.update( 'has_seds', True )
primary_hdu.header.update( 'nfilters', 0 )
primary_hdu.header.update( 'nzfs', 0 )
# store meta data
if sfh and met and imf:
primary_hdu.header.update( 'has_meta', True )
primary_hdu.header.update( 'model', 'BaSTI', comment='meta data' )
primary_hdu.header.update( 'met', met, comment='meta data' )
primary_hdu.header.update( 'imf', imf, comment='meta data' )
primary_hdu.header.update( 'sfh', sfh, comment='meta data' )
if sfh == 'Exponential': primary_hdu.header.update( 'tau', tau, comment='meta data' )
primary_hdu.header.update( 'n', n, comment='meta data' )
primary_hdu.header.update( 'ae', ae, comment='meta data' )
# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update( 'units', 'hertz' )
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses: cols.append( pyfits.Column(name='masses', array=masses, format='D', unit='m_sun') )
ages_hdu = pyfits.new_table(pyfits.ColDefs( cols ))
if has_masses: ages_hdu.header.update( 'has_mass', True )
# make the fits file in memory
hdulist = pyfits.HDUList( [primary_hdu,vs_hdu,ages_hdu] )
# and write it out
hdulist.writeto( fileout, clobber=True )
| ssarangi/numba | numba/cuda/tests/cudapy/test_sync.py | Python | bsd-2-clause | 3,582 | 0 |
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
from numba.config import ENABLE_CUDASIM
def useless_sync(ary):
i = cuda.grid(1)
cuda.syncthreads()
ary[i] = i
def simple_smem(ary):
N = 100
sm = cuda.shared.array(N, int32)
i = cuda.grid(1)
if i == 0:
for j in range(N):
sm[j] = j
cuda.syncthreads()
ary[i] = sm[i]
def coop_smem2d(ary):
i, j = cuda.grid(2)
sm = cuda.shared.array((10, 20), float32)
sm[i, j] = (i + 1) / (j + 1)
cuda.syncthreads()
ary[i, j] = sm[i, j]
def dyn_shared_memory(ary):
i = cuda.grid(1)
sm = cuda.shared.array(0, float32)
sm[i] = i * 2
cuda.syncthreads()
ary[i] = sm[i]
def use_threadfence(ary):
ary[0] += 123
cuda.threadfence()
ary[0] += 321
def use_threadfence_block(ary):
ary[0] += 123
cuda.threadfence_block()
ary[0] += 321
def use_threadfence_system(ary):
ary[0] += 123
cuda.threadfence_system()
ary[0] += 321
class TestCudaSync(unittest.TestCase):
def test_useless_sync(self):
compiled = cuda.jit("void(int32[::1])")(useless_sync)
nelem = 10
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp))
def test_simple_smem(self):
compiled = cuda.jit("void(int32[::1])")(simple_smem)
nelem = 100
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
def test_coop_smem2d(self):
compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
shape = 10, 20
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape](ary)
exp = np.empty_like(ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = (i + 1) / (j + 1)
self.assertTrue(np.allclose(ary, exp))
def test_dyn_shared_memory(self):
compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
shape = 50
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape, 0, ary.size * 4](ary)
self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))
def test_threadfence_codegen(self):
# Does not test runtime behavior, just the code generation.
compiled = cuda.jit("void(int32[:])")(use_threadfence)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.gl;", compiled.ptx)
def test_threadfence_block_codegen(self):
# Does not test runtime behavior, just the code generation.
compiled = cuda.jit("void(int32[:])")(use_threadfence_block)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.cta;", compiled.ptx)
def test_threadfence_system_codegen(self):
# Does not test runtime behavior, just the code generation.
compiled = cuda.jit("void(int32[:])")(use_threadfence_system)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.sys;", compiled.ptx)
if __name__ == '__main__':
unittest.main()
| thilbern/scikit-learn | sklearn/manifold/spectral_embedding_.py | Python | bsd-3-clause | 19,492 | 0.000103 |
"""Spectral Embedding"""
# Author: Gael Varoquaux <[email protected]>
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state
from ..utils.validation import check_array
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool values indicating the indexes of the nodes
that belong to the largest connected component of the given query
node
"""
connected_components_matrix = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components_matrix[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components_matrix.sum()
_, node_to_add = np.where(graph[connected_components_matrix] != 0)
connected_components_matrix[node_to_add] = True
if last_num_component >= connected_components_matrix.sum():
break
return connected_components_matrix
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def _set_diag(laplacian, value):
"""Set the diagonal of the l
|
aplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal set to value
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigen vectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigen vectors associated to the
smallest eigen values) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
However, care must be taken to always make the affinity matrix symmetric
so that the eigen vector decomposition works as expected.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif not eigen_solver in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
# Check that the matrix given is symmetric
if ((not sparse.isspmatrix(adjacency) and
not np.all((adjacency - adjacency.T) < 1e-10)) or
(sparse.isspmatrix(adjacency) and
not np.all((adjacency - adjacency.T).data < 1e-10))):
warnings.warn("Graph adjacency matrix should be symmetric. "
"Converted to be symmetric by average with its "
"transpose.")
adjacency = .5 * (adjacency + adjacency.T)
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_lapla
| natetrue/ReplicatorG | skein_engines/skeinforge-0006/skeinforge_tools/oozebane.py | Python | gpl-2.0 | 29,728 | 0.038415 |
"""
Oozebane is a script to turn off the extruder before the end of a thread and turn it on before the beginning.
The default 'Activate Oozebane' checkbox is on. When it is on, the functions described below will work; when it is off, the functions
will not be called.
The important value for the oozebane preferences is "Early Shutdown Distance" which is the distance before the end of the thread
that the extruder will be turned off, the default is 1.2. A higher distance means the extruder will turn off sooner and the end of the
line will be thinner.
When oozebane turns the extruder off, it slows the feedrate down in steps so that, in theory, the thread will remain at roughly the same
thickness until the end. The "Turn Off Steps" preference is the number of steps; the more steps there are, the smaller each feedrate
decrease and the larger the resulting gcode file. The default is three.
Oozebane also turns the extruder on just before the start of a thread. The "Early Startup
|
Maximum Distance" preference is the
maximum distance before the thread starts that the extruder will be turned off, the default is 1.2. The longer the extruder has been
off, the earlier the extruder will turn back on, the ratio is one minus one over e to the power of the distance the extruder has been
off over the "Early Startup Distance Constant". The 'First Early Startup Distance' preference is the distance before the first thread
starts that the extruder will be turned off. This value should be high because, according to Marius, the extruder takes a second or
two to extrude when starting for the first time; the default is twenty-five.
When oozebane reaches the point where the extruder would have turned on, it slows down so that the thread will be thick at that point.
Afterwards it speeds the extruder back up to operating speed. The speed up distance is the "After Startup Distance".
The "Minimum Distance for Early Startup" is the minimum distance that the extruder has to be off before the thread begins for the
early start up feature to activate. The "Minimum Distance for Early Shutdown" is the minimum distance that the extruder has to be
off after the thread end for the early shutdown feature to activate.
After oozebane turns the extruder on, it slows the feedrate down where the thread starts. Then it speeds it up in steps so in theory
the thread will remain at roughly the same thickness from the beginning.
To run oozebane, in a shell in the folder which oozebane is in, type:
> python oozebane.py
The following examples oozebane the files Screw Holder Bottom.gcode & Screw Holder Bottom.stl. The examples are run in a terminal in the
folder which contains Screw Holder Bottom.gcode, Screw Holder Bottom.stl and oozebane.py. The oozebane function will oozebane if the
'Activate Oozebane' checkbox is on. The functions writeOutput and getOozebaneChainGcode check to see if the text has been
oozebaned, if not they call the getWipeChainGcode in wipe.py to nozzle wipe the text; once they have the nozzle
wiped text, then they oozebane.
> python oozebane.py
This brings up the dialog, after clicking 'Oozebane', the following is printed:
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
> python oozebane.py Screw Holder Bottom.stl
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import oozebane
>>> oozebane.main()
This brings up the oozebane dialog.
>>> oozebane.writeOutput()
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
>>> oozebane.getOozebaneGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
>>> oozebane.getOozebaneChainGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from skeinforge_tools.skeinforge_utilities import euclidean
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools import analyze
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import wipe
from skeinforge_tools import polyfile
import cStringIO
import math
import sys
import time
__author__ = "Enrique Perez ([email protected])"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getOozebaneChainGcode( fileName, gcodeText, oozebanePreferences = None ):
"Oozebane a gcode linear move text. Chain oozebane the gcode if it is not already oozebaned."
gcodeText = gcodec.getGcodeFileText( fileName, gcodeText )
if not gcodec.isProcedureDone( gcodeText, 'wipe' ):
gcodeText = wipe.getWipeChainGcode( fileName, gcodeText )
return getOozebaneGcode( gcodeText, oozebanePreferences )
def getOozebaneGcode( gcodeText, oozebanePreferences = None ):
"Oozebane a gcode linear move text."
if gcodeText == '':
return ''
if gcodec.isProcedureDone( gcodeText, 'oozebane' ):
return gcodeText
if oozebanePreferences is None:
oozebanePreferences = OozebanePreferences()
preferences.readPreferences( oozebanePreferences )
if not oozebanePreferences.activateOozebane.value:
return gcodeText
skein = OozebaneSkein()
skein.parseGcode( gcodeText, oozebanePreferences )
return skein.output.getvalue()
def writeOutput( fileName = '' ):
"Oozebane a gcode linear move file. Chain oozebane the gcode if it is not already oozebaned. If no fileName is specified, oozebane the first unmodified gcode file in this folder."
if fileName == '':
unmodified = interpret.getGNUTranslatorFilesUnmodified()
if len( unmodified ) == 0:
print( "There are no unmodified gcode files in this folder." )
return
fileName = unmodified[ 0 ]
oozebanePreferences = OozebanePreferences()
preferences.readPreferences( oozebanePreferences )
startTime = time.time()
print( 'File ' + gcodec.getSummarizedFilename( fileName ) + ' is being chain oozebaned.' )
suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_oozebane.gcode'
oozebaneGcode = getOozebaneChainGcode( fileName, '', oozebanePreferences )
if oozebaneGcode == '':
return
gcodec.writeFileText( suffixFilename, oozebaneGcode )
print( 'The oozebaned file is saved as ' + gcodec.getSummarizedFilename( suffixFilename ) )
analyze.writeOutput( suffixFilename, oozebaneGcode )
print( 'It took ' + str( int( round( time.time() - startTime ) ) ) + ' seconds to oozebane the file.' )
class OozebanePreferences:
"A class to handle the oozebane preferences."
def __init__( self ):
"Set the default preferences, execute title & preferences fileName."
#Set the default preferences.
self.archive = []
self.activateOozebane = preferences.BooleanPreference().getFromValue( 'Activate Oozebane', False )
self.archive.append( self.activateOozebane )
self.afterStartupDistance = preferences.FloatPreference().getFromValue( 'After Startup Distance (millimeters):', 1.2 )
self.archive.append( self.afterStartupDistance )
self.earlyShutdownDistance = preferences.FloatPreference().getFromValue( 'Early Shutdown Distance (millimeters):', 1.2 )
self.archive.append( self.earlyShutdownDistance )
self.earlyStartupDistanceConstant = preferences.FloatPreference().getFromValue( 'Early Startup Distance Constant (millimeters):', 20.0 )
self.archive.append( self.earlyStartupDistanceConstant )
self.earlyStartupMaximumDistance = preferences.FloatPreference().getFromValue( 'Early Startup Maximum Distance (millimeters):', 1.2 )
self.archive.append( self.earlyStartupMaximumDistance )
self.firstEarlyStartupDistance = preferences.FloatPreference().getFromValue( 'First Early Startup Distance (millimeters):', 25.0 )
se
|
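A worked sketch of the early-startup ratio described in prose above ("one minus one over e to the power of the distance the extruder has been off over the Early Startup Distance Constant"). This is illustrative, not the verbatim skeinforge code; in particular, scaling the ratio by the Early Startup Maximum Distance is an assumption about how the two preferences combine.
import math

def early_startup_distance(distance_off, distance_constant=20.0, maximum_distance=1.2):
    # ratio = 1 - 1 / e**(distance_off / distance_constant)
    ratio = 1.0 - math.exp(-distance_off / distance_constant)
    # The longer the extruder has been off, the closer the ratio gets to 1.0,
    # so the turn-on point moves earlier, up to the assumed maximum distance.
    return ratio * maximum_distance

# Example: after 20 mm off with the default constant of 20.0 the ratio is about
# 0.63, so the extruder would turn on roughly 0.76 mm before the thread starts.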
JulyKikuAkita/PythonPrac
|
cs15211/BinaryTreeTilt.py
|
Python
|
apache-2.0
| 2,991
| 0.003009
|
__source__ = 'https://leetcode.com/problems/binary-tree-tilt/'
# Time: O(n)
# Space: O(n)
#
# Description: 563. Binary Tree Tilt
#
# Given a binary tree, return the tilt of the whole tree.
#
# The tilt of a tree node is defined as the absolute difference between the sum of all left subtree node values
# and the sum of all right subtree node values. Null node has tilt 0.
#
# The tilt of the whole tree is defined as the sum of all nodes' tilt.
#
# Example:
# Input:
# 1
# / \
# 2 3
# Output: 1
# Explanation:
# Tilt of node 2 : 0
# Tilt of node 3 : 0
# Tilt of node 1 : |2-3| = 1
# Tilt of binary tree : 0 + 0 + 1 = 1
# Note:
#
# The sum of node values in any subtree won't exceed the range of 32-bit integer.
# All the tilt values won't exceed the range of 32-bit integer.
# Hide Company Tags Indeed
# Hide Tags Tree
# Explanation
# If we had each node's subtree sum,
# our answer would look like this pseudocode:
# for each node: ans += abs(node.left.subtreesum - node.right.subtreesum).
# Let _sum(node) be the node's subtree sum.
# We can find it by adding the subtree sum of the left child,
# plus the subtree sum of the right child, plus the node's value.
# While we are visiting the node (each node is visited exactly once),
# we might as well do the ans += abs(left_sum - right_sum) part.
import unittest
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 48ms 97.16%
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.ans = 0
def _sum(node):
if not node:
return 0
left, right = _sum(node.left), _sum(node.right)
self.ans += abs(left - right)
return node.val + left + right
_sum(root)
return self.ans
# your function here
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/binary-tree-tilt/solution/
#
Time complexity : O(n), where n is the number of nodes. Each node is visited once.
Space complexity : O(n). In the worst case, when the tree is skewed, the depth of the tree will be n.
In the average case the depth will be log n.
post-order traversal
/**
* Definition for
|
a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
|
*/
# 3ms 100%
class Solution {
int res = 0;
public int findTilt(TreeNode root) {
postOrder(root);
return res;
}
private int postOrder(TreeNode root) {
if (root == null) return 0;
int left = postOrder(root.left);
int right = postOrder(root.right);
res += Math.abs(left - right);
return left + right + root.val;
}
}
'''
|
rjw57/rbc
|
rbc/parser.py
|
Python
|
mit
| 24,773
| 0.000121
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS
__version__ = (2015, 12, 26, 22, 15, 59, 5)
__all__ = [
'BParser',
'BSemantics',
'main'
]
class BParser(Parser):
def __init__(self,
whitespace=None,
nameguard=None,
comments_re='/\\*((?:[^\\*]|\\*[^/]|\\n)*?)\\*+/',
eol_comments_re=None,
ignorecase=None,
left_recursion=False,
**kwargs):
super(BParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
**kwargs
)
@graken()
def _program_(self):
def block1():
self._definition_()
self._cut()
self._closure(block1)
self.ast['@'] = self.last_node
self._check_eof()
@graken()
def _definition_(self):
with self._choice():
with self._option():
self._simpledef_()
with self._option():
self._vectordef_()
with self._option():
self._functiondef_()
self._error('no available options')
@graken()
def _simpledef_(self):
self._name_()
self.ast['name'] = self.last_node
with self._optional():
self._ival_()
self.ast['init'] = self.last_node
self._token(';')
self.ast._define(
['name', 'init'],
[]
)
@graken()
def _vectordef_(self):
self._name_()
self.ast['name'] = self.last_node
self._token('[')
with self._optional():
self._constantexpr_()
self.ast['maxidx'] = self.last_node
self._token(']')
with self._optional():
self._ivallist_()
self.ast['ivals'] = self.last_node
self._token(';')
self.ast._define(
['name', 'maxidx', 'ivals'],
[]
)
@graken()
def _ivallist_(self):
self._ival_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._ival_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
@graken()
def _ival_(self):
with self._choice():
with self._option():
self._numericexpr_()
with self._option():
self._characterexpr_()
with self._option():
self._stringexpr_()
self._error('no available options')
@graken()
def _functiondef_(self):
self._name_()
self.ast['name'] = self.last_node
self._token('(')
with self._optional():
self._namelist_()
self.ast['args'] = self.last_node
self._token(')')
self._cut()
self._statement_()
self.ast['body'] = self.last_node
self.ast._define(
['name', 'args', 'body'],
[]
)
@graken()
def _statement_(self):
with self._choice():
with self._option():
self._labelstatement_()
with self._option():
self._gotostatement_()
with self._option():
self._switchstatement_()
with self._option():
self._casestatement_()
with self._option():
self._breakstatement_()
with self._option():
self._autostatement_()
with self._option():
self._extrnstatement_()
with self._option():
self._compoundstatement_()
with self._option():
self._ifstatement_()
with self._option():
self._whilestatement_()
with self._option():
self._returnstatement_()
with self._option():
self._exprstatement_()
with self._option():
self._nullstatement_()
self._error('no available options')
@graken()
def _labelstatement_(self):
with self._ifnot():
with self._group():
self._token('default')
self._name_()
self.ast['label'] = self.last_node
self._token(':')
self._statement_()
self.ast['statement'] = self.last_node
self.ast._define(
['label', 'statement'],
[]
)
@graken()
def _gotostatement_(self):
self._token('goto')
self._cut()
self._name_()
self.ast['label'] = self.last_node
self._token(';')
self.ast._define(
['label'],
[]
)
@graken()
def _switchstatement_(self):
self._token('switch')
self._cut()
self._expr_()
self.ast['rvalue'] = self.last_node
self._cut()
self._statement_()
self.ast['body'] = self.last_node
self.ast._define(
['rvalue', 'body'],
[]
)
@graken()
def _casestatement_(self):
with self._group():
with self._choice():
with self._option():
with self._group():
self._token('case')
self._constantexpr_()
self.ast['cond'] = self.last_node
with self._option():
self._token('default')
self._error('expecting one of: default')
self._cut()
self._token(':')
self._statement_()
self.ast['then'] = self.last_node
self.ast._define(
['cond', 'then'],
[]
)
@graken()
def _breakstatement_(self):
self._token('break')
self._token(';')
@graken()
def _autostatement_(self):
self._token('auto')
self._cut()
self._autovar_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._autovar_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
self._token(';')
@graken()
def _autovar_(self):
self._name_()
self.ast['name'] = self.last_node
with self._optional():
self._token('[')
self._constantexpr_()
self.ast['maxidx'] = self.last_node
self._token(']')
|
self.ast._define(
['name', 'maxidx'],
[]
)
@graken()
def _extrnstatement_(self):
self._token('extrn')
self._cut()
self._namelist_()
self.ast['@'] = self.last_node
self._token(';')
@graken()
def _compoundstatement_(self):
self._token('{')
self._cut()
def block1():
self._st
|
atement_()
self._cut()
self._closure(block1)
self.ast['@'] = self.last_node
self._token('}')
@graken()
def _ifstatement_(self):
self._token('if')
self._cut()
self._token('(')
self._expr_()
self.ast['cond'] = self.last_node
self._token(')')
self._statement_()
self.ast['then'] = self.last_node
with self._optional():
self._token('else')
self._statement_()
self.ast['otherwise'] = self.last_node
self.ast._define(
['cond', 'then', 'otherwise'],
[]
)
@graken()
def _whilestatement_(self):
self._token('while')
self._cut()
self._token('(')
self._expr_()
self.a
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/vault_secret_group_py3.py
|
Python
|
mit
| 1,468
| 0.001362
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VaultSecretGroup(Model):
"""Describes a set of certificates which are all in the same Key Vault.
:param source_vault: The relative URL of the Key Vault containing all of
the certificates in VaultCertificates.
:type source_vault: ~azure.mgmt.compute.v2016_03_30.models.SubResource
:param vault_certificates: The list of key vault references in SourceVault
which contain certificates.
:type vault_certificates:
list[~azure.mgmt.compute.v2016_03_30.models.VaultCertificate]
"""
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
'vault_cert
|
ificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
}
def __init__(self, *, source_vault=None, vault_certificates=None, **kwargs) -> None:
super(VaultSecretGroup, self).__init__(**kwargs)
|
self.source_vault = source_vault
self.vault_certificates = vault_certificates
|
timokoola/mjuna
|
mjuna/mjuna/wsgi.py
|
Python
|
apache-2.0
| 385
| 0.002597
|
"""
WSGI config for mjuna project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.dj
|
angoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mjuna.settings")
from django.core.wsgi import get_wsgi_application
application = get_w
|
sgi_application()
|
mdsol/flask-mauth
|
tests/test_authenticators.py
|
Python
|
mit
| 42,880
| 0.002705
|
# -*- coding: utf-8 -*-
import datetime
import json
import time
from unittest import TestCase
import requests_mauth
import mock
from mock import patch
from six import assertRegex
from flask_mauth.mauth.authenticators import LocalAuthenticator, AbstractMAuthAuthenticator, RemoteAuthenticator, \
mws_attr
from flask_mauth import settings
from flask_mauth.exceptions import InauthenticError, UnableToAuthenticateError
from tests.common import load_key
class _TestAuthenticator(object):
"""
Pseudo-abstract base class for the Test Cases
"""
def test_authentication_present_happy_path(self):
"""With the header present, we are ok"""
request = mock.Mock(headers={settings.x_mws_authentication: 'MWS 1234'})
self.assertTrue(self.authenticator.authentication_present(request))
def test_authentication_present_missing(self):
"""With the header missing we throw an exception"""
request = mock.Mock(headers={})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.authentication_present(request)
self.assertEqual(str(exc.exception),
"Authentication Failed. No mAuth signature present; X-MWS-Authentication header is blank.",
)
def test_authentication_present_blank(self):
"""With the header present but blank we throw an exception"""
request = mock.Mock(headers={settings.x_mws_authentication: ''})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.authentication_present(request)
self.assertEqual(str(exc.exception),
"Authentication Failed. No mAuth signature present; X-MWS-Authentication header is blank."
)
def test_time_valid_happy_path(self):
"""With an ok time, we are ok"""
now = int(time.time())
request = mock.Mock(headers={settings.x_mws_time: '%s' % now})
self.assertTrue(self.authenticator.time_valid(request=request))
def test_time_valid_missing_header(self):
"""With a missing header, we get an exception"""
request = mock.Mock(headers={})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.time_valid(request=request)
self.assertEqual(str(exc.exception),
"Time verification failed for Mock. No x-mws-time present.",
)
def test_time_valid_invalid_header(self):
"""With an invalid header, we get an exception"""
request = mock.Mock(headers={settings.x_mws_time: 'apple'})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.time_valid(request=request)
self.assertEqual(str(exc.exception),
"Time verification failed for Mock. X-MWS-Time Header format incorrect.",
|
)
def test_time_valid_empty_header(self):
"""With
|
an empty header, we get an exception"""
request = mock.Mock(headers={settings.x_mws_time: ''})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.time_valid(request=request)
self.assertEqual(str(exc.exception),
"Time verification failed for Mock. No x-mws-time present.",
)
def test_time_valid_expired_header(self):
"""With an empty header, we get an exception"""
now = int(time.time()) - (AbstractMAuthAuthenticator.ALLOWED_DRIFT_SECONDS * 100 + 1)
request = mock.Mock(headers={settings.x_mws_time: str(now)})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.time_valid(request=request)
assertRegex(self,
str(exc.exception),
r"Time verification failed for Mock. %s "
"not within %ss of [0-9\-]{10} [0-9\:]{7}" % (datetime.datetime.fromtimestamp(now),
AbstractMAuthAuthenticator.ALLOWED_DRIFT_SECONDS),
)
def test_token_valid_happy_path(self):
"""With an expected header, all good"""
request = mock.Mock(headers={settings.x_mws_authentication: 'MWS some-uuid:some hash'})
self.assertTrue(self.authenticator.token_valid(request))
def test_token_valid_invalid_token(self):
"""Invalid token leads to exception"""
request = mock.Mock(headers={settings.x_mws_authentication: 'RWS some-uuid:some hash'})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.token_valid(request)
self.assertEqual(str(exc.exception),
"Token verification failed for Mock. Expected MWS; token was RWS"
)
def test_token_valid_bad_format(self):
"""Badly formatted signature leads to exception"""
request = mock.Mock(headers={settings.x_mws_authentication: 'MWS'})
with self.assertRaises(InauthenticError) as exc:
self.authenticator.token_valid(request)
self.assertEqual(str(exc.exception),
"Token verification failed for Mock. Misformatted Signature.")
def test_log_mauth_service_response_error(self):
"""We log an error for a service error"""
request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
path="/mauth/v2/mauth.json?open=1",
method="GET",
data="")
response = mock.Mock(status_code=500, data="Upstream Resource unavailable")
with self.assertRaises(UnableToAuthenticateError) as exc:
self.authenticator.log_mauth_service_response_error(request, response)
error = self.logger.error
error.assert_called_with('MAuth Service: App UUID: {app_uuid}; URL: {url}; '
'MAuth service responded with {status}: {body}'.format(app_uuid=self.app_uuid,
url="/mauth/v2/mauth"
".json?open=1",
status=500,
body="Upstream Resource "
"unavailable"))
def test_log_inauthentic_error(self):
"""We log an error for an InAuthentic error"""
request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
path="/mauth/v2/mauth.json?open=1",
method="GET",
data="")
self.authenticator.log_authentication_error(request, message="X-MWS-Time too old")
error = self.logger.error
error.assert_called_with('MAuth Authentication Error: App UUID: {app_uuid}; URL: {url}; '
'Error: {message}'.format(app_uuid=self.app_uuid,
url="/mauth/v2/mauth"
".json?open=1",
message="X-MWS-Time too old"))
def test_log_inauthentic_error_missing_app_uuid(self):
"""We log an error for an InAuthentic error"""
request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
path="/mauth/v2/mauth.json?open=1",
method="GET",
data="")
with mock.patch("flask_mauth.mauth.authenticators.mws_attr") as matt:
|
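A compact sketch of the header checks the tests above exercise: presence of X-MWS-Authentication, the "MWS app_uuid:signature" token format, and an X-MWS-Time within an allowed drift of the current time. It is not the flask_mauth implementation, and the 300-second drift default below is an assumed placeholder rather than the library's actual ALLOWED_DRIFT_SECONDS.
import time

def check_mauth_headers(headers, allowed_drift_seconds=300):
    auth = headers.get('X-MWS-Authentication', '')
    if not auth:
        raise ValueError('No mAuth signature present; X-MWS-Authentication header is blank.')
    token, _, rest = auth.partition(' ')
    if token != 'MWS' or ':' not in rest:
        raise ValueError('Expected an MWS token with an app_uuid:signature payload.')
    app_uuid, signature = rest.split(':', 1)
    mws_time = headers.get('X-MWS-Time', '')
    if not mws_time:
        raise ValueError('No x-mws-time present.')
    try:
        signed_at = int(mws_time)
    except ValueError:
        raise ValueError('X-MWS-Time header format incorrect.')
    if abs(int(time.time()) - signed_at) > allowed_drift_seconds:
        raise ValueError('X-MWS-Time is outside the allowed drift window.')
    return app_uuid, signature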
mrunge/openstack_horizon
|
openstack_horizon/dashboards/identity/groups/tables.py
|
Python
|
apache-2.0
| 8,157
| 0
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon.dashboards.identity.groups import constants
LOG = logging.getLogger(__name__)
LOGOUT_URL = 'logout'
STATUS_CHOICES = (
("true", True),
("false", False)
)
class CreateGroupLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Group")
url = constants.GROUPS_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class EditGroupLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Group")
url = constants.GROUPS_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class DeleteGroupsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Group",
u"Delete Groups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Group",
u"Deleted Groups",
count
)
name = "delete"
policy_rules = (("identity", "identity:delete_group"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
def delete(self, request, obj_id):
LOG.info('Deleting group "%s".' % obj_id)
api.keystone.group_delete(request, obj_id)
class ManageUsersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = constants.GROUPS_MANAGE_URL
icon = "pencil"
policy_rules = (("identity", "identity:get_group"),
("identity", "identity:list_users"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
class GroupFilterAction(tables.FilterAction):
def filter(self, table, groups, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(group):
if q in group.name.lower():
return True
return False
return filter(comp, groups)
class GroupsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
description = tables.Column(lambda obj
|
: getattr(obj, 'description', None),
verbose_name=_('Description'))
id = tables.Column('id', verbose_name=_('Group ID'))
class Meta:
name = "groups"
|
verbose_name = _("Groups")
row_actions = (ManageUsersLink, EditGroupLink, DeleteGroupsAction)
table_actions = (GroupFilterAction, CreateGroupLink,
DeleteGroupsAction)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()
or q in getattr(user, 'email', '').lower()]
class RemoveMembers(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Remove User",
u"Remove Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Removed User",
u"Removed Users",
count
)
name = "removeGroupMember"
policy_rules = (("identity", "identity:remove_user_from_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Removing user %s from group %s.' % (user_obj.id,
group_id))
api.keystone.remove_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when removing current user
# Keystone revokes the token of the user removed from the group.
# If the logon user was removed, redirect the user to logout.
class AddMembersLink(tables.LinkAction):
name = "add_user_link"
verbose_name = _("Add...")
classes = ("ajax-modal",)
icon = "plus"
url = constants.GROUPS_ADD_MEMBER_URL
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def get_link_url(self, datum=None):
return reverse(self.url, kwargs=self.table.kwargs)
class UsersTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('User Name'))
email = tables.Column('email', verbose_name=_('Email'),
filters=[defaultfilters.escape,
defaultfilters.urlize])
id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
empty_value="False")
class GroupMembersTable(UsersTable):
class Meta:
name = "group_members"
verbose_name = _("Group Members")
table_actions = (UserFilterAction, AddMembersLink, RemoveMembers)
class AddMembers(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Add User",
u"Add Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Added User",
u"Added Users",
count
)
name = "addMember"
icon = "plus"
requires_input = True
success_url = constants.GROUPS_MANAGE_URL
policy_rules = (("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Adding user %s to group %s.' % (user_obj.id,
group_id))
api.keystone.add_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when adding current user
# Keystone revokes the token of the user added to the group.
# If the logon user was added, redirect the user to logout.
def get_success_url(self, request=None):
group_id = self.table.kwargs.get('group_id', None)
return reverse(self.success_url, args=[group_id])
class GroupNonMembersTable(UsersTable):
class Meta:
name = "group_non_members"
verbose_name = _("Non-Members")
table_actions = (UserFilterAction, AddMembers)
|
matthias-k/pysaliency
|
tests/test_numba_utils.py
|
Python
|
mit
| 762
| 0.001312
|
from hypothesis import given, strategies as st
import numpy as np
from pysaliency.numba_utils import auc_for_one_positive
from pysaliency.roc import general_roc
def test_auc_for_one_positive
|
():
assert auc_for_one_positive(1, [0, 2]) == 0.5
assert auc_for_one_positive(1, [1]) == 0.5
assert auc_for_one_positive(3, [0]) == 1.0
assert auc_for_one_positive(0, [3]) == 0.0
@given(st.lists(
|
st.floats(allow_nan=False, allow_infinity=False), min_size=1), st.floats(allow_nan=False, allow_infinity=False))
def test_simple_auc_hypothesis(negatives, positive):
old_auc, _, _ = general_roc(np.array([positive]), np.array(negatives))
new_auc = auc_for_one_positive(positive, np.array(negatives))
np.testing.assert_allclose(old_auc, new_auc)
|
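For reference, a plain-numpy version of the quantity the property-based test above compares against general_roc: with a single positive, the AUC is the fraction of negatives scored strictly below it plus half of the ties. This is an illustrative reimplementation, not the numba-optimised pysaliency.numba_utils.auc_for_one_positive.
import numpy as np

def auc_one_positive_reference(positive, negatives):
    negatives = np.asarray(negatives, dtype=float)
    below = np.sum(negatives < positive)    # negatives ranked strictly below the positive
    ties = np.sum(negatives == positive)    # ties contribute half a rank each
    return (below + 0.5 * ties) / len(negatives)

# Matches the assertions above, e.g. auc_one_positive_reference(1, [0, 2]) == 0.5
# and auc_one_positive_reference(1, [1]) == 0.5.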
mogoweb/chromium-crosswalk
|
tools/licenses.py
|
Python
|
bsd-3-clause
| 16,956
| 0.002359
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for checking and processing licensing information in third_party
directories.
Usage: licenses.py <command>
Commands:
scan
|
scan third_party directories, verifying that we have licensing info
credits generate about:credits on stdout
(You can also import this as a module.)
"""
import cgi
import os
import sys
# Paths from the root of the tree to directories to skip.
PRUNE_PATHS = set([
# Same module occurs
|
in crypto/third_party/nss and net/third_party/nss, so
# skip this one.
os.path.join('third_party','nss'),
# Placeholder directory only, not third-party code.
os.path.join('third_party','adobe'),
# Build files only, not third-party code.
os.path.join('third_party','widevine'),
# Only binaries, used during development.
os.path.join('third_party','valgrind'),
# Used for development and test, not in the shipping product.
os.path.join('third_party','bison'),
os.path.join('third_party','cygwin'),
os.path.join('third_party','gnu_binutils'),
os.path.join('third_party','gold'),
os.path.join('third_party','gperf'),
os.path.join('third_party','lighttpd'),
os.path.join('third_party','llvm'),
os.path.join('third_party','llvm-build'),
os.path.join('third_party','mingw-w64'),
os.path.join('third_party','nacl_sdk_binaries'),
os.path.join('third_party','pefile'),
os.path.join('third_party','perl'),
os.path.join('third_party','psyco_win32'),
os.path.join('third_party','pylib'),
os.path.join('third_party','python_26'),
os.path.join('third_party','pywebsocket'),
os.path.join('third_party','syzygy'),
os.path.join('tools','gn'),
# Chromium code in third_party.
os.path.join('third_party','fuzzymatch'),
os.path.join('tools', 'swarm_client'),
# Stuff pulled in from chrome-internal for official builds/tools.
os.path.join('third_party', 'clear_cache'),
os.path.join('third_party', 'gnu'),
os.path.join('third_party', 'googlemac'),
os.path.join('third_party', 'pcre'),
os.path.join('third_party', 'psutils'),
os.path.join('third_party', 'sawbuck'),
# Redistribution does not require attribution in documentation.
os.path.join('third_party','directxsdk'),
os.path.join('third_party','platformsdk_win2008_6_1'),
os.path.join('third_party','platformsdk_win7'),
])
# Directories we don't scan through.
VCS_METADATA_DIRS = ('.svn', '.git')
PRUNE_DIRS = (VCS_METADATA_DIRS +
('out', 'Debug', 'Release', # build files
'layout_tests')) # lots of subdirs
ADDITIONAL_PATHS = (
os.path.join('breakpad'),
os.path.join('chrome', 'common', 'extensions', 'docs', 'examples'),
os.path.join('chrome', 'test', 'chromeos', 'autotest'),
os.path.join('chrome', 'test', 'data'),
os.path.join('native_client'),
os.path.join('native_client_sdk'),
os.path.join('net', 'tools', 'spdyshark'),
os.path.join('ppapi'),
os.path.join('sandbox', 'linux', 'seccomp-legacy'),
os.path.join('sdch', 'open-vcdiff'),
os.path.join('testing', 'gmock'),
os.path.join('testing', 'gtest'),
# The directory with the word list for Chinese and Japanese segmentation
# with different license terms than ICU.
os.path.join('third_party','icu','source','data','brkitr'),
os.path.join('tools', 'grit'),
os.path.join('tools', 'gyp'),
os.path.join('tools', 'page_cycler', 'acid3'),
os.path.join('url', 'third_party', 'mozilla'),
os.path.join('v8'),
# Fake directory so we can include the strongtalk license.
os.path.join('v8', 'strongtalk'),
)
# Directories where we check out directly from upstream, and therefore
# can't provide a README.chromium. Please prefer a README.chromium
# wherever possible.
SPECIAL_CASES = {
os.path.join('native_client'): {
"Name": "native client",
"URL": "http://code.google.com/p/nativeclient",
"License": "BSD",
},
os.path.join('sandbox', 'linux', 'seccomp-legacy'): {
"Name": "seccompsandbox",
"URL": "http://code.google.com/p/seccompsandbox",
"License": "BSD",
},
os.path.join('sdch', 'open-vcdiff'): {
"Name": "open-vcdiff",
"URL": "http://code.google.com/p/open-vcdiff",
"License": "Apache 2.0, MIT, GPL v2 and custom licenses",
"License Android Compatible": "yes",
},
os.path.join('testing', 'gmock'): {
"Name": "gmock",
"URL": "http://code.google.com/p/googlemock",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('testing', 'gtest'): {
"Name": "gtest",
"URL": "http://code.google.com/p/googletest",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'angle'): {
"Name": "Almost Native Graphics Layer Engine",
"URL": "http://code.google.com/p/angleproject/",
"License": "BSD",
},
os.path.join('third_party', 'cros_system_api'): {
"Name": "Chromium OS system API",
"URL": "http://www.chromium.org/chromium-os",
"License": "BSD",
# Absolute path here is resolved as relative to the source root.
"License File": "/LICENSE.chromium_os",
},
os.path.join('third_party', 'GTM'): {
"Name": "Google Toolbox for Mac",
"URL": "http://code.google.com/p/google-toolbox-for-mac/",
"License": "Apache 2.0",
"License File": "COPYING",
},
os.path.join('third_party', 'lss'): {
"Name": "linux-syscall-support",
"URL": "http://code.google.com/p/linux-syscall-support/",
"License": "BSD",
"License File": "/LICENSE",
},
os.path.join('third_party', 'ots'): {
"Name": "OTS (OpenType Sanitizer)",
"URL": "http://code.google.com/p/ots/",
"License": "BSD",
},
os.path.join('third_party', 'pdfsqueeze'): {
"Name": "pdfsqueeze",
"URL": "http://code.google.com/p/pdfsqueeze/",
"License": "Apache 2.0",
"License File": "COPYING",
},
os.path.join('third_party', 'ppapi'): {
"Name": "ppapi",
"URL": "http://code.google.com/p/ppapi/",
},
os.path.join('third_party', 'scons-2.0.1'): {
"Name": "scons-2.0.1",
"URL": "http://www.scons.org",
"License": "MIT",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'trace-viewer'): {
"Name": "trace-viewer",
"URL": "http://code.google.com/p/trace-viewer",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'v8-i18n'): {
"Name": "Internationalization Library for v8",
"URL": "http://code.google.com/p/v8-i18n/",
"License": "Apache 2.0",
},
os.path.join('third_party', 'WebKit'): {
"Name": "WebKit",
"URL": "http://webkit.org/",
"License": "BSD and GPL v2",
# Absolute path here is resolved as relative to the source root.
"License File": "/webkit/LICENSE",
},
os.path.join('third_party', 'webpagereplay'): {
"Name": "webpagereplay",
"URL": "http://code.google.com/p/web-page-replay",
"License": "Apache 2.0",
"License File": "NOT_SHIPPED",
},
os.path.join('tools', 'grit'): {
"Name": "grit",
"URL": "http://code.google.com/p/grit-i18n",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('tools', 'gyp'): {
"Name": "gyp",
"URL": "http://code.google.com/p/gyp",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('v8'): {
"Name": "V8 JavaScript Engine",
"URL": "http://code.google.com/p/v8",
"License": "BSD",
},
os.path.join('v8', 'strongtalk'): {
"Name": "Strongtalk",
"URL": "http://www.strongtalk.org/",
"License": "BSD",
# Absolute path here is resolved as
|
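A rough sketch of how the prune lists defined above are typically applied when collecting third_party directories to scan; the function below is illustrative and is not the tool's own directory-walking code.
import os

def find_third_party_dirs(root, prune_paths, prune_dirs):
    third_party_dirs = []
    for dirpath, dirnames, _ in os.walk(root):
        rel = os.path.relpath(dirpath, root)
        if rel in prune_paths:
            dirnames[:] = []  # do not descend into explicitly pruned paths
            continue
        # Skip VCS metadata and build output directories everywhere.
        dirnames[:] = [d for d in dirnames if d not in prune_dirs]
        if os.path.basename(dirpath) == 'third_party':
            third_party_dirs.append(rel)
    return third_party_dirs

# e.g. find_third_party_dirs('.', PRUNE_PATHS, PRUNE_DIRS)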
noroutine/ansible
|
lib/ansible/modules/cloud/amazon/s3_lifecycle.py
|
Python
|
gpl-3.0
| 15,265
| 0.002489
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: s3_lifecycle
short_description: Manage s3 bucket lifecycle rules in AWS
description:
- Manage s3 bucket lifecycle rules in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
notes:
- If specifying expiration time as days then transition time must also be specified in days
- If specifying expiration time as a date then transition time must also be specified as a date
requirements:
- python-dateutil
options:
name:
description:
- "Name of the s3 bucket"
required: true
expiration_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
be midnight and a GMT timezone must be specified.
required: false
default: null
expiration_days:
description:
- "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
required: false
default: null
prefix:
description:
- "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
required: false
default: null
rule_id:
description:
- "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
required: false
default: null
state:
description:
- "Create or remove the lifecycle rule"
required: false
default: present
choices: [ 'present', 'absent' ]
status:
description:
- "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
required: false
default: enabled
choices: [ 'enabled', 'disabled' ]
storage_class:
description:
- "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
- "The 'standard_ia' class is only being available from Ansible version 2.2."
required: false
default: glacier
choices: [ 'glacier', 'standard_ia']
transition_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
this parameter is required.
required: false
default: null
transition_days:
description:
- "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
- s3_lifecycle:
name: mybucket
expiration_days: 30
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
- s3_lifecycle:
name: mybucket
transition_days: 7
expiration_days: 90
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
# Note that midnight GMT must be specified.
# Be sure to quote your date strings
- s3_lifecycle:
name: mybucket
transition_date: "2020-12-30T00:00:00.000Z"
expiration_date: "2030-12-30T00:00:00.000Z"
prefix: /logs/
status: enabled
state: present
# Disable the rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
status: disabled
state: present
# Delete the lifecycle rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
state: absent
# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
- s3_lifecycle:
name: mybucket
prefix: /backups/
storage_class: standard_ia
transition_days: 31
state: present
status: enabled
'''
import xml.etree.ElementTree as ET
import copy
import datetime
try:
import dateutil.parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
try:
import boto
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.s3.lifecycle
|
import Lifecycle, Rule, Expiration, Transition
from boto.exception import BotoServerError, S3Respo
|
nseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def create_lifecycle_rule(connection, module):
name = module.params.get("name")
expiration_date = module.params.get("expiration_date")
expiration_days = module.params.get("expiration_days")
prefix = module.params.get("prefix")
rule_id = module.params.get("rule_id")
status = module.params.get("status")
storage_class = module.params.get("storage_class")
transition_date = module.params.get("transition_date")
transition_days = module.params.get("transition_days")
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
# Get the bucket's current lifecycle rules
try:
current_lifecycle_obj = bucket.get_lifecycle_config()
except S3ResponseError as e:
if e.error_code == "NoSuchLifecycleConfiguration":
current_lifecycle_obj = Lifecycle()
else:
module.fail_json(msg=e.message)
# Create expiration
if expiration_days is not None:
expiration_obj = Expiration(days=expiration_days)
elif expiration_date is not None:
expiration_obj = Expiration(date=expiration_date)
else:
expiration_obj = None
# Create transition
if transition_days is not None:
transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
elif transition_date is not None:
transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
else:
transition_obj = None
# Create rule
rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)
# Create lifecycle
lifecycle_obj = Lifecycle()
appended = False
# If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
if current_lifecycle_obj:
# If rule ID exists, use that for comparison otherwise compare based on prefix
for existing_rule in current_lifecycle_obj:
if rule.id == existing_rule.id:
if compare_rule(rule, existing_rule):
lifecycle_obj.append(rule)
appended = True
else:
lifecycle_obj.append(rule)
changed = True
appended = True
elif rule.prefix == existing_rule.prefix:
existing_rule.id = None
if compare_rule(rule, existing_rule):
lifecycle_obj.append(rule)
appended = True
else:
lifecycle_obj.append(rule)
changed = True
appended = True
else:
lifecycle_obj.append(existing_rule)
# If nothing appended
|
ethronsoft/stor
|
bindings/python/setup.py
|
Python
|
bsd-2-clause
| 737
| 0.004071
|
from setuptools import setup
setup(
name="pystor",
version="0.9.1",
author="Ethronsoft",
author_email='[email protected]',
zip_safe=False,
packages=["ethronsoft", "ethronsoft.pystor"],
license=open(
|
"LICENSE.txt").read(),
include_package_data=True,
|
keywords="nosql document store serverless embedded",
url="https://github.com/ethronsoft/stor",
description="Python bindings to esft::stor, a C++ NoSQL serverless document store",
install_requires=[
'enum34'
],
setup_requires=[
'pytest-runner'
],
tests_require=[
'pytest'
],
entry_points={
'console_scripts':[
"pystor = ethronsoft.pystor.__main__:main"
]
}
)
|
opennode/nodeconductor-openstack
|
src/waldur_openstack/openstack/urls.py
|
Python
|
mit
| 928
| 0.009698
|
from . import views
def register_in(router):
router.register(r'openstack', views.OpenStackServiceViewSet, base_name='openstack')
router.register(r'openstack-images', views.ImageViewSe
|
t, base_name='openstack-image')
router.register(r'openstack-flavors', views.FlavorViewSet, base_name='openstack-flavor')
router.register(r'openstack-tenants', views
|
.TenantViewSet, base_name='openstack-tenant')
router.register(r'openstack-service-project-link', views.OpenStackServiceProjectLinkViewSet, base_name='openstack-spl')
router.register(r'openstack-security-groups', views.SecurityGroupViewSet, base_name='openstack-sgp')
router.register(r'openstack-floating-ips', views.FloatingIPViewSet, base_name='openstack-fip')
router.register(r'openstack-networks', views.NetworkViewSet, base_name='openstack-network')
router.register(r'openstack-subnets', views.SubNetViewSet, base_name='openstack-subnet')
|
PreppyLLC-opensource/django-advanced-filters
|
advanced_filters/migrations/0001_initial.py
|
Python
|
mit
| 1,420
| 0.003521
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-07 23:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AdvancedFilter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('url', models.CharField(max_length=255)),
('b64_query', models.CharField(max_length=2048)),
('model', models.CharField(bl
|
ank=True, max_length=64, null=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_advanced_filters', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(blank=True, to='auth.Group')),
('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
option
|
s={
'verbose_name_plural': 'Advanced Filters',
'verbose_name': 'Advanced Filter',
},
),
]
|
henrymp/coursebuilder
|
controllers/utils.py
|
Python
|
apache-2.0
| 17,556
| 0.000627
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto ([email protected])'
import base64
import hmac
import os
import time
import urlparse
import appengine_config
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.courses import Course
from models.models import Student
from models.roles import Roles
import webapp2
from google.appengine.api import namespace_manager
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'
# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'
XSRF_SECRET_LENGTH = 20
XSRF_SECRET = ConfigProperty(
'gcb_xsrf_secret', str, (
'Text used to encrypt tokens, which help prevent Cross-site request '
'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
'preferably using 16-64 characters. Once you change this value, the '
'server rejects all subsequent requests issued using an old value for '
'this variable.'),
'course builder XSRF secret')
class ReflectiveRequestHandler(object):
"""Uses reflection to handle custom get() and post() requests.
Use this class as a mix-in with any webapp2.RequestHandler to allow request
dispatching to multiple get() and post() methods based on the 'action'
parameter.
Open your existing webapp2.RequestHandler, add this class as a mix-in.
Define the following class variables:
default_action = 'list'
get_actions = ['default_action', 'edit']
post_actions = ['save']
Add instance methods named get_list(self), get_edit(self), post_save(self).
These methods will now be called automatically based on the 'action'
GET/POST parameter.
"""
def create_xsrf_token(self, action):
return XsrfTokenManager.create_xsrf_token(action)
def get(self):
"""Handles GET."""
action = self.request.get('action')
if not action:
action = self.default_action
if action not in self.get_actions:
self.error(404)
return
handler = getattr(self, 'get_%s' % action)
if not handler:
self.error(404)
return
return handler()
def post(self):
"""Handles POST."""
action = self.request.get('action')
if not action or action not in self.post_actions:
self.error(404)
return
handler = getattr(self, 'post_%s' % action)
if not handler:
self.error(404)
return
# Each POST request must have valid XSRF token.
xsrf_token = self.request.get('xsrf_token')
if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
self.error(403)
return
return handler()
class ApplicationHandler(webapp2.RequestHandler):
"""A handler that is aware of the application context."""
@classmethod
def is_absolute(cls, url):
return bool(urlparse.urlparse(url).scheme)
@classmethod
def get_base_href(cls, handler):
"""Computes current course <base> href."""
base = handler.app_context.get_slug()
if not base.endswith('/'):
base = '%s/' % base
# For IE to work with the <base> tag, its href must be an absolute URL.
if not cls.is_absolute(base):
parts = urlparse.urlparse(handler.request.url)
base = urlparse.urlunparse(
(parts.scheme, parts.n
|
etloc, base, None, None, None))
return base
def __init__(self, *args, **kwargs):
super(ApplicationHandler, self).__init__(*args, **kwargs)
self.template_value = {}
|
def get_template(self, template_file, additional_dirs=None):
"""Computes location of template files for the current namespace."""
self.template_value[COURSE_INFO_KEY] = self.app_context.get_environ()
self.template_value['is_course_admin'] = Roles.is_course_admin(
self.app_context)
self.template_value[
'is_read_write_course'] = self.app_context.fs.is_read_write()
self.template_value['is_super_admin'] = Roles.is_super_admin()
self.template_value[COURSE_BASE_KEY] = self.get_base_href(self)
return self.app_context.get_template_environ(
self.template_value[COURSE_INFO_KEY]['course']['locale'],
additional_dirs
).get_template(template_file)
def canonicalize_url(self, location):
"""Adds the current namespace URL prefix to the relative 'location'."""
is_relative = (
not self.is_absolute(location) and
not location.startswith(self.app_context.get_slug()))
has_slug = (
self.app_context.get_slug() and self.app_context.get_slug() != '/')
if is_relative and has_slug:
location = '%s%s' % (self.app_context.get_slug(), location)
return location
def redirect(self, location):
super(ApplicationHandler, self).redirect(
self.canonicalize_url(location))
class BaseHandler(ApplicationHandler):
"""Base handler."""
def __init__(self, *args, **kwargs):
super(BaseHandler, self).__init__(*args, **kwargs)
self.course = None
def get_course(self):
if not self.course:
self.course = Course(self)
return self.course
def find_unit_by_id(self, unit_id):
"""Gets a unit with a specific id or fails with an exception."""
return self.get_course().find_unit_by_id(unit_id)
def get_units(self):
"""Gets all units in the course."""
return self.get_course().get_units()
def get_lessons(self, unit_id):
"""Gets all lessons (in order) in the specific course unit."""
return self.get_course().get_lessons(unit_id)
def get_progress_tracker(self):
"""Gets the progress tracker for the course."""
return self.get_course().get_progress_tracker()
def get_user(self):
"""Validate user exists."""
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
else:
return user
def personalize_page_and_get_user(self):
"""If the user exists, add personalized fields to the navbar."""
user = self.get_user()
if user:
self.template_value['email'] = user.email()
self.template_value['logoutUrl'] = (
users.create_logout_url(self.request.uri))
return user
def personalize_page_and_get_enrolled(self):
"""If the user is enrolled, add personalized fields to the navbar."""
user = self.personalize_page_and_get_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return None
student = Student.get_enrolled_student_by_email(user.email())
if not student:
self.redirect('/preview')
return None
return student
def assert_xsrf_token_or_fail(self, request, action):
"""Asserts the current request has proper XSRF token or fails."""
token = request.get('xsrf_token')
if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action):
self.error(403)
return False
return True
def render(self, template_file):
"
|
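To make the dispatching behaviour described in the ReflectiveRequestHandler docstring above concrete, here is a hedged usage sketch; the handler class, its actions, and the import path are illustrative assumptions, not code from the course builder repository.
import webapp2
from controllers.utils import ReflectiveRequestHandler  # import path assumed from the file's location above

class ItemHandler(ReflectiveRequestHandler, webapp2.RequestHandler):
    default_action = 'list'
    get_actions = ['list', 'edit']
    post_actions = ['save']

    def get_list(self):
        # Dispatched for GET requests with action=list (or no action at all).
        self.response.write('listing items')

    def get_edit(self):
        # Dispatched for GET requests with action=edit.
        self.response.write('edit form')

    def post_save(self):
        # Dispatched for POST requests with action=save and a valid xsrf_token.
        self.response.write('saved')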
shashank971/edx-platform
|
common/test/acceptance/tests/helpers.py
|
Python
|
agpl-3.0
| 24,524
| 0.003058
|
"""
Test helper functions and base classes.
"""
import inspect
import json
import unittest
import functools
import operator
import pprint
import requests
import os
import urlparse
from contextlib import contextmanager
from datetime import datetime
from path import Path as path
from bok_choy.javascript import js_defined
from bok_choy.web_app_test import WebAppTest
from bok_choy.promise import EmptyPromise, Promise
from opaque_keys.edx.locator import CourseLocator
from pymongo import MongoClient, ASCENDING
from openedx.core.lib.tests.assertions.events import assert_event_matches, is_matching_event, EventMatchTolerates
from xmodule.partitions.partitions import UserPartition
from xmodule.partitions.tests.test_partitions import MockUserPartitionScheme
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from unittest import TestCase
from ..pages.common import BASE_URL
MAX_EVENTS_IN_FAILURE_OUTPUT = 20
def skip_if_browser(browser):
"""
Method decorator that skips a test if browser is `browser`
Args:
browser (str): name of internet browser
Returns:
Decorated function
"""
def decorator(test_function):
@functools.wraps(test_function)
def wrapper(self, *args, **kwargs):
if self.browser.name == browser:
raise unittest.SkipTest('Skipping as this test will not work with {}'.format(browser))
test_function(self, *args, **kwargs)
return wrapper
return decorator
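# Illustrative only: a hypothetical bok-choy test class (not part of this module)
# showing how the skip_if_browser decorator above would typically be applied.
class _ExampleSkipUsage(WebAppTest):
    @skip_if_browser('firefox')
    def test_example_feature(self):
        pass  # raises unittest.SkipTest when self.browser.name == 'firefox'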
def is_youtube_available():
"""
Check if the required youtube urls are available.
If a URL in `youtube_api_urls` is not reachable then subsequent URLs will not be checked.
Returns:
bool:
"""
youtube_api_urls = {
'main': 'https://www.youtube.com/',
'player': 'https://www.youtube.com/iframe_api',
# For transcripts, you need to check an actual video, so we will
# just specify our default video and see if that one is available.
'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk',
}
for url in youtube_api_urls.itervalues():
try:
response = requests.get(url, allow_redirects=False)
except requests.exceptions.ConnectionError:
return False
if response.status_code >= 300:
return False
return True
def load_data_str(rel_path):
"""
Load a file from the "data" directory as a string.
`rel_path` is the path relative to the data directory.
"""
full_path = path(__file__).abspath().dirname() / "data" / rel_path
with open(full_path) as data_file:
return data_file.read()
def remove_file(filename):
"""
Remove a file if it exists
"""
if os.path.exists(filename):
os.remove(filename)
def disable_animations(page):
"""
Disable jQuery and CSS3 animations.
    """
disable_jquery_animations(page)
disable_css_animations(page)
def enable_animations(page):
"""
Enable jQuery and CSS3 animations.
"""
enable_jquery_animations(page)
enable_css_animations(page)
@js_defined('window.jQuery')
def disable_jquery_animations(page):
"""
Disable jQuery animations.
"""
page.browser.execute_script("jQuery.fx.off = true;")
@js_defined('window.jQuery')
def enable_jquery_animations(page):
"""
Enable jQuery animations.
"""
page.browser.execute_script("jQuery.fx.off = false;")
def disable_css_animations(page):
"""
Disable CSS3 animations, transitions, transforms.
"""
page.browser.execute_script("""
var id = 'no-transitions';
// if styles were already added, just do nothing.
if (document.getElementById(id)) {
return;
}
var css = [
'* {',
'-webkit-transition: none !important;',
'-moz-transition: none !important;',
'-o-transition: none !important;',
'-ms-transition: none !important;',
'transition: none !important;',
'-webkit-transition-property: none !important;',
'-moz-transition-property: none !important;',
'-o-transition-property: none !important;',
'-ms-transition-property: none !important;',
'transition-property: none !important;',
'-webkit-transform: none !important;',
'-moz-transform: none !important;',
'-o-transform: none !important;',
'-ms-transform: none !important;',
'transform: none !important;',
'-webkit-animation: none !important;',
'-moz-animation: none !important;',
'-o-animation: none !important;',
'-ms-animation: none !important;',
'animation: none !important;',
'}'
].join(''),
head = document.head || document.getElementsByTagName('head')[0],
styles = document.createElement('style');
styles.id = id;
styles.type = 'text/css';
if (styles.styleSheet){
styles.styleSheet.cssText = css;
} else {
styles.appendChild(document.createTextNode(css));
}
head.appendChild(styles);
""")
def enable_css_animations(page):
"""
Enable CSS3 animations, transitions, transforms.
"""
page.browser.execute_script("""
var styles = document.getElementById('no-transitions'),
head = document.head || document.getElementsByTagName('head')[0];
head.removeChild(styles)
""")
def select_option_by_text(select_browser_query, option_text):
"""
Chooses an option within a select by text (helper method for Select's select_by_visible_text method).
"""
select = Select(select_browser_query.first.results[0])
select.select_by_visible_text(option_text)
def get_selected_option_text(select_browser_query):
"""
Returns the text value for the first selected option within a select.
"""
select = Select(select_browser_query.first.results[0])
return select.first_selected_option.text
def get_options(select_browser_query):
"""
Returns all the options for the given select.
"""
return Select(select_browser_query.first.results[0]).options
def generate_course_key(org, number, run):
"""
Makes a CourseLocator from org, number and run
"""
default_store = os.environ.get('DEFAULT_STORE', 'draft')
return CourseLocator(org, number, run, deprecated=(default_store == 'draft'))
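# For illustration only (org/number/run values are made up):
#     generate_course_key('TestX', 'Demo101', '2020')
# returns a CourseLocator for that course, marked deprecated when the
# DEFAULT_STORE environment variable is 'draft' (the default above).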
def select_option_by_value(browser_query, value):
"""
Selects a html select element by matching value attribute
"""
select = Select(browser_query.first.results[0])
select.select_by_value(value)
def options_selected():
"""
        Returns True if the option whose value attribute matches `value` is
        already selected. If a matching option is not yet selected, it is
        clicked and False is returned. If `value` is not among the options,
        False is returned as well.
"""
all_options_selected = True
has_option = False
for opt in select.options:
if opt.get_attribute('value') == value:
has_option = True
if not opt.is_selected():
all_options_selected = False
opt.click()
# if value is not an option choice then it should return false
if all_options_selected and not has_option:
all_options_selected = False
return all_options_selected
# Make sure specified option is actually selected
EmptyPromise(options_selected, "Option is selected").fulfill()
def is_option_value_selected(browser_query, value):
"""
return true if given value is selected in html select element, else return false.
"""
select = Select(browser_query.first.results[0])
ddl_selected_value = select.firs
|
TeluguOCR/banti_telugu_ocr
|
tests/linegraph_test.py
|
Python
|
apache-2.0
| 691
| 0.002894
|
from random import random
from banti.linegraph import LineGraph
class Weight():
def __init__(self, val):
self.val = val
def combine(self, other):
return random() < .3, Weight(int(100*random())+(self.val+other.val)//2)
def strength(self):
return self.val
def __repr__(self):
return "{}".format(self.val)
weights = [Weight(val) for val in range(10, 80, 10)]
print(list(enumerate(weights)))
lgraph = LineGraph(weights)
print(lgraph.lchildren)
print(lgraph)
lgraph.process_tree()
print(lgraph)
paths = lgraph.get_paths()
for path in paths:
print(path, lgraph.path_strength(path))
print("Strongest Path: ", lgraph.strongest_path())
|
juergenz/pie
|
src/pie/chat_commands.py
|
Python
|
mit
| 3,649
| 0.004111
|
__all__ = ['chatcommand', 'execute_chat_command', 'save_matchsettings', '_register_chat_command']
import functools
import inspect
from .events import eventhandler, send_event
from .log import logger
from .asyncio_loop import loop
_registered_chat_commands = {} # dict of all registered chat commands
async def execute_chat_command(server, player, cmd):
#if not player.is_admin():
#r = check_rights(player)
args = cmd.split(' ')
    if args[len(args) - 1] == '':
del args[len(args) - 1]
if args[0] in _registered_chat_commands:
try:
if len(args) == 1:
server.run_task(_registered_chat_commands[args[0]](server, player))
else:
server.run_task(_registered_chat_commands[args[0]](server, player, *args[1:]))
except Exception as exp:
server.chat_send_error('fault use of chat command: ' + args[0], player)
server.chat_send_error(str(exp), player)
server.chat_send('use /help to see available chat commands', player)
raise
else:
server.chat_send_error('unknown chat command: ' + args[0], player)
server.chat_send('use /help to see available chat commands', player)
def _register_chat_command(chat_command, function):
if chat_command not in _registered_chat_commands:
_registered_chat_commands[chat_command] = function
else:
logger.error('chatcommand ' + "'" + chat_command + "'" + ' already registered to ' + str(function))
return False
def _unregister_chat_command(chat_command):
if chat_command not in _registered_chat_commands:
        raise KeyError('chat command not registered')
else:
del _registered_chat_commands[chat_command]
# @chatcommand decorator
def chatcommand(cmd):
def chatcommand_decorator(func):
if _register_chat_command(cmd, func) is False:
return
module = inspect.getmodule(func)
logger.debug('chatcommand ' + "'" + cmd + "' connected to " + str(func) + ' in module ' + str(module))
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return chatcommand_decorator
@eventhandler('ManiaPlanet.PlayerChat')
async def _on_player_chat(server, callback):
p = server.player_from_login(callback.login)
# ignore normal chat
if not callback.isCommand:
if p is not None:
send_event(server, 'pie.PlayerChat', p)
return
server.run_task(execute_chat_command(server, p, callback.text))
@chatcommand('/help')
async def cmd_help(server, player):
"""list all chat commands"""
server.chat_send('help:', player)
for cmd in _registered_chat_commands:
        if _registered_chat_commands[cmd].__doc__ is None:
docstr = 'no description set'
else:
docstr = _registered_chat_commands[cmd].__doc__
server.chat_send(cmd + ' - ' + docstr, player)
async def save_matchsettings(server, filename = None):
await server.rpc.SaveMatchSettings('MatchSettings\\' + server.config.matchsettings)
@chatcommand('/savematchsettings')
async def cmd_savematchsettings(server, player):
await save_matchsettings(server)
server.chat_send('matchsettings saved: ' + server.config.matchsettings)
@chatcommand('/shutdown')
async def cmd_shutdown(server, player):
await server.chat_send_wait('pie shutdown')
loop.stop()
@chatcommand('/players')
async def cmd_players(server, player):
for player in server.players:
server.chat_send(server.players[player].nickname)
|
applitools/eyes.selenium.python
|
applitools/selenium/capture/eyes_webdriver_screenshot.py
|
Python
|
apache-2.0
| 9,401
| 0.004148
|
from __future__ import absolute_import
import base64
import typing as tp
from selenium.common.exceptions import WebDriverException
from applitools.core import EyesScreenshot, EyesError, Point, Region, OutOfBoundsError
from applitools.utils import image_utils
from applitools.selenium import eyes_selenium_utils
from applitools.selenium.frames import FrameChain
if tp.TYPE_CHECKING:
from PIL import Image
from applitools.utils.custom_types import ViewPort
from applitools.selenium import EyesWebDriver
class EyesWebDriverScreenshot(EyesScreenshot):
@staticmethod
def create_from_base64(screenshot64, driver):
"""
Creates an instance from the base64 data.
:param screenshot64: The base64 representation of the png bytes.
:param driver: The webdriver for the session.
"""
return EyesWebDriverScreenshot(driver, screenshot64=screenshot64)
@staticmethod
def create_from_image(screenshot, driver):
# type: (Image.Image, EyesWebDriver) -> EyesWebDriverScreenshot
"""
Creates an instance from the base64 data.
:param screenshot: The screenshot image.
        :param driver: The webdriver for the session.
"""
return EyesWebDriverScreenshot(driver, screenshot=screenshot)
def __init__(self, driver, screenshot=None, screenshot64=None,
                 is_viewport_screenshot=None, frame_location_in_screenshot=None):
# type: (EyesWebDriver, Image.Image, None, tp.Optional[bool], tp.Optional[Point]) -> None
"""
Initializes a Screenshot instance. Either screenshot or screenshot64 must NOT be None.
Should not be used directly. Use create_from_image/create_from_base64 instead.
:param driver: EyesWebDriver instance which handles the session from which the screenshot
was retrieved.
:param screenshot: image instance. If screenshot64 is None,
this variable must NOT be none.
:param screenshot64: The base64 representation of a png image. If screenshot
is None, this variable must NOT be none.
:param is_viewport_screenshot: Whether the screenshot object represents a
viewport screenshot or a full screenshot.
:param frame_location_in_screenshot: The location of the frame relative
to the top,left of the screenshot.
:raise EyesError: If the screenshots are None.
"""
if screenshot is None and screenshot64 is None:
raise EyesError("both screenshot and screenshot64 are None!")
if screenshot64:
screenshot = image_utils.image_from_bytes(base64.b64decode(screenshot64))
# initializing of screenshot
super(EyesWebDriverScreenshot, self).__init__(image=screenshot)
self._driver = driver
self._viewport_size = driver.get_default_content_viewport_size(force_query=False) # type: ViewPort
self._frame_chain = driver.frame_chain.clone()
if self._frame_chain:
chain_len = len(self._frame_chain)
self._frame_size = self._frame_chain[chain_len - 1].outer_size
else:
try:
self._frame_size = driver.get_entire_page_size()
except WebDriverException:
# For Appium, we can't get the "entire page size", so we use the viewport size.
self._frame_size = self._viewport_size
# For native Appium Apps we can't get the scroll position, so we use (0,0)
try:
self._scroll_position = driver.get_current_position()
except (WebDriverException, EyesError):
self._scroll_position = Point(0, 0)
if is_viewport_screenshot is None:
is_viewport_screenshot = (self._screenshot.width <= self._viewport_size['width']
and self._screenshot.height <= self._viewport_size['height'])
self._is_viewport_screenshot = is_viewport_screenshot
if frame_location_in_screenshot is None:
if self._frame_chain:
frame_location_in_screenshot = EyesWebDriverScreenshot \
.calc_frame_location_in_screenshot(self._frame_chain, is_viewport_screenshot)
else:
# The frame is the default content
frame_location_in_screenshot = Point(0, 0)
if self._is_viewport_screenshot:
frame_location_in_screenshot.offset(-self._scroll_position.x,
-self._scroll_position.y)
self._frame_location_in_screenshot = frame_location_in_screenshot
self._frame_screenshot_intersect = Region(frame_location_in_screenshot.x,
frame_location_in_screenshot.y,
self._frame_size['width'],
self._frame_size['height'])
self._frame_screenshot_intersect.intersect(Region(width=self._screenshot.width,
height=self._screenshot.height))
@staticmethod
def calc_frame_location_in_screenshot(frame_chain, is_viewport_screenshot):
first_frame = frame_chain[0]
location_in_screenshot = Point(first_frame.location['x'], first_frame.location['y'])
# We only need to consider the scroll of the default content if the screenshot is a
# viewport screenshot. If this is a full page screenshot, the frame location will not
# change anyway.
if is_viewport_screenshot:
location_in_screenshot.x -= first_frame.parent_scroll_position.x
location_in_screenshot.y -= first_frame.parent_scroll_position.y
# For inner frames we must calculate the scroll
inner_frames = frame_chain[1:]
for frame in inner_frames:
location_in_screenshot.x += frame.location['x'] - frame.parent_scroll_position.x
location_in_screenshot.y += frame.location['y'] - frame.parent_scroll_position.y
return location_in_screenshot
@property
def frame_chain(self):
return self._frame_chain
def get_base64(self):
if not self._screenshot64:
self._screenshot64 = image_utils.get_base64(self._screenshot)
return self._screenshot64
def get_location_relative_to_frame_viewport(self, location):
result = {'x': location['x'], 'y': location['y']}
if self._frame_chain or self._is_viewport_screenshot:
result['x'] -= self._scroll_position.x
result['y'] -= self._scroll_position.y
return result
def get_sub_screenshot_by_region(self, region):
sub_screenshot_region = self.get_intersected_region(region)
if sub_screenshot_region.is_empty():
raise OutOfBoundsError("Region {0} is out of bounds!".format(region))
# If we take a screenshot of a region inside a frame, then the frame's (0,0) is in the
# negative offset of the region..
sub_screenshot_frame_location = Point(-region.left, -region.top)
# FIXME Calculate relative region location? (same as the java version)
screenshot = image_utils.get_image_part(self._screenshot, sub_screenshot_region)
return EyesWebDriverScreenshot(self._driver, screenshot,
is_viewport_screenshot=self._is_viewport_screenshot,
frame_location_in_screenshot=sub_screenshot_frame_location)
def get_element_region_in_frame_viewport(self, element):
location, size = element.location, element.size
relative_location = self.get_location_relative_to_frame_viewport(location)
x, y = relative_location['x'], relative_location['y']
width, height = size['width'], size['height']
# We only care about the part of the element which is in the viewport.
if x < 0:
diff = -x
# IMPORTANT the diff is
|
kotfic/reddit_elfeed_wrapper
|
reddit_elfeed_wrapper/app.py
|
Python
|
gpl-2.0
| 2,387
| 0.001676
|
from functools import wraps
from flask import Flask, make_response
from werkzeug.contrib.atom import AtomFeed
from datetime import datetime as dt
from HTMLParser import HTMLParser
from bs4 import BeautifulSoup
import praw
app = Flask(__name__)
def get_api():
USER_AGENT = "reddit_wrapper for personalized rss see: /u/kotfic"
return praw.Reddit(user_agent=USER_AGENT)
def reddit(label, subreddit, limit=25):
"""Decorator used to wrap functions that alter the body of a subreddit
feed. This function calls out to the subreddit using PRAW and passes the
    decorated function each article object one at a time. The function is
expected to return a string containing the desired contents of the atom
<content> tag."""
def _reddit(func):
@wraps(func)
def wrap_reddit():
base = "http://www.reddit.com/r/{}/"
feed = AtomFeed(label,
feed_url=base.format(subreddit),
url=base.format(subreddit))
articles = get_api().get_subreddit(subreddit).get_hot(limit=limit)
for article in articles:
feed.add(article.title,
func(article),
content_type='html',
author=article.author.name,
url=article.url,
updated=dt.fromtimestamp(int(article.created)),
published=dt.fromtimestamp(int(article.created)))
r = make_response(feed.get_response())
r.headers['Content-Type'] = "application/xml"
return r
return wrap_reddit
    return _reddit
@app.route('/r/python.atom')
@reddit("Python Subreddit", "python")
def python(article):
try:
        return HTMLParser().unescape(article.selftext_html)
except TypeError:
return ''
@app.route('/r/funny.atom')
@reddit("Funny Subreddit", "funny")
def funny(article):
try:
soup = BeautifulSoup("<img src=\"{}\" />".format(article.url))
return str(soup)
except TypeError:
return ''
@app.route('/r/emacs.atom')
@reddit("Emacs Subreddit", "emacs")
def emacs(article):
try:
return HTMLParser().unescape(article.selftext_html)
except TypeError:
return ''
def main():
app.run(debug=True)
if __name__ == "__main__":
main()
|
tsdmgz/ansible
|
lib/ansible/modules/storage/netapp/sf_account_manager.py
|
Python
|
gpl-3.0
| 8,755
| 0.002284
|
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_account_manager
short_description: Manage SolidFire accounts
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update accounts on SolidFire
options:
state:
description:
- Whether the specified account should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Unique username for this account. (May be 1 to 64 characters in length).
required: true
new_name:
description:
- New name for the user account.
required: false
default: None
initiator_secret:
description:
- CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
- The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
- If not specified, a random secret is created.
required: false
target_secret:
description:
- CHAP secret to use for the target (mutual CHAP authentication).
- Should be 12-16 characters long and impenetrable.
- The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
- If not specified, a random secret is created.
required: false
attributes:
description: List of Name/Value pairs in JSON object format.
required: false
account_id:
description:
- The ID of the account to manage or update.
required: false
default: None
status:
description:
- Status of the account.
required: false
'''
EXAMPLES = """
- name: Create Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
- name: Modify Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
new_name: TenantA-Renamed
- name: Delete Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: TenantA-Renamed
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireAccount(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
account_id=dict(required=False, type='int', default=None),
new_name=dict(required=False, type='str', default=None),
initiator_secret=dict(required=False, type='str'),
target_secret=dict(required=False, type='str'),
attributes=dict(required=False, type='dict'),
status=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.account_id = p['account_id']
self.new_name = p['new_name']
self.initiator_secret = p['initiator_secret']
self.target_secret = p['target_secret']
self.attributes = p['attributes']
self.status = p['status']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_account(self):
"""
Return account object if found
:return: Details about the account. None if not found.
:rtype: dict
"""
account_list = self.sfe.list_accounts()
for account in account_list.accounts:
if account.username == self.name:
# Update self.account_id:
if self.account_id is not None:
if account.account_id == self.account_id:
return account
else:
self.account_id = account.account_id
return account
return None
def create_account(self):
try:
self.sfe.add_account(username=self.name,
initiator_secret=self.initiator_secret,
target_secret=self.target_secret,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg='Error creating account %s: %s)' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_account(self):
try:
self.sfe.remove_account(account_id=self.account_id)
except Exception as e:
self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())
def update_account(self):
try:
self.sfe.modify_account(account_id=self.account_id,
username=self.new_name,
status=self.status,
initiator_secret=self.initiator_secret,
target_secret=self.target_secret,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
account_exists = False
update_account = False
account_detail = self.get_account()
if account_detail:
account_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the account
if account_detail.username is not None and self.new_name is not None and \
account_detail.username != self.new_name:
update_account = True
changed = True
elif account_detail.status is not None and self.status is not None \
and account_detail.status != self.status:
update_account = True
changed = True
elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
and account_detail.initiator_secret != self.initiator_secret:
update_account = True
changed = True
elif account_detail.target_secret is not None and self.target_secret is not None \
and account_detail.target_secret != self.target_secret:
update_account = True
changed = True
elif account_detail.attributes is not None and self.attributes is not None \
and account_detail.attributes != self.attributes:
update_account = True
changed = True
else:
if self.state == 'present':
|
marc0uk/twit
|
twit.py
|
Python
|
mit
| 1,468
| 0.008174
|
import sys, os
import tweepy
# File with colon-separated consumer/access token and secret
consumer_file='twitter.consumer'
access_file='twitter.access'
def __load_auth(file):
if os.path.exists(file):
with open(file) as f:
tokens = f.readline().replace('\n','').replace('\r','').split(':')
if len(tokens) == 2:
return tokens[0],tokens[1]
else:
raise ValueError("Expecting two colon-separated tokens")
else:
raise IOError("File not found: %s" % file)
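# For illustration: each secrets file is expected to hold a single colon-separated
# line, e.g. (placeholder values, not real credentials):
#     CONSUMER_KEY_PLACEHOLDER:CONSUMER_SECRET_PLACEHOLDER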
def twit(message, secret_dir='/secret'):
#
    # Load the twitter consumer and access tokens and secrets
consumer_token, consumer_secret = __load_auth(os.path.join(secret_dir, consumer_file))
access_token, access_secret = __load_auth(os.path.join(secret_dir, access_file))
#
# Perform OAuth authentication
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_secret)
#
# Create the API and post the status update
try:
api = tweepy.API(auth)
api.update_status(message)
except tweepy.error.TweepError, e:
print "Failed to post status update"
print "Error: %s" % str(e)
print "Using:"
print " consumer[%s][%s]" % (consumer_token, consumer_secret)
print " access[%s][%s]" % (access_token, access_secret)
if __name__ == '__main__':
tokens = sys.argv[1:]
#
twit(' '.join(tokens))
|
sarumont/py-trello
|
trello/util.py
|
Python
|
bsd-3-clause
| 3,964
| 0.002018
|
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import os
from requests_oauthlib import OAuth1Session
def create_oauth_token(expiration=None, scope=None, key=None, secret=None, name=None, output=True):
"""
Script to obtain an OAuth token from Trello.
Must have TRELLO_API_KEY and TRELLO_API_SECRET set in your environment
To set the token's expiration, set TRELLO_EXPIRATION as a string in your
environment settings (eg. 'never'), otherwise it will default to 30 days.
More info on token scope here:
    https://trello.com/docs/gettingstarted/#getting-a-token-from-a-user
"""
request_token_url = 'https://trello.com/1/OAuthGetRequestToken'
authorize_url = 'https://trello.com/1/OAuthAuthorizeToken'
    access_token_url = 'https://trello.com/1/OAuthGetAccessToken'
expiration = expiration or os.environ.get('TRELLO_EXPIRATION', "30days")
scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write')
trello_key = key or os.environ['TRELLO_API_KEY']
trello_secret = secret or os.environ['TRELLO_API_SECRET']
name = name or os.environ.get('TRELLO_NAME', 'py-trello')
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
session = OAuth1Session(client_key=trello_key, client_secret=trello_secret)
response = session.fetch_request_token(request_token_url)
resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret')
if output:
print("Request Token:")
print(" - oauth_token = %s" % resource_owner_key)
print(" - oauth_token_secret = %s" % resource_owner_secret)
print("")
# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.
print("Go to the following link in your browser:")
print("{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}&name={name}".format(
authorize_url=authorize_url,
oauth_token=resource_owner_key,
expiration=expiration,
scope=scope,
name=name
))
# After the user has granted access to you, the consumer, the provider will
# redirect you to whatever URL you have told them to redirect to. You can
# usually define this in the oauth_callback argument as well.
# Python 3 compatibility (raw_input was renamed to input)
try:
inputFunc = raw_input
except NameError:
inputFunc = input
accepted = 'n'
while accepted.lower() == 'n':
accepted = inputFunc('Have you authorized me? (y/n) ')
oauth_verifier = inputFunc('What is the PIN? ')
# Step 3: Once the consumer has redirected the user back to the oauth_callback
# URL you can request the access token the user has approved. You use the
# request token to sign this request. After this is done you throw away the
# request token and use the access token returned. You should store this
# access token somewhere safe, like a database, for future use.
session = OAuth1Session(client_key=trello_key, client_secret=trello_secret,
resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret,
verifier=oauth_verifier)
access_token = session.fetch_access_token(access_token_url)
if output:
print("Access Token:")
print(" - oauth_token = %s" % access_token['oauth_token'])
print(" - oauth_token_secret = %s" % access_token['oauth_token_secret'])
print("")
print("You may now access protected resources using the access tokens above.")
print("")
return access_token
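# A hedged usage sketch (values are placeholders, not real keys): with
# TRELLO_API_KEY and TRELLO_API_SECRET exported in the environment,
#     token = create_oauth_token(expiration='never', scope='read,write')
#     print(token['oauth_token'], token['oauth_token_secret'])
# walks interactively through the three OAuth steps documented above.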
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
ellmo/rogue-python-engine
|
rpe/rendering/__init__.py
|
Python
|
gpl-3.0
| 15
| 0.066667
|
import renderer
|
|
HowAU/python-training
|
generator/contact.py
|
Python
|
apache-2.0
| 2,357
| 0.017288
|
from model.contact import Contact  # we create a script for generating groups (test data) with subsequent saving to a file
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:  # read up on try/except
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts","file"])  # option n sets how much data to generate, option f sets the file where everything should go
except getopt.GetoptError as err:
    print(err)
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:  # this structure (in general) lets us control the script's group parameters via the Edit Configuration section
    # we can set the number of groups and the location of the result file
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):  # generates random data for the test
    symbols = string.ascii_letters + string.digits + string.punctuation + " "*10  # the characters we use in the random string
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])  # randomly picks characters from the given set
testdata = [Contact(firstname="", middlename="", lastname="")] + [
Contact(firstname="John", middlename="Jay", lastname="Johnson", home="123", mobile="456", work="789",
            email="[email protected]", email2="[email protected]", email3="[email protected]", phone2="456")
for i in range(random.randrange(n))
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:  # open the file with the w (write) flag and write something into it
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata))
#out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2)) # the dumps function turns the data structure into a JSON-format string
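# A hypothetical run of this generator (paths and count are illustrative):
#     python generator/contact.py -n 7 -f data/contacts_big.json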
|
Tiduszk/CS-100
|
Chapter 2/Practice Exam/Practice Exam.py
|
Python
|
gpl-3.0
| 1,064
| 0.032895
|
#Made by Zachary C. on 9/21/16 last edited on 9/21/16
#CONSTANTS
HOURS_DAY = 24
MINUTES_HOUR = 60
SECONDS_MINUTE = 60
#1. Greet the user and explain the program
#2. Ask the user to input the number of days
#3. save the number of days
days = float(input('This program converts days into hours, minutes, and seconds.\nPlease enter the number of days: '))
#4. Calculate the number of hours (days * hours in day)
#5. Save the number of hours
hours = days * HOURS_DAY
#6. Calculate the number of minutes (hours * minutes in hour)
#7. Save the number of minutes
minutes = hours * MINUTES_HOUR
#8. Calculate the number of seconds (minutes * seconds in minute)
#9. Save the number of seconds
seconds = minutes * SECONDS_MINUTE
#10. Display number of hours
#11. Display number of minutes
#12. Display number of seconds
#13. Signoff
print('In ' , days , ' days there are ' , int(format(hours , '.0f')) , ' hours or ' , int(format(minutes , '.0f')) , ' minutes or ' , \
int(format(seconds , '.0f')) , ' seconds.\nThanks for using my program. Bye.' , sep='')
#14. End
|
ChunggiLee/ChunggiLee.github.io
|
Heatmap/newData.py
|
Python
|
bsd-3-clause
| 22,372
| 0.013767
|
# -*- coding: utf-8 -*-
import sys, numpy, scipy
import scipy.cluster.hierarchy as hier
import scipy.spatial.distance as dist
import csv
import scipy.stats as stats
import json
import networkx as nx
from networkx.readwrite import json_graph
def makeNestedJson(leaf) :
leaf=json.loads(leaf)
#A tree is a directed graph - create one with a dummy root
DG=nx.DiGraph()
DG.add_node('root')
#Construct the tree as a directed graph and annotate the nodes with attributes
#Edges go from parent to child
for e in leaf:
DG.add_node(e['id'],label=e['label'])
#If there's a parent, use it...
if 'parent' in e: DG.add_edge(e['parent'],e['id'])
#else create a dummy parent from the dummy root
else: DG.add_edge('root',e['id'])
#Get the tree as JSON
data = json_graph.tree_data(DG,root='root')
#and dump the data from the dummy root's children down...
return json.dumps(data['children'])
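# Illustrative input for makeNestedJson (values are made up): a JSON string holding
# a flat list of nodes; nodes without a 'parent' key hang off the dummy root.
#     makeNestedJson('[{"id": 1, "label": "A"}, {"id": 2, "label": "B", "parent": 1}]')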
# This function puts root and makes hierarchy tree
def makeHier(data, length, typeRC, parentId, grandParentId):
# put very first data (root)
hierData = str(int(data[len(data)-1])) + "."
#print (hierData)
# data : whole data, len(hierMatrix)-1 : data's length, hierData : current stored data array
getElem (data, len(data)-1, hierData, length, typeRC, parentId, grandParentId)
# This function puts other data excluding root
# data : total hiermatrix, parentNum : cluster number, hier : total string which separate ".", length : each total length of col or row matrix, parentId : parent Id (it differs parent number)
def getElem(data, parentNum, hier, length, typeRC, parentId, grandParentId):
#'parent' : parentId , 'id' : data[parentNum] (current Id)
#print(rowLeafNum)
#print(colLeafNum)
# Check whether it is
if parentNum-4 >= 0 :
#isChecked = 0
# Put current data
if (parentNum != len(data)-1):
#leafData.append(str(int(hierMatrix[-1])) + ".")
hier += str(int(data[parentNum])) + "."
#
if (typeRC == "row"):
global rowLeafNum
rowLeafNum = rowLeafNum + 1
if int(data[parentNum]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum])][0]
global content
content['parent'] = int(grandParentId)
global content
content['id'] = int(data[parentNum])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
global colLeafNum
colLeafNum = colLeafNum + 1
#print(colHeaders)
#print(int(data[parentNum])-1)
if int(data[parentNum]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum])-1]
global colContent
colContent['parent'] = int(grandParentId)
global colContent
colContent['id'] = int(data[parentNum])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print ("gradParentId : " + str(int(grandParentId)))
#print ("parentId : " + str(int(parentId)))
#print ("id : " + str(int(data[parentNum])))
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
#print (hier)
#print(content)
#print(colContent)
#print("leafleafleafleafleafleafleaf")
#print(leafData)
#print(colLeafData)
if data[parentNum-3] >= length and data[parentNum-4] >= length:
#print (parentNum-3 , data[parentNum-3])
#print (parentNum-4 , data[parentNum-4])
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier,length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
            getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier,length,typeRC, int(data[parentNum]-3), int(data[parentNum]))
elif data[parentNum-3] < length and data[parentNum-4] > length:
#print (parentNum-4 , data[parentNum-4])
hier += str(int(data[parentNum-3])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-3]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-3])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-3])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-3]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-3])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-3])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
removeNum = len(str(int(data[parentNum-3]))) + 1
hier = hier[:-removeNum]
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier, length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
elif data[parentNum-3] > length and data[parentNum-4] < length:
#print (parentNum-3 , data[parentNum-3])
hier += str(int(data[parentNum-4])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-4]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-4])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-4])
global leafData
leafData += str(content) + ", "
global dotLeafData
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-4]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-4])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-4])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
removeNum = len(str(int(data[parentNum-4]))) + 1
|
stadelmanma/netl-AP_MAP_FLOW
|
apmapflow/scripts/apm_process_paraview_data.py
|
Python
|
gpl-3.0
| 6,758
| 0.000148
|
r"""
Description: Generates 2-D data maps from OpenFoam data saved by paraview
as a CSV file. The data has to be saved as point data and the following fields
are expected p, points:0->2, u:0->2. An aperture map is the second main input
and is used to generate the interpolation coordinates as well as convert
the flow velocities into volumetric flow rates. This script assumes the OpenFoam
simulation was performed on a geometry symmetric about the X-Z plane.
For usage information run: ``apm_process_paraview_data -h``
| Written By: Matthew stadelman
| Date Written: 2016/09/29
| Last Modified: 2017/04/23
|
"""
import argparse
from argparse import RawDescriptionHelpFormatter as RawDesc
import os
import scipy as sp
from scipy.interpolate import griddata
from apmapflow import _get_logger, set_main_logger_level, DataField
# setting up logger
set_main_logger_level('info')
logger = _get_logger('apmapflow.scripts')
# setting a few convenience globals
avg_fact = None
voxel_size = None
base_name = None
# creating arg parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawDesc)
# adding arguments
parser.add_argument('-v', '--verbose', action='store_true',
help='debug messages are printed to the screen')
parser.add_argument('-o', '--output-dir',
type=os.path.realpath, default=os.getcwd(),
help='''outputs file to the specified
directory, sub-directories are created as needed''')
parser.add_argument('--rho', type=float, default=1000,
help='fluid density for kinematic pressure conversion')
parser.add_argument('data_file', type=os.path.realpath,
help='paraview CSV data file')
parser.add_argument('map_file', type=os.path.realpath,
help='matching aperture map used for OpenFoam simulation')
parser.add_argument('voxel_size', type=float,
help='voxel to meter conversion factor of aperture map')
parser.add_argument('avg_fact', type=float,
help='''horizontal averaging factor of aperture map''')
parser.add_argument('base_name', nargs='?', default=None,
help='''base name to save fields as, i.e. base_name + "-p-map.txt",
defaults to the name of the CSV file''')
def main():
r"""
Processes commandline args and runs script
"""
global avg_fact, voxel_size, base_name
#
args = parser.parse_args()
if args.verbose:
set_main_logger_level('debug')
#
# these will be command-line args
para_infile = args.data_file
aper_infile = args.map_file
avg_fact = args.avg_fact
voxel_size = args.voxel_size
#
base_name = args.base_name
if base_name is None:
base_name = os.path.basename(para_infile).split('.')[0]
base_name = os.path.join(args.output_dir, base_name)
#
aper_map, data_dict = read_data_files(para_infile, aper_infile)
map_coords, data_coords = generate_coordinate_arrays(aper_map, data_dict)
save_data_maps(map_coords, data_coords, aper_map, data_dict, args.rho)
def read_data_files(para_file, map_file):
r"""
Reads in the paraview data file and aperture map file.
"""
#
# reading aperture map
logger.info('reading aperture map...')
aper_map = DataField(map_file)
#
# reading first line of paraview file to get column names
logger.info('reading paraview data file')
with open(para_file, 'r') as file:
cols = file.readline()
cols = cols.strip().replace('"', '').lower()
cols = cols.split(',')
#
    # reading entire dataset and splitting into column vectors
data = sp.loadtxt(para_file, delimiter=',', dtype=float, skiprows=1)
data_dict = {}
for i, col in enumerate(cols):
data_dict[col] = data[:, i]
#
return aper_map, data_dict
def generate_coordinate_arrays(aper_map, para_data_dict):
r"""
    Generates the coordinate arrays to use in data interpolation for converting
paraview point data into a 2-D data map.
"""
#
# generating XYZ coordinates from map to interpolate to
logger.info('calculating aperture map cell center coordinates...')
temp = sp.arange(aper_map.data_map.size, dtype=int)
temp = sp.unravel_index(temp, aper_map.data_map.shape[::-1])
map_coords = sp.zeros((aper_map.data_map.size, 3), dtype=float)
#
# half voxel added to make map points be cell centers
map_coords[:, 0] = temp[0] * avg_fact * voxel_size + voxel_size/2.0
map_coords[:, 2] = temp[1] * avg_fact * voxel_size + voxel_size/2.0
#
# pulling XYZ coordinates from the data file
logger.info('processing data file data for coordinates...')
data_coords = sp.zeros((para_data_dict['points:0'].shape[0], 3))
data_coords[:, 0] = para_data_dict['points:0']
data_coords[:, 1] = para_data_dict['points:1']
data_coords[:, 2] = para_data_dict['points:2']
#
return map_coords, data_coords
def save_data_maps(map_coords, data_coords, aper_map, data_dict, density):
r"""
Converts the raw paraview point data into a 2-D data distribution and
saves the file by appending to the base_name.
"""
#
# generating p field
logger.info('generating and saving pressure field...')
field = data_dict['p'] * density # openFoam outputs kinematic pressure
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
sp.savetxt(base_name+'-p-map.txt', field.T, delimiter='\t')
#
# generating Ux -> Qx field
logger.info('generating and saving Qx field...')
field = data_dict['u:0']
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qx-map.txt', field.T, delimiter='\t')
#
# generating Uz -> Qz field
logger.info('generating and saving Qz field...')
field = data_dict['u:2']
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qz-map.txt', field.T, delimiter='\t')
#
# generating Um -> Qm field
logger.info('generating and saving Q magnitude field...')
field = sp.sqrt(data_dict['u:0'] ** 2 + data_dict['u:2'] ** 2)
field = griddata(data_coords, field, map_coords, method='nearest')
field = sp.reshape(field, aper_map.data_map.shape[::-1])
field = field * aper_map.data_map.T * voxel_size**2
sp.savetxt(base_name+'-qm-map.txt', field.T, delimiter='\t')
|
dmgawel/helios-server
|
helios/south_migrations/0007_auto__add_field_voterfile_voter_file_content__chg_field_voterfile_vote.py
|
Python
|
apache-2.0
| 11,336
| 0.00891
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VoterFile.voter_file_content'
db.add_column('helios_voterfile', 'voter_file_content', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Changing field 'VoterFile.voter_file'
db.alter_column('helios_voterfile', 'voter_file', self.gf('django.db.models.fields.files.FileField')(max_length=250, null=True))
def backwards(self, orm):
# Deleting field 'VoterFile.voter_file_content'
db.delete_column('helios_voterfile', 'voter_file_content')
# User chose to not deal with backwards NULL issues for 'VoterFile.voter_file'
raise RuntimeError("Cannot reverse this migration. 'VoterFile.voter_file' and its values cannot be restored.")
models = {
'helios_auth.user': {
'Meta': {'unique_together': "(('user_type', 'user_id'),)", 'object_name': 'User'},
'admin_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('helios_auth.jsonfield.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'token': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'helios.auditedballot': {
'Meta': {'object_name': 'AuditedBallot'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_vote': ('django.db.models.fields.TextField', [], {}),
'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helios.castvote': {
'Meta': {'object_name': 'CastVote'},
'cast_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalidated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'quarantined_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'released_from_quarantine_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vote': ('helios.datatypes.djangofield.LDObjectField', [], {}),
'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'vote_tinyhash': ('django.db.models.fields.CharField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'voter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Voter']"})
},
'helios.election': {
'Meta': {'object_name': 'Election'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']"}),
'archived_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'cast_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'complaint_period_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datatype': ('django.db.models.fields.CharField', [], {'default': "'legacy/Election'", 'max_length': '250'}),
'description': ('django.db.models.fields.TextField', [], {}),
'election_type': ('django.db.models.fields.CharField', [], {'default': "'election'", 'max_length': '250'}),
'eligibility': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'encrypted_tally': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'featured_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'frozen_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'openreg': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'private_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'private_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'questions': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'registration_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'result': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'result_proof': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tallies_combined_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_finished_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'use_advanced_audit_features': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_voter_aliases': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'voters_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'voting_ended_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_extended_until': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'})
},
'helios.electionlog': {
'Meta': {'object_name': 'ElectionLog'},
'at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'helios.trustee': {
'Meta': {'object_name': 'Trustee'},
'decryption_factors': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'decryption_proofs': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h
|
msullivan/advent-of-code
|
2018/8a.py
|
Python
|
mit
| 712
| 0.007022
|
#!/usr/bin/env python3
import sys
from collections import defaultdict, deque
from dataclasses import dataclass
@dataclass
class Nobe:
children: object
metadata: object
argh = 0
def parse(data):
global argh
children = data.popleft()
metadata = data.popleft()
print(children, metadata)
nobe = Nobe([], [])
for x in range(children):
nobe.children.append(parse(data))
for x in range(metadata):
        argh += data.popleft()
    return nobe  # return the constructed node; otherwise nobe.children fills with None
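# Tiny worked example of the header format parse() expects
# (child count, metadata count, children..., then metadata entries):
#     "0 1 99"        -> a single node with no children; argh ends up 99
#     "1 1 0 1 99 2"  -> a root with one child; argh ends up 99 + 2 = 101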
def main(args):
    data = [s.strip() for s in sys.stdin][0]
data = deque([int(x) for x in data.split(' ')])
print(data)
print(len(data))
parse(data)
print(argh)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
DarkmatterVale/ChatterBot
|
chatterbot/adapters/io/io.py
|
Python
|
bsd-3-clause
| 594
| 0
|
from chatterbot.adapters import Adapter
from chatterbot.adapters.exceptions import AdapterNotImplementedError
class IOAdapter(Adapter):
"""
    This is an abstract class that represents the interface
that all input-output adapters should implement.
"""
def process_input(self):
"""
Returns data retrieved from the input source.
"""
raise AdapterNotImplementedError()
def process_response(self, input_value):
"""
Takes an input value.
Returns an output value.
"""
raise AdapterNotImplementedError()
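# Illustrative subclass (not part of ChatterBot): a concrete adapter overrides
# both methods instead of raising AdapterNotImplementedError.
class _ExampleEchoAdapter(IOAdapter):
    def process_input(self):
        # A real adapter would read from its input source (terminal, API, ...).
        return "hello"

    def process_response(self, input_value):
        # A real adapter would deliver the response to its output target.
        return input_value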
|
gregdp/segger
|
Segger/iseg_dialog.py
|
Python
|
mit
| 43,661
| 0.0366
|
# Copyright (c) 2020 Greg Pintilie - [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import chimera
import os
import os.path
import Tkinter
from CGLtk import Hybrid
import VolumeData
import _multiscale
import MultiScale.surface
import _surface
import numpy
import _contour
import Matrix
import VolumeViewer
from sys import stderr
from time import clock
import sets
import FitMap
from axes import prAxes
import regions
import graph
from Segger import dev_menus, timing, seggerVersion
OML = chimera.openModels.list
REG_OPACITY = 0.45
# http://geomalgorithms.com/a06-_intersect-2.html
from segment_dialog import current_segmentation, segmentation_map
class ISeg_Dialog ( chimera.baseDialog.ModelessDialog ):
title = "iSeg - Icosahedral Segmentation (Segger v" + seggerVersion + ")"
name = "segger_iseg"
buttons = ( "Close" )
help = 'https://github.com/gregdp/segger'
def fillInUI(self, parent):
self.group_mouse_mode = None
tw = parent.winfo_toplevel()
self.toplevel_widget = tw
tw.withdraw()
parent.columnconfigure(0, weight = 1)
row = 0
menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)
tw.config(menu = menubar)
f = Tkinter.Frame(parent)
f.grid(column=0, row=row, sticky='ew')
l = Tkinter.Label(f, text=' ')
l.grid(column=0, row=row, sticky='w')
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " 1. Tools -> Higher-Order Structure -> Icosahedron Surface.", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
            l = Tkinter.Label(ff, text = " - show & match icosahedron to current map (change Orientation if necessary)", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " 2. Make icosahedral surface mesh", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " ", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
b = Tkinter.Button(ff, text="Make", command=self.Icos2)
b.grid (column=1, row=0, sticky='w', padx=5, pady=1)
b = Tkinter.Button(ff, text="Toggle Display - Mesh/Solid", command=self.ToggleDisp)
b.grid (column=3, row=0, sticky='w', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " 3. Push outward", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " # iterations: ", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
self.numIt = Tkinter.StringVar(ff)
self.numIt.set ( "100" )
e = Tkinter.Entry(ff, width=7, textvariable=self.numIt)
e.grid(column=1, row=0, sticky='w', padx=5, pady=1)
l = Tkinter.Label(ff, text = ", stiffness: ", anchor = 'w')
l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)
self.springF = Tkinter.StringVar(ff)
self.springF.set ( "0.2" )
e = Tkinter.Entry(ff, width=7, textvariable=self.springF)
e.grid(column=3, row=0, sticky='w', padx=5, pady=1)
b = Tkinter.Button(ff, text="Push", command=self.Icos2Push)
b.grid (column=4, row=0, sticky='w', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " - Set radius:", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " ", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
sv = Tkinter.StringVar(ff)
sv.trace("w", lambda name, index, mode, sv=sv: self.set_rad_changed_cb(sv.get()) )
self.setRad = sv
e = Tkinter.Entry(ff, width=7, textvariable=sv )
e.grid(column=1, row=0, sticky='w', padx=5, pady=1)
# Radius
#rs = Hybrid.Scale(ff, '', 1, 1500, 0.01, 1150, length=200)
#rs.frame.grid(row = row, column = 1, sticky = 'ew', padx=5, pady=1, columnspan=10)
#rs.entry.config ( width=100 )
#rs.callback(self.radius_changed_cb)
#rs.entry.bind('<KeyPress-Return>', self.radius_changed_cb)
#self.radius = rs
self.rad = Tkinter.DoubleVar(ff)
self.rad.set ( 100 )
smod = self.GetMod ( "Icosahedron Faces" )
if smod != None :
print "Found faces..."
verts, tris = smod.icosVerts0, smod.icosTris
p1 = smod.icosVerts [ tris[0][0] ]
r = numpy.sqrt ( numpy.sum(p1*p1) )
p1 = smod.icosVerts0 [ tris[0][0] ]
r0 = numpy.sqrt ( numpy.sum(p1*p1) )
print " - rad %.4f, orig: %.4f" % (r, r0)
self.rad.set ( r )
self.radius = Tkinter.Scale(ff, from_=0, to=1500, variable=self.rad, orient=Tkinter.HORIZONTAL, length=350, command=self.radius_changed_cb)
self.radius.grid(column=2, row=0, sticky='w', padx=5, pady=1, columnspan=10)
row = row + 1
#ff = Tkinter.Frame(f)
#ff.grid(column=0, row=row, sticky='w')
#w = Scale(from_=0, to=100, resolution=0.1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " 5. Cross-correlation / Mask densities between", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
row += 1
ff = Tkinter.Frame(f)
ff.grid(column=0, row=row, sticky='w')
if 1 :
l = Tkinter.Label(ff, text = " start radius: ", anchor = 'w')
l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)
self.startRad = Tkinter.StringVar(ff)
e = Tkinter.Entry(ff, width=7, textvariable=self.startRad)
e.grid(column=1, row=0, sticky='w', padx=5, pady=1)
l = Tkinter.Label(ff, text = ", end radius: ", anchor = 'w')
l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)
self.endRad = Tkinter.StringVar(ff)
e = Tkinter.Entry(ff,
|
metasmile/strsync
|
strsync/strsync.py
|
Python
|
gpl-3.0
| 26,146
| 0.004169
|
# -*- coding: utf-8 -*-
# strsync - Automatically translate and synchronize .strings files from defined base language.
# Copyright (c) 2015 metasmile [email protected] (github.com/metasmile)
from __future__ import print_function
import strparser, strparser_intentdefinition, strlocale, strtrans
import time, os, sys, argparse, codecs, csv
from os.path import expanduser
from fuzzywuzzy import fuzz
from colorama import init
from colorama import Fore, Back, Style
import unicodedata2
init(autoreset=True)
def len_unicode(ustr):
return len(unicodedata2.normalize('NFC', ustr.decode('utf-8')))
def resolve_file_path(file):
return os.path.join(os.path.dirname(__file__), file)
def join_path_all(target_dir, target_files):
return map(lambda f: os.path.join(target_dir, f), target_files)
def rget(dictionary, key):
items = []
if key in dictionary:
items.append(dictionary[key])
for dict_value in [value for value in dictionary.values() if isinstance(value, dict)]:
items += rget(dict_value, key)
return items
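# Hedged usage sketch (added for illustration, not in the original script):
# rget collects every value stored under `key` at any nesting depth, so the
# hypothetical dictionary below yields both translations.
def _rget_example():
    nested = {'greeting': 'hello', 'child': {'greeting': 'bonjour'}}
    assert rget(nested, 'greeting') == ['hello', 'bonjour']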
def main():
parser = argparse.ArgumentParser(
description='Automatically translate and synchronize .strings files from defined base language.')
parser.add_argument('-b', '--base-lang-name',
help='A base(or source) localizable resource name.(default=\'Base\'), (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default='Base', required=False)
parser.add_argument('-x', '--excluding-lang-names', type=str,
help='A localizable resource name that you want to exclude. (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default=[], required=False, nargs='+')
parser.add_argument('-f', '--force-translate-keys', type=str,
help='Keys in the strings to update and translate by force. (input nothing for all keys.)',
default=[], required=False, nargs='*')
    parser.add_argument('-o', '--following-base-keys', type=str, help='Keys in the strings to follow from "Base".',
default=[], required=False, nargs='+')
    parser.add_argument('-w', '--following-base-if-not-exists', type=str, help='With this option, all keys will be followed up with base values if they do not exist.',
default=None, required=False, nargs='*')
parser.add_argument('-l', '--cutting-length-ratio-with-base', type=float,
                        help='A float ratio used to compare string length against the "Base" value.',
default=[], required=False, nargs='+')
parser.add_argument('-c', '--ignore-comments', help='Allows ignoring comment synchronization.', default=None,
required=False, nargs='*')
parser.add_argument('-v', '--verify-results', help='Verify translated results via reversed results', default=None,
required=False, nargs='*')
parser.add_argument('-s', '--include-secondary-languages', help='Include Additional Secondary Languages. (+63 language codes)', default=None,
required=False, nargs='*')
parser.add_argument('-i', '--ignore-unverified-results',
help='Allows ignoring unverified results when appending them.', default=None, required=False,
nargs='*')
parser.add_argument('target path', help='Target localization resource path. (root path of Base.lproj, default=./)',
default='./', nargs='?')
    parser.add_argument('only for keys', help='Some specified keys for exclusive work. All operations will work only for those keys; other keys will be ignored. Not specified by default. (default=None)',
default=None, nargs='*')
args = vars(parser.parse_args())
reload(sys)
sys.setdefaultencoding('utf-8')
# configure argume
|
nts
__LOCALE_XCODE_BASE_LOWERCASE__ = 'base'
__DIR_SUFFIX__ = ".lproj"
__FILE_SUFFIX__ = ".strings"
__FILE_INTENT_SUFFIX__ = ".intentdefinition"
__FILE_DICT_SUFFIX__ = ".stringsdict"
__RESOURCE_PATH__ = expanduser(args['target path'])
__ONLY_FOR_KEYS__ = args['only for keys']
__BASE_LANG__ = args['base_lang_name']
__EXCLUDING_LANGS__ = args['excluding_lang_names']
__KEYS_FORCE_TRANSLATE__ = args['force_translate_keys']
|
__KEYS_FORCE_TRANSLATE_ALL__ = ('--force-translate-keys' in sys.argv or '-f' in sys.argv) and not __KEYS_FORCE_TRANSLATE__
__KEYS_FOLLOW_BASE__ = args['following_base_keys']
__CUTTING_LENGTH_RATIO__ = (args['cutting_length_ratio_with_base'] or [0])[0]
__FOLLOWING_ALL_KEYS_IFNOT_EXIST__ = args['following_base_if_not_exists'] is not None
__IGNORE_COMMENTS__ = args['ignore_comments'] is not None
__IGNORE_UNVERIFIED_RESULTS__ = args['ignore_unverified_results'] is not None
__RATIO_TO_IGNORE_UNVERIFIED_RESULTS__ = int(
args['ignore_unverified_results'][0]) if __IGNORE_UNVERIFIED_RESULTS__ and len(
args['ignore_unverified_results']) else 0
__VERIFY_TRANS_RESULTS__ = __IGNORE_UNVERIFIED_RESULTS__ or args['verify_results'] is not None
__INCLUDE_SECONDARY_LANGUAGES__ = args['include_secondary_languages'] is not None
# Locale settings
# [language designator] en, fr
# [language designator]_[region designator] en_GB, zh_HK
# [language designator]-[script designator] az-Arab, zh-Hans
# [language designator]-[script designator]_[region designator] zh-Hans_HK
print('(i) Initializing for supported languages ...')
__lang_codes = strlocale.default_supporting_xcode_lang_codes()
if __INCLUDE_SECONDARY_LANGUAGES__:
__lang_codes += strlocale.secondary_supporting_xcode_lang_codes()
__XCODE_LPROJ_SUPPORTED_LOCALES_MAP__ = strlocale.map_locale_codes(__lang_codes, strtrans.supported_locale_codes())
__XCODE_LPROJ_SUPPORTED_LOCALES__ = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__.keys()
print(Fore.WHITE + '(i) Supported numbers of locale code :', str(len(__XCODE_LPROJ_SUPPORTED_LOCALES__)),
Style.RESET_ALL)
print(__XCODE_LPROJ_SUPPORTED_LOCALES__)
# handle base
if __BASE_LANG__.endswith(__DIR_SUFFIX__):
__BASE_RESOUCE_DIR__ = __BASE_LANG__
__BASE_LANG__ = __BASE_LANG__.split(__DIR_SUFFIX__)[0]
else:
__BASE_RESOUCE_DIR__ = __BASE_LANG__ + __DIR_SUFFIX__
if not __BASE_LANG__.lower() == __LOCALE_XCODE_BASE_LOWERCASE__:
__BASE_LANG__ = strlocale.lang(__BASE_LANG__)
# setup Translator & langs
# read ios langs
print(Fore.WHITE + '(i) Fetching supported locale codes for ios9 ...', Style.RESET_ALL)
__IOS9_CODES__ = [lang_row[0] for lang_row in
csv.reader(open(resolve_file_path('lc_ios9.tsv'), 'rb'), delimiter='\t')]
print(Fore.WHITE + '(i) Supported numbers of locale code :', len(__IOS9_CODES__), Style.RESET_ALL)
global_result_logs = {}
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
# core function
def synchronize(target_file, lc): #add,remove, update (translate or copy from base)
# parse target file
target_kv = {}
target_kc = {}
target_error_lines = []
if not notexist_or_empty_file(target_file):
parsed_strings = strparser.parse_strings(filename=target_file)
for item in parsed_strings:
k, e = item['key'], item['error']
# line error
if e:
target_error_lines.append(e)
if not target_error_lines:
target_kv[k] = item['value']
target_kc[k] = item['comment']
# parsing complete or return.
if target_error_lines:
print('(!) Syntax error - Skip')
return False, None, None, target_error_lines
# base
base_content = base_dict[os.path.basename(target_file)]
base_kv = {}
base_kc = {}
for item in base_content:
k, e = item['key'], item['error']
# line
|
DemocracyClub/yournextrepresentative
|
ynr/apps/candidates/management/commands/candidates_create_csv.py
|
Python
|
agpl-3.0
| 3,846
| 0.00026
|
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.core.management.base import BaseCommand, CommandError
from candidates.csv_helpers import list_to_csv, memberships_dicts_for_csv
from elections.models import Election
def safely_write(output_filename, memberships_list):
"""
Use Django's storage backend to write the CSV file to the MEDIA_ROOT.
If using S3 (via Django Storages) the file is atomically written when the
file is closed (when the context manager closes).
That is, the file can be opened and written to but nothing changes at
the public S3 URL until the object is closed. Meaning it's not possible to
|
have a half written file.
If not using S3, there will be a short time where the file is empty
during write.
"""
csv = list_to_csv(memberships_list)
file_store = DefaultStorage()
with file_store.open(output_filename, "wb") as out_file:
out_file.write(csv.encode("utf-8"))
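# Illustrative usage (not part of the original command): writing a small,
# hand-made membership list through the storage backend. The file name and
# row keys are hypothetical; real rows come from memberships_dicts_for_csv().
def _example_safely_write():
    rows = [{"name": "Example Candidate", "party": "Example Party"}]
    safely_write("candidates-example.csv", rows)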
class Command(BaseCommand):
help = "Output CSV files for all elections"
def add_arguments(self, parser):
parser.add_argumen
|
t(
"--site-base-url",
help="The base URL of the site (for full image URLs)",
)
parser.add_argument(
"--election",
metavar="ELECTION-SLUG",
help="Only output CSV for the election with this slug",
)
def slug_to_file_name(self, slug):
return "{}-{}.csv".format(self.output_prefix, slug)
def handle(self, **options):
if options["election"]:
try:
election = Election.objects.get(slug=options["election"])
election_slug = election.slug
except Election.DoesNotExist:
message = "Couldn't find an election with slug {election_slug}"
raise CommandError(
message.format(election_slug=options["election"])
)
else:
election_slug = None
self.options = options
self.output_prefix = "candidates"
membership_by_election, elected_by_election = memberships_dicts_for_csv(
election_slug
)
# Write a file per election, optionally adding candidates
# We still want a file to exist if there are no candidates yet,
        # as the files are linked to as soon as the election is created
election_qs = Election.objects.all()
if election_slug:
election_qs = election_qs.filter(slug=election_slug)
for election in election_qs:
safely_write(
self.slug_to_file_name(election.slug),
membership_by_election.get(election.slug, []),
)
# Make a CSV file per election date
slugs_by_date = defaultdict(list)
for slug in membership_by_election.keys():
slugs_by_date[slug.split(".")[-1]].append(slug)
for date, slugs in slugs_by_date.items():
memberships_for_date = []
for slug in slugs:
memberships_for_date += membership_by_election[slug]
safely_write(self.slug_to_file_name(date), memberships_for_date)
# If we're not outputting a single election, output all elections
if not election_slug:
sorted_elections = sorted(
membership_by_election.keys(),
key=lambda key: key.split(".")[-1],
)
all_memberships = []
all_elected = []
for slug in sorted_elections:
all_memberships += membership_by_election[slug]
all_elected += elected_by_election[slug]
safely_write(self.slug_to_file_name("all"), all_memberships)
safely_write(self.slug_to_file_name("elected-all"), all_elected)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/effective_network_security_rule_py3.py
|
Python
|
mit
| 5,742
| 0.003657
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityRule(Model):
"""Effective network security rules.
:param name: The name of the security rule specified by the user (if
created by the user).
:type name: str
:param protocol: The network protocol this rule applies to. Possible
values are: 'Tcp', 'Udp', and 'All'. Possible values include: 'Tcp',
'Udp', 'All'
:type protocol: str or
~azure.mgmt.network.v2017_10_01.models.EffectiveSecurityRuleProtocol
:param source_port_range: The source
|
port or range.
:type source_port_range: str
:pa
|
ram destination_port_range: The destination port or range.
:type destination_port_range: str
    :param source_port_ranges: The source port ranges. Expected values include
     a single integer between 0 and 65535, a range using '-' as separator (e.g.
     100-400), or an asterisk (*)
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges. Expected
     values include a single integer between 0 and 65535, a range using '-' as
     separator (e.g. 100-400), or an asterisk (*)
:type destination_port_ranges: list[str]
:param source_address_prefix: The source address prefix.
:type source_address_prefix: str
:param destination_address_prefix: The destination address prefix.
:type destination_address_prefix: str
    :param source_address_prefixes: The source address prefixes. Expected
     values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
    :type source_address_prefixes: list[str]
    :param destination_address_prefixes: The destination address prefixes.
     Expected values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
:type destination_address_prefixes: list[str]
:param expanded_source_address_prefix: The expanded source address prefix.
:type expanded_source_address_prefix: list[str]
:param expanded_destination_address_prefix: Expanded destination address
prefix.
:type expanded_destination_address_prefix: list[str]
:param access: Whether network traffic is allowed or denied. Possible
values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
:type access: str or
~azure.mgmt.network.v2017_10_01.models.SecurityRuleAccess
:param priority: The priority of the rule.
:type priority: int
:param direction: The direction of the rule. Possible values are: 'Inbound
and Outbound'. Possible values include: 'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2017_10_01.models.SecurityRuleDirection
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
'access': {'key': 'access', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'direction': {'key': 'direction', 'type': 'str'},
}
def __init__(self, *, name: str=None, protocol=None, source_port_range: str=None, destination_port_range: str=None, source_port_ranges=None, destination_port_ranges=None, source_address_prefix: str=None, destination_address_prefix: str=None, source_address_prefixes=None, destination_address_prefixes=None, expanded_source_address_prefix=None, expanded_destination_address_prefix=None, access=None, priority: int=None, direction=None, **kwargs) -> None:
super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
self.name = name
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_port_ranges = source_port_ranges
self.destination_port_ranges = destination_port_ranges
self.source_address_prefix = source_address_prefix
self.destination_address_prefix = destination_address_prefix
self.source_address_prefixes = source_address_prefixes
self.destination_address_prefixes = destination_address_prefixes
self.expanded_source_address_prefix = expanded_source_address_prefix
self.expanded_destination_address_prefix = expanded_destination_address_prefix
self.access = access
self.priority = priority
self.direction = direction
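# Hedged construction sketch (not part of the generated model): building an
# instance with keyword-only arguments, as the __init__ signature above
# requires. The concrete values are illustrative only.
def _example_effective_rule():
    return EffectiveNetworkSecurityRule(
        name='allow-https-inbound',
        protocol='Tcp',
        destination_port_range='443',
        source_address_prefix='*',
        destination_address_prefix='10.0.0.0/24',
        access='Allow',
        priority=100,
        direction='Inbound',
    )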
|
wolfv/SilverFlask
|
silverflask/controllers/security_controller.py
|
Python
|
bsd-2-clause
| 1,824
| 0.002193
|
from flask import render_template, jsonify, url_for, abort, request, redirect, current_app
from flask_wtf import Form
from flask_user import current_user
from silverflask import db
from
|
silverflask.models import User
from silv
|
erflask.fields import GridField
from silverflask.core import Controller
from silverflask.controllers.cms_controller import CMSController
class SecurityController(CMSController):
url_prefix = CMSController.url_prefix + '/security'
urls = {
'/edit/<int:record_id>': 'edit_user',
'/gridfield': 'get_users',
'/': 'form'
}
allowed_actions = {
'edit_user'
}
@staticmethod
def edit_user(record_id):
user_obj = db.session.query(User).get(record_id)
if not user_obj:
            abort(404, "Not found")
form_class = User.get_cms_form()
form = form_class(request.form, obj=user_obj)
if form.validate_on_submit():
form.populate_obj(user_obj)
if form['new_password'].data:
user_obj.set_password(form['new_password'].data)
db.session.commit()
return redirect(url_for(".form"))
return render_template("data_object/edit.html", elem=user_obj, form=form)
@staticmethod
def get_users():
q = User.query.all()
res = []
for r in q:
d = r.as_dict()
d.update({"edit_url": url_for(".edit_user", record_id=r.id)})
res.append(d)
return jsonify(data=res)
@staticmethod
def form():
class SecurityForm(Form):
gridfield = GridField(
urls={"get": url_for(".get_users")},
buttons=[],
display_cols=["id", "name"]
)
return render_template("assetmanager.html", form=SecurityForm())
|
icarito/sugar
|
extensions/cpsection/webaccount/view.py
|
Python
|
gpl-3.0
| 4,477
| 0
|
# Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.webservice.accountsmanager import get_webaccount_services
from jarabe.controlpanel.sectionview import SectionView
from sugar3.graphics.icon import CanvasIcon, Icon
from sugar3.graphics import style
def get_service_name(service):
if hasattr(service, '_account'):
if hasattr(service._account, 'get_description'):
return service._account.get_description()
return ''
class WebServicesConfig(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = model
self.restart_alerts = alerts
services = get_webaccount_services()
grid = Gtk.Grid()
if len(services) == 0:
grid.set_row_spacing(style.DEFAULT_SPACING)
icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
icon_name='module-webaccount',
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
grid.attach(icon, 0, 0, 1, 1)
icon.show()
label = Gtk.Label()
label.set_justify(Gtk.Justification.CENTER)
label.set_markup(
'<span foreground="%s" size="large">%s</span>'
% (style.COLOR_BUTTON_GREY.get_html(),
GLib.markup_escape_text(
_('No web services are installed.\n'
'Please visit %s for more details.' %
'http://wiki.sugarlabs.org/go/WebServices'))))
label.show()
grid.attach(label, 0, 1, 1, 1)
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
alignment.add(grid)
grid.show()
self.add(alignment)
alignment.show()
return
grid.set_row_spacing(style.DEFAULT_SPACING * 4)
grid.set_column_s
|
pacing(style.DEFAULT_SPACING * 4)
grid.set_border_width(style.DEFAULT_SPACING * 2)
grid.set_column_homogeneous(True)
width = Gdk.Screen.width() - 2 * style.GRID_CELL_SIZE
nx = int(width / (style.GRID_CELL_SIZE + style.DEFAULT_SPACING * 4))
self._servic
|
e_config_box = Gtk.VBox()
x = 0
y = 0
for service in services:
service_grid = Gtk.Grid()
icon = CanvasIcon(icon_name=service.get_icon_name())
icon.show()
service_grid.attach(icon, x, y, 1, 1)
icon.connect('activate', service.config_service_cb, None,
self._service_config_box)
label = Gtk.Label()
label.set_justify(Gtk.Justification.CENTER)
name = get_service_name(service)
label.set_markup(name)
service_grid.attach(label, x, y + 1, 1, 1)
label.show()
grid.attach(service_grid, x, y, 1, 1)
service_grid.show()
x += 1
if x == nx:
x = 0
y += 1
alignment = Gtk.Alignment.new(0.5, 0, 0, 0)
alignment.add(grid)
grid.show()
vbox = Gtk.VBox()
vbox.pack_start(alignment, False, False, 0)
alignment.show()
scrolled = Gtk.ScrolledWindow()
vbox.pack_start(scrolled, True, True, 0)
self.add(vbox)
scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scrolled.show()
workspace = Gtk.VBox()
scrolled.add_with_viewport(workspace)
workspace.show()
workspace.add(self._service_config_box)
workspace.show_all()
vbox.show()
def undo(self):
pass
|
lsaffre/atelier
|
atelier/invlib/utils.py
|
Python
|
bsd-2-clause
| 8,921
| 0.000897
|
# -*- coding: UTF-8 -*-
# Copyright 2017-2021 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""Utilities for atelier.invlib
"""
from invoke.exceptions import Exit
from atelier.utils import confirm, cd
def must_confirm(*args, **kwargs):
if not confirm(''.join(args)):
raise Exit("User failed to confirm.")
def must_exist(p):
if not p.exists():
raise Exception("No such file: %s" % p.absolute())
def run_cmd(ctx, chdir, args):
cmd = ' '.join(map(str, args))
print("Invoke {}".format(cmd))
with cd(chdir):
ctx.run(cmd, pty=True)
class DocTree(object):
"""
Base class for a doctree descriptor. Atelier currently supports
`Sphinx <http://www.sphinx-doc.org/en/stable/>`__ and `Nikola
<https://getnikola.com/>`__ docs.
"""
src_path = None
out_path = None
has_intersphinx = False
# html_baseurl = None
conf_globals = None
def __init__(self, prj, rel_doc_tree):
self.rel_path = rel_doc_tree
self.prj = prj
if rel_doc_tree in ('', '.'):
src_path = prj.root_dir
else:
src_path = prj.root_dir / rel_doc_tree
# The src_path may not exist if this is on a Project which
# has been created from a normally installed main_package
# (because there it has no source code).
if src_path.exists():
self.src_path = src_path
def __repr__(self):
return "{}({!r}, {!r})".format(self.__class__, self.prj, self.rel_path)
def __str__(self):
return self.rel_path
def make_messages(self, ctx):
pass
def build_docs(self, ctx, *cmdline_args):
raise NotImplementedError()
def publish_docs(self, ctx):
# build_dir = docs_dir / ctx.build_dir_name
if self.src_path is None:
return
build_dir = self.out_path
if build_dir.exists():
docs_dir = self.src_path
# name = '%s_%s' % (ctx.project_name, docs_dir.name)
# dest_ur
|
l = ctx.docs_rsync_dest % name
if "%" in ctx.docs_rsync_dest:
name = '%s_%s' % (ctx.project_name, docs_dir.name)
dest_url = ctx.docs_rsync_dest % name
else:
dest_url = ctx.docs_rsync_dest.format(
prj=ctx.project_name, docs=docs_dir.name)
self.publish_do
|
c_tree(ctx, build_dir, dest_url)
def publish_doc_tree(self, ctx, build_dir, dest_url):
print("Publish to ", dest_url)
with cd(build_dir):
args = ['rsync', '-e', 'ssh', '-r']
args += ['--verbose']
args += ['--progress'] # show progress
args += ['--delete'] # delete files in dest
args += ['--times'] # preserve timestamps
# the problem with --times is that it fails when several
# users can publish to the same server alternatively.
# Only the owner of a file can change the mtime, other
# users can't, even if they have write permission through
# the group.
args += ['--exclude', '.doctrees']
args += ['./'] # source
args += [dest_url] # dest
cmd = ' '.join(args)
# must_confirm("%s> %s" % (build_dir, cmd))
ctx.run(cmd, pty=True)
class SphinxTree(DocTree):
"""
The default docs builder using Sphinx.
:cmd:`sphinx-build`
.. command:: sphinx-build
http://www.sphinx-doc.org/en/stable/invocation.html#invocation-of-sphinx-build
"""
has_intersphinx = True
def __init__(self, prj, src_path):
super(SphinxTree, self).__init__(prj, src_path)
if self.src_path is None:
return
cfg = prj.config
self.out_path = self.src_path / cfg['build_dir_name']
def make_messages(self, ctx):
if self.src_path is None:
return
self.load_conf()
translated_languages = self.conf_globals.get('translated_languages', [])
if len(translated_languages):
# Extract translatable messages into pot files (sphinx-build -M gettext ./ .build/)
args = ['sphinx-build', '-b', 'gettext', '.', self.out_path]
run_cmd(ctx, self.src_path, args)
# Create or update the .pot files (sphinx-intl update -p .build/gettext -l de -l fr)
args = ['sphinx-intl', 'update', '-p', self.out_path / "gettext"]
for lng in translated_languages:
args += ['-l', lng]
run_cmd(ctx, self.src_path, args)
def build_docs(self, ctx, *cmdline_args):
if self.src_path is None:
return
docs_dir = self.src_path
print("Invoking Sphinx in directory %s..." % docs_dir)
builder = 'html'
if ctx.use_dirhtml:
builder = 'dirhtml'
self.sphinx_build(ctx, builder, docs_dir, cmdline_args)
self.load_conf()
translated_languages = self.conf_globals.get('translated_languages', [])
for lng in translated_languages:
self.sphinx_build(ctx, builder, docs_dir, cmdline_args, lng)
self.sync_docs_data(ctx, docs_dir)
def load_conf(self):
if self.src_path is None:
return
if self.conf_globals is not None:
return
conf_py = self.src_path / "conf.py"
self.conf_globals = {'__file__': conf_py}
code = compile(open(conf_py, "rb").read(), conf_py, 'exec')
exec(code, self.conf_globals)
# self.html_baseurl = conf_globals.get("html_baseurl", None)
def __str__(self):
if self.src_path is None:
return super(SphinxTree, self).__str__()
self.load_conf()
return u"{}->{}".format(self.rel_path, self.conf_globals.get('html_title'))
def sphinx_build(self, ctx, builder, docs_dir,
cmdline_args=[], language=None, build_dir_cmd=None):
if self.out_path is None:
return
# args = ['sphinx-build', builder]
args = ['sphinx-build', '-b', builder]
args += ['-T'] # show full traceback on exception
args += cmdline_args
# ~ args += ['-a'] # all files, not only outdated
# ~ args += ['-P'] # no postmortem
# ~ args += ['-Q'] # no output
build_dir = self.out_path
if language is not None:
args += ['-D', 'language=' + language]
# needed in select_lang.html template
args += ['-A', 'language=' + language]
# if language != ctx.languages[0]:
build_dir = build_dir / language
# seems that the default location for the .doctrees directory
# is no longer in .build but the source directory.
args += ['-d', str(build_dir / '.doctrees')]
if ctx.tolerate_sphinx_warnings:
args += ['-w', 'warnings_%s.txt' % builder]
else:
args += ['-W'] # consider warnings as errors
args += ['--keep-going'] # but keep going until the end to show them all
# args += ['-vvv'] # increase verbosity
# args += ['-w'+Path(ctx.root_dir,'sphinx_doctest_warnings.txt')]
args += ['.', str(build_dir)]
run_cmd(ctx, docs_dir, args)
if build_dir_cmd is not None:
with cd(build_dir):
ctx.run(build_dir_cmd, pty=True)
def sync_docs_data(self, ctx, docs_dir):
# build_dir = docs_dir / ctx.build_dir_name
if self.src_path is None:
return
build_dir = self.out_path
for data in ('dl', 'data'):
src = (docs_dir / data).absolute()
if src.is_dir():
target = build_dir / 'dl'
target.mkdir(exist_ok=True)
cmd = 'cp -ur %s %s' % (src, target.parent)
ctx.run(cmd, pty=True)
if False:
# according to http://mathiasbynens.be/notes/rel-shortcut-icon
for n in ['favicon.ico']:
src = (docs_dir / n).absolute()
if src.exists():
target = build_dir / n
cmd = 'cp %s %s' % (src,
|
tylertian/Openstack
|
openstack F/cinder/cinder/api/sizelimit.py
|
Python
|
apache-2.0
| 1,789
| 0.001118
|
# vim: tabstop=4
|
shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
|
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body limiting middleware.
"""
import webob.dec
import webob.exc
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder import wsgi
#default request size is 112k
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
default=114688,
help='Max size for body of a request')
FLAGS = flags.FLAGS
FLAGS.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
"""Add a 'cinder.context' to WSGI environ."""
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if (req.content_length > FLAGS.osapi_max_request_body_size
or len(req.body) > FLAGS.osapi_max_request_body_size):
msg = _("Request is too large.")
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
return self.application
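# Hedged usage sketch (not in the original module): wrapping a WSGI
# application directly, assuming cinder.wsgi.Middleware takes the wrapped
# application as its first constructor argument (the usual OpenStack
# middleware convention). Oversized requests are then rejected with
# 400 Bad Request before reaching `app`.
def _example_wrap(app):
    return RequestBodySizeLimiter(app)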
|
googleapis/python-dns
|
tests/unit/test_changes.py
|
Python
|
apache-2.0
| 12,894
| 0.000388
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestChanges(unittest.TestCase):
PROJECT = "project"
ZONE_NAME = "example.com"
CHANGES_NAME = "changeset_id"
@staticmethod
def _get_target_class():
from google.cloud.dns.changes import Changes
return Changes
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _s
|
etUpConstants(self):
from google.cloud._helpers import UTC
from google.cloud._helpers import _NOW
self.WHEN = _NOW().replace(tzinfo=UTC)
def _make_resource(se
|
lf):
from google.cloud._helpers import _datetime_to_rfc3339
when_str = _datetime_to_rfc3339(self.WHEN)
return {
"kind": "dns#change",
"id": self.CHANGES_NAME,
"startTime": when_str,
"status": "done",
"additions": [
{
"name": "test.example.com",
"type": "CNAME",
"ttl": "3600",
"rrdatas": ["www.example.com"],
}
],
"deletions": [
{
"name": "test.example.com",
"type": "CNAME",
"ttl": "86400",
"rrdatas": ["other.example.com"],
}
],
}
def _verifyResourceProperties(self, changes, resource, zone):
from google.cloud._helpers import _rfc3339_to_datetime
self.assertEqual(changes.name, resource["id"])
started = _rfc3339_to_datetime(resource["startTime"])
self.assertEqual(changes.started, started)
self.assertEqual(changes.status, resource["status"])
r_additions = resource.get("additions", ())
self.assertEqual(len(changes.additions), len(r_additions))
for found, expected in zip(changes.additions, r_additions):
self.assertEqual(found.name, expected["name"])
self.assertEqual(found.record_type, expected["type"])
self.assertEqual(found.ttl, int(expected["ttl"]))
self.assertEqual(found.rrdatas, expected["rrdatas"])
self.assertIs(found.zone, zone)
r_deletions = resource.get("deletions", ())
self.assertEqual(len(changes.deletions), len(r_deletions))
for found, expected in zip(changes.deletions, r_deletions):
self.assertEqual(found.name, expected["name"])
self.assertEqual(found.record_type, expected["type"])
self.assertEqual(found.ttl, int(expected["ttl"]))
self.assertEqual(found.rrdatas, expected["rrdatas"])
self.assertIs(found.zone, zone)
def test_ctor(self):
zone = _Zone()
changes = self._make_one(zone)
self.assertIs(changes.zone, zone)
self.assertIsNone(changes.name)
self.assertIsNone(changes.status)
self.assertIsNone(changes.started)
self.assertEqual(list(changes.additions), [])
self.assertEqual(list(changes.deletions), [])
def test_from_api_repr_missing_additions_deletions(self):
self._setUpConstants()
RESOURCE = self._make_resource()
del RESOURCE["additions"]
del RESOURCE["deletions"]
zone = _Zone()
klass = self._get_target_class()
changes = klass.from_api_repr(RESOURCE, zone=zone)
self._verifyResourceProperties(changes, RESOURCE, zone)
def test_from_api_repr(self):
self._setUpConstants()
RESOURCE = self._make_resource()
zone = _Zone()
klass = self._get_target_class()
changes = klass.from_api_repr(RESOURCE, zone=zone)
self._verifyResourceProperties(changes, RESOURCE, zone)
def test_name_setter_bad_value(self):
zone = _Zone()
changes = self._make_one(zone)
with self.assertRaises(ValueError):
changes.name = 12345
def test_name_setter(self):
zone = _Zone()
changes = self._make_one(zone)
changes.name = "NAME"
self.assertEqual(changes.name, "NAME")
def test_add_record_set_invalid_value(self):
zone = _Zone()
changes = self._make_one(zone)
with self.assertRaises(ValueError):
changes.add_record_set(object())
def test_add_record_set(self):
from google.cloud.dns.resource_record_set import ResourceRecordSet
zone = _Zone()
changes = self._make_one(zone)
rrs = ResourceRecordSet(
"test.example.com", "CNAME", 3600, ["www.example.com"], zone
)
changes.add_record_set(rrs)
self.assertEqual(list(changes.additions), [rrs])
def test_delete_record_set_invalid_value(self):
zone = _Zone()
changes = self._make_one(zone)
with self.assertRaises(ValueError):
changes.delete_record_set(object())
def test_delete_record_set(self):
from google.cloud.dns.resource_record_set import ResourceRecordSet
zone = _Zone()
changes = self._make_one(zone)
rrs = ResourceRecordSet(
"test.example.com", "CNAME", 3600, ["www.example.com"], zone
)
changes.delete_record_set(rrs)
self.assertEqual(list(changes.deletions), [rrs])
def test_create_wo_additions_or_deletions(self):
self._setUpConstants()
RESOURCE = self._make_resource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
zone = _Zone(client)
changes = self._make_one(zone)
with self.assertRaises(ValueError):
changes.create()
self.assertEqual(len(conn._requested), 0)
def test_create_w_bound_client(self):
from google.cloud.dns.resource_record_set import ResourceRecordSet
self._setUpConstants()
RESOURCE = self._make_resource()
PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
zone = _Zone(client)
changes = self._make_one(zone)
changes.add_record_set(
ResourceRecordSet(
"test.example.com", "CNAME", 3600, ["www.example.com"], zone
)
)
changes.delete_record_set(
ResourceRecordSet(
"test.example.com", "CNAME", 86400, ["other.example.com"], zone
)
)
changes.create()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/%s" % PATH)
SENT = {"additions": RESOURCE["additions"], "deletions": RESOURCE["deletions"]}
self.assertEqual(req["data"], SENT)
self._verifyResourceProperties(changes, RESOURCE, zone)
def test_create_w_alternate_client(self):
from google.cloud.dns.resource_record_set import ResourceRecordSet
self._setUpConstants()
RESOURCE = self._make_resource()
PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
zone = _Zone(client1)
changes = self._make_one(zone)
changes.add_record_set(
ResourceRecordSet(
"test.example.com", "CNAME", 3600, ["www.example.com"], zone
)
)
changes.delete
|
tuck182/syslog-ng-mod-lumberjack-py
|
src/lumberjack/client/process.py
|
Python
|
gpl-2.0
| 3,727
| 0.015562
|
from lumberjack.client.file_descriptor import FileDescriptorEndpoint
from lumberjack.client.message_receiver import MessageReceiverFactory
from lumberjack.client.message_forwarder import RetryingMessageForwarder
from lumberjack.client.protocol import LumberjackProtocolFactory
from lumberjack.util.object_pipe import ObjectPipe
from multiprocessing import Process
from twisted.internet import ssl, task, defer, endpoints
from twisted.python.filepath import FilePath
class ClientChild(object):
_on_shutdown = defer.Deferred()
def __init__(self, pipe, shutdown_message, **kwargs):
self._pipe = pipe
self._shutdown_message = shutdown_message
pass
def __call__(self, *args, **kwargs):
self._pipe.close_writer()
task.react(lambda reactor: self.init_reactor(reactor, *args, **kwargs))
def init_reactor(self, reactor, servers, ssl_certificate, *args, **kwargs):
forwarder = self.create_message_forwarder(reactor)
self.create_message_reader(reactor, forwarder)
self.create_ssl_client(reactor, forwarder, servers[0], ssl_certificate)
# Create a defer which, when fired, will shut down the app
done = defer.Deferred()
self._on_shutdown.addCallback(lambda x: done.callback(x))
return done
def on_shutdown(self):
print("got shutdown message")
def create_ssl_client(self, reactor, forwarder, server, ssl_certificate):
factory = LumberjackProtocolFactory(forwarder)
host, port = self.parse_server(server)
options = self.create_ssl_context(host, ssl_certificate)
connector = reactor.connectSSL(host, port, factory, options)
return connector
def parse_server(self, server_string):
try:
host, port = server_string.split(':')
return host, int(port)
except ValueError:
return server_string, 5043
def create_ssl_context(self, host, ssl_certificate):
#ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#ssl_context.load_verify_locations(cafile = ssl_certificate)
#ssl_context.verify_mode = ssl.CERT_REQUIRED
certData = FilePath(ssl_certificate).getContent()
authority = ssl.Certificate.loadPEM(certData)
options = ssl.optionsForClientTLS(host, authority)
return options
def create_message_reader(self, reactor
|
, f
|
orwarder):
factory = MessageReceiverFactory(forwarder, shutdown_params = ShutdownParams(
message = self._shutdown_message,
deferred = self._on_shutdown
))
endpoint = FileDescriptorEndpoint(reactor, self._pipe.get_reader().fileno())
endpoint.listen(factory)
return endpoint
def create_message_forwarder(self, reactor):
forwarder = RetryingMessageForwarder()
return forwarder
def acknowledge_sent(self, msg_id):
self._queue.acknowledge(msg_id)
# FIXME: Need to handle monitoring of child process and restart if lost
# FIXME: Need to ensure pipe doesn't block if child can't be written to
class ClientProcess(object):
_pipe = None
_shutdown_message = "SHUTDOWN"
def __init__(self, **kwargs):
self._pipe = ObjectPipe()
self._thread = Process(
target = ClientChild(
pipe = self._pipe,
shutdown_message = self._shutdown_message,
**kwargs),
name = "lumberjack.Client",
kwargs = kwargs
)
def start(self):
self._thread.start()
self._pipe.close_reader()
def write(self, message):
self._pipe.write(message)
def shutdown(self, graceful = True):
self.write(self._shutdown_message)
self._pipe.close_writer()
if (graceful):
self._thread.join()
else:
self._thread.terminate()
class ShutdownParams(object):
def __init__(self, message, deferred):
self.message = message
self.deferred = deferred
|
viktorTarasov/PyKMIP
|
kmip/services/server/crypto/api.py
|
Python
|
apache-2.0
| 2,580
| 0
|
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
from abc import abstractmethod
import six
@six.add_metaclass(ABCMeta)
class CryptographicEngine(object):
"""
The abstract base class of the cryptographic engine hierarchy.
A cryptographic engine is responsible for generating all cryptographic
objects and conducting all cryptographic operations for a KMIP server
instance.
"""
@abstractmethod
def create_symmetric_key(self, algorithm, length):
"""
Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
"""
@abstractmethod
def create_asymmetric_key_pair(self, algorithm, length):
"""
|
Create an asymmetric key pair.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created keys will be compliant.
length(int): The length of the keys to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
|
dict: A dictionary containing the public key data, with the
following key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
dict: A dictionary containing the private key data, identical in
structure to the public key dictionary.
"""
|
Azulinho/flocker
|
flocker/provision/_install.py
|
Python
|
apache-2.0
| 36,812
| 0
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo, sudo_from_args,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restarted (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"
ZFS_REPO = {
'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
"epel/zfs-release.el7.noarch.rpm",
}
ARCHIVE_BUCKET = 'clusterhq-archive'
def get_repository_url(distribution, flocker_version):
"""
Return the URL for the repository of a given distribution.
For ``yum``-using distributions this gives the URL to a package that adds
entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this
gives the URL for a repo containing a Packages(.gz) file.
:param bytes distribution: The Linux distribution to get a repository for.
:param bytes flocker_version: The version of Flocker to get a repository
for.
:return bytes: The URL pointing to a repository of packages.
:raises: ``UnsupportedDistribution`` if the distribution is unsuppo
|
rted.
"""
distribution_to_url = {
# TODO instead of hardcoding keys, use the _to_Distribution map
# and then choose the name
'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
"{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
archive_bucket=ARCHIVE_BUCKET,
key='centos',
),
# This could hardcode the version number instead o
|
f using
# ``lsb_release`` but that allows instructions to be shared between
# versions, and for earlier error reporting if you try to install on a
# separate version. The $(ARCH) part must be left unevaluated, hence
# the backslash escapes (one to make shell ignore the $ as a
# substitution marker, and then doubled to make Python ignore the \ as
# an escape marker). The output of this value then goes into
# /etc/apt/sources.list which does its own substitution on $(ARCH)
# during a subsequent apt-get update
'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
}
try:
return distribution_to_url[distribution]
except KeyError:
raise UnsupportedDistribution()
def get_repo_options(flocker_version):
"""
Get a list of options for enabling necessary yum repositories.
:param bytes flocker_version: The version of Flocker to get options for.
:return: List of bytes for enabling (or not) a testing repository.
"""
is_dev = not is_release(flocker_version)
if is_dev:
return ['--enablerepo=clusterhq-testing']
else:
return []
class UnsupportedDistribution(Exception):
"""
Raised if trying to support a distribution which is not supported.
"""
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
"""
Raised when the provisioning step is not supported on the given
distribution.
:ivar bytes distribution: The distribution that isn't supported.
"""
def __str__(self):
return "Distribution not supported: %s" % (self.distribution,)
@implementer(INode)
class ManagedNode(PRecord):
"""
A node managed by some other system (eg by hand or by another piece of
orchestration software).
"""
address = field(type=bytes, mandatory=True)
private_address = field(type=(bytes, type(None)),
initial=None, mandatory=True)
distribution = field(type=bytes, mandatory=True)
def task_client_installation_test():
"""
Check that the CLI is working.
"""
return run_from_args(['flocker-deploy', '--version'])
def install_cli_commands_yum(distribution, package_source):
"""
Install Flocker CLI on CentOS.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
commands = [
sudo(command="yum install -y " + get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version))),
]
if use_development_branch:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/tmp/clusterhq-build.repo'))
commands.append(sudo_from_args([
'cp', '/tmp/clusterhq-build.repo',
'/etc/yum.repos.d/clusterhq-build.repo']))
repo_options = ['--enablerepo=clusterhq-build']
else:
repo_options = get_repo_options(
flocker_version=get_installable_version(version))
if package_source.os_version:
package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,)
else:
package = 'clusterhq-flocker-cli'
# Install Flocker CLI and all dependencies
commands.append(sudo_from_args(
["yum", "install"] + repo_options + ["-y", package]))
return sequence(commands)
def install_cli_commands_ubuntu(distribution, package_source):
"""
Install flocker CLI on Ubuntu.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join
|
RedHatQE/python-moncov
|
test/code/while_some_while_some.py
|
Python
|
gpl-3.0
| 48
| 0.104167
|
i
|
= 0
while i <3:
while i <2:
i += 1
i +
|
= 1
|
reuk/waveguide
|
scripts/python/boundary_modelling.py
|
Python
|
gpl-2.0
| 3,922
| 0.001785
|
from math import pi, sin, cos, tan, sqrt
from recordclass import recordclass
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from functools import reduce
def db2a(db):
return np.power(10, (db / 20.0))
def a2db(a):
return 20 * np.log10(a)
def series_coeffs(c):
return reduce(lambda (a, b), (x, y): (
np.convolve(a, x), np.convolve(b, y)), c)
def twopass_coeffs(c):
return series_coeffs(c + c)
def get_linkwitz_riley_coeffs(gain, lo, hi, sr):
def get_c(cutoff, sr):
wcT = pi * cutoff / sr
return 1 / tan(wcT)
def get_lopass_coeffs(gain, cutoff, sr):
c = get_c(cutoff, sr)
a0 = c * c + c * sqrt(2) + 1
b = [gain / a0, 2 * gain / a0, gain / a0]
a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
return b, a
def get_hipass_coeffs(gain, cutoff, sr):
c = get_c(cutoff, sr)
a0 = c * c + c * sqrt(2) + 1
b = [(gain * c * c) / a0, (-2 * gain * c * c) / a0, (gain * c * c) / a0]
a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
return b, a
return twopass_coeffs([get_lopass_coeffs(gain, hi, sr),
get_hipass_coeffs(gain, lo, sr)])
def get_notch_coeffs(gain, centre, sr, Q):
A = db2a(gain / 2)
w0 = 2 * pi * centre / sr
cw0 = cos(w0)
sw0 = sin(w0)
    alpha = sw0 / (2 * Q)  # standard biquad peaking/notch formula: sin(w0) / (2 * Q)
a0 = 1 + alpha / A
b = [(1 + alpha * A) / a0, (-2 * cw0) / a0, (1 - alpha * A) / a0]
a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
return b, a
def get_peak_coeffs(gain, centre, sr, Q):
A = db2a(gain / 2)
w0 = 2 * pi * centre / sr
cw0 = cos(w0)
sw0 = sin(w0)
    alpha = sw0 / (2 * Q)  # standard biquad peaking/notch formula: sin(w0) / (2 * Q)
a0 = 1 + alpha / A
b = [(1 + (alpha * A)) / a0, (-2 * cw0) / a0, (1 -
|
alpha * A) / a0]
a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
return b, a
BiquadMemory = recordclass('BiquadMemory', ['z1', 'z2'])
BiquadCoefficients = recordclass(
'BiquadCoeff
|
icients', [
'b0', 'b1', 'b2', 'a1', 'a2'])
def biquad_step(i, bm, bc):
out = i * bc.b0 + bm.z1
bm.z1 = i * bc.b1 - bc.a1 * out + bm.z2
bm.z2 = i * bc.b2 - bc.a2 * out
return out
def biquad_cascade(i, bm, bc):
for m, c in zip(bm, bc):
i = biquad_step(i, m, c)
return i
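# Hedged usage sketch (added for illustration): one sample pushed through a
# cascade built from the peak filters above. The sample rate and centre
# frequencies mirror main() but are otherwise arbitrary.
def _example_biquad_cascade():
    sr = 2000
    coeffs = [get_peak_coeffs(-24, centre, sr, 1) for centre in (45, 90, 180)]
    memory = [BiquadMemory(0, 0) for _ in coeffs]
    cascade = [BiquadCoefficients(b0, b1, b2, a1, a2)
               for [b0, b1, b2], [a0, a1, a2] in coeffs]
    return biquad_cascade(1.0, memory, cascade)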
def impedance_filter(c):
num = c[0]
den = c[1]
summed = [a + b for a, b in zip(den, num)]
subbed = [a - b for a, b in zip(den, num)]
norm = 1 / subbed[0]
summed = [i * norm for i in summed]
subbed = [i * norm for i in subbed]
return [summed, subbed]
def eighth_order_step(i, m, c):
out = i * c[0][0] + m[0]
m[0] = i * c[0][1] - c[1][1] * out + m[1]
m[1] = i * c[0][2] - c[1][2] * out + m[2]
m[2] = i * c[0][3] - c[1][3] * out + m[3]
m[3] = i * c[0][4] - c[1][4] * out + m[4]
m[4] = i * c[0][5] - c[1][5] * out + m[5]
m[5] = i * c[0][6] - c[1][6] * out + m[6]
m[6] = i * c[0][7] - c[1][7] * out + m[7]
m[7] = i * c[0][8] - c[1][8] * out
return out
def main():
edges = [30, 60, 120, 240]
corners = zip(edges[:-1], edges[1:])
centres = [(a + b) / 2 for a, b in corners]
#c = [get_linkwitz_riley_coeffs(1, b, a, edges[-1] * 2) for b, a in corners]
sr = 2000
c = [get_peak_coeffs(-24, i, sr, 1) for i in centres]
c.append([[1, 0, 0], [1, 0, 0]])
bm = [BiquadMemory(0, 0) for _ in c]
bc = [BiquadCoefficients(b0, b1, b2, a1, a2)
for [b0, b1, b2], [a0, a1, a2] in c]
c.append(series_coeffs(c))
# c.append(impedance_filter(c[-1]))
wh = [signal.freqz(b, a) for b, a in c]
plt.subplot(111)
plt.title("Frequency response - reflection filter")
for w, h in wh:
plt.semilogx(w, 20 * np.log10(np.abs(h)))
plt.ylabel('Amplitude Response (dB)')
plt.xlabel('Frequency (rad/sample)')
plt.grid()
plt.show()
if __name__ == "__main__":
main()
|
if1live/marika
|
server/sample.py
|
Python
|
mit
| 6,943
| 0.004609
|
################################################################################
# Copyright (C) 2012-2013 Leap Motion, Inc. All rights reserved. #
# Leap Motion proprietary and confidential. Not for distribution. #
# Use subject to the terms of the Leap Motion SDK Agreement available at #
# https://developer.leapmotion.com/sdk_agreement, or another agreement #
# between Leap Motion and you, your company or other organization. #
################################################################################
# set library path
import os, sys, inspect
src_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
arch_dir = 'lib/x64' if sys.maxsize > 2**32 else 'lib/x86'
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir)))
import Leap, sys, thread, time
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
class SampleListener(Leap.Listener):
finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
bone_names = ['Metacarpal', 'Proximal', 'Intermediate', 'Distal']
state_names = ['STATE_INVALID', 'STATE_START', 'STATE_UPDATE', 'STATE_END']
def on_init(self, controller):
print "Initialized"
def on_connect(self, controller):
print "Connected"
# Enable gestures
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
# Get the most recent frame and report some basic information
frame = controller.frame()
print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d, gestures: %d" % (
frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools), len(frame.gestures()))
# Get hands
for hand in frame.hands:
handType = "Le
|
ft hand" if hand.is_left else "Right hand"
print " %s, id %d, position: %s" % (
handType, hand.id, hand.palm_position)
# Get the hand's normal vector and direction
normal = hand.palm_normal
direction = hand.direction
# Calculate the hand's pitch, roll, and yaw angles
print " pitch: %f degree
|
s, roll: %f degrees, yaw: %f degrees" % (
direction.pitch * Leap.RAD_TO_DEG,
normal.roll * Leap.RAD_TO_DEG,
direction.yaw * Leap.RAD_TO_DEG)
# Get arm bone
arm = hand.arm
print " Arm direction: %s, wrist position: %s, elbow position: %s" % (
arm.direction,
arm.wrist_position,
arm.elbow_position)
# Get fingers
for finger in hand.fingers:
print " %s finger, id: %d, length: %fmm, width: %fmm" % (
self.finger_names[finger.type()],
finger.id,
finger.length,
finger.width)
# Get bones
for b in range(0, 4):
bone = finger.bone(b)
print " Bone: %s, start: %s, end: %s, direction: %s" % (
self.bone_names[bone.type],
bone.prev_joint,
bone.next_joint,
bone.direction)
# Get tools
for tool in frame.tools:
print " Tool id: %d, position: %s, direction: %s" % (
tool.id, tool.tip_position, tool.direction)
# Get gestures
for gesture in frame.gestures():
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
# Determine clock direction using the angle between the pointable and the circle normal
if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/2:
clockwiseness = "clockwise"
else:
clockwiseness = "counterclockwise"
# Calculate the angle swept since the last frame
swept_angle = 0
if circle.state != Leap.Gesture.STATE_START:
previous_update = CircleGesture(controller.frame(1).gesture(circle.id))
swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI
print " Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % (
gesture.id, self.state_names[gesture.state],
circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness)
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
print " Swipe id: %d, state: %s, position: %s, direction: %s, speed: %f" % (
gesture.id, self.state_names[gesture.state],
swipe.position, swipe.direction, swipe.speed)
if gesture.type == Leap.Gesture.TYPE_KEY_TAP:
keytap = KeyTapGesture(gesture)
print " Key Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_names[gesture.state],
keytap.position, keytap.direction )
if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
screentap = ScreenTapGesture(gesture)
print " Screen Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_names[gesture.state],
screentap.position, screentap.direction )
if not (frame.hands.is_empty and frame.gestures().is_empty):
print ""
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create a sample listener and controller
listener = SampleListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
try:
sys.stdin.readline()
except KeyboardInterrupt:
pass
finally:
# Remove the sample listener when done
controller.remove_listener(listener)
if __name__ == "__main__":
main()
|
radez/packstack
|
packstack/plugins/cinder_250.py
|
Python
|
apache-2.0
| 16,938
| 0.010922
|
"""
Installs and configures Cinder
"""
import os
import re
import uuid
import logging
from packstack.installer import exceptions
from packstack.installer import processors
from packstack.installer import validators
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
from packstack.installer import exceptions
from packstack.installer import output_messages
# Controller object will
# be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-Cinder"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Cinder configuration")
paramsList = [
{"CMD_OPTION" : "cinder-host",
"USAGE" : "The IP address of the server on which to install Cinder",
"PROMPT" : "Enter the IP address of the Cinder server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_CINDER_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "cinder-db-passwd",
"USAGE" : "The password to use for the Cinder to access DB",
"PROMPT" : "Enter the password for the Cinder DB access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CINDER_DB_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "cinder-ks-passwd",
"USAGE" : "The password to use for the Cinder to authenticate with Keystone",
"PROMPT" : "Enter the password for the Cinder Keystone access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CINDER_KS_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "cinder-backend",
"USAGE" : ("The Cinder backend to use, valid options are: "
"lvm, gluster, nfs"),
"PROMPT" : "Enter the Cinder backend to be configured",
"OPTION_LIST" : ["lvm", "gluster", "nfs"]
|
,
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "lvm",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False
|
,
"CONF_NAME" : "CONFIG_CINDER_BACKEND",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "CINDER",
"DESCRIPTION" : "Cinder Config parameters",
"PRE_CONDITION" : "CONFIG_CINDER_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def check_lvm_options(config):
return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and
config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'lvm')
paramsList = [
{"CMD_OPTION" : "cinder-volumes-create",
"USAGE" : ("Create Cinder's volumes group. This should only be done for "
"testing on a proof-of-concept installation of Cinder. This "
"will create a file-backed volume group and is not suitable "
"for production usage."),
"PROMPT" : ("Should Cinder's volumes group be created (for proof-of-concept "
"installation)?"),
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CINDER_VOLUMES_CREATE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "CINDERVOLUMECREATE",
"DESCRIPTION" : "Cinder volume create Config parameters",
"PRE_CONDITION" : check_lvm_options,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def check_lvm_vg_options(config):
return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and
config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'lvm' and
config.get('CONFIG_CINDER_VOLUMES_CREATE', 'y') == 'y')
paramsList = [
{"CMD_OPTION" : "cinder-volumes-size",
"USAGE" : ("Cinder's volumes group size. Note that actual volume size "
"will be extended with 3% more space for VG metadata."),
"PROMPT" : "Enter Cinder's volumes group usable size",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "20G",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CINDER_VOLUMES_SIZE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "CINDERVOLUMESIZE",
"DESCRIPTION" : "Cinder volume size Config parameters",
"PRE_CONDITION" : check_lvm_vg_options,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def check_gluster_options(config):
return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and
config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'gluster')
paramsList = [
{"CMD_OPTION" : "cinder-gluster-mounts",
"USAGE" : ("A single or comma separated list of gluster volume shares "
"to mount, eg: ip-address:/vol-name "),
"PROMPT" : ("Enter a single or comma separated list of gluster volume "
"shares to use with Cinder"),
"OPTION_LIST" : ["^'([\d]{1,3}\.){3}[\d]{1,3}:/.*'"],
"VALIDATORS" : [validators.validate_multi_regexp],
"PROCES
|
shaypal5/ezenum
|
tests/test_string_enum.py
|
Python
|
mit
| 408
| 0
|
"""Testing the StringEnum class."""
import ezenum as eze
def test_basic():
"""Just check it o
|
ut."""
rgb = eze.StringEnum(['Red', 'Green', 'Blue'])
assert rgb.Red == 'Red'
assert rgb.Green == 'Green'
assert rgb.Blue == 'Blue'
    assert rgb[0] == 'Red'
assert rgb[1] == 'Green'
assert rgb[2] == 'Blue'
assert len(rgb) == 3
assert repr(rgb) == "['Red', 'Green', 'Blue']"
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/bandit/plugins/insecure_ssl_tls.py
|
Python
|
apache-2.0
| 9,646
| 0
|
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
import bandit
from bandit.core import test_properties as test
def get_bad_proto_versions(config):
return config['bad_protocol_versions']
def gen_config(name):
if name == 'ssl_with_bad_version':
return {'bad_protocol_versions':
['PROTOCOL_SSLv2',
'SSLv2_METHOD',
'SSLv23_METHOD',
'PROTOCOL_SSLv3', # strict option
'PROTOCOL_TLSv1', # strict option
'SSLv3_METHOD', # strict option
'TLSv1_METHOD']} # strict option
@test.takes_config
@test.checks('Call')
@test.test_id('B502')
def ssl_with_bad_version(context, config):
"""**B502: Test for SSL use with bad version used**
Several highly publicized exploitable flaws have been discovered
in all versions of SSL and early versions of TLS. It is strongly
recommended that use of the following known broken protocol versions be
avoided:
- SSL v2
- SSL v3
- TLS v1
- TLS v1.1
This plugin test scans for calls to Python methods with parameters that
indicate the used broken SSL/TLS protocol versions. Currently, detection
supports methods using Python's native SSL/TLS support and the pyOpenSSL
module. A HIGH severity warning will be reported whenever known broken
protocol versions are detected.
It is worth noting that native support for TLS 1.2 is only available in
more recent Python versions, specifically 2.7.9 and up, and 3.x
A note on 'SSLv23':
Amongst the available SSL/TLS versions provided by Python/pyOpenSSL there
exists the option to use SSLv23. This very poorly named option actually
means "use the highest version of SSL/TLS supported by both the server and
client". This may (and should be) a version well in advance of SSL v2 or
v3. Bandit can scan for the use of SSLv23 if desired, but its detection
does not necessarily indicate a problem.
When using SSLv23 it is important to also provide flags to explicitly
exclude bad versions of SSL/TLS from the protocol versions considered. Both
the Python native and pyOpenSSL modules provide the ``OP_NO_SSLv2`` and
``OP_NO_SSLv3`` flags for this purpose.
**Config Options:**
.. code-block:: yaml
ssl_with_bad_version:
bad_protocol_versions:
- PROTOCOL_SSLv2
- SSLv2_METHOD
- SSLv23_METHOD
- PROTOCOL_SSLv3 # strict option
- PROTOCOL_TLSv1 # strict option
- SSLv3_METHOD # strict option
- TLSv1_METHOD # strict option
:Example:
.. code-block:: none
>> Issue: ssl.wrap_socket call with insecure SSL/TLS protocol version
identified, security issue.
Severity: High Confidence: High
Location: ./examples/ssl-insecure-version.py:13
12 # strict tests
13 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)
14 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1)
.. seealso::
- :func:`ssl_with_bad_defaults`
- :func:`ssl_with_no_version`
- http://heartbleed.com/
- https://poodlebleed.com/
- https://security.openstack.org/
- https://security.openstack.org/guidelines/dg_move-data-securely.html
.. versionadded:: 0.9.0
"""
bad_ssl_versions = get_bad_proto_versions(config)
if context.call_function_name_qual == 'ssl.wrap_socket':
if context.check_call_arg_value('ssl_version', bad_ssl_versions):
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.HIGH,
text="ssl.wrap_socket call with insecure SSL/TLS protocol "
"version identified, security issue.",
lineno=context.get_lineno_for_call_arg('ssl_version'),
)
elif context.call_function_name_qual == 'pyOpenSSL.SSL.Context':
if context.check_call_arg_value('method', bad_ssl_versions):
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.HIGH,
text="SSL.Context call with insecure SSL/TLS protocol "
"version identified, security issue.",
lineno=context.get_lineno_for_call_arg('method'),
)
elif (context.call_function_name_qual != 'ssl.wrap_socket' and
context.call_function_name_qual != 'pyOpenSSL.SSL.Context'):
if (context.check_call_arg_value('method', bad_ssl_versions) or
context.check_call_arg_value('ssl_version', bad_ssl_versions)):
lineno = (context.get_lineno_for_call_arg('method') or
context.get_lineno_for_call_arg('ssl_version'))
return bandit.Issue(
severity=bandit.MEDIUM,
confidence=bandit.MEDIUM,
text="Function call with insecure SSL/TLS protocol "
"identified, possible security issue.",
lineno=lineno,
)
@test.takes_config("ssl_with_bad_version")
@test.checks('FunctionDef')
@test.test_id('B503')
def ssl_with_bad_defaults(context, config):
"""**B503: Test for SSL use with bad defaults specified**
This plugin is part of a family of tests that detect the use of known bad
versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for
a complete discussion. Specifically, this plugin test scans for Python
methods with default parameter values that specify the use of broken
SSL/TLS protocol versions. Currently, detection supports methods using
Python's native SSL/TLS support and the pyOpenSSL module. A MEDIUM severity
warning will be reported whenever known broken protocol versions are
detected.
**Config Options:**
This test shares the configuration provided for the standard
:doc:`../plugins/ssl_with_bad_version` test, please refer to its
documentation.
:Example:
.. code-block:: none
>> Issue: Function definition identified with insecure SSL/TLS protocol
version by default, possible security issue.
Severity: Medium Confidence: Medium
Location: ./examples/ssl-insecure-version.py:28
27
28 def open_ssl_socket(version=SSL.SSLv2_METHOD):
29 pass
.. seealso::
- :func:`ssl_with_bad_version`
- :func:`ssl_with_no_version`
- http://heartbleed.com/
     - https://poodlebleed.com/
- https://security.openstack.org/
     - https://security.openstack.org/guidelines/dg_move-data-securely.html
.. versionadded:: 0.9.0
"""
bad_ssl_versions = get_bad_proto_versions(config)
for default in context.function_def_defaults_qual:
val = default.split(".")[-1]
if val in bad_ssl_versions:
return bandit.Issue(
severity=bandit.MEDIUM,
confidence=bandit.MEDIUM,
text="Function definition identified with insecure SSL/TLS "
"protocol version by default, possible security "
"issue."
)
@test.checks('Call')
@test.test_id('B504')
def ssl_with_no_version(context):
"""**B504: Test for SSL use with no version specified**
This plugin is part of a family of tests that detect the use of known bad
versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for
a complete discussion. Specifically, This plugin test scans for specific
methods in Python's native SSL/TLS support and the pyOpenSSL module that
configure the version of SSL/TLS protocol to use. These methods are known
to provide default value that maximize compatibility, but permit use of the
aforementioned broken protocol versions. A LOW severity warning will be
reported whenever this is detected.
**Config Options:**
This test shares the configuration provided for the standard
:doc:`../plugins/ssl_with_bad_version` test, please refer to its
documentation.
:Ex
|
gamechanger/kafka-python
|
kafka/protocol/legacy.py
|
Python
|
apache-2.0
| 14,397
| 0.002084
|
from __future__ import absolute_import
import logging
import struct
import six
from six.moves import xrange
import kafka.common
import kafka.protocol.commit
import kafka.protocol.fetch
import kafka.protocol.message
import kafka.protocol.metadata
import kafka.protocol.offset
import kafka.protocol.produce
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
ProtocolError, ChecksumError,
UnsupportedCodecError,
ConsumerMetadataResponse
)
from kafka.util import (
crc32, read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition
)
log = logging.getLogger(__name__)
ATTRIBUTE_CODEC_MASK = 0x03
CODEC_NONE = 0x00
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY)
class KafkaProtocol(object):
"""
Class to encapsulate all of the protocol encoding/decoding.
This class does not have any state associated with it, it is purely
for organization.
"""
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
CONSUMER_METADATA_KEY = 10
###################
# Private API #
###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key,
version=0):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
version, # ApiVersion
correlation_id, # CorrelationId
len(client_id), # ClientId size
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = []
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message),
encoded_message))
return b''.join(message_set)
@classmethod
def _encode_message(cls, message):
"""
Encode a single message.
The magic number of a message is a format version number.
The only supported magic number right now is zero
Format
======
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
"""
if message.magic == 0:
msg = b''.join([
struct.pack('>BB', message.magic, message.attributes),
write_int_string(message.key),
write_int_string(message.value)
])
crc = crc32(msg)
msg = struct.pack('>i%ds' % len(msg), crc, msg)
else:
raise ProtocolError("Unexpected magic number: %d" % message.magic)
return msg
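    # --- Illustrative note (added; not part of the original module) ---
    # Following the format documented above, an encoded message starts with the
    # fixed header '>iBB' -> (crc: int32, magic: int8, attributes: int8),
    # followed by the int32-length-prefixed key and value. For example,
    # struct.unpack('>iBB', msg[:6]) on the bytes returned by _encode_message
    # would recover (crc, magic, attributes).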
##################
# Public API #
##################
@classmethod
def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
"""
Encode a ProduceRequest struct
Arguments:
payloads: list of ProduceRequestPayload
acks: How "acky" you want the request to be
1: written to disk by the leader
0: immediate response
-1: waits for all replicas to be in sync
timeout: Maximum time (in ms) the server will wait for replica acks.
This is _not_ a socket timeout
Returns: ProduceRequest
"""
if acks not in (1, 0, -1):
raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)
return kafka.protocol.produce.ProduceRequest(
required_acks=acks,
timeout=timeout,
topics=[(
topic,
[(
partition,
[(0, 0, kafka.protocol.message.Message(msg.value, key=msg.key,
magic=msg.magic,
attributes=msg.attributes))
for msg in payload.messages])
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_produce_response(cls, response):
"""
Decode ProduceResponse to ProduceResponsePayload
Arguments:
response: ProduceResponse
Return: list of ProduceResponsePayload
"""
return [
kafka.common.ProduceResponsePayload(topic, partition, error, offset)
for topic, partitions in response.topics
for partition, error, offset in partitions
]
@classmethod
def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096):
"""
Encodes a FetchRequest struct
Arguments:
payloads: list of FetchRequestPayload
max_wait_time (int, optional): ms to block waiting for min_bytes
data. Defaults to 100.
min_bytes (int, optional): minimum bytes required to return before
max_wait_time. Defaults to 4096.
Return: FetchRequest
"""
return kafka.protocol.fetch.FetchRequest(
replica_id=-1,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
topics=[(
topic,
[(
partition,
payload.offset,
payload.max_bytes)
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_fetch_response(cls, response):
"""
Decode FetchResponse struct to FetchResponsePayloads
Arguments:
response: FetchResponse
"""
return [
kafka.common.FetchResponsePayload(
topic, partition, error, highwater_offset, [
kafka.common.OffsetAndMessage(offset, message)
for offset, _, message in messages])
for topic, partitions in response.topics
for partition, error, highwater_offset, messages in partitions
]
@classmethod
def encode_offset_request(cls, payloads=()):
return kafka.protocol.offset.OffsetRequest(
replica_id=-1,
topics=[(
topic,
[(
partition,
payload.time,
payload.max_offsets)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_response(cls, response):
"""
Decode OffsetResponse into OffsetResponsePayloads
Arguments:
response: OffsetResponse
Returns: list of OffsetResponsePayloads
"""
return [
            kafka.common.OffsetResponsePayload(topic, partition, error, tuple(offsets))
for topic, partitions in response.topics
for partition, error, offsets in partitions
]
@classmethod
def encode_metadata_request(cls, topics=(), payloads=None):
"""
Encode a MetadataRequest
Arguments:
topics: list of strings
"""
        if payloads is not None:
topics = payloads
return
|
lmr/autotest
|
cli/user.py
|
Python
|
gpl-2.0
| 2,827
| 0
|
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""
The user module contains the objects and methods used to
manage users in Autotest.
The valid action is:
list: lists user(s)
The common options are:
--ulist / -U: file containing a list of USERs
See topic_common.py for a High Level Design and Algorithm.
"""
import os
import sys
from autotest.cli import topic_common, action_common
class user(topic_common.atest):
"""User class
ate
|
st user list <options>"""
usage_action = 'list'
topic = msg_topic = 'user'
msg_items = '<users>'
def __init__(self):
"""Add to the parser the options common to all the
user actions"""
super(user, self).__init__()
        self.parser.add_option('-U', '--ulist',
help='File listing the users',
type='string',
default=None,
metavar='USER_FLIST')
self.topic_parse_info = topic_common.item_parse_info(
attribute_name='users',
filename_option='ulist',
use_leftover=True)
def get_items(self):
return self.users
class user_help(user):
"""Just here to get the atest logic working.
Usage is set by its parent"""
pass
class user_list(action_common.atest_list, user):
"""atest user list <user>|--ulist <file>
[--acl <ACL>|--access_level <n>]"""
def __init__(self):
super(user_list, self).__init__()
self.parser.add_option('-a', '--acl',
help='Only list users within this ACL')
self.parser.add_option('-l', '--access_level',
help='Only list users at this access level')
def parse(self):
(options, leftover) = super(user_list, self).parse()
self.acl = options.acl
self.access_level = options.access_level
return (options, leftover)
def execute(self):
filters = {}
check_results = {}
if self.acl:
filters['aclgroup__name__in'] = [self.acl]
check_results['aclgroup__name__in'] = None
if self.access_level:
filters['access_level__in'] = [self.access_level]
check_results['access_level__in'] = None
if self.users:
filters['login__in'] = self.users
check_results['login__in'] = 'login'
return super(user_list, self).execute(op='get_users',
filters=filters,
check_results=check_results)
def output(self, results):
if self.verbose:
keys = ['id', 'login', 'access_level']
else:
keys = ['login']
super(user_list, self).output(results, keys)
|
lo-windigo/fragdev
|
images/urls.py
|
Python
|
agpl-3.0
| 801
| 0.004994
|
# This file is part of the FragDev Website.
#
# the FragDev Website is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# the FragDev Website is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the FragDev Website. If not, see <http://www.gnu.org/licenses/>.
# Placeholder urlpatterns list, in case views are added
app_name = 'images'
urlpatterns = []
|
J-Adrian-Zimmer/GraphIsomorphism
|
TestGraphs.py
|
Python
|
mit
| 5,052
| 0.047902
|
from Graph import Graph
def mkTestGraph4():
return Graph(
['a','b','c','d'],
[ ('a','b'),
('b','c'),
('c','a'),
('a','d')
]
)
def mkTestGraph4b(): ## isomorphic with 4
return Graph(
['a','c','b','d'],
[ ('a','c'),
('b','c'),
('b','a'),
('a','d')
]
)
def mk5Clique():
return Graph(
5,
        [ (x,y) for x in range(5) for y in range(5) if x < y ]  # distinct nodes only; each edge listed once
)
def mkTestGraph6():
return Graph(
['a','b','c','d','e','f'],
[ ('a','b'),
('b','c'),
('c','d'),
('a','d'),
('d','b'),
('c','e')
]
)
'''
Schematic of test graph 6
a -- b f
| | (also edge between d and b)
d -- c -- e
rows of correct give number of shortest paths
from a source node to all nodes
'''
def mkTestGraph6b(): ## not isomorphic with 6
## (d,b) edge replaced with
## (a,c)
return Graph(
['a','b','c','d','e','f'],
[ ('a','b'),
('b','c'),
('c','d'),
('a','d'),
('a','c'),
('c','e')
]
)
def mkPetersenGraph():
return Graph(
10,
[ (0,1),(1,2),(2,3),(3,4),(4,0), # outer polygon
(5,6),(6,7),(7,8),(8,9),(9,5), # inner polygon
(0,5),(1,8),(2,6),(3,9),(4,7) ] # btwn polygons
)
class PossibleEdges:
## this keeps a list of edges (x,y) such that the ith
## edge has x at 2*i position and y at 2*i+1 position
## the order of the edges in the list doesn't matter
## and changes with each restart
from array import array
def __init__(me,numNodes):
me.totalNum = int( 0.5 + numNodes*(numNodes-1.0)/2.0 )
me.edges = PossibleEdges.array('H',[0]*(2*me.totalNum))
me.last_idx = me.totalNum-1
edge_index = 0
for i in range(numNodes):
for j in range(i+1,numNodes):
me.edges[ edge_index*2 ] = i
me.edges[ edge_index*2+1 ] = j
edge_index += 1
assert edge_index-1 == me.last_idx
def restart(me):
me.last_idx = me.totalNum-1
def remove(me,idx):
idx2 = 2*idx
lx2 = 2*me.last_idx
x = me.edges[idx2]
y = me.edges[idx2+1]
me.edges[idx2] = me.edges[lx2]
me.edges[idx2+1] = me.edges[lx2+1]
me.edges[lx2] = x
me.edges[lx2+1] = y
me.last_idx -= 1
return (x,y)
class MakeRandom:
    from random import SystemRandom, seed, randrange
seed( SystemRandom().random() )
def __init__(me,numNodes):
me.numNodes = numNodes
me.possible_edges = PossibleEdges(numNodes)
def getEdges(me,numEdges):
me.possible_edges.restart()
assert numEdges > 0 and \
numEdges < me.possible_edges.totalNum, (
"MakeRandom: number of edges "
"expected to be positive and less "
" than total "
"for an undirected graph without "
"loops or multiple edges"
)
count = 0
# print 'generating ' + str(me.possible_edges.totalNum) + \
# ' edge pairs '
edges = []
while count<numEdges:
i = MakeRandom.randrange(me.possible_edges.totalNum)
edges.append(me.possible_edges.remove(i))
count += 1
return edges
def getIsoPair(me,density=0.5 ):
## return two graphs with different labelling
numEdges = int(
0.5 + me.possible_edges.totalNum * density )
print "making isometric Pair with " + \
str(me.numNodes) + \
" nodes and " + str(numEdges) + " edges."
edges = me.getEdges(numEdges)
gph1 = Graph(me.numNodes,edges)
return (gph1,gph1.relabelledClone())
def getNonIsoPair(me,density=0.5):
## return two graphs with different labelling
## they have same number of edges but one edge
## is different
numEdges = int(
0.5 + me.possible_edges.totalNum * density )
print "making non isometric Pair with " + \
str(me.numNodes) + \
" nodes and " + str(numEdges) + " edges."
edges = me.getEdges(numEdges+1)
## make graphs by removing a random edge
i = MakeRandom.randrange(numEdges)
j = i # for 2nd graph need random j different from i
while j==i: j = MakeRandom.randrange(numEdges)
return (
            Graph(me.numNodes, edges[0:i] + edges[i+1:]),
            Graph(me.numNodes, edges[0:j] + edges[j+1:]).relabelledClone()
)
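# --- Illustrative usage sketch (added; not part of the original file) ---
# Assuming the Graph class imported above, a typical session would look like:
#   rnd = MakeRandom(8)                      # random-graph generator on 8 nodes
#   g1, g2 = rnd.getIsoPair(density=0.4)     # the same graph under two labellings
#   h1, h2 = rnd.getNonIsoPair(density=0.4)  # same edge count, differ by one edge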
|
macronucleus/chromagnon
|
Chromagnon/ndviewer/main.py
|
Python
|
mit
| 43,167
| 0.011328
|
#!/usr/bin/env priithon
import os, sys
import six
import wx, wx.lib.scrolledpanel as scrolled
import wx.lib.agw.aui as aui # from wxpython4.0, wx.aui does not work well, use this instead
try:
from ..Priithon import histogram, useful as U
from ..PriCommon import guiFuncs as G ,microscope, imgResample
from .. import imgio
except ValueError:
from Priithon import histogram, useful as U
from PriCommon import guiFuncs as G ,microscope, imgResample
import imgio
from . import viewer2
from . import glfunc as GL
import OpenGL
import numpy as N
from scipy import ndimage as nd
GLUTINITED = False
FRAMESIZE = (1200,768)
#if __name__ != '__main__':
# _display = wx.GetClientDisplayRect()[-2:]
# FRAMESIZE = (min(FRAMESIZE[0], _display[0]), min(FRAMESIZE[1], _display[1]))
_rgbList = [
(1,0,0),
(0,1,0),
(0,0,1),
(1,1,0),
(0,1,1),
(1,0,1),
(1,1,1),
]
_rgbList_names = ['red','green','blue', 'yellow', 'cyan', 'magenta', 'grey']
_rgbList_menuIDs = [wx.NewId() for i in range(len(_rgbList))]
def initglut():
global GLUTINITED
if not GLUTINITED and sys.platform.startswith(('linux', 'win')):
from OpenGL import GLUT
try:
GLUT.glutInit([]) ## in order to call Y.glutText()
except OpenGL.error.NullFunctionError:
#pass
raise RuntimeError('FreeGlut is not installed on your computer')
#print('FreeGlut is not installed on your computer')
GLUTINITED = True
class ImagePanel(wx.Panel):
viewCut = False
def __init__(self, parent, imFile=None, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize):
wx.Panel.__init__(self, parent, id, pos, size, name='')
# to make consistent with the older viewers
self.parent = self
self._mgr = aui.AuiManager()
self._mgr.SetManagedWindow(self)
self._perspectives = []
#self.loaded = False
## self.doc contains all the information on the displayed image
if isinstance(imFile, six.string_types):#str):
self.doc = imgio.Reader(imFile)
else:
self.doc = imFile
#self.zsec = [self.doc.nz//2]
#self.zlast = [0]
if self.doc: # can be ChromagnonEditor
self.doc.zlast = 0
self.addImageXY()
#self.zshape= self.doc.shape[:-2]
def __del__(self):
self._mgr.UnInit()
self.doOnClose()
def doOnClose(self):
pass
def addImageXY(self):
        ## draw viewer
        ## each dimension is assigned a number: 0 -- z; 1 -- y; 2 -- x
## each view has two dimensions (x-y view: (1,2); see below viewer2.GLViewer() calls) and
## an axis normal to it (x-y view: 0)
self.viewers = [] # XY, XZ, ZY
self.viewers.append(viewer2.GLViewer(self, dims=(1,2),
style=wx.BORDER_SUNKEN,
size=wx.Size(self.doc.nx, self.doc.ny)
))
self._mgr.AddPane(self.viewers[0], aui.AuiPaneInfo().Floatable(False).Name('XY').Caption("XY").BestSize((self.doc.nx, self.doc.ny)).CenterPane().Position(0))
self.viewers[-1].setMyDoc(self.doc, self)
self.viewers[-1].setAspectRatio(self.doc.pxlsiz[-2]/self.doc.pxlsiz[-1])
imgs2view = self.takeSlice((0,))[0]
for i, img in enumerate(imgs2view):
self.viewers[-1].addImg(img, None)
if hasattr(self.doc, 'alignParms'):
alignParm = self.doc.alignParms[self.doc.t,i]
self.viewers[-1].updateAlignParm(-1, alignParm)
# sliders
if 1:#self.doc.nz > 1 or self.doc.nt > 1:
self.addZslider()
ysize = int(self.doc.nz > 1) * 60 + int(self.doc.nt > 1) * 40
ysize = max(self.doc.nz, ysize)
self._mgr.AddPane(self.sliderPanel, aui.AuiPaneInfo().Name('Image').Caption("Image").Right().Position(1).BestSize((200,ysize)).MinSize((200,ysize)))
# histogram
self.recalcHist_todo_Set = set()
self.initHists() # histogram/aligner panel
self.setupHistArrL()
self.recalcHistL(False)
self.autoFitHistL()
self._mgr.AddPane(self.histsPanel, aui.AuiPaneInfo().Name('Histogram').Caption("HistoPanel").MaximizeButton(True).Right().Position(0).BestSize((200, self.doc.ny)).MinSize((200,50+70*2)).MaxSize((250,self.doc.ny)))#MinSize((200,50+70*self.doc.nw)).MaxSize((250,self.doc.ny)))
wx.CallAfter(self._mgr.Update)
self.histsPanel.Layout()
def updateGLGraphics(self, viewToUpdate = -1, RefreshNow=True):
'''
update cropbox and the slicing lines in all viewers;
set new image to viewer indicated by viewToUpdate:
-1 -- no updating viewer image
0,1,2 -- update viewToUpdate
3 -- update all viewers
'''
# viewers
if hasattr(viewToUpdate, '__iter__') or viewToUpdate >= 0:
if viewToUpdate == 3:
views2update = list(range(3))
elif type(viewToUpdate) == int:
views2update = [viewToUpdate]
else:
views2update = viewToUpdate
views2update = [i for i in views2update if i < len(self.viewers)]
imgs2view = self.takeSlice(views2update)
for i in views2update:
v = self.viewers[i]
for j, img in enumerate(imgs2view[i]):
if v.dims != (1,0):
v.setImage(j, img, 0)
else:
v.setImage(j, img.transpose(1,0), 0)
# draw lines
for v in self.viewers:
v.viewGpx = []
if v.useCropbox:
lowerBound = self.doc.roi_start.take(v.dims) #cropbox_l.take(v.dims) + ld
upperBound = self.doc.roi_size.take(v.dims) + lowerBound #cropbox_u.take(v.dims) + ld
v.viewGpx.append(GL.graphix_cropbox(lowerBound, upperBound))
pps = self._mgr.GetAllPanes()
if not any([pp.name == 'ZY' for pp in pps]) or not self.orthogonal_toggle.GetValue():
for v in self.viewers:
if v.viewGpx:
v.updateGlList([ g.GLfunc for g in v.viewGpx ], RefreshNow)
else:
v.updateGlList(None, RefreshNow)
v.useHair = False
#v.dragSide = 0
else:
#wx.Yield()
#if self.orthogonal_toggle.GetValue():
for v in self.viewers:
v.viewGpx.append(GL.graphix_slicelines(v))
v.updateGlList([ g.GLfunc for g in v.viewGpx ], RefreshNow)
#g = GL.graphix_slicelines(v)
#v.updateGlList([ g.GLfunc ], RefreshNow)
v.useHair = True
#else:
#for v in self.viewers:
#v.updateGlList(None, RefreshNow)
#v.useHair = False
#v.dragSide = 0
#self.doc.setIndices()
old="""
def IsCut(self):
return self.viewCut"""
def updateCropboxEdit(self):
pass
def addZslider(self):
self.sliderPanel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
self.sliderPanel.SetSizer(sizer)
# image info
# \n
box = G.newSpaceV(sizer)
bb, box = G.newStaticBox(self.sliderPanel, box, title='Image info', size=(150,-1))#wx.DefaultSize)
if sys.platform.startswith(('win', 'linux')):
fsize = 9
else:
fsize = 11
font = wx.Font(fsize, wx.SWISS, wx.NORMAL, wx.NORMAL)
# pixel size
pxsiz = tuple(self.doc.pxlsiz[::-1])
dimstr = ('X', 'Y', 'Z')
line = 'Pixel size (nm):\n'
pxstr = ' '
for i, d in enumerate(pxsiz):
if d:
pxstr += '%s %i: ' % (dimstr[i], int(d*1000))
if pxstr:
line += pxstr[:-2]
else:
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/system/firewalld.py
|
Python
|
gpl-3.0
| 29,694
| 0.001886
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Adam Miller <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- Name of a service to add/remove to/from firewalld.
- The service must be listed in output of firewall-cmd --get-services.
type: str
port:
description:
- Name of a port or port range to add/remove to/from firewalld.
- Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges.
type: str
rich_rule:
description:
- Rich rule to add/remove to/from firewalld.
type: str
source:
description:
- The source/network you would like to add/remove to/from firewalld.
type: str
version_added: "2.0"
interface:
description:
- The interface you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.1"
icmp_block:
description:
- The icmp block you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.8"
icmp_block_inversion:
description:
- Enable/Disable inversion of icmp blocks for a zone in firewalld.
type: str
version_added: "2.8"
zone:
description:
- >
The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream.
- Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).
- Possible values include C(block), C(dmz), C(drop), C(external), C(home), C(internal), C(public), C(trusted), C(work) ]
type: str
default: system-default(public)
permanent:
description:
- Should this configuration be in the running firewalld configuration or persist across reboots.
- As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 3.0.9).
- Note that if this is C(no), immediate is assumed C(yes).
type: bool
immediate:
description:
- Should this configuration be applied immediately, if set as permanent.
type: bool
default: no
version_added: "1.9"
state:
description:
- Enable or disable a setting.
- 'For ports: Should this port accept(enabled) or reject(disabled) connections.'
- The states C(present) and C(absent) can only be used in zone level operations (i.e. when no other parameters but zone and state are set).
type: str
required: true
choices: [ absent, disabled, enabled, present ]
timeout:
description:
- The amount of time the rule should be in effect for when non-permanent.
type: int
default: 0
masquerade:
description:
- The masquerade setting you would like to enable/disable to/from zones within firewalld.
type: str
version_added: "2.1"
offline:
description:
- Whether to run this module even when firewalld is offline.
type: bool
version_added: "2.3"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default.
- For distributions where the python2 firewalld bindings are unavailable (e.g Fedora 28 and later) you will have to set the
ansible_python_interpreter for these hosts to the python3 interpreter path and install the python3 bindings.
- Zone transactions (creating, deleting) can be performed by using only the zone and state parameters "present" or "absent".
Note that zone transactions must explicitly be permanent. This is a limitation in firewalld.
This also means that you will have to reload firewalld after adding a zone that you wish to perform immediate actions on.
The module will not take care of this for you implicitly because that would undo any previously performed immediate actions which were not
permanent. Therefore, if you require immediate access to a newly created zone it is recommended you reload firewalld immediately after the zone
creation returns with a changed state and before you perform any other immediate, non-permanent actions on that zone.
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = r'''
- firewalld:
service: https
permanent: yes
state: enabled
- firewalld:
port: 8081/tcp
permanent: yes
state: disabled
- firewalld:
port: 161-162/udp
permanent: yes
state: enabled
- firewalld:
zone: dmz
service: http
permanent: yes
state: enabled
- firewalld:
rich_rule: rule service name="ftp" audit limit value="1/m" accept
permanent: yes
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: yes
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: yes
zone: dmz
- firewalld:
zone: custom
state: present
permanent: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block_inversion: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block: echo-request
- name: Redirect port 443 to 8443 with Rich Rule
firewalld:
rich_rule: rule forward-port port=443 protocol=tcp to-port=8443
zone: public
permanent: yes
immediate: yes
state: enabled
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.firewalld import FirewallTransaction, fw_offline
try:
    from firewall.client import Rich_Rule
from firewall.client import FirewallClientZoneSettings
except ImportError:
# The import errors are handled via FirewallTransaction, don't need to
# duplicate that here
pass
class IcmpBlockTransaction(FirewallTransaction):
"""
IcmpBlockTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(IcmpBlockTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self, icmp_block, timeout):
return icmp_block in self.fw.getIcmpBlocks(self.zone)
def get_enabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
return icmp_block in fw_settings.getIcmpBlocks()
def set_enabled_immediate(self, icmp_block, timeout):
self.fw.addIcmpBlock(self.zone, icmp_block, timeout)
def set_enabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.addIcmpBlock(icmp_block)
self.update_fw_settings(fw_zone, fw_settings)
def set_disabled_immediate(self, icmp_block, timeout):
self.fw.removeIcmpBlock(self.zone, icmp_block)
def set_disabled_permanent(self, icmp_block, timeout):
fw_zone, fw_settings = self.get_fw_zone_settings()
fw_settings.removeIcmpBlock(icmp_block)
self.update_fw_settings(fw_zone, fw_settings)
class IcmpBlockInversionTransaction(FirewallTransaction):
"""
IcmpBlockInversionTransaction
"""
def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
super(IcmpBlockInversionTransaction, self).__init__(
module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
)
def get_enabled_immediate(self):
if self.fw.queryIcmpBlo
|
mark-adams/django-waffle
|
waffle/south_migrations/0004_auto__add_field_flag_testing.py
|
Python
|
bsd-3-clause
| 5,923
| 0.008779
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Flag.testing'
db.add_column('waffle_flag', 'testing', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Flag.testing'
db.delete_column('waffle_flag', 'testing')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', '
|
model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'waffle.flag': {
            'Meta': {'object_name': 'Flag'},
'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'waffle.sample': {
'Meta': {'object_name': 'Sample'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'})
},
'waffle.switch': {
'Meta': {'object_name': 'Switch'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['waffle']
|
texttochange/vusion-backend
|
vusion/persist/model_manager.py
|
Python
|
bsd-3-clause
| 2,407
| 0.001662
|
from datetime import datetime
class ModelManager(object):
def __init__(self, db, collection_name, has_stats=False, **kwargs):
self.property_helper = None
self.log_helper = None
        self.collection_name = collection_name
self.db = db
if 'logger' in kwargs:
self.log_helper = kwargs['logger']
if collection_name in self.db.collection_names():
self.collection = self.db[collection_name]
else:
self.collection = self.db.create_collection(collection_name)
if has_stats:
self.add_stats_collection()
def add_stats_collection(self):
self.stats_collection_name = '%s_stats' % self.collection_name
if self.stats_collection_name in self.db.collection_names():
self.stats_collection = self.db[self.stats_collection_name]
else:
self.stats_collection = self.db.create_collection(self.stats_collection_name)
def close_connection(self):
pass
def save_object(self, instance):
instance.validate_fields()
return self.collection.save(instance.get_as_dict())
#deprecated: name is confusing
def save_document(self, document):
document.validate_fields()
return self.collection.save(document.get_as_dict())
def set_property_helper(self, property_helper):
self.property_helper = property_helper
def set_log_helper(self, log_helper):
self.log_helper = log_helper
def __getattr__(self,attr):
orig_attr = self.collection.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
# prevent wrapped_class from becoming unwrapped
if result == self.collection:
return self
return result
return hooked
else:
return orig_attr
def get_local_time(self, date_format='datetime'):
if self.property_helper is None:
return datetime.now()
return self.property_helper.get_local_time(date_format)
def log(self, msg, level='msg'):
if self.log_helper is not None:
self.log_helper.log(msg, level)
def drop(self):
self.collection.drop()
if hasattr(self, 'stats_collection'):
self.stats_collection.drop()
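# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming a pymongo-style database object (the module already relies on
# collection_names(), create_collection() and save()), attribute lookups that
# ModelManager does not define are forwarded to the wrapped collection through
# __getattr__ above. The database and collection names here are only examples.
#
#   import pymongo
#   db = pymongo.MongoClient()['vusion_example']
#   manager = ModelManager(db, 'participants')
#   manager.find_one({'phone': '+256000000000'})  # proxied to db['participants']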
|
DeepSOIC/Lattice
|
latticeShapeString.py
|
Python
|
lgpl-2.1
| 12,398
| 0.014518
|
#***************************************************************************
#* *
#* Copyright (c) 2015 - Victor Titov (DeepSOIC) *
#* <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
from latticeCommon import *
import latticeBaseFeature
import latticeExecuter
import latticeCompoundExplorer as LCE
from latticeBoundBox import getPrecisionBoundBox #needed for alignment
import FreeCAD as App
import Part
from Draft import _ShapeString
__title__="BoundingBox module for FreeCAD"
__author__ = "DeepSOIC"
__url__ = ""
def findFont(font_file_name):
    '''checks for existence of the file in a few locations and returns the full path of the first one found'''
import os
if os.path.isabs(font_file_name):
if not os.path.exists(font_file_name):
raise ValueError("Font file not found: " + font_file_name )
return font_file_name
dirlist = [] #list of directories to probe
import latticeDummy
lattice_path = os.path.dirname(latticeDummy.__file__)
dirlist.append(lattice_path + "/fonts")
if len(App.ActiveDocument.FileName) > 0:
dirlist.append(os.path.dirname(App.ActiveDocument.FileName)+"/fonts")
dirlist.append(os.path.abspath(os.curdir))
#todo: figure out the path to system fonts, and add it here
#do the probing
for _dir in dirlist:
if os.path.exists(_dir + "/" + font_file_name):
return _dir + "/" + font_file_name
raise ValueError("Font file not found: "+font_file_name +". Locations probed: \n"+'\n'.join(dirlist))
# -------------------------- document object --------------------------------------------------
def makeLatticeShapeString(name):
    '''makeLatticeShapeString(name): makes a LatticeShapeString object.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
LatticeShapeString(obj)
ViewProviderLatticeShapeString(obj.ViewObject)
return obj
class FoolFeatureDocumentObject:
'''A class that is to be fed to Draft ShapeString object instead of a real one, to obtain shapes it generates'''
def __init__(self):
self.Placement = App.Placement()
self.Shape = Part.Shape()
self.properties = []
self.Proxy = None
def addProperty(self, proptype, propname, group = None, hint = None):
setattr(self,propname,None)
self.properties.append((proptype, propname, group, hint))
class LatticeShapeString:
"The LatticeShapeString object"
def __init__(self,obj):
self.Type = "LatticeShapeString"
#initialize accompanying Draft ShapeString
self.makeFoolObj(obj)
foolObj = self.foolObj
        #add Draft ShapeString's properties to document object in possession of our LatticeShapeString
for (proptype, propname, group, hint) in foolObj.properties:
if propname != "String": #we'll define our own string property
obj.addProperty(proptype,propname,"Lattice ShapeString",hint)
obj.addProperty("App::PropertyLink","ArrayLink","Lattice ShapeString","array to use for the shapestring")
obj.addProperty("App::PropertyStringList","Strings","Lattice ShapeString","Strings to put at each placement.")
obj.addProperty("App::PropertyEnumeration","XAlign","Lattice ShapeString","Horizontal alignment of individual strings")
obj.XAlign = ['None','Left','Right','Middle']
obj.addProperty("App::PropertyEnumeration","YAlign","Lattice ShapeString","Vertical alignme
|
nt of individual strings")
obj.YAlign = ['None','Top','Bottom','Middle']
obj.addProperty("App::PropertyBool","AlignPrecisionBoundBox","Lattice ShapeString","Use precision bounding box for alignment. Warning: slow!")
obj.addProperty("App::PropertyFile","FullPathToFont","Lattice ShapeString","Full path of font file that is actually being used.")
obj.setEditorMode("
|
FullPathToFont", 1) # set read-only
obj.Proxy = self
self.setDefaults(obj)
def makeFoolObj(self,obj):
'''Makes an object that mimics a Part::FeaturePython, and makes a Draft
ShapeString object on top of it. Both are added as attributes to self.
This is needed to re-use Draft ShapeString'''
if hasattr(self, "foolObj"):
return
foolObj = FoolFeatureDocumentObject()
self.draft_shape_string = _ShapeString(foolObj)
self.foolObj = foolObj
def setDefaults(self, obj):
'''initializes the properties, so that LatticeShapeString can be used with no initial fiddling'''
obj.FontFile = "FreeUniversal-Regular.ttf"
obj.Size = 10
obj.Tracking = 0
obj.Strings = ['string1','string2']
def execute(self,obj):
nOfStrings = len(obj.Strings)
lattice = obj.ArrayLink
if lattice is None:
plms = [App.Placement() for i in range(0,nOfStrings)]
else:
if not latticeBaseFeature.isObjectLattice(lattice):
latticeExecuter.warning(obj,"ShapeString's link to array must point to a lattice. It points to a generic shape. Results may be unexpected.")
leaves = LCE.AllLeaves(lattice.Shape)
plms = [leaf.Placement for leaf in leaves]
#update foolObj's properties
self.makeFoolObj(obj) #make sure we have one - fixes defunct Lattice ShapeString after save-load
for (proptype, propname, group, hint) in self.foolObj.properties:
if propname != "String": #ignore "String", that will be taken care of in the following loop
setattr(self.foolObj, propname, getattr(obj, propname))
self.foolObj.FontFile = findFont(obj.FontFile)
obj.FullPathToFont = self.foolObj.FontFile
shapes = []
for i in range( 0 , min(len(plms),len(obj.Strings)) ):
if len(obj.Strings[i]) > 0:
#generate shapestring using Draft
self.foolObj.String = obj.Strings[i]
self.foolObj.Shape = None
self.draft_shape_string.execute(self.foolObj)
shape = self.foolObj.Shape
#calculate alignment point
if obj.XAlign == 'None' and obj.YAlign == 'None':
pass #need not calculate boundbox
else:
if obj.AlignPrecisionBoundBox:
bb = getPrecisionBoundBox(shape)
else:
bb = shape.BoundBox
alignPnt = App.Vector()
Eigenlabs/EigenD | plg_macosx/caprobe.py | Python | gpl-3.0 | 794 | 0.002519
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import macosx_native
def main():
macosx_native.probe_coreaudio(True,True)
lowitty/zacademy | bin/trap_snmp_v2_v3.py | Python | mit | 6,374 | 0.004864
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import time
import logging.config
dir_cur = os.path.normpath(os.path.dirname(os.path.abspath(__file__)).split('bin')[0])
if dir_cur not in sys.path:
sys.path.insert(0, dir_cur)
log_dir = os.path.normpath(dir_cur + os.path.sep + 'logs' + os.path.sep + 'snmp_trap_logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
log_file = os.path.normpath(log_dir + os.path.sep + "snmp_trap_v2v3_" + str(os.getpid()) + ".log")
logging_config = os.path.normpath(dir_cur + os.path.sep + 'config' + os.path.sep + 'snmp_trap' +
os.path.sep + 'logging.config')
logging.config.fileConfig(logging_config, defaults={'log_file': log_file})
from com.ericsson.xn.commons import CommonUtil
CommonUtil.pre_check(systems=['Linux', 'Windows', 'Darwin'])
from com.ericsson.xn.snmp import SnmpTrapUtils
sep = os.path.sep
snmp_conf = dir_cur + sep + 'config' + sep + 'snmp_trap'
template_dir = os.path.normpath(snmp_conf + sep + 'templates')
mapping_file = os.path.normpath(snmp_conf + sep + 'mappings')
alarm_id_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'alarmid_oids')
timestamp_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'time_oids')
v3_auth_file = os.path.normpath(snmp_conf + sep + 'v3_auth')
id_file = os.path.normpath(snmp_conf + sep + 'id')
options = SnmpTrapUtils.get_and_check_options()
traps_map = SnmpTrapUtils.read_used_trap_templates(options, template_dir, mapping_file, alarm_id_file, timestamp_file)
logging.debug('**Start to send traps**')
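# Mode dispatch (see the branches below): 'n' sends traps via SendTrapNormal,
# 'c' clears a single alarm, 's' and 'p' run duration/storm modes via
# SendTrapDurationMode ('p' sends alarms in pairs, so the ID list must have an
# even length), and 'sn'/'pn' are the corresponding non-APS variants.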
if not traps_map:
msg = 'Fail to read the alarm template files.'
logging.error(msg)
# print msg
else:
client_ip = None if '' == options.clientip else options.clientip
engine_id = None if '' == options.engineid else options.engineid
list_engine = SnmpTrapUtils.init_trap_engine(traps_map, options, v3_auth_file, client_ip,engine_id)
if not list_engine:
msg = 'Fail to init the trap engines.'
logging.error(msg)
# print msg
else:
if 'n' == options.mode:
t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2],
list_engine[3], id_file)
try:
t.start()
while not t.b_stop:
time.sleep(.5)
except KeyboardInterrupt:
t.stop()
msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
logging.info(msg)
print msg
# print msg
        elif 'c' == options.mode:
if 1 < len(options.list.split(',')):
msg = "We can only send one alarm in Clear mode, you have feed more than one alarm " \
"IDs for the '--list' option."
                logging.critical(msg)
else:
t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2],
list_engine[3], id_file, False)
try:
t.start()
while not t.b_stop:
time.sleep(.5)
except KeyboardInterrupt:
msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
logging.info(msg)
print msg
elif 's' == options.mode:
try:
t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1],
list_engine[2], list_engine[3], id_file)
t.start()
while not t.b_stop:
time.sleep(.5)
except KeyboardInterrupt:
msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
logging.info(msg)
print msg
t.stop()
elif 'p' == options.mode:
if 0 != len(options.list.split(',')) % 2:
msg = "In pare storm mode, number of alarms should be an EVEN number, otherwise there will be mismatch."
logging.critical(msg)
else:
try:
t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1],
list_engine[2], list_engine[3], id_file, True)
t.start()
while not t.b_stop:
time.sleep(.5)
except KeyboardInterrupt:
msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
logging.info(msg)
print msg
t.stop()
elif 'sn' == options.mode:
try:
t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1],
list_engine[2], list_engine[3], id_file)
t.start()
while not t.b_stop:
time.sleep(.5)
except KeyboardInterrupt:
msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
logging.info(msg)
print msg
t.stop()
elif 'pn' == options.mode:
if 0 != len(options.list.split(',')) % 2:
msg = "In pare storm mode, number of alarms should be an EVEN number, otherwise there will be mismatch."
logging.critical(msg)
else:
try:
t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1],
list_engine[2], list_engine[3], id_file, True)
t.start()
while not t.b_stop:
time.sleep(.5)
except Exception as e:
logging.error(str(e))
print str(e)
t.stop()
else:
msg = "Other mode is not supported yet, exit now."
logging.critical(msg)
logging.debug('**End of sending traps**')
voutilad/courtlistener | cl/api/urls.py | Python | agpl-3.0 | 2,240 | 0
from cl.api import views
from cl.audio import api_views as audio_views
from cl.people_db import api_views as judge_views
from cl.search import api_views as search_views
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
# Search & Audio
router.register(r'dockets', search_views.DocketViewSet)
router.register(r'courts', search_views.CourtViewSet)
router.register(r'audio', audio_views.AudioViewSet)
router.register(r'clusters', search_views.OpinionClusterViewSet)
router.register(r'opinions', search_views.OpinionViewSet)
router.register(r'opinions-cited', search_views.OpinionsCitedViewSet)
router.register(r'search', search_views.SearchViewSet, base_name='search')
# Judges
router.register(r'people', judge_views.PersonViewSet)
router.register(r'positions', judge_views.PositionViewSet)
router.register(r'retention-events', judge_views.RetentionEventViewSet)
router.register(r'educations', judge_views.EducationViewSet)
router.register(r'schools', judge_views.SchoolViewSet)
router.register(r'political-affiliations',
judge_views.PoliticalAffiliationViewSet)
router.register(r'sources', judge_views.SourceViewSet)
router.register(r'aba-ratings', judge_views.ABARatingViewSet)
urlpatterns = [
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/rest/(?P<version>[v3]+)/', include(router.urls)),
# Documentation
url(r'^api/$',
views.api_index,
name='api_index'),
url(r'^api/jurisdictions/$',
views.court_index,
name='court_index'),
    url(r'^api/rest-info/(?P<version>v[123])?/?$',
views.rest_docs,
name='rest_docs'),
url(r'^api/bulk-info/$',
views.bulk_data_index,
name='bulk_data_index'),
url(r'^api/rest/v(?P<version>[123])/coverage/(?P<court>.+)/$',
views.coverage_data,
name='coverage_data'),
# Pagerank file
url(r'^api/bulk/external_pagerank/$',
views.serve_pagerank_file,
name='pagerank_file'),
# Deprecation Dates:
# v1: 2016-04-01
# v2: 2016-04-01
url(r'^api/rest/v(?P<v>[12])/.*',
views.deprecated_api,
name='deprecated_api'),
]
rvrheenen/OpenKattis | Python/judgingmoose/judgingmoose.py | Python | mit | 176 | 0.017045
l, r = [int(x) for x in input().split()]
if max(l,r) == 0:
print("Not a moose")
elif l == r:
print("Even {}".format(l+r))
else:
print("Odd {}".format(max(l,r)*2))
neosinha/automationengine | AutomationEngine/QueryTool/Main.py | Python | mit | 1,129 | 0.009743
"""
Created on April 14, 2017
@author Miguel Contreras Morales
"""
import QueryTool
import datetime
import cherrypy as QueryServer
import os
if __name__ == "__main__":
"""
This initializes CherryPy services
+ self - no input required
"""
print "Intializing!"
portnum = 9100
# start the QeueryServer
QueryServer.config.update({'server.socket_host' : '127.0.0.1',
        'server.socket_port': portnum,
'server.socket_timeout': 600,
'server.thread_pool' : 8,
'server.max_request_body_size': 0
})
wwwPath = os.path.join(os.getcwd(),'www')
print wwwPath
staticdir = './www'
print staticdir
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.on': True,
'tools.staticdir.dir': wwwPath
}
}
QueryServer.quickstart(QueryTool.QueryTool(dbaddress="10.30.5.203:27017", path= wwwPath), '/', conf)
phobson/bokeh | bokeh/properties.py | Python | bsd-3-clause | 195 | 0.010256
from bokeh.util.deprecate import deprecated_module
deprecated_module('bokeh.properties', '0.11', 'use bokeh.core.properties instead')
del deprecated_module
from .core.properties import * # NOQA
open-synergy/opnsynid-l10n-indonesia | l10n_id_taxform_bukti_potong_pph_f113309/__openerp__.py | Python | agpl-3.0 | 719 | 0
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    "name": "Indonesia - Bukti Potong PPh 4 Ayat 2 (F.1.1.33.09)",
"version": "8.0.1.1.0",
"category": "localization",
"website": "https://opensynergy-indonesia.com/",
"author": "OpenSynergy Indonesia",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": [
"l10n_id_taxform_bukti_potong_pph_common",
],
"data": [
"security/ir.model.access.csv",
"data/l10n_id_bukti_potong_type.xml",
"views/bukti_potong_pph_f113309_in_views.xml",
"views/bukti_potong_pph_f113309_out_views.xml",
],
}
ruohoruotsi/Wavelet-Tree-Synth | nnet/keeper_LSTMVRAE-JayHack-RyotaKatoh-chainer/dataset.py | Python | gpl-2.0 | 7,469 | 0.002946
import gzip
import os
import numpy as np
import cPickle as pickle
import six
from six.moves.urllib import request
import scipy
from scipy import io
# from sklearn import decomposition
'''
BVH
'''
def load_bvh_data(file_path):
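    # Parses a BVH (Biovision Hierarchy) motion-capture file: skips ahead to the
    # MOTION section, reads the frame count and frame time, then collects one row
    # of channel values per frame into a (frames, dim) float32 array.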
frames = 0
frame_time = 0.0
with open(file_path, "rb") as f:
lines = f.readlines()
n = 0
while lines[n].find('MOTION') < 0:
n += 1
assert n < len(lines)
# frames
n += 1
frames = int(lines[n].split(" ")[-1].replace('\n', ''))
# frame time
n += 1
frame_time = float(lines[n].split(" ")[-1].replace('\n', ''))
# motion data
n += 1
for i in range(frames):
motion = lines[n + i].split(' ')
if i == 0:
dim = len(motion)
global motion_data
motion_data = np.zeros(frames * dim, dtype=np.float32).reshape((frames, dim))
for j in range(dim):
motion_data[i, j] = float(motion[j].replace('\n', ''))
return frames, frame_time, motion_data
'''
MNIST
'''
def load_mnist(images, labels, num):
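    # Reads the raw MNIST IDX files: skip the 16-byte image header and the 8-byte
    # label header, then read one label byte and 784 pixel bytes per example.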
dim = 784
data = np.zeros(num * dim, dtype=np.uint8).reshape((num, dim))
target = np.zeros(num, dtype=np.uint8).reshape((num, ))
with gzip.open(images, 'rb') as f_images,\
gzip.open(labels, 'rb') as f_labels:
f_images.read(16)
f_labels.read(8)
for i in six.moves.range(num):
target[i] = ord(f_labels.read(1))
for j in six.moves.range(dim):
data[i, j] = ord(f_images.read(1))
return data, target
def download_mnist_data(data_dir):
parent = 'http://yann.lecun.com/exdb/mnist'
train_images = 'train-images-idx3-ubyte.gz'
train_labels = 'train-labels-idx1-ubyte.gz'
test_images = 't10k-images-idx3-ubyte.gz'
    test_labels = 't10k-labels-idx1-ubyte.gz'
num_train = 60000
num_test = 10000
print('Downloading {:s}...'.format(train_images))
request.urlretrieve('{:s}/{:s}'.format(parent, train_images), train_images)
print('Done')
print('Downloading {:s}...'.format(train_labels))
request.urlretrieve('{:s}/{:s}'.format(parent, train_labels), train_labels)
print('Done')
print('Downloading {:s}...'.format(test_images))
request.urlretrieve('{:s}/{:s}'.format(parent, test_images), test_images)
print('Done')
print('Downloading {:s}...'.format(test_labels))
request.urlretrieve('{:s}/{:s}'.format(parent, test_labels), test_labels)
print('Done')
print('Converting training data...')
data_train, target_train = load_mnist(train_images, train_labels,
num_train)
print('Done')
print('Converting test data...')
data_test, target_test = load_mnist(test_images, test_labels, num_test)
mnist = {}
mnist['data'] = np.append(data_train, data_test, axis=0)
mnist['target'] = np.append(target_train, target_test, axis=0)
print('Done')
print('Save output...')
with open('%s/mnist/mnist.pkl' % data_dir, 'wb') as output:
six.moves.cPickle.dump(mnist, output, -1)
print('Done')
print('Convert completed')
def load_mnist_data(data_dir):
if not os.path.exists('%s/mnist/mnist.pkl' % data_dir):
download_mnist_data(data_dir)
with open('%s/mnist/mnist.pkl' % data_dir, 'rb') as mnist_pickle:
mnist = six.moves.cPickle.load(mnist_pickle)
return mnist
'''
SVHN
'''
def download_svhn_data(data_dir):
parent = 'http://ufldl.stanford.edu/housenumbers'
train_images = 'train_32x32.mat'
test_images = 'test_32x32.mat'
data_path = data_dir+"/SVHN/"
if not os.path.exists(data_path):
os.mkdir(data_path)
print('Downloading {:s}...'.format(train_images))
request.urlretrieve('{:s}/{:s}'.format(parent, train_images), data_path+train_images)
print('Done')
print('Downloading {:s}...'.format(test_images))
request.urlretrieve('{:s}/{:s}'.format(parent, test_images), data_path+test_images)
print('Done')
def svhn_pickle_checker(data_dir):
if os.path.exists(data_dir+'/SVHN/train_x.pkl') and os.path.exists(data_dir+'/SVHN/train_y.pkl') \
and os.path.exists(data_dir+'/SVHN/test_x.pkl') and os.path.exists(data_dir+'/SVHN/test_y.pkl'):
return 1
else:
return 0
def load_svhn(data_dir, toFloat=True, binarize_y=True, dtype=np.float32, pca=False, n_components=1000):
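    # Loads the SVHN .mat files, flattening each 32x32x3 image into a row vector
    # and shifting the 1..10 labels down to 0..9; optionally scales pixel values
    # into [0, 1) and one-hot encodes the labels.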
# if svhn_pickle_checker(data_dir) == 1:
# print "load from pickle file."
# train_x = pickle.load(open(data_dir+'/SVHN/train_x.pkl'))
# train_y = pickle.load(open(data_dir+'/SVHN/train_y.pkl'))
# test_x = pickle.load(open(data_dir+'/SVHN/test_x.pkl'))
# test_y = pickle.load(open(data_dir+'/SVHN/test_y.pkl'))
#
# return train_x, train_y, test_x, test_y
if not os.path.exists(data_dir+'/SVHN/train_32x32.mat') or not os.path.exists(data_dir+'/SVHN/test_32x32.mat'):
download_svhn_data(data_dir)
train = scipy.io.loadmat(data_dir+'/SVHN/train_32x32.mat')
train_x = train['X'].swapaxes(0,1).T.reshape((train['X'].shape[3], -1))
train_y = train['y'].reshape((-1)) - 1
test = scipy.io.loadmat(data_dir+'/SVHN/test_32x32.mat')
test_x = test['X'].swapaxes(0,1).T.reshape((test['X'].shape[3], -1))
test_y = test['y'].reshape((-1)) - 1
if toFloat:
train_x = train_x.astype(dtype)/256.
test_x = test_x.astype(dtype)/256.
if binarize_y:
train_y = binarize_labels(train_y)
test_y = binarize_labels(test_y)
# if pca:
# x_stack = np.vstack([train_x, test_x])
# pca = decomposition.PCA(n_components=n_components)
# pca.whiten=True
# # pca.fit(x_stack)
# # x_pca = pca.transform(x_stack)
# x_pca = pca.fit_transform(x_stack)
# train_x = x_pca[:train_x.shape[0], :]
# test_x = x_pca[train_x.shape[0]:, :]
#
# with open('%s/SVHN/pca.pkl' % data_dir, "wb") as f:
# pickle.dump(pca, f)
# with open('%s/SVHN/train_x.pkl' % data_dir, "wb") as f:
# pickle.dump(train_x, f)
# with open('%s/SVHN/train_y.pkl' % data_dir, "wb") as f:
# pickle.dump(train_y, f)
# with open('%s/SVHN/test_x.pkl' % data_dir, "wb") as f:
# pickle.dump(test_x, f)
# with open('%s/SVHN/test_y.pkl' % data_dir, "wb") as f:
# pickle.dump(test_y, f)
return train_x, train_y, test_x, test_y
def binarize_labels(y, n_classes=10):
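    # One-hot encodes integer class labels into an (N, n_classes) float32 matrix.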
new_y = np.zeros((y.shape[0], n_classes))
for i in range(y.shape[0]):
new_y[i, y[i]] = 1
return new_y.astype(np.float32)
'''
Shakespeare
'''
def load_shakespeare(data_dir):
vocab = {}
words = open('%s/tinyshakespeare/input.txt' % data_dir, 'rb').read()
words = list(words)
dataset = np.ndarray((len(words), ), dtype=np.int32)
for i, word in enumerate(words):
if word not in vocab:
vocab[word] = len(vocab)
dataset[i] = vocab[word]
return dataset, words, vocab
'''
music
'''
def load_midi_data(data_dir):
import midi.utils as utils
from midi import MidiInFile as mf
from midi import MidiToText as mt
f = open(data_dir, 'rb')
midiIn = mf.MidiInFile(mt.MidiToText(), f)
midiIn.read()
f.close()
midi_data = utils.midiread(data_dir, dt=0.5)
return midi_data.piano_roll
daschwa/typing-test | server.py | Python | mit | 395 | 0
#!/usr/bin/env python
from livereload import Server, shell
server = Server()
style = ("style.scss", "style.css")
script = ("typing-test.js", "typing-test-compiled.js")
server.watch(style[0], shell(["sass", style[0]], output=style[1]))
server.watch(script[0], shell(["babel", script[0]], output=script[1]))
server.watch("index.html")
server.serve(port=8080, host="localhost", open_url=True)
ryanarnold/complaints_categorizer | categorizer/feature_selection.py | Python | mit | 2,179 | 0.005048
from collections import Counter
from math import log
def TFIDF(TF, complaints, term):
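    # Smoothed TF-IDF: log(TF + 1) weighted by the inverse document frequency
    # log(n / x), where x is the number of complaints whose body contains the term.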
if TF >= 1:
n = len(complaints)
x = sum([1 for complaint in complaints if term in complaint['body']])
return log(TF + 1) * log(n / x)
else:
return 0
def DF(vocab, complaints):
term_DF = dict()
for term in vocab:
term_DF[term] = sum([1 for complaint in complaints if term in complaint['body']])
threshold = 3
features = [term for term in term_DF.keys() if term_DF[term] > threshold]
return features
def chi_square(vocab, complaints, categories):
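    # Chi-square feature selection: for each (term, category) pair a 2x2
    # contingency table is built (A: term present & in category, B: term present &
    # other category, C: term absent & in category, D: term absent & other
    # category), giving chi2 = N*(A*D - C*B)^2 / ((A+C)*(B+D)*(A+B)*(C+D)).
    # A term is kept when its frequency-weighted average chi2 over all categories
    # exceeds 3.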
features = []
chi_table = dict()
N = len(complaints)
for term in vocab:
chi_table[term] = dict()
for category in categories:
chi_table[term][category] = dict()
A = 0
B = 0
C = 0
D = 0
for complaint in complaints:
if term in complaint['body'] and complaint['category'] == category:
A += 1
                if term in complaint['body'] and complaint['category'] != category:
B += 1
if term not in complaint['body'] and complaint['category'] == category:
C += 1
if term not in complaint['body'] and complaint['category'] != category:
D += 1
try:
chi_table[term][category]['chi'] = (N * ((A * D) - (C * B))**2) / ((A + C) * (B + D) * (A + B) * (C + D))
                chi_table[term][category]['freq'] = A + C
except ZeroDivisionError:
print(term)
print(category)
print(A)
print(B)
print(C)
print(D)
input()
pass
chi_table[term]['chi_average'] = float()
for category in categories:
P = chi_table[term][category]['freq'] / N
chi_table[term]['chi_average'] += P * chi_table[term][category]['chi']
if chi_table[term]['chi_average'] > 3:
features.append(term)
print('Extracted {0} features'.format(len(features)))
return features
Vauxoo/stock-logistics-warehouse | stock_inventory_revaluation/wizards/stock_change_standard_price.py | Python | agpl-3.0 | 974 | 0
# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class StockChangeStandardPrice(models.TransientModel):
_inherit = "stock.change.standard.price"
@api.model
def default_get(self, fields):
res = super(StockChangeStandardPrice, self).default_get(fields)
product_or_template = self.env[self._context['active_model']].browse(
            self._context['active_id'])
if 'counterpart_account_id' in fields:
# We can only use one account here, so we use the decrease
# account. It will be ignored anyway, because we'll use the
# increase/decrease accounts defined in the product category.
res['counterpart_account_id'] = product_or_template.categ_id. \
property_inventory_revaluation_decrease_account_categ.id
return res
gf712/PyML | tests/nearest_neighbours_tests.py | Python | mit | 2,186 | 0.004575
import unittest
from pyml.nearest_neighbours import KNNClassifier, KNNRegressor
from pyml.datasets import gaussian, regression
from pyml.preprocessing import train_test_split
class TestKNNClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.datapoints, cls.labels = gaussian(n=100, d=2, labels=3, sigma=0.1, seed=1970)
cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(cls.datapoints, cls.labels,
train_split=0.95, seed=1970)
cls.classifier = KNNClassifier(n=5)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
def test_train(self):
self.assertEqual(self.classifier.X, self.X_train)
def test_predict(self):
predictions = self.classifier.predict(X=self.X_test)
self.assertEqual(predictions, [2, 2, 0, 0, 2, 0, 2, 2, 1, 1, 2, 0, 2, 2, 0])
def test_score(self):
        accuracy = self.classifier.score(X=self.X_test, y_true=self.y_test)
self.assertEqual(accuracy, 1.0)
class TestKNNRegressor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.X, cls.y = regression(100, seed=1970)
cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(cls.X, cls.y,
train_split=0.8, seed=1970)
cls.regressor = KNNRegressor(n=5)
cls.regressor.train(X=cls.X_train, y=cls.y_train)
def test_train(self):
self.assertEqual(self.regressor.X, self.X_train)
def test_predict(self):
predictions = self.regressor.predict(X=self.X_test)
self.assertEqual(predictions[:5], [3.1161666191379163, 4.933573052500679, 6.611283497257544,
9.185848057766739, 3.110023909806445])
def test_score_mse(self):
mse = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mse')
self.assertEqual(mse, 1.5470835956432736)
def test_score_mae(self):
mae = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mae')
self.assertEqual(mae, 1.024567537840727)
itsMagondu/IoTNeuralNetworks | noisefilter/noisefilter/urls.py | Python | mit | 1,603 | 0.001871
""" Default urlconf for noisefilter """
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import index, sitemap
from django.views.generic.base import TemplateView
from django.views.defaults import (permission_denied,
page_not_found,
server_error)
sitemaps = {
# Fill me with sitemaps
}
admin.autodiscover()
urlpatterns = [
url(r'', include('filter.urls')),
url(r'base', include('base.urls')),
# Admin
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Sitemap
url(r'^sitemap\.xml$', index, {'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', sitemap, {'sitemaps': sitemaps}),
# robots.txt
url(r'^robots\.txt$',
TemplateView.as_view(
template_name='robots.txt',
content_type='text/plain')
),
]
if settings.DEBUG:
# Add debug-toolbar
import debug_toolbar #noqa
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
# Serve media files through Django.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Show error pages during development
urlpatterns += [
url(r'^403/$', permission_denied),
url(r'^404/$', page_not_found),
url(r'^500/$', server_error)
]
jmcguire/rpg-toolkit-website | rpgtoolkit.py | Python | mit | 4,134 | 0.012821
#!/usr/bin/env python
"""
rpgtoolkit.py
Generate a random webpage from a config file.
Lots of gaming resources are simple variations on a theme. Here's a big list, choose a random thing from the list, and interpolate a bit using data from some other lists.
Here's how this program works: given a config file, figure out how to make a website from it. It looks for the "meta" config hash to figure out how to kick itself off. It also knows how to interpolate simple variables.
Created by Justin McGuire <[email protected]>.
"""
import sys
import random
import re
import yaml
import os
import logging
class ToolConfig:
config = None
def __init__(self, config_file):
self.load_config(config_file)
self.title = self.config['meta']['title']
self.copyright = self.config['meta']['copyright']
self.generate = self.config['meta']['generate']
self.start = self.config['meta']['start']
self.norepeats = True
self.saved_tags = {}
def load_config(self, config_file):
"""load the config file into the static config variable, but only once"""
if not os.path.isfile(config_file):
sys.exit("config file: %s is not a file" % config_file)
if not self.config:
with open(config_file) as file:
self.config = yaml.load(file)
def create(self):
"""get an random selection."""
# if we don't care about repeats, reload the config file after every use
if not self.norepeats:
self.backup_config = self.config
# start the string with the "start" variable
select = self.get_random_item_from( self.start )
logging.debug("inital string %s" % select)
select = self.interpolate(select)
        # these get set in interpolate, but must be unset elsewhere, since it's a
# recursive function that doesn't know when its time is over
self.saved_tags = {}
if not self.norepeats:
self.config = self.backup_config
return select
    def get_random_item_from(self, listname):
"""remove a random item from one of the lists in the config, and return it"""
pick = random.randint(0, len(self.config[listname]) - 1)
return self.config[listname].pop(pick)
def interpolate(self, string):
"""replace references in string with other items from hash, recursive"""
# look for a reference, which looks like [hashname]
m = re.search(r'\[([^]]*)\]', string)
if m:
tag = m.group(1)
logging.debug("found tag %s" % tag)
# the listname may need to be saved, so it can be reused later
if ':' in tag:
(list_name, saved_tag) = tag.split(':')
else:
list_name = tag
saved_tag = ''
logging.debug("tag split into list_name/saved_tag: %s/%s" % (list_name, saved_tag))
# get the new selection to replace the tag with
selection = ''
if list_name in self.saved_tags:
# check if the list_name is actually a saved tag
selection = self.saved_tags[list_name]
else:
# otherwise grab a random selection from the choosen list
selection = self.get_random_item_from(list_name)
# if we want to save the selection, do that now
if saved_tag:
self.saved_tags[saved_tag] = selection
# there may be more interpolation
logging.debug("replacing [%s] with %s" % (tag, selection))
string = self.interpolate( string.replace('[%s]' % tag, selection, 1) )
return string
def main(config_file):
logging.basicConfig(level=logging.WARNING)
tool_config = ToolConfig(config_file)
print tool_config.title
# print out each random selection
for x in range(tool_config.generate):
item = tool_config.create()
print "%d: %s" % (x+1, item)
def usage(error_msg=''):
usage_msg = "usage: %s <config_file>" % sys.argv[0]
if error_msg:
sys.exit("%s\n%s" % (error_msg, usage_msg))
else:
sys.exit(usage_msg)
if __name__ == '__main__':
# make sure our arguments are correct
if len(sys.argv) > 1:
config_file = sys.argv[1]
if not os.path.isfile(config_file):
usage("config file %s isn't a file" % config_file)
main(config_file)
else:
usage()
kubernetes-client/python | kubernetes/client/models/v1_ingress_class_spec.py | Python | apache-2.0 | 5,087 | 0
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1IngressClassSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'controller': 'str',
'parameters': 'V1IngressClassParametersReference'
}
attribute_map = {
'controller': 'controller',
'parameters': 'parameters'
}
def __init__(self, controller=None, parameters=None, local_vars_configuration=None): # noqa: E501
"""V1IngressClassSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._controller = None
self._parameters = None
self.discriminator = None
if controller is not None:
self.controller = controller
if parameters is not None:
self.parameters = parameters
@property
    def controller(self):
        """Gets the controller of this V1IngressClassSpec. # noqa: E501
        Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:return: The controller of this V1IngressClassSpec. # noqa: E501
:rtype: str
"""
return self._controller
@controller.setter
def controller(self, controller):
"""Sets the controller of this V1IngressClassSpec.
Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:param controller: The controller of this V1IngressClassSpec. # noqa: E501
:type: str
"""
self._controller = controller
@property
def parameters(self):
"""Gets the parameters of this V1IngressClassSpec. # noqa: E501
:return: The parameters of this V1IngressClassSpec. # noqa: E501
:rtype: V1IngressClassParametersReference
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1IngressClassSpec.
:param parameters: The parameters of this V1IngressClassSpec. # noqa: E501
:type: V1IngressClassParametersReference
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressClassSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressClassSpec):
return True
return self.to_dict() != other.to_dict()
morreene/tradenews | tradenews/newscluster/models.py | Python | bsd-3-clause | 533 | 0.001876
# -*- coding: utf-8 -*-
import datetime as dt
from tradenews.database import (
    Column,
db,
Model,
SurrogatePK,
)
class NewsCluster(SurrogatePK, Model):
__tablename__ = 'newscluster'
# id = Column(db.Integer(), nullable=False, primary_key=True)
date = Column(db.Text(), nullable=False, default=dt.datetime.utcnow)
title = Column(db.Text(), nullable=True)
    text = Column(db.Text(), nullable=True)
cluster = Column(db.Integer(), nullable=True)
def __init__(self):
db.Model.__init__(self)
burnpanck/traits | examples/tutorials/traits_4.0/interfaces/interfaces.py | Python | bsd-3-clause | 4,275 | 0.011696
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
#--(Interfaces)-----------------------------------------------------------------
"""
Interfaces
==========
In Traits 3.0, the ability to define, implement and use *interfaces* has been
added to the package.
Defining Interfaces
-------------------
Interfaces are defined by subclassing from the **Interface** class, as shown
in the example below::
from traits.api import Interface
class IName ( Interface ):
def get_name ( self ):
" Returns the name of an object. "
This same code is shown in the **IName Interface** tab of the code.
Interface classes are intended mainly as documentation of the methods and
traits that the interface defines, and should not contain any actual
implementation code, although no check is performed to enforce this currently.
Implementing Interfaces
-----------------------
A class declares that it implements one or more interfaces using the
**implements** function, which has the form::
implements( interface [, interface2, ..., interfacen] )
The semantics of this function is that the class declares that it implements
each of the *interfaces* specified as an argument to **implements**.
Also, the call to **implements** must occur at class scope within the class
definition, as shown in the following example::
from traits.api import HasTraits, implements
class Person ( HasTraits ):
implements( IName )
...
Only a single call to **implements** should occur within a class definition.
Refer to the **Person Class** tab in the code for a complete example of using
**implements**.
Note that in the current version, traits does not check to ensure that the
class containing the **implements** function actually implements the interfaces
it says it does.
Using Interfaces
----------------
Being able to define and implement interfaces would be of little use without
the ability to *use* interfaces in your code. In traits, using an interface is
accomplished using the **Instance** trait, as shown in the following example::
from traits.api import HasTraits, Instance
class Apartment ( HasTraits ):
renter = Instance( IName )
Using an interface class in an **Instance** trait definition declares that the
trait only accepts values which are objects that either:
- Implement the specified interface.
- Can be adapted to an object that implements the specified interface.
Additional information on what it means to *adapt* an object to implement an
interface is presented in the next section of the tutorial.
As before, the **Instance** trait can also be used with classes that are not
interfaces, such as::
from traits.api import HasTraits, Instance
class Apartment ( HasTraits ):
renter = Instance( Person )
In this case, the value of the trait must be an object which is an instance of
the specified class or one of its subclasses.
"""
#--<Imports>--------------------------------------------------------------------
from traits.api import *
#--[IName Interface]------------------------------------------------------------
# Define the 'IName' interface:
class IName ( Interface ):
def get_name ( self ):
""" Returns the name of an object. """
#--[Person Class]---------------------------------------------------------------
class Person ( HasTraits ):
implements( IName )
first_name = Str( 'John' )
last_name = Str( 'Doe' )
# Implementation of the 'IName' interface:
def get_name ( self ):
""" Returns the name of an object. """
return ('%s %s' % ( self.first_name, self.last_name ))
#--[Apartment Class]------------------------------------------------------------
# Define a class using an object that implements the 'IName' interface:
class Apartment ( HasTraits ):
renter = Instance( IName )
#--[Example*]--------------------------------------------------------------------
# Create an object implementing the 'IName' interface:
william = Person( first_name = 'William', last_name = 'Adams' )
# Create an apartment, and assign 'renter' an object implementing 'IName':
apt = Apartment( renter = william )
# Verify that the object works correctly:
print 'Renter is:', apt.renter.get_name()
chemelnucfin/tensorflow | tensorflow/python/data/kernel_tests/multi_device_iterator_test.py | Python | apache-2.0 | 19,062 | 0.00724
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.MultiDeviceIterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
def skip_v2_test_combinations():
# TODO(b/121264236): Support v2 behavior for these tests.
return combinations.combine(tf_api_version=1, mode=["eager", "graph"])
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(skip_v2_test_combinations(),
combinations.combine(num_inits=[0, 1, 42])))
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@combinations.generate(skip_v2_test_combinations())
def testBasic(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(skip_v2_test_combinations())
def testOneOnSameDevice(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:0", "/cpu:1"])
config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(skip_v2_test_combinations())
def testRepeatDevices(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(20)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 20, 4):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
self.assertEqual(i + 2, self.evaluate(elem_on_3))
self.assertEqual(i + 3, self.evaluate(elem_on_4))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
self.evaluate(elem_on_3)
self.evaluate(elem_on_4)
@combinations.generate(skip_v2_test_combinations())
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(skip_v2_test_combinations())
def testGetNextAsOptional(self):
if context.executing_eagerly():
return
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
elem_on_1_has_value_t = elem_on_1.has_value()
elem_on_1_t = elem_on_1.get_value()
elem_on_2_has_value_t = elem_on_2.has_value()
elem_on_2_t = elem_on_2.get_value()
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config) as sess:
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(i, elem_on_1_value)
elem_on_2_has_value, elem_on_2_value = sess.run(
[elem_on_2_has_value_t, elem_on_2_t])
self.assertTrue(elem_on_2_has_value)
self.assertEqual(i + 1, elem_on_2_value)
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(8, elem_on_1_value)
self.assertFalse(self.evaluate(elem_on_1_has_value_t))
self.assertFalse(self.evaluate(elem_on_2_has_value_t))
      with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_1_t)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_2_t)
  @combinations.generate(skip_v2_test_combinations())
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4)
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(
webcomics/dosage | dosagelib/plugins/projectfuture.py | Python | mit | 2,118 | 0
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class ProjectFuture(_ParserScraper):
imageSearch = '//td[@class="tamid"]/img'
prevSearch = '//a[./img[@alt="Previous"]]'
def __init__(self, name, comic, first, last=None):
if name == 'ProjectFuture':
super(ProjectFuture, self).__init__(name)
else:
super(ProjectFuture, self).__init__('ProjectFuture/' + name)
self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
self.stripUrl = self.url + '?strip=%s'
self.firstStripUrl = self.stripUrl % first
if last:
self.url = self.stripUrl
self.endOfLife = True
@classmethod
def getmodules(cls):
return (
cls('AWalkInTheWoods', 'simeon', '1', last='12'),
cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
cls('BookOfTenets', 'tenets', '01', last='45'),
cls('CriticalMass', 'criticalmass', 'cover', last='26'),
cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
cls('Emily', 'emily', '01-00'),
            cls('FishingTrip', 'fishing', '01-00'),
cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
cls('NiallsStory', 'niall', '00'),
cls('ProjectFuture', 'strip', '0'),
cls('RedValentine', 'redvalentine', '1', last='6'),
cls('ShortStories', 'shorts', '01-00'),
cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
cls('TheEpsilonProject', 'epsilon', '00-01'),
cls('TheHarvest', 'harvest', '01-00'),
cls('TheSierraChronicles', 'sierra', '0', last='29'),
cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
cls('TurningANewPage', 'azrael', '1', last='54'),
)
mPowering/django-orb | orb/management/commands/load_orb_languages.py | Python | gpl-3.0 | 2,656 | 0.002259
"""
Management command to load language fixtures as tags
"""
from __future__ import unicode_literals
import csv
import os
import re
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from orb.models import Category, Tag
def has_data(input):
"""Identify if the input contains any meaningful string content
CSV input may include non-breaking space which is a Unicode character,
however the csv module does not handle unicode.
Args:
input: string value
Returns:
bool
"""
input = input.replace("\xc2\xa0", " ")
return bool(re.compile("\S").match(input))
class Command(BaseCommand):
help = "Loads languages from CSV fixtures into tag database"
def add_arguments(self, parser):
parser.add_argument(
"--file",
dest="fixture",
default="orb/fixtures/iso639.csv",
help="CSV file path",
)
parser.add_argument(
"--image",
dest="image",
default="tag/language_default.png",
help="Default image (static image path)",
)
        parser.add_argument(
"--user",
dest="user",
type=int,
default=1,
help="Default user to mark as creating",
)
parser.add_argument(
"--iso6392",
action="store_true",
dest="iso6392",
default=False,
help="Flag for including all ISO 639.2 (only ISO 639.1 included by default)",
)
def handle(self, *args, **options):
try:
user = User.objects.get(pk=options["user"])
except User.DoesNotExist:
raise CommandError("No match user found for '{0}'".format(options["user"]))
category, _ = Category.objects.get_or_create(name="Language", defaults={
'top_level': True,
})
if not os.path.exists(options["fixture"]):
raise CommandError("Cannot find file '{0}'".format(options["fixture"]))
with open(options["fixture"]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row = {k: v.decode('utf-8') for k, v in row.items()}
if not options["iso6392"] and not has_data(row["iso639-1"]):
continue
tag, _ = Tag.objects.get_or_create(name=row["English"], defaults={
"create_user": user,
"update_user": user,
"category": category,
"image": options["image"],
})
Chasego/codirit | leetcode/034-Search-for-a-Range/SearchForaRange_001.py | Python | mit | 654 | 0.010703
class Solution:
# @param {integer[]} nums
# @param {integer} target
# @return {integer[]}
    def searchRange(self, nums, target):
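        # Two binary searches: the first finds the leftmost index with
        # nums[m] >= target, the second the rightmost index with nums[m] <= target;
        # if those bounds cross, the target is absent and [-1, -1] is returned.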
res = []
l, r = 0, len(nums) - 1
while l <= r:
m = (l + r) /2
if nums[m] < target:
l = m + 1
else:
r = m - 1
res.append(l)
l, r = 0, len(nums) - 1
while l <= r:
m = (l + r) /2
if nums[m] <= target:
l = m + 1
else:
r = m - 1
res.append(r)
res = [-1, -1] if res[0] > res[1] else res
return res
kubeflow/kfserving-lts | test/e2e/predictor/test_torchserve.py | Python | apache-2.0 | 2,082 | 0.000961
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from kubernetes import client
from kfserving import (
constants,
KFServingClient,
V1beta1InferenceService,
V1beta1InferenceServiceSpec,
V1beta1PredictorSpec,
V1beta1TorchServeSpec,
)
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict
from ..common.utils import KFSERVING_TEST_NAMESPACE
KFServing = KFServingClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
def test_torchserve_kfserving():
service_name = "mnist"
predictor = V1beta1PredictorSpec(
min_replicas=1,
pytorch=V1beta1TorchServeSpec(
storage_uri="gs://kfserving-examples/models/torchserve/image_classifier",
protocol_version="v1",
resources=V1ResourceRequirements(
requests={"cpu": "1", "memory": "4Gi"},
limits={"cpu": "1", "memory": "4Gi"},
),
),
)
isvc = V1beta1InferenceService(
api_version=constants.KFSERVING_V1BETA1,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name=service_name, namespace=KFSERVING_TEST_NAMESPACE
),
spec=V1beta1InferenceServiceSpec(predictor=predictor),
)
KFServing.create(isvc)
KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
res = predict(service_name, "./data/torchserve_input.json")
assert(res.get("predictions")[0] == 2)
KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
Colstuwjx/scit-sys | openstack_api.py | Python | gpl-2.0 | 7,352 | 0.005849
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import time
from creds import get_nova_obj
from scit_config import *
from scit_db import *
#get authed nova obj
nova = get_nova_obj()
def create_nova_vm(logger, server_name, usr_dst):
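    # Creates the VM via create_vm_min; on failure the half-built instance is
    # deleted and creation retried up to scit_clean_retry times before the new
    # VM is recorded as READY in the database.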
conf = getScitConfig()
retry = int(conf["scit"]["scit_clean_retry"])
#check status and write into db
ret = create_vm_min(logger, server_name, usr_dst)
if not ret:
while True:
if retry <= 0:
print "create vm " + server_name + " timeout"
if logger:
logger.error("create vm " + server_name + " timeout.")
return False
else:
                delete_nova_vm(logger, server_name, None)
time.sleep(10)
retry = retry - 1
ret = create_vm_min(logger, server_name, usr_dst)
if ret:
break
#write into db
addVm(ret["vm_name"], ret["vm_fixip"], "READY")
return True
#minimal create vm
def create_vm_min(logger, server_name, usr_dst):
ret = {}
ret["vm_name"] = server_name
try:
f = open(usr_dst)
user_data = f.read()
f.close()
except:
if logger:
logger.error("create vm failed, is there a init script?")
return False
#read the conf
conf = getScitConfig()
img = conf["instance"]["instance_img"]
flvr = conf["instance"]["instance_flvr"]
key_pair = conf["instance"]["instance_keypair"]
network_id = conf["network"]["network_ext_netid"]
#query whether the name is already exists.
#try create
if not nova.keypairs.findall(name=key_pair):
with open(os.path.expanduser('/root/.ssh/id_rsa.pub')) as fpubkey:
nova.keypairs.create(name=key_pair, public_key=fpubkey.read())
ta = time.time()
try:
image = nova.images.find(name=img)
flavor = nova.flavors.find(name=flvr)
network = nova.networks.find(id=network_id)
instance = nova.servers.create(name=server_name, image=image, flavor=flavor, userdata=user_data, network=network, key_name=key_pair)
except:
if logger:
logger.error("failed create nova vm, exception throw out.")
print "expceton found when try creating nova vm."
return False
status = instance.status
while status == 'BUILD':
time.sleep(5)
print "waiting vm active.."
# Retrieve the instance again so the status field updates
instance = nova.servers.get(instance.id)
status = instance.status
tb = time.time()
t = int(tb-ta + (tb-ta - int(tb-ta))/1.0)
print "Total: " + str(t) + " s."
if logger:
logger.info("create vm " + server_name + ", Total " + str(t) + " s.")
#not active or network is not ok
if status != 'ACTIVE':
return False
instance = nova.servers.get(instance.id)
network_flag = False
if instance.networks:
for item in instance.networks:
if instance.networks[item]:
ret["vm_fixip"] = instance.networks[item][0]
network_flag = True
if not network_flag:
print "vm network init failed."
if logger:
logger.error("vm: " + server_name + " network init failed.")
return False
print "successful create vm: " + server_name
if logger:
logger.info("vm: " + server_name + " created.")
return ret
#bind floatip to vm
#check whether a clean server is ok to online
def vm_extra_set(logger, server_name, floatip):
try:
instance = nova.servers.find(name = server_name)
except:
print "vm " + server_name + "not found."
if logger:
logger.error("vm " + server_name + "not found.")
return False
if instance.status == "ACTIVE":
floating_ip = nova.floating_ips.find(ip=floatip)
instance.add_floating_ip(floating_ip)
#check whether server is ok
#write into db
updateFloatip(server_name, floatip)
return True
else:
return False
def vm_free_set(logger, server_name):
instance = None
try:
instance = nova.servers.find(name = server_name)
except:
print "vm " + server_name + "not found."
if logger:
logger.error("vm " + server_name + "not found?!")
return False
floatip = ""
for item in instance.networks:
if len(instance.networks[item]) == 2:
floatip = instance.networks[item][1]
else:
return False
#free the floatip
instance.remove_floating_ip(floatip)
return floatip
#delete the vm
def delete_nova_vm(logger, server_name, float_ip):
#clean the env
#remove the knownlist info
if not server_name:
print "vm name illegal."
if logger:
logger.warn("vm name illegal, delete task stopped.")
print "deleting vm " + server_name
if logger:
logger.info("try deleting vm " + server_name)
if float_ip:
os.popen("sed -i '/^.*" + float_ip + ".*/d' /root/.ssh/known_hosts")
#os.popen("sed -i '/^.*" + float_ip + ".*/d' /etc/ansible/hosts")
try:
instance = nova.servers.find(name=server_name)
except:
print "vm: " + server_name + " not found."
if logger:
logger.warn("vm " + server_name + " not found.")
return True
instance.delete()
#clear the db
#runSQL("delete from scit_vm where vm_name = " + server_name + ";")
delVm(server_name)
#confirm that is delete ok
conf = getScitConfig()
retry = int(conf["scit"]["scit_clean_retry"])
while True:
if retry <= 0:
print "delete task timeout."
if logger:
logger.error("delete vm: " + server_name + " task timeout.")
return False
try:
instance = nova.servers.find(name=server_name)
retry = retry - 1
except:
break
#clear the vm
def clear_nova_vm(logger):
#clear the all nova vm
instances = nova.servers.list()
retry = 0
if instances:
for server in instances:
print "deleting the vm: " + server.name
if logger:
logger.info("deleting the vm: " + server.name)
server.delete()
else:
return True
#wait the clear ok
while True:
if retry > 10:
#retry 10 times
print "clear vm failed, timeout.."
if logger:
logger.error("clear vm retry timeout.")
return False
instances = nova.servers.list()
if instances:
retry = retry + 1
time.sleep(10)
else:
print "all vm cleared.."
logger.info("cleared the vms..")
return True
#main func
def main():
    # create_nova_vm only accepts (logger, server_name, usr_dst); image, flavor,
    # keypair and network are now read from the config file.
    create_nova_vm(None, server_name="test2", usr_dst="/root/openstack/pys/scit-sys/scripts/init.sh")
#delete_nova_vm(None, server_name="test2", float_ip="192.168.1.122")
return 0
#code entry
if __name__ == '__main__':
#main()
vm_extra_set(None, "SCIT_VM00", "192.168.1.122")
#clear_nova_vm(None)
Dangetsu/vnr | Frameworks/Sakura/py/libs/scripts/cabocha.py | Python | gpl-3.0 | 307 | 0.026059
# coding: utf8
# cabocha.py
# 2/14/2014 jichi
if __name__ == '__main__':
import sys
sys.path.append('..')
def get(dic):
"""
@param dic str such as ipadic or unidic
@return bool
"""
import rc
  return rc.runscript('getcabocha.py', (dic,))
if __name__ == "__main__":
get('unidic')
# EOF
|
gzqichang/wa
|
qevent/qevent/models.py
|
Python
|
mit
| 5,215
| 0.001778
|
from django.db import models
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
from qbase.time import timesince
from qbase import fields
from qbase.models import get_contenttype
from qevent.registry import check
import functools
from collections import defaultdict
def stream(f):
"""
    Turn a function that returns filter conditions into one that returns a queryset,
    and add support for offset and limit arguments.
"""
@functools.wraps(f)
def wrapped(manager, *args, **kwargs):
offset, limit = kwargs.pop('_offset', None), kwargs.pop('_limit', None)
qs = f(manager, *args, **kwargs)
if isinstance(qs, dict):
qs = manager.filter(**qs)
elif isinstance(qs, (list, tuple)):
qs = manager.filter(*qs)
if offset or limit:
qs = qs[offset:limit]
return qs
return wrapped
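# Usage sketch (manager call and arguments are illustrative): a @stream-decorated
# manager method accepts extra paging keywords that are applied as a queryset
# slice after the filters it returns, e.g.
#   Action.objects.actor(some_user, _offset=0, _limit=20)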
class ActionManager(models.Manager):
    # all events are shown only when private=True is passed explicitly; otherwise only public events are shown
def public(self, *args, **kwargs):
if 'private' not in kwargs:
kwargs['private'] = False
elif kwargs.get('private'):
del kwargs['private']
return self.filter(*args, **kwargs)
@stream
def actor(self, obj, **kwargs):
"""
        All actions with the given object as actor
"""
check(obj)
return obj.actor_actions.public(**kwargs)
@stream
def target(self, obj, **kwargs):
"""
        All actions with the given object as target
"""
check(obj)
return obj.target_actions.public(**kwargs)
@stream
def relative(self, obj, **kwargs):
"""
        All actions with the given object as relative
"""
        check(obj)
return obj.relative_actions.public(**kwargs)
def _object_actions(self, obj):
check(obj)
ct = get_contenttype(obj)
return models.Q(
actor_type_id=ct.pk,
            actor_object_id=obj.pk,
) | models.Q(
target_type_id=ct.pk,
target_object_id=obj.pk,
) | models.Q(
relative_type_id=ct.pk,
relative_object_id=obj.pk,
)
@stream
def any(self, obj, **kwargs):
"""
        All actions involving the given object
"""
return self.public(self._object_actions(obj), **kwargs)
@stream
def content(self, model, **kwargs):
"""
        All actions involving the given model
"""
check(model)
ct = get_contenttype(model)
return self.public(
(models.Q(actor_type_id=ct.pk) |
models.Q(target_type_id=ct.pk) |
models.Q(relative_type_id=ct.pk)
), **kwargs)
model_actions = content
Actions = ActionManager()
class Action(models.Model):
actor_type = models.ForeignKey(ContentType, related_name='act_actor', db_index=True, null=True, blank=True, default=None)
actor_object_id = fields.char_index_null(max_length=64)
actor = GenericForeignKey('actor_type', 'actor_object_id')
#
verb = fields.char_index('动作', max_length=32)
#
target_type = models.ForeignKey(ContentType, related_name='act_target', db_index=True)
target_object_id = fields.char_index(max_length=64)
target = GenericForeignKey('target_type', 'target_object_id')
#
relative_type = models.ForeignKey(ContentType, related_name='act_relative', null=True, blank=True, default=None)
relative_object_id = fields.char_null(max_length=64)
relative = GenericForeignKey('relative_type', 'relative_object_id')
#
timestamp = fields.datetime_auto_add()
description = fields.text('描述')
actor_only = fields.falsy('单向')
private = fields.falsy('私密')
class Meta:
verbose_name = verbose_name_plural = '事件'
ordering = ('-timestamp', )
objects = Actions
def __str__(self):
return '{} {} {}'.format(str(self.actor), self.verb, str(self.target))
def timesince(self, now=None):
return timesince(self.timestamp, now)
def action_handler(verb, **kwargs):
"""
    Handler for the qevent.signals.action signal
"""
kwargs.pop('signal', None)
actor = kwargs.pop('sender')
if hasattr(verb, '_proxy____args'):
        verb = verb._proxy____args[0]
event = Action(
actor_type=get_contenttype(actor),
actor_object_id=actor.pk if actor else None,
verb=str(verb),
timestamp=kwargs.pop('timestamp', timezone.now()),
description=kwargs.pop('description', None),
private=bool(kwargs.pop('private', False)),
)
for opt in ('target', 'relative'):
obj = kwargs.pop(opt, None)
if obj is not None:
check(obj)
setattr(event, opt+'_type', get_contenttype(obj))
setattr(event, opt+'_object_id', obj.pk)
    if hasattr(event, 'data') and kwargs:
        event.data = kwargs
event.save()
return event
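# Wiring sketch: the docstring above names qevent.signals.action as the signal
# this handler serves; assuming that signal object exists there, a typical
# hookup would look like this (the receiver, sender and target objects are
# illustrative only):
#   from qevent.signals import action
#   action.connect(action_handler, dispatch_uid='qevent.action_handler')
#   action.send(sender=request.user, verb='comment', target=post, relative=blog)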
|
algorhythms/LeetCode
|
324 Wiggle Sort II py3.py
|
Python
|
mit
| 2,047
| 0.000489
|
#!/usr/bin/python3
"""
Given an unsorted array nums, reorder it such that
nums[0] < nums[1] > nums[2] < nums[3]....
Example 1:
Input: nums = [1, 5, 1, 1, 6, 4]
Output: One possible answer is [1, 4, 1, 5, 1, 6].
Example 2:
Input: nums = [1, 3, 2, 2, 3, 1]
Output: One possible answer is [2, 3, 1, 3, 1, 2].
Note:
You may assume all input has valid answer.
Follow Up:
Can you do it in O(n) time and/or in-place with O(1) extra space?
"""
from typing import List
class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
Median + 3-way partitioning
"""
n = len(nums)
# mid = self.find_kth(nums, 0, n, (n - 1) // 2)
# median = nums[mid]
median = list(sorted(nums))[n//2]
# three way pivot
odd = 1
even = n - 1 if (n - 1) % 2 == 0 else n - 2
i = 0
while i < n:
if nums[i] < median:
if i >= even and i % 2 == 0:
i += 1
continue
nums[i], nums[even] = nums[even], nums[i]
even -= 2
elif nums[i] > median:
if i <= odd and i % 2 == 1:
i += 1
continue
nums[i], nums[odd] = nums[odd], nums[i]
odd += 2
else:
i += 1
def find_kth(self, A, lo, hi, k):
p = self.pivot(A, lo, hi)
if k == p:
return p
elif k > p:
return self.find_kth(A, p + 1, hi, k)
else:
return self.find_kth(A, lo, p, k)
def pivot(self, A, lo, hi):
# need 3-way pivot, otherwise TLE
p = lo
closed = lo
for i in range(lo + 1, hi):
if A[i] < A[p]:
closed += 1
A[closed], A[i] = A[i], A[closed]
A[closed], A[p] = A[p], A[closed]
return closed
if __name__ == "__main__":
Solution().wiggleSort([1, 5, 1, 1, 6, 4])
|
edx/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_item.py
|
Python
|
agpl-3.0
| 160,015
| 0.003406
|
"""Tests for items views."""
import json
import re
from datetime import datetime, timedelta
from unittest.mock import Mock, PropertyMock, patch
import ddt
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from edx_proctoring.exceptions import ProctoredExamNotFoundException
from opaque_keys import InvalidKeyError
from opaque_keys.edx.asides import AsideUsageKeyV2
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from pyquery import PyQuery
from pytz import UTC
from web_fragments.fragment import Fragment
from webob import Response
from xblock.core import XBlockAside
from xblock.exceptions import NoSuchHandlerError
from xblock.fields import Scope, ScopeIds, String
from xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
from xblock.validation import ValidationMessage
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import reverse_course_url, reverse_usage_url
from cms.djangoapps.contentstore.views import item as item_module
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.xblock_django.models import (
XBlockConfiguration,
XBlockStudioConfiguration,
XBlockStudioConfigurationFlag
)
from common.djangoapps.xblock_django.user_service import DjangoXBlockUserService
from lms.djangoapps.lms_xblock.mixin import NONSENSICAL_ACCESS_RESTRICTION
from openedx.core.djangoapps.discussions.models import DiscussionsConfiguration
from xmodule.capa_module import ProblemBlock # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.course_module import DEFAULT_START_DATE # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory, check_mongo_calls # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.partitions.partitions import ( # lint-amnesty, pylint: disable=wrong-import-order
ENROLLMENT_TRACK_PARTITION_ID,
MINIMUM_STATIC_PARTITION_ID,
Group,
UserPartition
)
from xmodule.partitions.tests.test_partitions import MockPartitionService # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.x_module import STUDENT_VIEW, STUDIO_VIEW # lint-amnesty, pylint: disable=wrong-import-order
from ..component import component_handler, get_component_templates
from ..item import (
ALWAYS,
VisibilityState,
_get_module_info,
_get_source_index,
_xblock_type_and_display_name,
add_container_page_publishing_info,
create_xblock_info,
)
class AsideTest(XBlockAside):
"""
Test xblock aside class
"""
FRAG_CONTENT = "<p>Aside Foo rendered</p>"
field11 = String(default="aside1_default_value1", scope=Scope.content)
field12 = String(default="aside1_default_value2", scope=Scope.settings)
field13 = String(default="aside1_default_value3", scope=Scope.parent)
@XBlockAside.aside_for('student_view')
def student_view_aside(self, block, context): # pylint: disable=unused-argument
"""Add to the student view"""
return Fragment(self.FRAG_CONTENT)
class ItemTest(CourseTestCase):
""" Base test class for create, save, and delete """
def setUp(self):
super().setUp()
self.course_key = self.course.id
self.usage_key = self.course.location
def get_item_from_modulestore(self, usage_key, verify_is_draft=False):
"""
Get the item referenced by the UsageKey from the modulestore
"""
item = self.store.get_item(usage_key)
if verify_is_draft:
self.assertTrue(getattr(item, 'is_draft', False))
return item
def response_usage_key(self, response):
"""
Get the UsageKey from the response payload and verify that the status_code was 200.
:param response:
"""
parsed = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
key = UsageKey.from_string(parsed['locator'])
if key.course_key.run is None:
key = key.map_into_course(CourseKey.from_string(parsed['courseKey']))
return key
def create_xblock(self, parent_usage_key=None, display_name=None, category=None, boilerplate=None): # lint-amnesty, pylint: disable=missing-function-docstring
data = {
'parent_locator': str(
self.usage_key
            ) if parent_usage_key is None else str(parent_usage_key),
'category': category
}
if display_name is not None:
data['display_name'] = display_name
if boilerplate is not None:
data['boilerplate'] = boilerplate
return self.client.ajax_post(reverse('xblock_handler'), json.dumps(data))
def _create_vertical(self, parent_usage_key=None):
"""
Creates a vertical, returning its UsageKey.
"""
        resp = self.create_xblock(category='vertical', parent_usage_key=parent_usage_key)
self.assertEqual(resp.status_code, 200)
return self.response_usage_key(resp)
@ddt.ddt
class GetItemTest(ItemTest):
"""Tests for '/xblock' GET url."""
def _get_preview(self, usage_key, data=None):
""" Makes a request to xblock preview handler """
preview_url = reverse_usage_url("xblock_view_handler", usage_key, {'view_name': 'container_preview'})
data = data if data else {}
resp = self.client.get(preview_url, data, HTTP_ACCEPT='application/json')
return resp
def _get_container_preview(self, usage_key, data=None):
"""
Returns the HTML and resources required for the xblock at the specified UsageKey
"""
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content.decode('utf-8'))
html = resp_content['html']
self.assertTrue(html)
resources = resp_content['resources']
self.assertIsNotNone(resources)
return html, resources
def _get_container_preview_with_error(self, usage_key, expected_code, data=None, content_contains=None):
""" Make request and asserts on response code and response contents """
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, expected_code)
if content_contains:
self.assertContains(resp, content_contains, status_code=expected_code)
return resp
@ddt.data(
(1, 17, 15, 16, 12),
(2, 17, 15, 16, 12),
(3, 17, 15, 16, 12),
)
@ddt.unpack
def test_get_query_count(self, branching_factor, chapter_queries, section_queries, unit_queries, problem_queries):
self.populate_course(branching_factor)
# Retrieve it
with check_mongo_calls(chapter_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['chapter'][-1]))
with check_mongo_calls(section_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['sequential'][-1]))
with check_mongo_calls(unit_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['vertical'][-1]))
with check_mongo_calls(problem_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['problem'][-1]))
@ddt.data(
(1, 30),
(2, 32),
(3, 34),
)
@ddt.unpack
def test_container_get_qu
|
SKIRT/PTS
|
modeling/fitting/component.py
|
Python
|
agpl-3.0
| 4,328
| 0.000693
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.component Contains the FittingComponent class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta
# Import astronomical modules
from astropy.table import Table
# Import the relevant PTS classes and modules
from ..component.component import ModelingComponent
from .tables import RunsTable
from .run import FittingRun
from .context import FittingContext
# -----------------------------------------------------------------
class FittingComponent(ModelingComponent):
"""
This class...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(FittingComponent, self).__init__(*args, **kwargs)
# -- Attributes --
self.context = None
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(FittingComponent, self).setup(**kwargs)
# Load the fitting context
self.context = FittingContext(self.fit_path)
# -----------------------------------------------------------------
@property
def runs_table_path(self):
"""
This function ...
:return:
"""
return self.context.runs_table_path
# -----------------------------------------------------------------
@property
def database_path(self):
"""
This function ...
:return:
"""
return self.context.database_path
# -----------------------------------------------------------------
@property
def statistics_path(self):
"""
This function ...
:return:
"""
return self.context.statistics_path
# -----------------------------------------------------------------
@property
def populations_path(self):
"""
This function ...
:return:
"""
return self.context.populations_path
# -----------------------------------------------------------------
@property
def earth_instrument_name(self):
"""
This function ...
:return:
"""
return self.context.earth_instrument_name
# -----------------------------------------------------------------
def load_fitting_run(self, name):
"""
This function ...
:param name:
:return:
"""
model_name = self.model_for_run(name)
return FittingRun(self.config.path, name, model_name)
# -----------------------------------------------------------------
@property
def runs_table(self):
"""
This function ...
:return:
"""
return RunsTable.from_file(self.runs_table_path)
# -----------------------------------------------------------------
@property
def run_names(self):
"""
This function ...
:return:
"""
return self.runs_table.run_names
# -----------------------------------------------------------------
def model_for_run(self, run_name):
"""
This function ...
:param run_name:
:return:
"""
return self.runs_table.model_for_run(run_name)
# -----------------------------------------------------------------
@property
def statistics(self):
"""
This function ...
:return:
"""
return Table.read(self.statistics_path)
# -----------------------------------------------------------------
|
Grumbel/scatterbackup
|
tests/test_fileinfo.py
|
Python
|
gpl-3.0
| 1,443
| 0.000693
|
#!/usr/bin/env python3
# ScatterBackup - A chaotic backup solution
# Copyright (C) 2015 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from scatterbackup.fileinfo import FileInfo
class FileInfoTestCase(unittest.TestCase):
def test_from_file(self):
fileinfo = FileInfo.from_file("tests/data/test.txt")
self.assertEqual(11, fileinfo.size)
self.assertEqual("6df4d50a41a5d20bc4faad8a6f09aa8f", fileinfo.blob.md5)
self.assertEqual("bc9faaae1e35d52f3dea9651da12cd36627b8403", fileinfo.blob.sha1)
# def test_json(self):
# fileinfo = FileInfo.from_file("tests/test.txt")
# jstxt = fileinfo.json()
# fileinfo2 = FileInfo.from_json(jstxt)
# self.assertEqual(fileinfo, fileinfo2)
if __name__ == '__main__':
unittest.main()
# EOF #
|
mahmoud/wapiti
|
wapiti/operations/test_basic.py
|
Python
|
bsd-3-clause
| 1,717
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base
from misc import GetPageInfo
from models import PageIdentifier
from category import GetSubcategoryInfos
from revisions import GetCurrentContent, GetPageRevisionInfos
from meta import GetSourceInfo
def test_unicode_title():
get_beyonce = GetCurrentContent("Beyoncé Knowles")
assert get_beyonce()
def test_coercion_basic():
pid = PageIdentifier(title='Africa', page_id=123, ns=4, source='enwp')
get_subcats = GetSubcategoryInfos(pid)
assert get_subcats.input_param == 'Category:Africa'
def test_web_request():
url = 'http://upload.wikimedia.org/wikipedia/commons/d/d2/Mcgregor.jpg'
get_photo = base.WebRequestOperation(url)
res = get_photo()
text = res[0]
assert len(text) == 16408
def test_get_html():
get_africa_html = base.GetPageHTML('Africa')
res = get_africa_html()
text = res[0]
assert len(text) > 350000
def test_missing_revisions():
get_revs = GetPageRevisionInfos('Coffee_lololololol')
rev_list = get_revs()
'''
Should return 'missing' and negative pageid
'''
assert len(rev_list) == 0
def test_get_meta():
get_source_info = GetSourceInfo()
meta = get_source_info()
assert meta
def test_client_passed_to_subops():
    # This tests whether the client object given to the initial operation
# is passed to its sub-operations.
# Use just enough titles to force multiplexing so that we can get
# sub ops to test.
titles = ['a'] * (base.DEFAULT_QUERY_LIMIT.get_limit() + 1)
client = base.MockClient()
op = GetPageInfo(titles, client=client)
assert id(op.subop_queues[0].peek().client) == id(client)
|
Yas3r/OWASP-ZSC
|
lib/generator/linux_x86/dir_create.py
|
Python
|
gpl-3.0
| 378
| 0.026455
|
#!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
shellcode template used : http://shell-storm.org/shellcode/files/shellcode-57.php
'''
from core import stack
from core import template
def run(dirname):
command = 'mkdir %s' %(str(dirname))
return template.sys(stack.generate(command.replace('[space]',' '),'%ecx','string'))
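# Usage sketch (directory name and return type are assumptions based on the
# surrounding project, not verified here):
#   shellcode = run('/tmp/pwned')  # shellcode string for "mkdir /tmp/pwned"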
|
boyska/pyFsdb
|
fsdb/Fsdb.py
|
Python
|
lgpl-3.0
| 9,607
| 0.001353
|
# -*- coding: utf-8 -*-
import os
import errno
import stat
import unicodedata
import hashlib
import shutil
import logging
import config
class Fsdb(object):
"""File system database
    expose a simple api (add, get, remove)
    to manage the saving of files on disk.
    files are placed under the specified fsdb root folder and
are managed using a directory tree generated from the file digest
"""
CONFIG_FILE = ".fsdb.conf"
def __init__(self, fsdbRoot, mode=None, deep=None, hash_alg=None):
"""Create an fsdb instance.
If file named ".fsdb.conf" it is found in @fsdbRoot,
the file will be parsed, config options will be loded and
function parameters will be ignored.
If there is not such file, function parameters will be loaded and
written to ".fsdb.conf" in @fsdbRoot
Args:
fsdbRoot -- root path under will be placed all files
            mode -- string representing the mask (octal)
to use for files/folders creation (default: "0770")
deep -- number of levels to use for directory tree (default: 3)
            hash_alg -- string name of the hash algorithm to use (default: "sha1")
            logHandler -- handler that will be used to log messages
"""
self.logger = logging.getLogger(__name__)
# cleanup the path
fsdbRoot = os.path.expanduser(fsdbRoot) # replace ~
fsdbRoot = os.path.expandvars(fsdbRoot) # replace vars
fsdbRoot = os.path.normpath(fsdbRoot) # replace /../ and so on
fsdbRoot = os.path.realpath(fsdbRoot) # resolve links
# check if path it's absolute
if not os.path.isabs(fsdbRoot):
raise Exception("fsdb can not operate on relative path")
        # on different platforms the same unicode string could have different representations
if isinstance(fsdbRoot, unicode):
fsdbRoot = unicodedata.normalize("NFC", fsdbRoot)
configPath = os.path.join(fsdbRoot, Fsdb.CONFIG_FILE)
if Fsdb.configExists(fsdbRoot):
# warn user about config ignoring and load config from file
self.logger.debug("Fsdb config file found. Runtime parameters will be ignored. ["+configPath+"]")
conf = config.loadConf(configPath)
self._conf = conf
else:
conf = dict()
if mode is not None:
conf['mode'] = mode
if deep is not None:
conf['deep'] = deep
if hash_alg is not None:
conf['hash_alg'] = hash_alg
conf = config.normalizeConf(conf)
self._conf = conf
# make all parent directories if they do not exist
self._makedirs(fsdbRoot)
# write config file
config.writeConf(configPath, conf)
oldmask = os.umask(0)
os.chmod(configPath, self._conf['mode'])
os.umask(oldmask)
# fsdbRoot it is an existing regular folder and we have read and write permission
self.fsdbRoot = fsdbRoot
self.logger.debug("Fsdb initialized successfully: "+self.__str__())
def add(self, filePath):
"""Add an existing file to fsdb.
File under @filePath will be copied under fsdb directory tree
Args:
filePath -- path of the file to be add
Returns:
            String representing the digest of the file
"""
if not os.path.isfile(filePath):
raise Exception("fsdb can not add: not regular file received")
digest = Fsdb.fileDigest(filePath, algorithm=self._conf['hash_alg'])
if self.exists(digest):
self.logger.debug('Added File: ['+digest+'] ( Already exists. Skipping transfer)')
return digest
absPath = self.getFilePath(digest)
absFolderPath = os.path.dirname(absPath)
# make all parent directories if they do not exist
self._makedirs(absFolderPath)
# copy file and set permission
oldmask = os.umask(0)
shutil.copyfile(filePath, absPath)
os.chmod(absPath, self._conf['mode'])
os.umask(oldmask)
self.logger.debug('Added file: "'+filePath+'" -> "'+absPath+'" [ '+digest+' ]')
return digest
def remove(self, digest):
"""Remove an existing file from fsdb.
File with the given digest will be removed from fsdb and
the directory tree will be cleaned (remove empty folders)
Args:
digest -- digest of the file to remove
"""
# remove file
absPath = self.getFilePath(digest)
os.remove(absPath)
# clean directory tree
tmpPath = os.path.dirname(absPath)
while tmpPath != self.fsdbRoot:
if os.path.islink(tmpPath):
raise Exception('fsdb found a link in db tree: "'+tmpPath+'"')
if len(os.listdir(tmpPath)) > 0:
break
os.rmdir(tmpPath)
tmpPath = os.path.dirname(tmpPath)
self.logger.debug('Removed file: "'+absPath+'" [ '+digest+' ]')
def exists(self, digest):
"""Check file existence in fsdb
Returns:
True if file exists under this instance of fsdb, false otherwise
"""
return os.path.isfile(self.getFilePath(digest))
def getFilePath(self, digest):
"""Retrieve path to the file with the given digest
Args:
digest -- digest of the file
Returns:
            String representing the absolute path of the file
"""
relPath = Fsdb.generateDirTreePath(digest, self._conf['deep'])
return os.path.join(self.fsdbRoot, relPath)
def _makedirs(self, path):
"""Make folders recursively for the given path and
check read and write permission on the path
Args:
path -- path to the leaf folder
"""
try:
oldmask = os.umask(0)
os.makedirs(path, self._conf['mode'])
os.umask(oldmask)
except OSError, e:
if(e.errno == errno.EACCES):
raise Exception("not sufficent permissions to write on fsdb folder: \""+path+'\"')
elif(e.errno == errno.EEXIST):
fstat = os.stat(path)
if not stat.S_ISDIR(fstat.st_mode):
raise Exception("fsdb folder already exists but it is not a regular folder: \""+path+'\"')
                elif not os.access(path, os.R_OK | os.W_OK):
                    raise Exception("not sufficient permissions to write on fsdb folder: \""+path+'\"')
else:
raise e
def __str__(self):
return "{root: "+self.fsdbRoot+", mode: "+str(oct(self._conf['mode']))+", deep: "+str(self._conf['deep'])+", hash_alg: "+self._conf['hash_alg']+"}"
@staticmethod
def fileDigest(filepath, algorithm="sha1", block_size=2**20):
"""Calculate digest
File with the given digest will be removed from fsdb and
the directory tree will be cleaned (remove empty folders)
Args:
digest -- digest of the file to remove
"""
if(algorithm == "md5"):
algFunct = hashlib.md5
elif(algorithm == "sha1" or algorithm == "sha"):
algFunct = hashlib.sha1
elif(algorithm == "sha224"):
algFunct = hashlib.sha224
elif(algorithm == "sha256"):
algFunct = hashlib.sha256
elif(algorithm == "sha384"):
algFunct = hashlib.sha384
elif(algorithm == "sha512" or algor
|
ithm == "sha2"):
algFunct = hashlib.sha512
else:
raise ValueError('"' + algorithm + '" it is not a supported algorithm function')
hashM = algFunct()
        with open(filepath, 'rb') as f:
            # read the file in chunks so large files do not need to fit in memory
            while True:
                data = f.read(block_size)
                if not data:
                    break
                hashM.update(data)
return hashM.hexdigest()
    @staticmethod
def generateDirTreePath(fileDigest, deep):
"""Generate a relative path from the given fileDigest
relative path has a
|